from __future__ import print_function, division, absolute_import
import io
import os
from toolz import merge, partial
from warnings import warn
from .compression import seekable_files, files as compress_files
from .utils import SeekableFile, read_block
from ..compatibility import PY2, unicode
from ..base import tokenize, normalize_token
from ..delayed import delayed
from ..utils import (build_name_function, infer_compression, import_required,
ensure_bytes, ensure_unicode, infer_storage_options)
# delayed = delayed(pure=True)
# Global registration dictionaries for backend storage functions
# See docstrings to functions below for more information
_read_bytes = dict()
_open_files_write = dict()
_open_files = dict()
_open_text_files = dict()
def write_block_to_file(data, lazy_file):
"""
Parameters
----------
data : data to write
Either str/bytes, or iterable producing those, or something file-like
which can be read.
lazy_file : file-like or file context
gives writable backend-dependent file-like object when used with `with`
"""
binary = 'b' in str(getattr(lazy_file, 'mode', 'b'))
with lazy_file as f:
if isinstance(f, io.TextIOWrapper):
binary = False
if binary:
ensure = ensure_bytes
else:
ensure = ensure_unicode
if isinstance(data, (str, bytes, unicode)):
f.write(ensure(data))
elif isinstance(data, io.IOBase):
# file-like
            while True:
                out = data.read(64 * 2 ** 10)
                if not out:
                    break
                f.write(ensure(out))
else:
# iterable, e.g., bag contents
start = False
for d in data:
if start:
if binary:
try:
f.write(b'\n')
except TypeError:
binary = False
f.write('\n')
else:
f.write(u'\n')
else:
start = True
f.write(ensure(d))
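# Example (a sketch, using the OpenFile machinery defined below): ``data``
# may be str/bytes, a readable file-like object, or an iterable whose items
# are joined by newlines.
#
# >>> ofc = OpenFileCreator('out-*.txt')                    # doctest: +SKIP
# >>> write_block_to_file(b'a\nb', ofc('out-0.txt', 'wb'))  # doctest: +SKIP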
def write_bytes(data, urlpath, name_function=None, compression=None,
encoding=None, **kwargs):
"""Write dask data to a set of files
Parameters
----------
data: list of delayed objects
Producing data to write
urlpath: list or template
Location(s) to write to, including backend specifier.
name_function: function or None
If urlpath is a template, use this function to create a string out
of the sequence number.
compression: str or None
Compression algorithm to apply (e.g., gzip), if any
encoding: str or None
If None, data must produce bytes, else will be encoded.
kwargs: passed to filesystem constructor
"""
mode = 'wb' if encoding is None else 'wt'
fs, names, myopen = get_fs_paths_myopen(urlpath, compression, mode,
name_function=name_function,
num=len(data), encoding=encoding,
**kwargs)
return [delayed(write_block_to_file, pure=False)(d, myopen(f, mode='wb'))
for d, f in zip(data, names)]
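# Example (a sketch; ``part0`` and ``part1`` stand in for delayed values
# producing bytes, and the ``*`` in the template is expanded to one name per
# partition):
#
# >>> writes = write_bytes([part0, part1], 'out-*.txt')  # doctest: +SKIP
# >>> dask.compute(*writes)                              # doctest: +SKIP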
def read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27,
sample=True, compression=None, **kwargs):
""" Convert path to a list of delayed values
The path may be a filename like ``'2015-01-01.csv'`` or a globstring
like ``'2015-*-*.csv'``.
The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if
those libraries are installed.
This cleanly breaks data by a delimiter if given, so that block boundaries
start directly after a delimiter and end on the delimiter.
Parameters
----------
urlpath: string
Absolute or relative filepath, URL (may include protocols like
``s3://``), or globstring pointing to data.
delimiter: bytes
An optional delimiter, like ``b'\\n'`` on which to split blocks of
bytes.
not_zero: bool
Force seek of start-of-file delimiter, discarding header.
blocksize: int (=128MB)
Chunk size
compression: string or None
String like 'gzip' or 'xz'. Must support efficient random access.
sample: bool or int
Whether or not to return a header sample. If an integer is given it is
used as sample size, otherwise the default sample size is 10kB.
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\n') # doctest: +SKIP
>>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\n') # doctest: +SKIP
Returns
-------
    A sample header and list of ``dask.Delayed`` objects, or a list of lists
    of delayed objects if ``urlpath`` is a globstring.
"""
fs, paths, myopen = get_fs_paths_myopen(urlpath, compression, 'rb',
None, **kwargs)
client = None
if len(paths) == 0:
raise IOError("%s resolved to no files" % urlpath)
blocks, lengths, machines = fs.get_block_locations(paths)
if blocks:
offsets = blocks
elif blocksize is None:
offsets = [[0]] * len(paths)
lengths = [[None]] * len(offsets)
machines = [[None]] * len(offsets)
else:
offsets = []
lengths = []
for path in paths:
try:
size = fs.logical_size(path, compression)
except KeyError:
                raise ValueError('Cannot read compressed files (%s) in byte chunks; '
                                 'use blocksize=None' % infer_compression(urlpath))
off = list(range(0, size, blocksize))
length = [blocksize] * len(off)
if not_zero:
off[0] = 1
length[0] -= 1
offsets.append(off)
lengths.append(length)
machines = [[None]] * len(offsets)
out = []
for path, offset, length, machine in zip(paths, offsets, lengths, machines):
ukey = fs.ukey(path)
keys = ['read-block-%s-%s' %
(o, tokenize(path, compression, offset, ukey, kwargs, delimiter))
for o in offset]
L = [delayed(read_block_from_file)(myopen(path, mode='rb'), o,
l, delimiter, dask_key_name=key)
for (o, key, l) in zip(offset, keys, length)]
out.append(L)
if machine is not None: # blocks are in preferred locations
if client is None:
try:
from distributed.client import default_client
client = default_client()
except (ImportError, ValueError): # no distributed client
client = False
if client:
restrictions = {key: w for key, w in zip(keys, machine)}
client._send_to_scheduler({'op': 'update-graph', 'tasks': {},
'dependencies': [], 'keys': [],
'restrictions': restrictions,
'loose_restrictions': list(restrictions),
'client': client.id})
if sample is not True:
nbytes = sample
else:
nbytes = 10000
if sample:
# myopen = OpenFileCreator(urlpath, compression)
with myopen(paths[0], 'rb') as f:
sample = read_block(f, 0, nbytes, delimiter)
return sample, out
def read_block_from_file(lazy_file, off, bs, delimiter):
with lazy_file as f:
return read_block(f, off, bs, delimiter)
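# Example (a sketch): each element of the nested lists returned by read_bytes
# is a delayed call to read_block_from_file, computing to a bytes object.
#
# >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\n')  # doctest: +SKIP
# >>> first_block = blocks[0][0].compute()                          # doctest: +SKIP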
class OpenFileCreator(object):
"""
Produces a function-like instance, which generates open file contexts
    Analyses the passed URL to determine the appropriate backend (local file,
    s3, etc.), and then acts something like the builtin `open`, producing a
    context in which further options such as compression are applied to the
    file being opened.
Parameters
----------
urlpath: str
Template URL, like the files we wish to access, with optional
backend-specific parts
compression: str or None
One of the keys of `compress_files` or None; all files opened will use
this compression. If `'infer'`, will choose based on the urlpath
text: bool
Whether files should be binary or text
encoding: str
If files are text, the encoding to use
errors: str ['strict']
How to handle encoding errors for text files
kwargs: passed to filesystem instance constructor
Examples
--------
>>> ofc = OpenFileCreator('2015-*-*.csv') # doctest: +SKIP
>>> with ofc('2015-12-10.csv', 'rb') as f: # doctest: +SKIP
... f.read(10) # doctest: +SKIP
"""
def __init__(self, urlpath, compression=None, text=False, encoding='utf8',
errors=None, **kwargs):
if compression == 'infer':
compression = infer_compression(urlpath)
if compression is not None and compression not in compress_files:
raise ValueError("Compression type %s not supported" % compression)
self.compression = compression
self.text = text
self.encoding = encoding
self.errors = errors
self.storage_options = infer_storage_options(urlpath, inherit_storage_options=kwargs)
self.protocol = self.storage_options.pop('protocol')
ensure_protocol(self.protocol)
try:
self.fs = _filesystems[self.protocol](**self.storage_options)
except KeyError:
raise NotImplementedError("Unknown protocol %s (%s)" %
(self.protocol, urlpath))
def __call__(self, path, mode='rb'):
"""Produces `OpenFile` instance"""
return OpenFile(self.fs.open, path, self.compression, mode,
self.text, self.encoding, self.errors)
@partial(normalize_token.register, OpenFileCreator)
def normalize_OpenFileCreator(ofc):
return ofc.compression, ofc.text, ofc.encoding, ofc.protocol, ofc.storage_options
class OpenFile(object):
"""
File-like object to be used in a context
These instances are safe to serialize, as the low-level file object
is not created until invoked using `with`.
Parameters
----------
myopen: function
Opens the backend file. Should accept path and mode, as the builtin open
path: str
Location to open
compression: str or None
Compression to apply
mode: str like 'rb'
Mode of the opened file
text: bool
Whether to wrap the file to be text-like
encoding: if using text
errors: if using text
"""
def __init__(self, myopen, path, compression, mode, text, encoding,
errors=None):
self.myopen = myopen
self.path = path
self.compression = compression
self.mode = mode
self.text = text
self.encoding = encoding
self.closers = None
self.fobjects = None
self.errors = errors
self.f = None
def __enter__(self):
mode = self.mode.replace('t', '').replace('b', '') + 'b'
f = f2 = self.myopen(self.path, mode=mode)
CompressFile = merge(seekable_files, compress_files)[self.compression]
if PY2:
f2 = SeekableFile(f)
f3 = CompressFile(f2, mode=mode)
if self.text:
f4 = io.TextIOWrapper(f3, encoding=self.encoding,
errors=self.errors)
else:
f4 = f3
self.closers = [f4.close, f3.close, f2.close, f.close]
self.fobjects = [f4, f3, f2, f]
self.f = f4
f4.close = self.close
return f4
def __exit__(self, *args):
self.close()
def close(self):
""" Close all encapsulated file objects
"""
        for closer in self.closers:
            closer()
del self.closers[:]
del self.fobjects[:]
self.f = None
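# Example (a sketch): an OpenFile is cheap to create and safe to serialize;
# the underlying file objects only exist inside the ``with`` block.
#
# >>> ofc = OpenFileCreator('data-*.csv.gz', compression='gzip')  # doctest: +SKIP
# >>> with ofc('data-0.csv.gz', 'rb') as f:                       # doctest: +SKIP
# ...     header = f.read(100)                                    # doctest: +SKIP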
def open_files(urlpath, compression=None, mode='rb', encoding='utf8',
errors=None, name_function=None, num=1, **kwargs):
""" Given path return dask.delayed file-like objects
Parameters
----------
urlpath: string
Absolute or relative filepath, URL (may include protocols like
``s3://``), or globstring pointing to data.
compression: string
Compression to use. See ``dask.bytes.compression.files`` for options.
mode: 'rb', 'wt', etc.
encoding: str
For text mode only
errors: None or str
Passed to TextIOWrapper in text mode
name_function: function or None
if opening a set of files for writing, those files do not yet exist,
so we need to generate their names by formatting the urlpath for
each sequence number
    num: int [1]
        if writing mode, number of files we expect to create (passed to
        name_function)
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> files = open_files('2015-*-*.csv') # doctest: +SKIP
>>> files = open_files('s3://bucket/2015-*-*.csv.gz', compression='gzip') # doctest: +SKIP
Returns
-------
List of ``dask.delayed`` objects that compute to file-like objects
"""
fs, paths, myopen = get_fs_paths_myopen(urlpath, compression, mode,
encoding=encoding, num=num,
name_function=name_function,
errors=errors, **kwargs)
return [myopen(path, mode) for path in paths]
def get_fs_paths_myopen(urlpath, compression, mode, encoding='utf8',
errors='strict', num=1, name_function=None, **kwargs):
if isinstance(urlpath, (str, unicode)):
myopen = OpenFileCreator(urlpath, compression, text='b' not in mode,
encoding=encoding, errors=errors, **kwargs)
if 'w' in mode:
paths = _expand_paths(urlpath, name_function, num)
elif "*" in urlpath:
paths = myopen.fs.glob(urlpath)
else:
paths = [urlpath]
    elif isinstance(urlpath, (list, set, tuple, dict)):
        # sets and dicts are not indexable; use any element as the template
        sample = next(iter(urlpath))
        myopen = OpenFileCreator(sample, compression, text='b' not in mode,
                                 encoding=encoding, errors=errors, **kwargs)
        paths = urlpath
else:
raise ValueError('url type not understood: %s' % urlpath)
return myopen.fs, paths, myopen
def open_text_files(urlpath, compression=None, mode='rt', encoding='utf8',
errors='strict', **kwargs):
""" Given path return dask.delayed file-like objects in text mode
Parameters
----------
urlpath: string
Absolute or relative filepath, URL (may include protocols like
``s3://``), or globstring pointing to data.
encoding: string
errors: string
compression: string
Compression to use. See ``dask.bytes.compression.files`` for options.
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> files = open_text_files('2015-*-*.csv', encoding='utf-8') # doctest: +SKIP
>>> files = open_text_files('s3://bucket/2015-*-*.csv') # doctest: +SKIP
Returns
-------
List of ``dask.delayed`` objects that compute to text file-like objects
"""
return open_files(urlpath, compression, mode.replace('b', 't'), encoding,
errors=errors, **kwargs)
def _expand_paths(path, name_function, num):
if isinstance(path, (str, unicode)):
if path.count('*') > 1:
raise ValueError("Output path spec must contain at most one '*'.")
if name_function is None:
name_function = build_name_function(num - 1)
if '*' not in path:
path = os.path.join(path, '*.part')
formatted_names = [name_function(i) for i in range(num)]
if formatted_names != sorted(formatted_names):
warn("In order to preserve order between partitions "
"name_function must preserve the order of its input")
paths = [path.replace('*', name_function(i))
for i in range(num)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == num
paths = path
else:
raise ValueError("""Path should be either"
1. A list of paths -- ['foo.json', 'bar.json', ...]
2. A directory -- 'foo/
3. A path with a * in it -- 'foo.*.json'""")
return paths
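# Example (a sketch; with the default name_function, sequence numbers are
# zero-padded to the width of the largest index):
#
# >>> _expand_paths('out-*.csv', None, 2)  # doctest: +SKIP
# ['out-0.csv', 'out-1.csv']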
def ensure_protocol(protocol):
if (protocol not in ('s3', 'hdfs') and ((protocol in _read_bytes) or
(protocol in _filesystems))):
return
if protocol == 's3':
import_required('s3fs',
"Need to install `s3fs` library for s3 support\n"
" conda install s3fs -c conda-forge\n"
" or\n"
" pip install s3fs")
elif protocol == 'hdfs':
msg = ("Need to install `distributed` and `hdfs3` "
"for HDFS support\n"
" conda install distributed hdfs3 -c conda-forge")
import_required('distributed.hdfs', msg)
import_required('hdfs3', msg)
else:
raise ValueError("Unknown protocol %s" % protocol)
_filesystems = dict()
# see .local.LocalFileSystem for reference implementation
class FileSystem(object):
def logical_size(self, path, compression):
if compression == 'infer':
compression = infer_compression(path)
if compression is None:
return self.size(path)
else:
with self.open(path, 'rb') as f:
f = SeekableFile(f)
g = seekable_files[compression](f)
g.seek(0, 2)
result = g.tell()
g.close()
return result
def get_block_locations(self, path):
return None, None, None
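# A minimal registration sketch for a new backend (hypothetical 'memory'
# protocol; see .local.LocalFileSystem for the real reference
# implementation). A subclass is expected to provide at least ``open``,
# ``glob``, ``ukey`` and ``size``:
#
# class MemoryFileSystem(FileSystem):
#     def __init__(self, **storage_options):
#         self.store = {}
#     def open(self, path, mode='rb'):
#         ...
#     def glob(self, path):
#         ...
#     def ukey(self, path):
#         ...
#     def size(self, path):
#         ...
#
# _filesystems['memory'] = MemoryFileSystem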
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import json
import os
import shutil
import tempfile
import time
import unittest
import itertools
import urllib
from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import PIL.Image
from urlparse import urlparse
from cStringIO import StringIO
import digits.test_views
from test_imageset_creator import create_classification_imageset, IMAGE_SIZE as DUMMY_IMAGE_SIZE, IMAGE_COUNT as DUMMY_IMAGE_COUNT
# May be too short on a slow system
TIMEOUT_DATASET = 15
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
@classmethod
def dataset_exists(cls, job_id):
return cls.job_exists(job_id, 'datasets')
@classmethod
def dataset_status(cls, job_id):
return cls.job_status(job_id, 'datasets')
@classmethod
def dataset_info(cls, job_id):
return cls.job_info(job_id, 'datasets')
@classmethod
def abort_dataset(cls, job_id):
return cls.abort_job(job_id, job_type='datasets')
@classmethod
def dataset_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'datasets'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_DATASET
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_dataset(cls, job_id):
return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithImageset(BaseViewsTest):
"""
Provides an imageset and some functions
"""
# Inherited classes may want to override these attributes
IMAGE_HEIGHT = 10
IMAGE_WIDTH = 10
IMAGE_CHANNELS = 3
BACKEND = 'lmdb'
COMPRESSION = 'none'
UNBALANCED_CATEGORY = False
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithImageset, cls).setUpClass()
cls.imageset_folder = tempfile.mkdtemp()
# create imageset
cls.imageset_paths = create_classification_imageset(cls.imageset_folder,
add_unbalanced_category=cls.UNBALANCED_CATEGORY)
cls.created_datasets = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
# delete imageset
shutil.rmtree(cls.imageset_folder)
super(BaseViewsTestWithImageset, cls).tearDownClass()
@classmethod
def create_dataset(cls, **kwargs):
"""
Create a dataset
Returns the job_id
Raises RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
data = {
'dataset_name': 'test_dataset',
'method': 'folder',
'folder_train': cls.imageset_folder,
'resize_channels': cls.IMAGE_CHANNELS,
'resize_width': cls.IMAGE_WIDTH,
'resize_height': cls.IMAGE_HEIGHT,
'backend': cls.BACKEND,
'compression': cls.COMPRESSION,
}
data.update(kwargs)
request_json = data.pop('json', False)
url = '/datasets/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
                raise RuntimeError('Dataset creation failed with %s' % rv.status_code)
return json.loads(rv.data)['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
s = BeautifulSoup(rv.data)
div = s.select('div.alert-danger')
if div:
raise RuntimeError(div[0])
else:
raise RuntimeError('Failed to create dataset')
job_id = cls.job_id_from_response(rv)
assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
cls.created_datasets.append(job_id)
return job_id
@classmethod
def categoryCount(cls):
return len(cls.imageset_paths.keys())
class BaseViewsTestWithDataset(BaseViewsTestWithImageset):
"""
Provides a dataset and some functions
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.dataset_id = cls.create_dataset(json=True)
assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest):
"""
Tests which don't require an imageset or a dataset
"""
def test_page_dataset_new(self):
rv = self.app.get('/datasets/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Dataset' in rv.data, 'unexpected page format'
def test_nonexistent_dataset(self):
assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class TestCreation(BaseViewsTestWithImageset):
"""
Dataset creation tests
"""
def test_nonexistent_folder(self):
try:
job_id = self.create_dataset(
folder_train = '/not-a-directory'
)
except RuntimeError:
return
raise AssertionError('Should have failed')
def test_create_json(self):
job_id = self.create_dataset(json=True)
self.abort_dataset(job_id)
def test_create_delete(self):
job_id = self.create_dataset()
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_abort_delete(self):
job_id = self.create_dataset()
assert self.abort_dataset(job_id) == 200, 'abort failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_wait_delete(self):
job_id = self.create_dataset()
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_textfiles(self):
for absolute_path in (True, False):
for local_path in (True, False):
yield self.check_textfiles, absolute_path, local_path
def check_textfiles(self, absolute_path=True, local_path=True):
"""
Create a dataset from textfiles
Arguments:
        absolute_path -- if False, give relative paths and image folders
        local_path -- if True, write the textfiles to the local filesystem
            rather than uploading them with the POST request
"""
textfile_train_images = ''
textfile_labels_file = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
textfile_labels_file += '%s\n' % label
for image in images:
image_path = image
if absolute_path:
image_path = os.path.join(self.imageset_folder, image_path)
textfile_train_images += '%s %d\n' % (image_path, label_id)
label_id += 1
data = {
'method': 'textfile',
'textfile_use_val': 'y',
}
if local_path:
train_file = os.path.join(self.imageset_folder, "local_train.txt")
labels_file = os.path.join(self.imageset_folder, "local_labels.txt")
# create files in local filesystem - these will be removed in tearDownClass() function
with open(train_file, "w") as outfile:
outfile.write(textfile_train_images)
with open(labels_file, "w") as outfile:
outfile.write(textfile_labels_file)
data['textfile_use_local_files'] = 'True'
data['textfile_local_train_images'] = train_file
# Use the same file for training and validation.
data['textfile_local_val_images'] = train_file
data['textfile_local_labels_file'] = labels_file
else:
# StringIO wrapping is needed to simulate POST file upload.
train_upload = (StringIO(textfile_train_images), "train.txt")
# Use the same list for training and validation.
val_upload = (StringIO(textfile_train_images), "val.txt")
labels_upload = (StringIO(textfile_labels_file), "labels.txt")
data['textfile_train_images'] = train_upload
data['textfile_val_images'] = val_upload
data['textfile_labels_file'] = labels_upload
if not absolute_path:
data['textfile_train_folder'] = self.imageset_folder
data['textfile_val_folder'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
class TestImageCount(BaseViewsTestWithImageset):
def test_image_count(self):
for type in ['train','val','test']:
yield self.check_image_count, type
def check_image_count(self, type):
data = {'folder_pct_val': 20,
'folder_pct_test': 10}
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert parse_info['val_count'] == 0.2 * image_count
assert parse_info['test_count'] == 0.1 * image_count
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
if type == 'val':
assert parse_info['train_count'] == 0
assert parse_info['test_count'] == 0
image_count = parse_info['val_count']
else:
assert parse_info['train_count'] == 0
assert parse_info['val_count'] == 0
image_count = parse_info['test_count']
assert self.categoryCount() == parse_info['label_count']
assert image_count == DUMMY_IMAGE_COUNT * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMaxPerClass(BaseViewsTestWithImageset):
def test_max_per_class(self):
for type in ['train','val','test']:
yield self.check_max_per_class, type
def check_max_per_class(self, type):
# create dataset, asking for at most DUMMY_IMAGE_COUNT/2 images per class
assert DUMMY_IMAGE_COUNT%2 == 0
max_per_class = DUMMY_IMAGE_COUNT/2
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_max_per_class'] = max_per_class
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_max_per_class'] = max_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_max_per_class'] = max_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert image_count == max_per_class * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMinPerClass(BaseViewsTestWithImageset):
UNBALANCED_CATEGORY = True
def test_min_per_class(self):
for type in ['train','val','test']:
yield self.check_min_per_class, type
def check_min_per_class(self, type):
# create dataset, asking for one more image per class
# than available in the "unbalanced" category
min_per_class = DUMMY_IMAGE_COUNT/2+1
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_min_per_class'] = min_per_class
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_min_per_class'] = min_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_min_per_class'] = min_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
assert self.categoryCount() == parse_info['label_count']+1
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestCreated(BaseViewsTestWithDataset):
"""
Tests on a dataset that has already been created
"""
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for d in content['datasets']:
if d['id'] == self.dataset_id:
found = True
break
assert found, 'dataset not found in list'
def test_dataset_json(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.dataset_id, 'expected different job_id'
def test_mean_dimensions(self):
img_url = '/files/%s/mean.jpg' % self.dataset_id
rv = self.app.get(img_url)
assert rv.status_code == 200, 'GET on %s returned %s' % (img_url, rv.status_code)
buff = StringIO(rv.data)
buff.seek(0)
pil_image = PIL.Image.open(buff)
assert pil_image.size == (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), 'image size is %s' % (pil_image.size,)
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_backend_selection(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['backend'] == self.BACKEND
class TestCreatedGrayscale(TestCreated):
IMAGE_CHANNELS = 1
class TestCreatedWide(TestCreated):
IMAGE_WIDTH = 20
class TestCreatedTall(TestCreated):
IMAGE_HEIGHT = 20
class TestCreatedHdf5(TestCreated):
BACKEND = 'hdf5'
def test_compression_method(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['compression'] == self.COMPRESSION
class TestCreatedHdf5Gzip(TestCreatedHdf5):
COMPRESSION = 'gzip'
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify.state import current_ctx
from cloudify import mocks as cfy_mocks
from cloudify.exceptions import NonRecoverableError
from . import compose_not_found_cloud_error
from cloudify_azure.resources.compute import managed_cluster
from cloudify_azure.utils import handle_resource_config_params
@mock.patch('azure_sdk.common.ServicePrincipalCredentials')
@mock.patch('azure_sdk.resources.compute.'
'managed_cluster.ContainerServiceClient')
class ManagedClusterTest(unittest.TestCase):
def _get_mock_context_for_run(self, operation=None):
operation = operation or {
'name': 'cloudify.interfaces.lifecycle.create'}
fake_ctx = cfy_mocks.MockCloudifyContext(operation=operation)
instance = mock.Mock()
instance.runtime_properties = {}
fake_ctx._instance = instance
node = mock.Mock()
fake_ctx._node = node
node.properties = {}
node.runtime_properties = {}
node.type_hierarchy = ['ctx.nodes.Root']
fake_ctx.get_resource = mock.MagicMock(
return_value=""
)
return fake_ctx, node, instance
def setUp(self):
self.fake_ctx, self.node, self.instance = \
self._get_mock_context_for_run()
self.dummy_azure_credentials = {
'client_id': 'dummy',
'client_secret': 'dummy',
'subscription_id': 'dummy',
'tenant_id': 'dummy'
}
current_ctx.set(self.fake_ctx)
def test_create(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
self.node.properties['name'] = cluster_name
self.node.properties['resource_group'] = resource_group
self.node.properties['store_kube_config_in_runtime'] = False
managed_cluster_config = {
'network_profile': None,
'addon_profiles': None,
'windows_profile': None,
'dns_prefix': 'dummy-dns',
'linux_profile': None,
'agent_pool_profiles': None,
'service_principal_profile': None,
'location': 'westus',
'enable_rbac': True,
'kubernetes_version': None,
'tags': None
}
cluster_payload = {}
cluster_payload = \
handle_resource_config_params(cluster_payload,
managed_cluster_config)
err = compose_not_found_cloud_error()
client().managed_clusters.get.side_effect = err
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
managed_cluster.create(ctx=self.fake_ctx,
resource_group=resource_group,
cluster_name=cluster_name,
resource_config=managed_cluster_config)
client().managed_clusters.get.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
)
client().managed_clusters.create_or_update.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
parameters=cluster_payload
)
        self.assertEqual(
self.fake_ctx.instance.runtime_properties.get("name"),
cluster_name
)
        self.assertEqual(
self.fake_ctx.instance.runtime_properties.get(
"resource_group"),
resource_group
)
def test_create_if_missing(self, client, credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
self.node.properties['name'] = cluster_name
self.node.properties['resource_group'] = resource_group
self.node.properties['store_kube_config_in_runtime'] = False
self.node.properties['use_external_resource'] = True
self.node.properties['create_if_missing'] = True
managed_cluster_config = {
'network_profile': None,
'addon_profiles': None,
'windows_profile': None,
'dns_prefix': 'dummy-dns',
'linux_profile': None,
'agent_pool_profiles': None,
'service_principal_profile': None,
'location': 'westus',
'enable_rbac': True,
'kubernetes_version': None,
'tags': None
}
cluster_payload = {}
cluster_payload = \
handle_resource_config_params(cluster_payload,
managed_cluster_config)
err = compose_not_found_cloud_error()
client().managed_clusters.get.side_effect = err
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
managed_cluster.create(ctx=self.fake_ctx,
resource_group=resource_group,
cluster_name=cluster_name,
resource_config=managed_cluster_config)
client().managed_clusters.get.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
)
client().managed_clusters.create_or_update.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
parameters=cluster_payload
)
        self.assertEqual(
self.fake_ctx.instance.runtime_properties.get("name"),
cluster_name
)
        self.assertEqual(
self.fake_ctx.instance.runtime_properties.get(
"resource_group"),
resource_group
)
def test_create_already_exists_but_not_using_external_resource(
self,
client,
credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
self.node.properties['name'] = cluster_name
self.node.properties['resource_group'] = resource_group
self.node.properties['store_kube_config_in_runtime'] = False
managed_cluster_config = {
'network_profile': None,
'addon_profiles': None,
'windows_profile': None,
'dns_prefix': 'dummy-dns',
'linux_profile': None,
'agent_pool_profiles': None,
'service_principal_profile': None,
'location': 'westus',
'enable_rbac': True,
'kubernetes_version': None,
'tags': None
}
client().managed_clusters.get.return_value = mock.Mock()
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
            with self.assertRaisesRegex(
NonRecoverableError,
'Cannot create/update'):
managed_cluster.create(ctx=self.fake_ctx,
resource_group=resource_group,
cluster_name=cluster_name,
resource_config=managed_cluster_config)
client().managed_clusters.get.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
)
client().managed_clusters.create_or_update.assert_not_called()
def test_create_already_exists_and_use_external_resource(self,
client,
credentials):
self.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
self.node.properties['name'] = cluster_name
self.node.properties['resource_group'] = resource_group
self.node.properties['use_external_resource'] = True
self.node.properties['store_kube_config_in_runtime'] = False
managed_cluster_config = {
'network_profile': None,
'addon_profiles': None,
'windows_profile': None,
'dns_prefix': 'dummy-dns',
'linux_profile': None,
'agent_pool_profiles': None,
'service_principal_profile': None,
'location': 'westus',
'enable_rbac': True,
'kubernetes_version': None,
'tags': None
}
client().managed_clusters.get.return_value = mock.Mock()
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
managed_cluster.create(ctx=self.fake_ctx,
resource_group=resource_group,
cluster_name=cluster_name,
resource_config=managed_cluster_config)
client().managed_clusters.get.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name,
)
client().managed_clusters.create_or_update.assert_not_called()
def test_delete(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.delete'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = cluster_name
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
managed_cluster.delete(ctx=fake_ctx)
client().managed_clusters.delete.assert_called_with(
resource_group_name=resource_group,
resource_name=cluster_name
)
def test_delete_do_not_exist(self, client, credentials):
fake_ctx, _, __ = self._get_mock_context_for_run(
operation={'name': 'cloudify.interfaces.lifecycle.delete'})
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
resource_group = 'sample_resource_group'
cluster_name = 'mc_name'
fake_ctx.instance.runtime_properties['resource_group'] = resource_group
fake_ctx.instance.runtime_properties['name'] = cluster_name
fake_ctx.node.properties['azure_config'] = self.dummy_azure_credentials
err = compose_not_found_cloud_error()
client().managed_clusters.get.side_effect = err
with mock.patch('cloudify_azure.utils.secure_logging_content',
mock.Mock()):
managed_cluster.delete(ctx=fake_ctx)
client().managed_clusters.delete.assert_not_called()
"""Tests for the EntityPlatform helper."""
import asyncio
import logging
import unittest
from unittest.mock import patch, Mock, MagicMock
from datetime import timedelta
import pytest
from homeassistant.exceptions import PlatformNotReady
import homeassistant.loader as loader
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import (
EntityComponent, DEFAULT_SCAN_INTERVAL)
from homeassistant.helpers import entity_platform, entity_registry
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, fire_time_changed, mock_registry,
MockEntity, MockEntityPlatform, MockConfigEntry)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
PLATFORM = 'test_platform'
class TestHelpersEntityPlatform(unittest.TestCase):
"""Test homeassistant.helpers.entity_component module."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize a test Home Assistant instance."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Clean up the test Home Assistant instance."""
self.hass.stop()
def test_polling_only_updates_entities_it_should_poll(self):
"""Test the polling of only updated entities."""
component = EntityComponent(
_LOGGER, DOMAIN, self.hass, timedelta(seconds=20))
no_poll_ent = MockEntity(should_poll=False)
no_poll_ent.async_update = Mock()
poll_ent = MockEntity(should_poll=True)
poll_ent.async_update = Mock()
component.add_entities([no_poll_ent, poll_ent])
no_poll_ent.async_update.reset_mock()
poll_ent.async_update.reset_mock()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=20))
self.hass.block_till_done()
assert not no_poll_ent.async_update.called
assert poll_ent.async_update.called
def test_polling_updates_entities_with_exception(self):
"""Test the updated entities that not break with an exception."""
component = EntityComponent(
_LOGGER, DOMAIN, self.hass, timedelta(seconds=20))
update_ok = []
update_err = []
def update_mock():
"""Mock normal update."""
update_ok.append(None)
def update_mock_err():
"""Mock error update."""
update_err.append(None)
raise AssertionError("Fake error update")
ent1 = MockEntity(should_poll=True)
ent1.update = update_mock_err
ent2 = MockEntity(should_poll=True)
ent2.update = update_mock
ent3 = MockEntity(should_poll=True)
ent3.update = update_mock
ent4 = MockEntity(should_poll=True)
ent4.update = update_mock
component.add_entities([ent1, ent2, ent3, ent4])
update_ok.clear()
update_err.clear()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=20))
self.hass.block_till_done()
assert len(update_ok) == 3
assert len(update_err) == 1
def test_update_state_adds_entities(self):
"""Test if updating poll entities cause an entity to be added works."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
ent1 = MockEntity()
ent2 = MockEntity(should_poll=True)
component.add_entities([ent2])
assert 1 == len(self.hass.states.entity_ids())
ent2.update = lambda *_: component.add_entities([ent1])
fire_time_changed(
self.hass, dt_util.utcnow() + DEFAULT_SCAN_INTERVAL
)
self.hass.block_till_done()
assert 2 == len(self.hass.states.entity_ids())
def test_update_state_adds_entities_with_update_before_add_true(self):
"""Test if call update before add to state machine."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
ent = MockEntity()
ent.update = Mock(spec_set=True)
component.add_entities([ent], True)
self.hass.block_till_done()
assert 1 == len(self.hass.states.entity_ids())
assert ent.update.called
def test_update_state_adds_entities_with_update_before_add_false(self):
"""Test if not call update before add to state machine."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
ent = MockEntity()
ent.update = Mock(spec_set=True)
component.add_entities([ent], False)
self.hass.block_till_done()
assert 1 == len(self.hass.states.entity_ids())
assert not ent.update.called
@patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
def test_set_scan_interval_via_platform(self, mock_track):
"""Test the setting of the scan interval via platform."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
platform = MockPlatform(platform_setup)
platform.SCAN_INTERVAL = timedelta(seconds=30)
loader.set_component(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
}
})
self.hass.block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
def test_adding_entities_with_generator_and_thread_callback(self):
"""Test generator in add_entities that calls thread method.
We should make sure we resolve the generator to a list before passing
it into an async context.
"""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
def create_entity(number):
"""Create entity helper."""
entity = MockEntity()
entity.entity_id = generate_entity_id(DOMAIN + '.{}',
'Number', hass=self.hass)
return entity
component.add_entities(create_entity(i) for i in range(2))
@asyncio.coroutine
def test_platform_warn_slow_setup(hass):
"""Warn we log when platform setup takes a long time."""
platform = MockPlatform()
loader.set_component(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
with patch.object(hass.loop, 'call_later', MagicMock()) \
as mock_call:
yield from component.async_setup({
DOMAIN: {
'platform': 'platform',
}
})
assert mock_call.called
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == entity_platform.SLOW_SETUP_WARNING
assert logger_method == _LOGGER.warning
assert mock_call().cancel.called
@asyncio.coroutine
def test_platform_error_slow_setup(hass, caplog):
"""Don't block startup more than SLOW_SETUP_MAX_WAIT."""
with patch.object(entity_platform, 'SLOW_SETUP_MAX_WAIT', 0):
called = []
@asyncio.coroutine
def setup_platform(*args):
called.append(1)
yield from asyncio.sleep(1, loop=hass.loop)
platform = MockPlatform(async_setup_platform=setup_platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
loader.set_component(hass, 'test_domain.test_platform', platform)
yield from component.async_setup({
DOMAIN: {
'platform': 'test_platform',
}
})
assert len(called) == 1
assert 'test_domain.test_platform' not in hass.config.components
assert 'test_platform is taking longer than 0 seconds' in caplog.text
@asyncio.coroutine
def test_updated_state_used_for_entity_id(hass):
"""Test that first update results used for entity ID generation."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
class MockEntityNameFetcher(MockEntity):
"""Mock entity that fetches a friendly name."""
@asyncio.coroutine
def async_update(self):
"""Mock update that assigns a name."""
self._values['name'] = "Living Room"
yield from component.async_add_entities([MockEntityNameFetcher()], True)
entity_ids = hass.states.async_entity_ids()
assert 1 == len(entity_ids)
assert entity_ids[0] == "test_domain.living_room"
async def test_parallel_updates_async_platform(hass):
"""Test async platform does not have parallel_updates limit by default."""
platform = MockPlatform()
loader.set_component(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component._platforms = {}
await component.async_setup({
DOMAIN: {
'platform': 'platform',
}
})
handle = list(component._platforms.values())[-1]
assert handle.parallel_updates is None
class AsyncEntity(MockEntity):
"""Mock entity that has async_update."""
async def async_update(self):
pass
entity = AsyncEntity()
await handle.async_add_entities([entity])
assert entity.parallel_updates is None
async def test_parallel_updates_async_platform_with_constant(hass):
"""Test async platform can set parallel_updates limit."""
platform = MockPlatform()
platform.PARALLEL_UPDATES = 2
loader.set_component(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component._platforms = {}
await component.async_setup({
DOMAIN: {
'platform': 'platform',
}
})
handle = list(component._platforms.values())[-1]
assert handle.parallel_updates == 2
class AsyncEntity(MockEntity):
"""Mock entity that has async_update."""
async def async_update(self):
pass
entity = AsyncEntity()
await handle.async_add_entities([entity])
assert entity.parallel_updates is not None
assert entity.parallel_updates._value == 2
async def test_parallel_updates_sync_platform(hass):
"""Test sync platform parallel_updates default set to 1."""
platform = MockPlatform()
loader.set_component(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component._platforms = {}
await component.async_setup({
DOMAIN: {
'platform': 'platform',
}
})
handle = list(component._platforms.values())[-1]
assert handle.parallel_updates is None
class SyncEntity(MockEntity):
"""Mock entity that has update."""
        def update(self):
            pass
entity = SyncEntity()
await handle.async_add_entities([entity])
assert entity.parallel_updates is not None
assert entity.parallel_updates._value == 1
async def test_parallel_updates_sync_platform_with_constant(hass):
"""Test sync platform can set parallel_updates limit."""
platform = MockPlatform()
platform.PARALLEL_UPDATES = 2
loader.set_component(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component._platforms = {}
await component.async_setup({
DOMAIN: {
'platform': 'platform',
}
})
handle = list(component._platforms.values())[-1]
assert handle.parallel_updates == 2
class SyncEntity(MockEntity):
"""Mock entity that has update."""
        def update(self):
            pass
entity = SyncEntity()
await handle.async_add_entities([entity])
assert entity.parallel_updates is not None
assert entity.parallel_updates._value == 2
@asyncio.coroutine
def test_raise_error_on_update(hass):
"""Test the add entity if they raise an error on update."""
updates = []
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity1 = MockEntity(name='test_1')
entity2 = MockEntity(name='test_2')
def _raise():
"""Raise an exception."""
raise AssertionError
entity1.update = _raise
entity2.update = lambda: updates.append(1)
yield from component.async_add_entities([entity1, entity2], True)
assert len(updates) == 1
assert 1 in updates
@asyncio.coroutine
def test_async_remove_with_platform(hass):
"""Remove an entity from a platform."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity1 = MockEntity(name='test_1')
yield from component.async_add_entities([entity1])
assert len(hass.states.async_entity_ids()) == 1
yield from entity1.async_remove()
assert len(hass.states.async_entity_ids()) == 0
@asyncio.coroutine
def test_not_adding_duplicate_entities_with_unique_id(hass):
"""Test for not adding duplicate entities."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test1', unique_id='not_very_unique')])
assert len(hass.states.async_entity_ids()) == 1
yield from component.async_add_entities([
MockEntity(name='test2', unique_id='not_very_unique')])
assert len(hass.states.async_entity_ids()) == 1
@asyncio.coroutine
def test_using_prescribed_entity_id(hass):
"""Test for using predefined entity ID."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='bla', entity_id='hello.world')])
assert 'hello.world' in hass.states.async_entity_ids()
@asyncio.coroutine
def test_using_prescribed_entity_id_with_unique_id(hass):
"""Test for ammending predefined entity ID because currently exists."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(entity_id='test_domain.world')])
yield from component.async_add_entities([
MockEntity(entity_id='test_domain.world', unique_id='bla')])
assert 'test_domain.world_2' in hass.states.async_entity_ids()
@asyncio.coroutine
def test_using_prescribed_entity_id_which_is_registered(hass):
"""Test not allowing predefined entity ID that already registered."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
registry = mock_registry(hass)
# Register test_domain.world
registry.async_get_or_create(
DOMAIN, 'test', '1234', suggested_object_id='world')
# This entity_id will be rewritten
yield from component.async_add_entities([
MockEntity(entity_id='test_domain.world')])
assert 'test_domain.world_2' in hass.states.async_entity_ids()
@asyncio.coroutine
def test_name_which_conflict_with_registered(hass):
"""Test not generating conflicting entity ID based on name."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
registry = mock_registry(hass)
# Register test_domain.world
registry.async_get_or_create(
DOMAIN, 'test', '1234', suggested_object_id='world')
yield from component.async_add_entities([
MockEntity(name='world')])
assert 'test_domain.world_2' in hass.states.async_entity_ids()
@asyncio.coroutine
def test_entity_with_name_and_entity_id_getting_registered(hass):
"""Ensure that entity ID is used for registration."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(unique_id='1234', name='bla',
entity_id='test_domain.world')])
assert 'test_domain.world' in hass.states.async_entity_ids()
@asyncio.coroutine
def test_overriding_name_from_registry(hass):
"""Test that we can override a name via the Entity Registry."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
mock_registry(hass, {
'test_domain.world': entity_registry.RegistryEntry(
entity_id='test_domain.world',
unique_id='1234',
# Using component.async_add_entities is equal to platform "domain"
platform='test_domain',
name='Overridden'
)
})
yield from component.async_add_entities([
MockEntity(unique_id='1234', name='Device Name')])
state = hass.states.get('test_domain.world')
assert state is not None
assert state.name == 'Overridden'
@asyncio.coroutine
def test_registry_respect_entity_namespace(hass):
"""Test that the registry respects entity namespace."""
mock_registry(hass)
platform = MockEntityPlatform(hass, entity_namespace='ns')
entity = MockEntity(unique_id='1234', name='Device Name')
yield from platform.async_add_entities([entity])
assert entity.entity_id == 'test_domain.ns_device_name'
@asyncio.coroutine
def test_registry_respect_entity_disabled(hass):
"""Test that the registry respects entity disabled."""
mock_registry(hass, {
'test_domain.world': entity_registry.RegistryEntry(
entity_id='test_domain.world',
unique_id='1234',
# Using component.async_add_entities is equal to platform "domain"
platform='test_platform',
disabled_by=entity_registry.DISABLED_USER
)
})
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id='1234')
yield from platform.async_add_entities([entity])
assert entity.entity_id is None
assert hass.states.async_entity_ids() == []
async def test_entity_registry_updates_name(hass):
"""Test that updates on the entity registry update platform entities."""
registry = mock_registry(hass, {
'test_domain.world': entity_registry.RegistryEntry(
entity_id='test_domain.world',
unique_id='1234',
# Using component.async_add_entities is equal to platform "domain"
platform='test_platform',
name='before update'
)
})
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id='1234')
await platform.async_add_entities([entity])
state = hass.states.get('test_domain.world')
assert state is not None
assert state.name == 'before update'
registry.async_update_entity('test_domain.world', name='after update')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('test_domain.world')
assert state.name == 'after update'
async def test_setup_entry(hass):
"""Test we can setup an entry."""
registry = mock_registry(hass)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Mock setup entry method."""
async_add_entities([
MockEntity(name='test1', unique_id='unique')
])
return True
platform = MockPlatform(
async_setup_entry=async_setup_entry
)
config_entry = MockConfigEntry(entry_id='super-mock-id')
entity_platform = MockEntityPlatform(
hass,
platform_name=config_entry.domain,
platform=platform
)
assert await entity_platform.async_setup_entry(config_entry)
await hass.async_block_till_done()
full_name = '{}.{}'.format(entity_platform.domain, config_entry.domain)
assert full_name in hass.config.components
assert len(hass.states.async_entity_ids()) == 1
assert len(registry.entities) == 1
assert registry.entities['test_domain.test1'].config_entry_id == \
'super-mock-id'
async def test_setup_entry_platform_not_ready(hass, caplog):
"""Test when an entry is not ready yet."""
async_setup_entry = Mock(side_effect=PlatformNotReady)
platform = MockPlatform(
async_setup_entry=async_setup_entry
)
config_entry = MockConfigEntry()
ent_platform = MockEntityPlatform(
hass,
platform_name=config_entry.domain,
platform=platform
)
with patch.object(entity_platform, 'async_call_later') as mock_call_later:
assert not await ent_platform.async_setup_entry(config_entry)
full_name = '{}.{}'.format(ent_platform.domain, config_entry.domain)
assert full_name not in hass.config.components
assert len(async_setup_entry.mock_calls) == 1
assert 'Platform test not ready yet' in caplog.text
assert len(mock_call_later.mock_calls) == 1
async def test_reset_cancels_retry_setup(hass):
"""Test that resetting a platform will cancel scheduled a setup retry."""
async_setup_entry = Mock(side_effect=PlatformNotReady)
platform = MockPlatform(
async_setup_entry=async_setup_entry
)
config_entry = MockConfigEntry()
ent_platform = MockEntityPlatform(
hass,
platform_name=config_entry.domain,
platform=platform
)
with patch.object(entity_platform, 'async_call_later') as mock_call_later:
assert not await ent_platform.async_setup_entry(config_entry)
assert len(mock_call_later.mock_calls) == 1
assert len(mock_call_later.return_value.mock_calls) == 0
assert ent_platform._async_cancel_retry_setup is not None
await ent_platform.async_reset()
assert len(mock_call_later.return_value.mock_calls) == 1
assert ent_platform._async_cancel_retry_setup is None
@asyncio.coroutine
def test_not_fails_with_adding_empty_entities_(hass):
"""Test for not fails on empty entities list."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([])
assert len(hass.states.async_entity_ids()) == 0
async def test_entity_registry_updates_entity_id(hass):
"""Test that updates on the entity registry update platform entities."""
registry = mock_registry(hass, {
'test_domain.world': entity_registry.RegistryEntry(
entity_id='test_domain.world',
unique_id='1234',
# Using component.async_add_entities is equal to platform "domain"
platform='test_platform',
name='Some name'
)
})
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id='1234')
await platform.async_add_entities([entity])
state = hass.states.get('test_domain.world')
assert state is not None
assert state.name == 'Some name'
registry.async_update_entity('test_domain.world',
new_entity_id='test_domain.planet')
await hass.async_block_till_done()
await hass.async_block_till_done()
assert hass.states.get('test_domain.world') is None
assert hass.states.get('test_domain.planet') is not None
async def test_entity_registry_updates_invalid_entity_id(hass):
"""Test that we can't update to an invalid entity id."""
registry = mock_registry(hass, {
'test_domain.world': entity_registry.RegistryEntry(
entity_id='test_domain.world',
unique_id='1234',
            # Using component.async_add_entities is equivalent to the platform "domain"
platform='test_platform',
name='Some name'
),
'test_domain.existing': entity_registry.RegistryEntry(
entity_id='test_domain.existing',
unique_id='5678',
platform='test_platform',
),
})
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id='1234')
await platform.async_add_entities([entity])
state = hass.states.get('test_domain.world')
assert state is not None
assert state.name == 'Some name'
with pytest.raises(ValueError):
registry.async_update_entity('test_domain.world',
new_entity_id='test_domain.existing')
with pytest.raises(ValueError):
registry.async_update_entity('test_domain.world',
new_entity_id='invalid_entity_id')
with pytest.raises(ValueError):
registry.async_update_entity('test_domain.world',
new_entity_id='diff_domain.world')
await hass.async_block_till_done()
await hass.async_block_till_done()
assert hass.states.get('test_domain.world') is not None
assert hass.states.get('invalid_entity_id') is None
assert hass.states.get('diff_domain.world') is None
async def test_device_info_called(hass):
"""Test device info is forwarded correctly."""
registry = await hass.helpers.device_registry.async_get_registry()
hub = registry.async_get_or_create(
config_entry_id='123',
connections=set(),
identifiers={('hue', 'hub-id')},
manufacturer='manufacturer', model='hub'
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Mock setup entry method."""
async_add_entities([
# Invalid device info
MockEntity(unique_id='abcd', device_info={}),
# Valid device info
MockEntity(unique_id='qwer', device_info={
'identifiers': {('hue', '1234')},
'connections': {('mac', 'abcd')},
'manufacturer': 'test-manuf',
'model': 'test-model',
'name': 'test-name',
'sw_version': 'test-sw',
'via_hub': ('hue', 'hub-id'),
}),
])
return True
platform = MockPlatform(
async_setup_entry=async_setup_entry
)
config_entry = MockConfigEntry(entry_id='super-mock-id')
entity_platform = MockEntityPlatform(
hass,
platform_name=config_entry.domain,
platform=platform
)
assert await entity_platform.async_setup_entry(config_entry)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 2
device = registry.async_get_device({('hue', '1234')}, set())
assert device is not None
assert device.identifiers == {('hue', '1234')}
assert device.connections == {('mac', 'abcd')}
assert device.manufacturer == 'test-manuf'
assert device.model == 'test-model'
assert device.name == 'test-name'
assert device.sw_version == 'test-sw'
assert device.hub_device_id == hub.id
async def test_device_info_not_overrides(hass):
"""Test device info is forwarded correctly."""
registry = await hass.helpers.device_registry.async_get_registry()
device = registry.async_get_or_create(
config_entry_id='bla',
connections={('mac', 'abcd')},
manufacturer='test-manufacturer',
model='test-model'
)
assert device.manufacturer == 'test-manufacturer'
assert device.model == 'test-model'
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Mock setup entry method."""
async_add_entities([
MockEntity(unique_id='qwer', device_info={
'connections': {('mac', 'abcd')},
}),
])
return True
platform = MockPlatform(
async_setup_entry=async_setup_entry
)
config_entry = MockConfigEntry(entry_id='super-mock-id')
entity_platform = MockEntityPlatform(
hass,
platform_name=config_entry.domain,
platform=platform
)
assert await entity_platform.async_setup_entry(config_entry)
await hass.async_block_till_done()
device2 = registry.async_get_device(set(), {('mac', 'abcd')})
assert device2 is not None
assert device.id == device2.id
assert device2.manufacturer == 'test-manufacturer'
assert device2.model == 'test-model'
import copy
import re
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import connection
from django.db.models import Q
from django.db.models.sql.query import AND, OR
from django.utils.tree import Node
ORDER_PATTERN = re.compile(r'^[-+]?[a-zA-Z0-9_]+$')
FIELD_PATTERN = re.compile(r'^[a-zA-Z0-9_\.]+$')
RAW_FILTER_PATTERN = re.compile(
r'^(?P<field>[a-zA-Z0-9_\.]+)\s*(?P<op>=|>|<|>=|<=|!=|IN|LIKE|ILIKE)\s*$',
re.I)
class LazyRawSQLManager(object):
"""A deferred manager to work around metaclass lameness."""
def __init__(self, sql_model_class):
self.__sql_model_class = sql_model_class
self.__manager = None
def __getattr__(self, name):
if not self.__manager:
self.__manager = RawSQLManager(self.__sql_model_class())
return getattr(self.__manager, name)
class RawSQLManager(object):
"""Raw SQL Manager for a Raw SQL Model.
    This provides a very minimal subset of the Django QuerySet API.
"""
def __init__(self, sql_model, base_query=None):
self.sql_model = sql_model
if not base_query:
base_query = copy.deepcopy(sql_model.base_query())
self.base_query = base_query
if 'where' not in self.base_query:
self.base_query['where'] = []
if 'having' not in self.base_query:
self.base_query['having'] = []
if 'order_by' not in self.base_query:
self.base_query['order_by'] = []
if 'limit' not in self.base_query:
self.base_query['limit'] = []
if '_args' not in self.base_query:
self.base_query['_args'] = {}
self._cursor = None
self._record_set = []
def __iter__(self):
self._build_cursor()
for row in self._iter_cursor_results():
yield row
def __getitem__(self, key):
if isinstance(key, slice):
if key.start and key.stop:
# Translate slice into LIMIT, e.g.
# [0:2] ->
# LIMIT 0, 2
# [10:15] ->
# LIMIT 10, 5
offset = self._check_limit(key.start)
end = self._check_limit(key.stop)
row_count = max(0, end - offset)
self.base_query['limit'] = [offset, row_count]
elif key.start:
self.base_query['limit'] = [self._check_limit(key.start)]
elif key.stop:
self.base_query['limit'] = [0, self._check_limit(key.stop)]
self._build_cursor()
self._build_record_set()
return self._record_set
elif isinstance(key, int):
if not len(self._record_set):
# Get all rows! Better to use a limit with slices.
self._build_cursor()
self._build_record_set()
return self._record_set[key]
else:
raise TypeError('Key must be a slice or integer.')
def __len__(self):
return self.count()
def all(self):
return self._clone()
def count(self):
"""Count of all results, preserving aggregate grouping."""
self._execute('SELECT count(*) from (%s) as q' % self.as_sql())
return self._cursor.fetchone()[0]
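    # Illustrative note: count() wraps the compiled query in a subquery, so for
    # an aggregate base_query it effectively runs something like
    #   SELECT count(*) FROM (SELECT c.name AS `category`, count(*) AS `total`
    #                         ... GROUP BY category) AS q
    # and therefore counts groups rather than the underlying joined rows.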
def get(self, **kw):
clone = self._clone()
if kw:
clone = clone.filter(**kw)
cnt = clone.count()
if cnt > 1:
raise clone.sql_model.MultipleObjectsReturned(
'get() returned more than one row -- it returned %s!' % cnt)
elif cnt == 0:
raise clone.sql_model.DoesNotExist(
'%s matching query does not exist.' %
self.sql_model.__class__.__name__)
else:
return clone[0:1][0]
def exclude(self, *args, **kw):
raise NotImplementedError()
def filter(self, *args, **kw):
"""Adds a where clause with keyword args.
Example::
qs = qs.filter(category='trees')
qs = qs.filter(Q(type=1) | Q(name='foo'))
"""
clone = self._clone()
for arg in args:
if isinstance(arg, Q):
clone.base_query['where'].append(
'(%s)' % (clone._flatten_q(arg, clone._kw_clause_from_q)))
else:
raise TypeError(
'non keyword args should be Q objects, got %r' % arg)
for field, val in kw.items():
clone.base_query['where'].append(clone._kw_filter_to_clause(field,
val))
return clone
def filter_raw(self, *args):
"""Adds a where clause in limited SQL.
Examples::
qs = qs.filter_raw('total >', 1)
qs = qs.filter_raw('total >=', 1)
qs = qs.filter_raw(Q('name LIKE', '%foo%') |
Q('status IN', [1, 2, 3]))
        The field on the left side can be a key in the select dictionary.
That is, it will be replaced with the actual expression when the
query is built.
"""
clone = self._clone()
specs = []
for arg in args:
if isinstance(arg, Q):
clone.base_query['where'].append(
'(%s)' % (clone._flatten_q(arg, clone._filter_to_clause)))
else:
specs.append(arg)
if len(specs):
clone.base_query['where'].append(clone._filter_to_clause(*specs))
return clone
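    # Illustrative example (assuming a base_query whose 'select' maps
    # 'total' to 'count(*)'): filter_raw('total >', 1) resolves the alias and
    # appends the clause "count(*) > %(param_0)s", with {'param_0': 1} stored
    # in base_query['_args'] for parameterized execution.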
def having(self, spec, val):
"""Adds a having clause in limited SQL.
Examples::
qs = qs.having('total >', 1)
qs = qs.having('total >=', 1)
        The field on the left side can be a key in the select dictionary.
That is, it will be replaced with the actual expression when the
query is built.
"""
clone = self._clone()
clone.base_query['having'].append(clone._filter_to_clause(spec, val))
return clone
def latest(self, column):
"""Return the latest item, based on the given column."""
clone = self._clone()
clone.order_by('-%s' % column)
if clone.count() == 0:
raise clone.sql_model.DoesNotExist(
'%s matching query does not exist.' %
self.sql_model.__class__.__name__)
return clone[0]
def order_by(self, spec):
"""Order by column (ascending) or -column (descending)."""
if not ORDER_PATTERN.match(spec):
raise ValueError('Invalid order by value: %r' % spec)
if spec.startswith('-'):
dir = 'DESC'
field = spec[1:]
else:
dir = 'ASC'
field = spec
clone = self._clone()
clone.base_query['order_by'].append('%s %s' %
(clone._resolve_alias(field), dir))
return clone
def as_sql(self):
stmt = self._compile(self.base_query)
return stmt
def _clone(self):
return self.__class__(self.sql_model,
base_query=copy.deepcopy(self.base_query))
def _flatten_q(self, q_object, join_specs, stack=None):
"""Makes a WHERE clause out of a Q object (supports nested Q objects).
Pass in join_specs(*specs) based on what kind of arguments you think
the Q object will have. filter() Qs are different from
filter_raw() Qs.
"""
specs = []
if stack is None:
stack = [None]
# TODO(Kumar): construct NOT clause:
if q_object.negated:
raise NotImplementedError('negated Q objects')
connector = q_object.connector
def add(specs):
c = join_specs(*specs, connector=connector)
if stack[-1] in (AND, OR):
c = u'(%s)' % (c)
elif stack[-1] is not None:
stack.append(connector)
if c:
stack.append(c)
for child in q_object.children:
if isinstance(child, Node):
add(specs)
specs[:] = []
self._flatten_q(child, join_specs, stack=stack)
else:
specs.append(child)
if len(specs):
add(specs)
return u' '.join([c for c in stack if c])
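    # Illustrative example: flattening Q(type=1) | Q(name='foo') with
    # join_specs=_kw_clause_from_q yields a clause like
    #   type = %(param_0)s OR name = %(param_1)s
    # which filter() then wraps in parentheses before adding it to 'where'.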
def _kw_clause_from_q(self, *pairs, **kw):
"""Makes a WHERE clause out of pairs of (key, val) from Q objects."""
connector = kw.get('connector', AND)
stmt = []
for field, val in pairs:
stmt.append(self._kw_filter_to_clause(field, val))
return (u' %s ' % connector).join(stmt)
def _kw_filter_to_clause(self, field, val):
"""Makes a WHERE clause out of field = val."""
if not FIELD_PATTERN.match(field):
raise ValueError('Not a valid field for where clause: %r' % field)
field = self._resolve_alias(field)
if val is None:
return u'%s IS NULL' % (field, )
else:
param_k = self._param(val)
return u'%s = %%(%s)s' % (field, param_k)
def _filter_to_clause(self, *specs, **kw):
"""Makes a WHERE clause out of filter_raw() arguments."""
connector = kw.get('connector', AND)
specs = list(specs)
if (len(specs) % 2) != 0:
raise TypeError(
"Expected pairs of 'spec =', 'val'. Got: %r" % specs)
full_clause = []
while len(specs):
spec, val = specs.pop(0), specs.pop(0)
clause = RAW_FILTER_PATTERN.match(spec)
if not clause:
raise ValueError(
'This is not a valid clause: %r; must match: %s' % (
spec, RAW_FILTER_PATTERN.pattern))
field = clause.group('field')
field = self._resolve_alias(field)
if clause.group('op').lower() == 'in':
# eg. WHERE foo IN (%(param_0)s, %(param_1)s, %(param_2)s)
# WHERE foo IN (1, 2, 3)
parts = ['%(' + self._param(p) + ')s' for p in iter(val)]
param = '(%s)' % ', '.join(parts)
else:
param = '%%(%s)s' % self._param(val)
full_clause.append('%s %s %s' % (field, clause.group('op'), param))
c = (u' %s ' % connector).join(full_clause)
if len(full_clause) > 1:
# Protect OR clauses
c = u'(%s)' % c
return c
def _resolve_alias(self, field):
"""Access a field (or expression) by alias, similar to how a view works.
"""
if field in self.base_query['select']:
field = self.base_query['select'][field]
return field
def _compile(self, parts):
sep = u",\n"
and_ = u' %s\n' % AND
select = [u'%s AS `%s`' % (v, k) for k, v in parts['select'].items()]
stmt = u"SELECT\n%s\nFROM\n%s" % (sep.join(select),
u"\n".join(parts['from']))
if parts.get('where'):
stmt = u"%s\nWHERE\n%s" % (stmt, and_.join(parts['where']))
if parts.get('group_by'):
stmt = u"%s\nGROUP BY\n%s" % (stmt, parts['group_by'])
if parts.get('having'):
stmt = u"%s\nHAVING\n%s" % (stmt, and_.join(parts['having']))
if parts.get('order_by'):
stmt = u"%s\nORDER BY\n%s" % (stmt, sep.join(parts['order_by']))
if len(parts['limit']):
stmt = u"%s\nLIMIT %s" % (stmt, ', '.join([str(i) for i in
parts['limit']]))
return stmt
def _execute(self, sql):
self._record_set = []
self._cursor = connection.cursor()
self._cursor.execute(sql, self.base_query['_args'])
def _param(self, val):
param_k = 'param_%s' % len(self.base_query['_args'].keys())
self.base_query['_args'][param_k] = val
return param_k
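    # Example: the first call, _param(42), stores
    # base_query['_args']['param_0'] = 42 and returns 'param_0', which callers
    # embed as the named placeholder %(param_0)s in the generated SQL.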
def _build_cursor(self):
self._execute(self.as_sql())
def _build_record_set(self):
self._record_set = []
for row in self._iter_cursor_results():
self._record_set.append(row)
def _iter_cursor_results(self):
col_names = [c[0] for c in self._cursor.description]
while True:
row = self._cursor.fetchone()
if row is None:
break
yield self._make_row(row, col_names)
def _make_row(self, row, col_names):
values = dict(zip(col_names, row))
return self.sql_model.__class__(**values)
def _check_limit(self, i):
i = int(i)
if i < 0:
raise IndexError("Negative indexing is not supported")
return i
class RawSQLModelMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(RawSQLModelMeta, cls).__new__
cls = super_new(cls, name, bases, attrs)
cls.objects = LazyRawSQLManager(cls)
return cls
class RawSQLModel(object):
"""Very minimal model-like object based on a SQL query.
It supports barely enough for django-tables and the Django paginator.
Why not use database views and Meta.managed=False? Good question!
This is for rare cases when you need the speed and optimization of
building a query with many different types of where clauses.
"""
__metaclass__ = RawSQLModelMeta
# django-tables2 looks for this to decide what Columns to add.
class _meta(object):
fields = []
class DoesNotExist(ObjectDoesNotExist):
pass
MultipleObjectsReturned = MultipleObjectsReturned
def __init__(self, **kwargs):
for key, val in kwargs.items():
field = getattr(self.__class__, key, None)
if field is None:
raise TypeError(
'Field %r returned from raw SQL query does not have '
'a column defined in the model' % key)
setattr(self, field.get_attname() or key, field.to_python(val))
def base_query(self):
"""Returns a dict with parts of the raw SQL query.
Example::
def base_query(self):
return {
'select': {
'category': 'c.name',
'total': 'count(*)',
'latest_product_date': 'max(p.created)'
},
'from': [
'product p',
'join product_cat x on x.product_id=p.id',
'join category c on x.category_id=c.id'],
'where': [],
'group_by': 'category',
'having': []
}
"""
return {}
def _explode_concat(self, value, sep=',', cast=int):
"""Returns list of IDs in a MySQL GROUP_CONCAT(field) result."""
if value is None:
# for NULL fields, ala left joins
return []
# Cope with a value like ...1261530,1261530, which occurs because of:
# 1 line(s) were cut by GROUP_CONCAT()
return [cast(i) for i in value.split(sep) if i]
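    # Example: _explode_concat('1,2,,3') == [1, 2, 3], while
    # _explode_concat(None) == [] (e.g. for NULL fields from a LEFT JOIN).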
import os
import click
import logbook
import multiprocessing
from pathlib import Path
import random
import shutil
import string
import subprocess
import sys
from tempfile import mkdtemp
import gunicorn.app.base
from werkzeug.middleware.proxy_fix import ProxyFix
import yaml
from ..app import build_app
from ..bootstrapping import ensure_project_bootstrapped
from ..exceptions import TestsFailed, DockerComposeValidationFailed
from ..utils.config import get_etc_config_path
from ..utils.docker import docker_cmd, docker_compose_cmd
from ..utils.develop import is_develop, cob_root
from ..utils.network import wait_for_app_services, wait_for_tcp
from ..utils.templates import load_template
from ..project import get_project
import pkg_resources
_COB_VERSION = pkg_resources.get_distribution('cob').version # pylint: disable=no-member
_logger = logbook.Logger(__name__)
_CUSTOM_DOCKERFILE = "custom.docker"
def _get_user_steps():
if not os.path.isfile(_CUSTOM_DOCKERFILE):
return ''
with open(_CUSTOM_DOCKERFILE) as f:
return f.read()
@click.group()
def docker():
pass
@docker.command(name="generate-docker-file")
def generate_dockerfile():
proj = get_project()
template = load_template('Dockerfile')
if is_develop():
sdist_file_name = os.path.join(proj.root, '.cob-sdist.tar.gz')
_build_cob_sdist(filename=sdist_file_name)
else:
sdist_file_name = None
_specific_vers = proj.config.get('specific_virtualenv_pkgs', 'pip setuptools')
with open(".Dockerfile", "w") as f:
f.write(template.render(
project=proj,
deployment_base_image='python:3.6-jessie',
python_version='3.6',
specific_vers=_specific_vers,
is_develop=is_develop(),
cob_sdist_filename=os.path.basename(sdist_file_name) if sdist_file_name else None,
cob_root=cob_root() if is_develop() else None,
user_steps=_get_user_steps()))
def _build_cob_sdist(filename):
sdist_filename = os.environ.get('COB_SDIST_FILENAME')
if sdist_filename is not None:
shutil.copy(sdist_filename, filename)
else:
tmpdir = mkdtemp()
try:
subprocess.check_call(
f'python setup.py sdist -d {tmpdir}', cwd=cob_root(), shell=True)
[distfile] = os.listdir(tmpdir)
sdist_filename = os.path.join(tmpdir, distfile)
shutil.move(sdist_filename, str(filename))
finally:
shutil.rmtree(tmpdir)
@docker.command(name='build')
@click.option('--sudo/--no-sudo', is_flag=True, default=None, help="Run docker build with sudo")
@click.option('--extra-build-args', '-e', default="", help="Arguments to pass to docker build")
@click.option('--release', is_flag=True, default=False)
def docker_build(sudo, extra_build_args="", use_exec=True, image_name=None, release=False, use_cache=True):
project = get_project()
if image_name is None:
        image_name = f'{project.get_docker_image_name()}:{"latest" if release else "dev"}'
generate_dockerfile.callback()
cmd = docker_cmd.build(['-t', image_name, '-f', '.Dockerfile', '.', *extra_build_args.split()]).force_sudo(sudo)
if not use_cache:
cmd = cmd.args(['--no-cache'])
_logger.debug('Running Command: {}', cmd)
cmd.run(use_exec=use_exec)
@docker.command(name='wsgi-start')
def start_wsgi():
logbook.StderrHandler(level=logbook.DEBUG).push_application()
_ensure_secret_config()
ensure_project_bootstrapped()
project = get_project()
app = build_app(config_overrides={'PROPAGATE_EXCEPTIONS': True})
app.wsgi_app = ProxyFix(app.wsgi_app)
wait_for_app_services(app)
if project.subsystems.has_database():
with app.app_context():
project.setup_db()
workers_count = (multiprocessing.cpu_count() * 2) + 1
class StandaloneApplication(gunicorn.app.base.BaseApplication): # pylint: disable=abstract-method
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = {key: value for key, value in self.options.items()
if key in self.cfg.settings and value is not None}
for key, value in config.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
options = {
'bind': '0.0.0.0:8000',
'workers': workers_count,
}
options.update(project.config.get('gunicorn', {}))
StandaloneApplication(app, options).run()
def _ensure_secret_config():
conf_dir = os.environ.get('COB_CONFIG_DIR')
if not conf_dir:
return
secret_file = os.path.join(conf_dir, '000-cob-private.yml')
if os.path.isfile(secret_file):
return
with open(secret_file, 'w') as f:
f.write('flask_config:\n')
for secret_name in ('SECRET_KEY', 'SECURITY_PASSWORD_SALT'):
f.write(f' {secret_name}: {_generate_secret_string()!r}\n')
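# For reference, the generated 000-cob-private.yml looks like the following
# (secret values are random):
#
#   flask_config:
#     SECRET_KEY: '<50 random letters>'
#     SECURITY_PASSWORD_SALT: '<50 random letters>'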
def _generate_secret_string(length=50):
    # Use a cryptographically secure RNG for secret values
    return "".join(random.SystemRandom().choice(string.ascii_letters) for _ in range(length))
@docker.command(name='nginx-start')
@click.option('--print-config', is_flag=True, default=False)
def start_nginx(print_config):
project = get_project()
template = load_template('nginx_config')
config = template.render({'use_ssl': False, 'hostname': None, 'project': project, 'os': os})
if print_config:
print(config)
return
wait_for_tcp('wsgi', 8000)
with open('/etc/nginx/conf.d/webapp.conf', 'w') as f:
f.write(config)
nginx_path = '/usr/sbin/nginx'
os.execv(nginx_path, [nginx_path, '-g', 'daemon off; error_log /dev/stdout info;'])
@docker.command()
@click.option('--http-port', default=None)
@click.option('--build', is_flag=True, default=False)
@click.option('compose_overrides', '-o', '--overlay-compose-file', default=[], multiple=True)
@click.option('-d', '--detach', is_flag=True, default=False)
@click.option('--image-name', default=None, help='Image to use for the main docker containers')
def run(http_port, build, detach, image_name, compose_overrides):
project = get_project()
if build:
docker_build.callback(sudo=False, use_exec=False)
if image_name is None:
image_name = f'{project.get_docker_image_name()}:dev'
run_image.callback(image_name=image_name, detach=detach, http_port=http_port, compose_overrides=compose_overrides)
@docker.command()
def stop():
_exec_docker_compose(['down'])
@docker.command()
def logs():
_exec_docker_compose(['logs'])
def _exec_docker_compose(cmd, **kwargs):
project = get_project()
compose_filename = f'/tmp/__{project.name}-docker-compose.yml'
with open(compose_filename, 'w') as f:
f.write(_generate_compose_file_string(**kwargs))
cmd = docker_compose_cmd.args(['-f', compose_filename, '-p', project.name, *cmd])
cmd.execv()
@docker.command(name='generate-docker-compose-file')
@click.option('--image-name', default=None)
@click.option('--http-port', type=int, default=None)
@click.option('--force-config-override', is_flag=True, default=False)
@click.option('--log-driver', default='syslog')
def generate_docker_compose_file(image_name, force_config_override, http_port, log_driver):
"""Prints out a docker-compose.yml for this project"""
print(_generate_compose_file_string(image_name=image_name, force_config_override=force_config_override, http_port=http_port, log_driver=log_driver))
def _generate_compose_file_dict(*, http_port=None, image_name=None, force_config_override=False, log_driver='syslog'):
project = get_project()
if image_name is None:
image_name = f'{project.get_docker_image_name()}:dev'
config = {
'version': '3',
'volumes': {
'conf': None
},
}
common_environment = {
'COB_DATABASE_URI': 'postgresql://{0}@db/{0}'.format(project.name),
'COB_CELERY_BROKER_URL': 'amqp://guest:guest@rabbitmq',
'COB_CONFIG_DIR': '/conf',
}
services = config['services'] = {
'wsgi': {
'image': image_name,
'command': 'cob docker wsgi-start',
'environment': common_environment,
'depends_on': [],
'volumes': [
'conf:/conf',
],
},
'nginx': {
'image': image_name,
'command': 'cob docker nginx-start',
'ports': [f'{http_port or 8000}:80'],
'depends_on': ['wsgi'],
}
}
if project.subsystems.has_database():
services['wsgi']['depends_on'].append('db')
services['db'] = {
'image': 'postgres:9.6',
'volumes': [
'db:/var/lib/postgresql/data'
],
'environment': {
'POSTGRES_USER': project.name,
'POSTGRES_DB': project.name,
'POSTGRES_HOST_AUTH_METHOD': 'trust',
}
}
config['volumes']['db'] = None
if project.subsystems.has_tasks():
services['rabbitmq'] = {
'image': 'rabbitmq',
}
services['worker'] = {
'image': image_name,
'command': 'cob celery start-worker',
'environment': common_environment,
}
if project.services.redis:
services['redis'] = {
'image': 'redis',
}
config_override_path = get_etc_config_path(project.name).resolve()
if force_config_override or config_override_path.is_dir():
for service_config in services.values():
service_config.setdefault('volumes', []).append('{0}:{0}'.format(config_override_path))
for service_config in services.values():
service_config.setdefault('volumes', []).append(
'/etc/localtime:/etc/localtime:ro',
)
service_config.setdefault('logging', {'driver': log_driver})
for service_name, service_ports in project.config.get('docker', {}).get('exposed_ports', {}).items():
services[service_name].setdefault('ports', []).extend(service_ports)
return config
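# Illustrative sketch of the dict returned above for a minimal project with no
# database/tasks/redis subsystems (names are placeholders):
#
#   {'version': '3',
#    'volumes': {'conf': None},
#    'services': {
#        'wsgi': {'image': '<image>:dev', 'command': 'cob docker wsgi-start',
#                 'environment': {...}, 'depends_on': [],
#                 'volumes': ['conf:/conf', '/etc/localtime:/etc/localtime:ro'],
#                 'logging': {'driver': 'syslog'}},
#        'nginx': {'image': '<image>:dev', 'command': 'cob docker nginx-start',
#                  'ports': ['8000:80'], 'depends_on': ['wsgi'], ...}}}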
def _generate_compose_file_string(**kwargs):
config = _generate_compose_file_dict(**kwargs)
return _dump_yaml(config)
def _dump_yaml(config, *, stream=None):
return yaml.safe_dump(config, stream, allow_unicode=True, default_flow_style=False)
@docker.command()
@click.option('build_image', '--no-build', is_flag=True, default=True)
@click.option('use_cache', '--no-cache', is_flag=True, default=True)
@click.option('--sudo/--no-sudo', is_flag=True, default=None, help="Run docker build with sudo")
@click.option('compose_overrides', '-o', '--overlay-compose-file', default=[], multiple=True,
help="addional docker compose file name to load (can be used multiple times)")
@click.option('--depend', '-d', default=[],
help="a service from overlay compose to depend on (can be used multiple times)",
multiple=True)
@click.option('--use-testing-conf/--no-use-testing-conf', is_flag=True, default=False,
help="use specific configuration for testing (not used by default)")
@click.argument('pytest_args', nargs=-1, type=click.UNPROCESSED)
def test(build_image, sudo, use_cache, pytest_args, use_testing_conf, compose_overrides, depend):
if depend and not compose_overrides:
raise click.ClickException('No dependencies can be added when not providing compose_overrides')
project = get_project()
image_name = f"{project.get_docker_image_name()}:dev"
if build_image:
docker_build.callback(sudo=sudo, use_exec=False, image_name=image_name, use_cache=use_cache)
compose_file_dict = _generate_compose_file_dict(image_name=image_name)
compose_file_dict['services'].pop('nginx')
test_config = compose_file_dict['services'].pop('wsgi')
test_config['tty'] = True
test_config['depends_on'] = sorted(set(compose_file_dict['services']) - {'test'})
test_config['depends_on'].extend(depend)
test_config['stdin_open'] = True
test_config['environment']['COB_TESTING'] = 1
compose_file_dict['services']['test'] = test_config
if use_testing_conf and os.path.isdir(project.tst_cfg_dir):
for _, service in compose_file_dict['services'].items():
            service['volumes'].remove('/etc/cob/conf.d/dhcpawn:/etc/cob/conf.d/dhcpawn')
service['volumes'].append(f'{project.tst_cfg_dir}:/etc/cob/conf.d/dhcpawn')
compose_filename = f'/tmp/__{project.name}-test-docker-compose.yml'
with open(compose_filename, 'w') as f:
_dump_yaml(compose_file_dict, stream=f)
docker_compose_name = f'{project.name}-test'
test_cmd = "cob test --migrate " + ' '.join(pytest_args)
if not build_image:
test_cmd = f'rsync -rvP --delete --exclude .cob /localdir/ /app/ && {test_cmd}'
cmd_args = ['-f', compose_filename, '-p', docker_compose_name]
# ocf_dir = overlay_compose_files directory
for ocf in compose_overrides:
if _validate_compose_file(f'{project.ocf_dir}/{ocf}'):
cmd_args.extend(['-f', f'{project.ocf_dir}/{ocf}'])
cmd = docker_compose_cmd.args([
*cmd_args, 'run',
'-w', '/app', '-v', f'{os.path.abspath(".")}:/localdir',
'test',
"dockerize", "-timeout", "60s", "-wait", "tcp://db:5432",
"bash", "-c", test_cmd
])
try:
if cmd.popen().wait() != 0:
raise TestsFailed('Tests failed')
finally:
docker_compose_cmd.args([
*cmd_args, 'stop']).popen().wait()
def _validate_compose_file(compose_file):
if not os.path.isfile(compose_file):
raise IOError(f'There is no such compose file {compose_file}')
cmd = docker_compose_cmd.args(['-f', compose_file, 'config', '-q'])
if cmd.popen().wait() != 0:
raise DockerComposeValidationFailed('Docker Compose validation failed')
return True
@docker.command(name="run-image", help='Runs a cob project in a pre-built docker image')
@click.argument('image_name')
@click.option('-d', '--detach', is_flag=True, default=False)
@click.option('compose_overrides', '-o', '--overlay-compose-file', default=[], multiple=True)
@click.option('--http-port', default=None)
def run_image(image_name, detach, compose_overrides, http_port):
project_name, compose_path = _generate_compose_file_from_image(image_name, http_port=http_port)
cmd = docker_compose_cmd.args(['-p', project_name, '-f', compose_path])
for compose_override in compose_overrides:
cmd.args(['-f', compose_override])
cmd.args(['up'])
if detach:
cmd = cmd.args(['-d'])
cmd.execv()
@docker.command(name="stop-image")
@click.argument('image_name')
def stop_image(image_name):
project_name, compose_path = _generate_compose_file_from_image(image_name)
docker_compose_cmd.args(['-p', project_name, '-f', compose_path, 'down']).execv()
def _generate_compose_file_from_image(image_name, *, http_port=None):
project_name = _get_project_name_from_image(image_name)
cmd = docker_cmd.run(['--rm', image_name, 'cob', 'docker', 'generate-docker-compose-file', '--image-name', image_name, '--http-port', '80'])
if _is_journald_system():
cmd.args(['--log-driver=journald'])
if get_etc_config_path(project_name).is_dir():
cmd.args(['--force-config-override'])
if http_port is not None:
cmd.args(['--http-port', str(http_port)])
compose_file_contents = cmd.check_output()
compose_path = Path('/tmp') / f"__cob_docker_compose_{image_name.replace(':', '_').replace('/', '_')}.yml"
with compose_path.open('wb') as f:
f.write(compose_file_contents)
return project_name, compose_path
@docker.command(name='deploy', help='Deploys a dockerized cob app image to the local systemd-based machine')
@click.option('--force', is_flag=True, default=False)
@click.option('--compose-override', 'compose_overrides', multiple=True)
@click.argument('image_name')
def deploy_image(image_name, force, compose_overrides):
click.echo(f'Obtaining project information for {image_name}...')
project_name = _get_project_name_from_image(image_name)
unit_template = load_template('systemd_unit')
filename = Path('/etc/systemd/system') / f'{project_name}-docker.service'
for override_file in compose_overrides:
docker_compose_override_file = Path(override_file)
        _logger.debug(f'Checking the existence of a docker compose override file under project root {docker_compose_override_file}')
if not docker_compose_override_file.exists():
raise click.ClickException(f'File {docker_compose_override_file} does not exist')
click.echo(f'Writing systemd unit file under {filename}...')
if filename.exists() and not force:
click.confirm(f'{filename} already exists. Overwrite?', abort=True)
tmp_filename = Path(mkdtemp()) / 'systemd-unit'
with tmp_filename.open('w') as f: # pylint: disable=no-member
f.write(unit_template.render(project_name=project_name, image_name=image_name, compose_overrides=compose_overrides, cob=f'{sys.executable} -m cob.cli.main'))
subprocess.check_call(f'sudo -p "Enter password to deploy service" mv {tmp_filename} {filename}', shell=True)
click.echo(f'Starting systemd service {filename.stem}...')
subprocess.check_call('sudo systemctl daemon-reload', shell=True)
subprocess.check_call(f'sudo systemctl enable --now {filename.name}', shell=True)
def _get_project_name_from_image(image_name):
return docker_cmd.run(['--rm', image_name, 'cob', 'info', 'project-name']).check_output(encoding='utf-8').strip()
def _is_journald_system():
return shutil.which('journalctl') is not None
@docker.command(name='tag-latest', help='Tags the latest development image as latest')
def tag_latest():
project = get_project()
docker_cmd.tag([
f'{project.get_docker_image_name()}:dev',
f'{project.get_docker_image_name()}:latest']
).execv()
@docker.command(name='push')
def push():
project = get_project()
docker_cmd.push([f'{project.get_docker_image_name()}:latest']).execv()
import socket
print(socket.gethostbyname("localhost"))
import argparse
import itertools
import logging
import pymongo
# from scoop import futures
import sys, os
import re
from numpy import linspace
# import random
# add ofspy to system path
sys.path.append(os.path.abspath('..'))
db = None # lazy-load if required
from ofspy.ofsLite import OFSL
import json
def execute(dbHost, dbPort, experiment, start, stop, design, numPlayers, numTurns, fops, capacity, links):
"""
Executes a general experiment.
    @param dbHost: the database host
    @type dbHost: L{str}
    @param dbPort: the database port
    @type dbPort: L{int}
    @param experiment: the experiment name
    @type experiment: L{str}
    @param start: the starting seed
    @type start: L{int}
    @param stop: the stopping seed
    @type stop: L{int}
    @param design: the list of designs to execute
    @type design: L{list}
    @param numPlayers: the number of players
    @type numPlayers: L{int}
    @param numTurns: the number of turns
    @type numTurns: L{int}
    @param fops: the federation operations definition
    @type fops: L{str}
    @param capacity: the satellite capacity
    @type capacity: L{int}
    @param links: the number of links per edge
    @type links: L{int}
    """
# print "design:", design
# print start, stop
executions = [(dbHost, dbPort, experiment,
[e for e in elements.split(' ') if e != ''],
numPlayers, numTurns, seed, fops, capacity, links)
for (seed, elements) in itertools.product(range(start, stop), design)]
numComplete = 0.0
    logging.info('Executing {} design(s) with seeds from {} to {} for {} total executions.'
                 .format(len(design), start, stop, len(executions)))
# for results in futures.map(queryCase, executions):
# results = futures.map(queryCase, executions)
# print(len(list(executions)))
# print([list(e) for e in executions])
# map(queryCase, executions)
for execution in executions:
argslist = list(execution)
# print(argslist)
queryCase(*argslist)
# print "results :", results
# N = len(results[0])
# This line calculates the average of each element of each tuple for all the lists in the results, in other words assuming that each tuple of each results shows one seed of the same identity
# print [[sum(x)/float(N) for x in zip(*l)] for l in [[l[j] for l in results] for j in range(N)]]
def queryCase(dbHost, dbPort, experiment, elements, numPlayers, numTurns, seed, fops, capacity, links):
"""
Queries and retrieves existing results or executes an OFS simulation.
@param dbHost: the database host
@type dbHost: L{str}
@param dbPort: the database port
@type dbPort: L{int}
@param dbName: the database collection name
@type dbName: L{str}
@param elements: the design specifications
@type elements: L{list}
@param numPlayers: the number of players
@type numPlayers: L{int}
@param initialCash: the initial cash
@type initialCash: L{int}
@param numTurns: the number of turns
@type numTurns: L{int}
@param seed: the random number seed
@type seed: L{int}
@param ops: the operations definition
@type ops: L{str}
@param fops: the federation operations definition
@type fops: L{str}
@return: L{list}
"""
# print "elementlist:", elementlist
# executeCase(elementlist, numPlayers, initialCash,
# numTurns, seed, ops, fops)
# experiment = experiment
global db
dbName = None
# dbHost = socket.gethostbyname(socket.gethostname())
dbHost = "127.0.0.1"
# dbHost = "155.246.119.30"
# print dbHost, dbPort, dbName, db
# print "fops:", fops
# print costISL, costSGL
if db is None and dbHost is None:
# print "db is None adn dbHOst is None"
        return executeCase(experiment, elements, numPlayers,
                           numTurns, seed, fops, capacity, links)
elif db is None and dbHost is not None:
# print "read from database"
db = pymongo.MongoClient(dbHost, dbPort).ofs
query = {u'experiment': experiment,
u'elementlist': ' '.join(elements),
u'fops': fops,
u'numTurns': numTurns,
u'seed': seed,
u'capacity': capacity,
u'links': links,
}
doc = None
if dbName is not None:
doc = db[dbName].find_one(query)
if doc is None:
# if '-1' in doc['fops'] or '-2' in doc['fops']:
# db.results.remove(query) #this is temporary, should be removed afterwards
doc = db.results.find_one(query)
if doc:
# print("Found in DB,elements, storage, sgl, isl, results: ")
print([doc[k] for k in ['seed', 'elementlist', 'experiment', 'fops', 'capacity', 'links','results']])
if doc is None:
if '-' not in fops:
M = 10
else:
M = 1
results = executeCase(experiment, elements, numPlayers, int(numTurns/M), seed, fops, capacity, links)
doc = {u'experiment': experiment,
u'elementlist': ' '.join(elements),
u'fops': fops,
u'numTurns': numTurns,
u'seed': seed,
u'capacity': capacity,
u'links': links,
u'results': json.dumps([(a,b*M) for a,b in results]),
}
# print("Not Found in DB", doc['results'])
# print("Not in DB,elements, storage, sgl, isl, results: ")
print([doc[k] for k in ['seed', 'elementlist', 'experiment', 'fops', 'capacity', 'links', 'results']])
db.results.insert_one(doc)
if dbName is not None:
db[dbName].insert_one(doc)
    return [tuple(result) for result in json.loads(doc[u'results'])]
def executeCase(experiment, elements, numPlayers, numTurns, seed, fops, capacity, links):
"""
Executes an OFS simulation.
    @param experiment: the experiment name
    @type experiment: L{str}
    @param elements: the design specifications
    @type elements: L{list}
    @param numPlayers: the number of players
    @type numPlayers: L{int}
    @param numTurns: the number of turns
    @type numTurns: L{int}
    @param seed: the random number seed
    @type seed: L{int}
    @param fops: the federation operations definition
    @type fops: L{str}
    @param capacity: the satellite capacity
    @type capacity: L{int}
    @param links: the number of links per edge
    @type links: L{int}
"""
# print "ofs-exp-vs elementlist: ", elementlist
#
# return OFSL(elementlist=elementlist, numPlayers=numPlayers, initialCash=initialCash, numTurns=numTurns, seed=seed, ops=ops, fops=fops).execute()
ofsl = OFSL(experiment = experiment, elements=elements, numPlayers=numPlayers, numTurns=numTurns, seed=seed, fops=fops, capacity = capacity, links = links)
return ofsl.execute()
def fopsGenStorage(costrange, storange, numplayers):
# costSGLList = list(range(0, 1001, 200))
# # costISLList = [c/2. for c in costSGLList]
# storagePenalty = list(range(0, 1001, 200))+[-1]
# yield numplayers * ["x%d,%d" % (-2, -2)]
for sgl in costrange:
for s in storange:
# yield ["x%d,%d,%d"%(sgl, sgl, -2)] + (numplayers-1)*["x%d,%d,%d"%(sgl, sgl, s)]
yield numplayers* ["x%d,%1.2f,%d"%(sgl, s, -1)]
# yield numplayers* ["x%d,%d,%d"%(-3, s, -1)]
# yield numplayers * ["x%d,%d,%d" % (sgl, -1, -1)]
def fopsGenAdaptive(costrange, numplayers):
for sgl in costrange:
if sgl == -2:
yield numplayers * ["x%d,%d,%d" % (sgl, -1, -1)]
yield numplayers * ["x%d,%d,%d" % (-2, -1, 1)]
else:
# if sgl == -3:
# print(["x%d,%d,%d"%(-2, -1, -1)] + (numplayers-1)*["x%d,%d,%d"%(sgl, -1, -1)])
for n in range(numplayers):
fops = []
fops.extend(n*["x%d,%d,%d"%(-2, -1, -1)])
fops.extend(["x%d,%d,%d" % (sgl, -1, -1)])
fops.extend((numplayers-n-1)*["x%d,%d,%d"%(-2, -1, -1)])
# yield n*[] + ["x%d,%d,%d"%(-2, -1, -1)] + (numplayers-1)*["x%d,%d,%d"%(sgl, -1, -1)]
yield fops
for n in range(numplayers):
fops = []
fops.extend(n*["x%d,%d,%d"%(sgl, -1, -1)])
fops.extend(["x%d,%d,%d"%(-2, -1, -1)])
fops.extend((numplayers-n-1)*["x%d,%d,%d"%(sgl, -1, -1)])
# yield n*[] + ["x%d,%d,%d"%(-2, -1, -1)] + (numplayers-1)*["x%d,%d,%d"%(sgl, -1, -1)]
yield fops
# yield 2 * ["x%d,%d,%d"%(-2, -1, -1)] + (numplayers - 2) * ["x%d,%d,%d"%(sgl, -1, -1)]
yield numplayers * ["x%d,%d,%d" % (sgl, -1, -1)]
yield numplayers * ["x%d,%d,%d"%(-2, -1, -1)]
# yield 2*["x%d,%d" % (-2, -1)] + (numplayers-2)*["x%d,%d"%(sgl, -1)]
# def generateFops(costrange, storange):
# fops = []
# for cost in costrange:
# costsgl = cost
# costisl = cost
#
# for sto in storange:
# stopen = sto
# for sto2 in storange:
# stopen2 = sto2
# yield ["x%d,%d,%d" % (costsgl, costisl, stopen2), "x%d,%d,%d" % (costsgl, costisl, stopen), "x%d,%d,%d" % (costsgl, costisl, stopen)]
# def fopsGenStorage(numPlayers):
# yield numPlayers * ["x%d,%1.2f,%d" % (600, 400, -1)]
# yield numPlayers * ["x%d,%1.2f,%d" % (600, 800, -1)]
# yield numPlayers * ["x%d,%1.2f,%d" % (-3, 400, -1)]
# yield numPlayers * ["x%d,%1.2f,%d" % (-3, 800, -1)]
# for k in linspace(0., 1.99, 19):
# yield numPlayers * ["x%d,%1.2f,%d" % (-3, -1*k, -1)]
# yield numPlayers * ["x%d,%1.2f,%d" % (600, -1*k, -1)]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This program runs an OFS experiment.")
# parser.add_argument('-e', help = 'experiment to run', type=str, nargs='+', default= 'Adaptive'
# help='the experiment to run: adaptive, auctioneer')
parser.add_argument('-d', '--numTurns', type=int, default=2400,
help='simulation duration (number of turns)')
parser.add_argument('-p', '--numPlayers', type=int, default=None,
help='number of players')
    parser.add_argument('-c', '--capacity', type=int, default=2,
                        help='satellite capacity')
    parser.add_argument('-l', '--links', type=int, default=2,
                        help='links per edge')
# parser.add_argument('-o', '--ops', type=str, default='d6',
# help='federate operations model specification')
parser.add_argument('-f', '--fops', type=str, default='',
help='federation operations model specification')
# parser.add_argument('-l', '--logging', type=str, default='error',
# choices=['debug', 'info', 'warning', 'error'],
# help='logging level')
parser.add_argument('-s', '--start', type=int, default=0,
help='starting random number seed')
parser.add_argument('-t', '--stop', type=int, default=30,
help='stopping random number seed')
parser.add_argument('--dbHost', type=str, default=None,
help='database host')
parser.add_argument('--dbPort', type=int, default=27017,
help='database port')
args = parser.parse_args()
# count number of players
    numPlayers = args.numPlayers if args.numPlayers is not None else 2
# with open('designs.txt', 'r') as f:
# hardcoded_designs = f.readlines()
# # for l in f:
# # print l
# # hardcoded_designs.append(l)
# hardcoded_designs = [x.strip() for x in hardcoded_designs]
hardcoded_designs = (
# "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 2.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2",
# "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@GEO1 1.Sat@MEO1 2.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2",
"1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
# "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 1.Sat@MEO3 1.Sat@MEO4 2.Sat@MEO5 2.Sat@MEO6",
"1.GroundSta@SUR1 2.GroundSta@SUR4 2.Sat@GEO4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 2.Sat@GEO3 1.Sat@MEO1 2.Sat@MEO3 3.Sat@MEO6 1.Sat@LEO2",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 3.Sat@GEO5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
#***"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 2.Sat@MEO2 3.Sat@MEO5 1.Sat@LEO2 2.Sat@LEO4 3.Sat@LEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@GEO1 1.Sat@MEO1 2.Sat@MEO4 3.Sat@MEO5 2.Sat@LEO4 3.Sat@LEO6",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 2.Sat@MEO2 3.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2 3.Sat@LEO3",
)
experiment = 'auctioneer'
# hardcoded_designs = list(hardcoded_designs)
# random.shuffle(hardcoded_designs)
for design in hardcoded_designs:
# print design
if '4.' in design:
numPlayers = 4
elif '3.' in design:
numPlayers = 3
else:
numPlayers = 2
args.stop = args.start + 10
argsdict = vars(args)
argsdict['design'] = [design]
# argsdict.pop('logging')
# argsdict.pop('dbName')
# costrange = list(range(700, 701, 100))
# costrange = [-3]
# costrange = [-3, 0, 1200, 600]
# costrange = [-2]
costrange = [-3, 600]
storange = list([400, 800, -1])
# for fops in reversed(list(fopsGenAdaptive(costrange, numPlayers))):
for fops in fopsGenStorage(costrange, storange, numPlayers):
print(fops)
# print(argsdict)
reres = re.search(r'x([-\d]+),([-\.\d]+),([-\d]+)', fops[0])
sgl = int(reres.group(1))
strg = float(reres.group(2))
auc = int(reres.group(3))
# argsdict['experiment'] = 'Adaptive Cost V2'
argsdict['experiment'] = 'Storage Penalty V2'
# if sgl == -2 :
# argsdict['experiment'] = 'Adaptive Cost'
#
# elif strg == -1 and sgl > 0:
# argsdict['experiment'] = 'Fixed Cost Storage Penalty'
#
# elif strg == -1 and sgl == -1:
# argsdict['experiment'] = 'Stochastic Cost Storage Penalty'
if auc == 1:
argsdict['experiment'] += ' Auctioneer'
argsdict['fops'] = json.dumps(fops)
for capacity,links in [(2,2)]:
# for links in [1,2]:
argsdict['capacity'] = capacity
argsdict['links'] = links
execute(**argsdict)
# execute(args.dbHost, args.dbPort, None, args.start, args.stop,
# [design],
# numPlayers, args.initialCash, args.numTurns,
# None, fops)
# for ops, fops in [('d6,a,1', 'x')]:#[('n', 'd6,a,1'), ('d6,a,1', 'n'), ('d6,a,1', 'x')]:#[('d6,a,1', 'x')]:#
# if 'x' in fops:
# stop = args.start + 1
# # costsgl = costisl = 'v'
# for costsgl in [a for a in range(500, 601, 200)]:# if a not in range(0, 1501,100)]:
# # print "cost SGL:", costsgl
# costisl = costsgl//2
# fops = "x%s,%s,6,a,1"%(str(costsgl),str(costisl))
# # print "fops:", fops
# # print "ofs-exp-vs Design:",
# execute(args.dbHost, args.dbPort, None, args.start, stop,
# [design],
# numPlayers, args.initialCash, args.numTurns,
# ops, fops, experiment)
# else:
# stop = args.start + 1
# # print "fops:", fops
# execute(args.dbHost, args.dbPort, None, args.start, stop,
# [design],
# numPlayers, args.initialCash, args.numTurns,
# ops, fops, experiment)
# else:
# execute(args.dbHost, args.dbPort, None, args.start, args.stop,
# [' '.join(args.experiment)],
# numPlayers, args.initialCash, args.numTurns,
# args.ops, args.fops, 0, 0)
import sys
import itertools
import numpy as np
from phonopy.structure.symmetry import Symmetry
class StructureAnalyzer:
def __init__(self, atoms):
self._atoms = atoms
self._filename = None
self._dictionary = {}
def update_attributes(self):
"""Update attributes of the class Poscar from atoms attribute.
        This method should be called after the update of the atoms attribute.
"""
self.generate_dictionary()
self.deform_cell = self.deform_cell_right # alias
def generate_dictionary(self):
cell = self._atoms.get_cell()
number_of_atoms = self._atoms.get_number_of_atoms()
chemical_symbols = self._atoms.get_chemical_symbols()
dictionary = convert_cell_to_lc(cell)
volume = dictionary["volume"]
dictionary.update({
"filename": self._filename,
"number_of_atoms": number_of_atoms,
"chemical_symbols": chemical_symbols,
"volume_per_atom": volume / number_of_atoms,
})
self._dictionary = dictionary
self.create_symmetry_dataset()
return self
def create_symmetry_dataset(self):
symmetry_dataset = self.get_symmetry_dataset()
self._dictionary.update({
"spg_number": symmetry_dataset["number"],
"spg_international": symmetry_dataset["international"],
})
def generate_distance_matrix(self):
cell = self._atoms.get_cell()
scaled_positions = self._atoms.get_scaled_positions()
number_of_atoms = self._atoms.get_number_of_atoms()
expansion = range(-1, 2)
distance_matrix = np.zeros((number_of_atoms, number_of_atoms))
distance_matrix *= np.nan # initialization
scaled_distances = np.zeros((number_of_atoms, number_of_atoms, 3))
scaled_distances *= np.nan # initialization
for i1, p1 in enumerate(scaled_positions):
for i2, p2 in enumerate(scaled_positions):
                distance = np.inf  # sentinel; any real distance is smaller
for addition in itertools.product(expansion, repeat=3):
scaled_distance_new = p2 - p1
scaled_distance_new -= np.rint(scaled_distance_new)
scaled_distance_new += addition
distance_new = np.linalg.norm(
np.dot(cell.T, scaled_distance_new))
if distance > distance_new:
distance = distance_new
scaled_distance = scaled_distance_new
distance_matrix[i1, i2] = distance
scaled_distances[i1, i2] = scaled_distance
self._distance_matrix = distance_matrix
self._scaled_distances = scaled_distances
return self
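    # Note: the triple loop above implements the minimum-image convention by
    # checking all 27 neighboring cells (offsets in {-1, 0, 1}^3), so
    # distance_matrix[i1, i2] is the shortest distance between atom i1 and any
    # periodic image of atom i2.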
def write_properties(self, precision=16):
width = precision + 6
width_int = 5
key_order = [
"filename",
"number_of_atoms",
"volume",
"volume_per_atom",
"a",
"b",
"c",
"b/a",
"c/a",
"a/b",
"c/b",
"a/c",
"b/c",
"alpha",
"beta",
"gamma",
"b_x_c",
"c_x_a",
"a_x_b",
"spg_number",
"spg_international",
]
print("-" * 80)
print(self._filename)
print("-" * 80)
for k in key_order:
if k not in self._dictionary:
continue
value = self._dictionary[k]
sys.stdout.write("{:s}".format(k))
sys.stdout.write(": ")
if isinstance(value, float):
sys.stdout.write(
"{:{width}.{precision}f}".format(
value,
width=width,
precision=precision,))
            elif isinstance(value, (int, np.integer)):
sys.stdout.write(
"{:{width}d}".format(
value,
width=width_int,))
else:
sys.stdout.write("{:s}".format(value))
sys.stdout.write("\n")
def write_specified_properties(self, keys, precision=16):
width = precision + 6
width_int = 5
for k in keys:
value = self._dictionary[k]
sys.stdout.write(" ")
sys.stdout.write("{:s}".format(k))
sys.stdout.write(" ")
if isinstance(value, float):
sys.stdout.write(
"{:{width}.{precision}f}".format(
value,
width=width,
precision=precision,))
            elif isinstance(value, (int, np.integer)):
sys.stdout.write(
"{:{width}d}".format(
value,
width=width_int,))
sys.stdout.write(" " * (precision + 1))
else:
sys.stdout.write("{:s}".format(value))
sys.stdout.write("\n")
def get_index_from_position(self, position, symprec=1e-6):
for i, p in enumerate(self._atoms.get_scaled_positions()):
diff = position - p
diff -= np.rint(diff)
if all([abs(x) < symprec for x in diff]):
return i
print("WARNING: {}".format(__name__))
print("Index for the specified position cannot be found.")
return None
def write_distance_matrix(self):
number_of_atoms = self._atoms.get_number_of_atoms()
for i1 in range(number_of_atoms):
for i2 in range(number_of_atoms):
distance = self._distance_matrix[i1, i2]
sys.stdout.write("{:22.16f}".format(distance))
sys.stdout.write("\n")
def write_sorted_distance_matrix(self):
"""Write distances between an atom and another one.
"""
number_of_atoms = self._atoms.get_number_of_atoms()
chemical_symbols = self._atoms.get_chemical_symbols()
positions = self._atoms.get_scaled_positions()
# sys.stdout.write("# {:4d}\n".format(number_of_atoms))
for i1, c1 in enumerate(chemical_symbols):
distances_index = np.argsort(self._distance_matrix[i1])
for i2 in distances_index:
c2 = chemical_symbols[i2]
d = self._distance_matrix[i1, i2]
# dp = positions[i1] - positions[i2]
dp = self._scaled_distances[i1, i2]
sys.stdout.write("{:6d}".format(i1))
sys.stdout.write("{:>6s}".format(c1))
sys.stdout.write("{:6d}".format(i2))
sys.stdout.write("{:>6s}".format(c2))
sys.stdout.write("{:12.6f}".format(d))
sys.stdout.write(" ")
sys.stdout.write(("{:12.6f}" * 3).format(*dp))
sys.stdout.write("\n")
sys.stdout.write("\n")
def set_atoms(self, atoms):
self._atoms = atoms
return self
def set_scaled_positions(self, scaled_positions):
self._atoms.set_scaled_positions(scaled_positions)
return self
def set_positions(self, positions):
cell = self._atoms.get_cell()
scaled_positions = np.dot(positions, np.linalg.inv(cell))
return self.set_scaled_positions(scaled_positions)
def displace_scaled_positions(self, scaled_displacements):
scaled_positions = self._atoms.get_scaled_positions()
scaled_positions += scaled_displacements
self._atoms.set_scaled_positions(scaled_positions)
return self
def displace_positions(self, displacements):
cell = self._atoms.get_cell()
scaled_displacements = np.dot(displacements, np.linalg.inv(cell))
return self.displace_scaled_positions(scaled_displacements)
def shift_to_origin(self, index):
scaled_displacements = -self._atoms.get_scaled_positions()[index]
return self.displace_scaled_positions(scaled_displacements)
def remove_atoms_indices(self, indices):
if isinstance(indices, int):
indices = [indices]
indices = sorted(set(indices), reverse=True)
scaled_positions = self._atoms.get_scaled_positions()
chemical_symbols = self._atoms.get_chemical_symbols()
for i in indices:
scaled_positions = np.delete(scaled_positions, i, 0)
del chemical_symbols[i]
self.set_scaled_positions(scaled_positions)
self.set_chemical_symbols(chemical_symbols)
self._atoms._symbols_to_numbers()
self._atoms._symbols_to_masses()
return self
def remove_atoms_outside(self, region):
"""
region: 3 x 2 arrays given by direct coordinates.
[[a-, a+],
[b-, b+],
[c-, c+]]
"""
self.wrap_into_cell()
region = np.array(region)
scaled_positions = self._atoms.get_scaled_positions()
indices_removed = []
for ix in range(3):
for i, sp in enumerate(scaled_positions):
if (sp[ix] < region[ix, 0] or region[ix, 1] < sp[ix]):
indices_removed.append(i)
return self.remove_atoms_indices(indices_removed)
def add_vacuum_layer(self, vacuum_layer):
"""
vacuum_layer: 3 x 2 arrays given by direct coordinates.
[[a-, a+],
[b-, b+],
[c-, c+]]
"""
self.wrap_into_cell()
vacuum_layer = np.array(vacuum_layer)
cell = self._atoms.get_cell()
scaled_positions = self._atoms.get_scaled_positions()
natoms = self._atoms.get_number_of_atoms()
for ix in range(3):
cell[ix] *= (1.0 + sum(vacuum_layer[ix, :]))
for i in range(natoms):
scaled_positions[i, ix] += vacuum_layer[ix, 0]
scaled_positions[i, ix] /= (1.0 + sum(vacuum_layer[ix, :]))
self.set_cell(cell)
self.set_scaled_positions(scaled_positions)
return self
def wrap_into_cell(self):
scaled_positions = self.get_atoms().get_scaled_positions()
scaled_positions -= np.floor(scaled_positions)
self.set_scaled_positions(scaled_positions)
return self
def set_cell(self, cell):
self._atoms.set_cell(cell)
return self
def set_chemical_symbols(self, symbols):
self._atoms.set_chemical_symbols(symbols)
return self
def deform_cell_left(self, matrix):
"""Deform cell as (a, b, c) = M * (a, b, c)
"""
matrix = _get_matrix(matrix)
# Generate lattice vectors for the deformed cell.
cell = self._atoms.get_cell()
cell = np.dot(matrix, cell.T).T
self._atoms.set_cell(cell)
self.update_attributes()
return self
def deform_cell_right(self, matrix):
"""Deform cell as (a, b, c) = (a, b, c) * M
"""
matrix = _get_matrix(matrix)
# Generate lattice vectors for the deformed cell.
cell = self._atoms.get_cell()
cell = np.dot(cell.T, matrix).T
self._atoms.set_cell(cell)
self.update_attributes()
return self
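    # Worked example: _get_matrix(1.02) expands the scalar to
    # diag(1.02, 1.02, 1.02), so deform_cell_right stretches each lattice
    # vector by 2% and multiplies the cell volume by 1.02**3, while the
    # fractional atomic positions stay unchanged.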
def generate_supercell(self, dim, prec=1e-9):
"""Generate supercell according to "dim".
(a_s, b_s, c_s) = (a_u, b_u, c_u) * dim
"""
dim = _get_matrix(dim)
        # Generate lattice vectors for the supercell.
cell = self._atoms.get_cell()
cell = np.dot(cell.T, dim).T
self._atoms.set_cell(cell)
self._generate_supercell_positions(dim, prec)
        nexpansion = int(np.rint(np.abs(np.linalg.det(dim))))
chemical_symbols_new = []
for chemical_symbol in self._atoms.get_chemical_symbols():
chemical_symbols_new += [chemical_symbol] * nexpansion
self._atoms.set_chemical_symbols(chemical_symbols_new)
self._atoms._symbols_to_numbers()
self._atoms._symbols_to_masses()
self.update_attributes()
return self
def _generate_supercell_positions(self, dim, prec=1e-9):
"""Generate scaled positions in the supercell."""
translation_vectors = find_lattice_vectors(dim, prec=prec)
positions = self._atoms.get_scaled_positions()
        # Convert positions into fractional coordinates of the supercell.
positions = np.dot(np.linalg.inv(dim), positions.T).T
supercell_positions = (positions[:, None] +
translation_vectors[None, :])
supercell_positions = supercell_positions.reshape(-1, 3)
self.set_scaled_positions(supercell_positions)
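    # Note: find_lattice_vectors is assumed to be defined elsewhere in this
    # module (it is not part of this excerpt). It is expected to return the
    # |det(dim)| unit-cell lattice points in supercell fractional coordinates,
    # so broadcasting positions[:, None] + translation_vectors[None, :]
    # enumerates each atom image exactly once.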
def sort_by_coordinates(self, index, sorted_by_symbols=False):
"""
index:
0: a, 1: b, 2: c
"""
symbols = self._atoms.get_chemical_symbols()
positions = self._atoms.get_scaled_positions()
        order = list(symbols)
        data = sorted(zip(symbols, positions), key=lambda x: x[1][index])
        symbols_sorted, positions_sorted = zip(*data)
        self.set_chemical_symbols(list(symbols_sorted))
        self.set_scaled_positions(np.array(positions_sorted))
if sorted_by_symbols:
self = self.sort_by_symbols(order=order)
self._atoms._symbols_to_numbers()
self._atoms._symbols_to_masses()
return self
def sort_by_symbols(self, order=None):
"""Combine the same chemical symbols.
Positions are sorted by the combined chemical symbols.
"""
symbols = self._atoms.get_chemical_symbols()
positions = self._atoms.get_scaled_positions()
if order is None:
order = list(symbols)
        data = sorted(zip(symbols, positions), key=lambda x: order.index(x[0]))
        symbols_sorted, positions_sorted = zip(*data)
        self.set_chemical_symbols(list(symbols_sorted))
        self.set_scaled_positions(np.array(positions_sorted))
self._atoms._symbols_to_numbers()
self._atoms._symbols_to_masses()
return self
def get_dictionary(self):
return self._dictionary
def get_atoms(self):
return self._atoms
def get_cell(self):
return self._atoms.get_cell()
def get_scaled_distances(self):
return self._scaled_distances.copy()
def get_distance_matrix(self):
return self._distance_matrix.copy()
def change_volume(self, volume):
cell_current = self._atoms.get_cell()
volume_current = np.linalg.det(cell_current)
scale = (volume / volume_current) ** (1.0 / 3.0)
self._atoms.set_cell(cell_current * scale)
return self
def change_volume_per_atom(self, volume_per_atom):
volume = volume_per_atom * self._atoms.get_number_of_atoms()
return self.change_volume(volume)
def get_symmetry_dataset(self):
return Symmetry(self._atoms).get_dataset()
def get_mappings_for_symops(self, prec=1e-6):
"""Get mappings for symmetry operations."""
natoms = self._atoms.get_number_of_atoms()
dataset = self.get_symmetry_dataset()
rotations = dataset["rotations"]
translations = dataset["translations"]
nopr = len(rotations)
mappings = -1 * np.ones((nopr, natoms), dtype=int)
for iopr, (r, t) in enumerate(zip(rotations, translations)):
mappings[iopr] = self.extract_mapping_for_symopr(r, t, prec)
if -1 in mappings:
print("ERROR: {}".format(__name__))
print("Some atoms are not mapped by some symmetry operations.")
            raise ValueError(
                'Some atoms are not mapped by some symmetry operations.')
return mappings
def extract_transformed_scaled_positions(self, rotation, translation):
"""Extract transformed scaled positions.
Args:
rotation (3x3 array): Rotation matrix.
translation (3 array): Translation vector.
Returns:
Transformed scaled positions by the rotation and translation.
Note that if the rotation and translation do not form a symmetry
operation of the structure, the returned positions may not
correspond to any atom.
"""
scaled_positions = self._atoms.get_scaled_positions()
transformed_scaled_positions = transform_scaled_positions(
scaled_positions, rotation, translation)
return transformed_scaled_positions
def extract_mapping_for_symopr(self, rotation, translation, prec=1e-6):
"""Extract a mapping for a pair of a symmetry operation.
Args:
rotation (3x3 array): Rotation matrix.
translation (3 array): Translation vector.
Returns:
mapping (integer array of length n):
mapping[i] is the index of the original atom corresponding to
the i-th transformed atom.
"""
chemical_symbols = self._atoms.get_chemical_symbols()
transformed_scaled_positions = (
self.extract_transformed_scaled_positions(rotation, translation))
mapping = self.extract_mapping_for_atoms(
chemical_symbols, transformed_scaled_positions, prec)
return mapping
def extract_mapping_for_atoms(self, symbols_new, positions_new, prec=1e-6):
"""
Args:
symbols_new: Chemical symbols for the transformed structures.
positions_new: Fractional positions for the transformed structures.
Return:
mapping (integer array of length n):
mapping[i] is the index of the original atom corresponding to
the i-th transformed atom.
"""
natoms = self._atoms.get_number_of_atoms()
symbols_old = np.array(self._atoms.get_chemical_symbols())
positions_old = self._atoms.get_scaled_positions()
diff = positions_new[:, None, :] - positions_old[None, :, :]
wrapped_dpos = diff - np.rint(diff)
tmp, mapping = np.where(np.all(np.abs(wrapped_dpos) < prec, axis=2))
# Guarantee one-to-one correspondence
if not np.array_equal(tmp, np.arange(natoms, dtype=int)):
raise ValueError('Mapping failed: transformed positions do not map one-to-one onto the original atoms.')
if not np.array_equal(symbols_new, symbols_old[mapping]):
raise ValueError('Symbols do not correspond.')
return mapping
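# Added note on the mapping convention used above: with positions_old the
# original fractional coordinates, mapping[i] is the index of the original atom
# whose wrapped position coincides with transformed atom i, i.e.
# positions_new[i] ~ positions_old[mapping[i]] modulo lattice translations.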
def _get_matrix(matrix):
matrix = np.array(matrix)
if matrix.size == 1 or matrix.size == 3:
matrix = matrix * np.eye(3)
elif matrix.size == 9:
matrix = matrix.reshape((3, 3))
else:
raise ValueError(
"Size of matrix must be 1, 3, or 9; got {}.".format(matrix.size))
return matrix
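# Illustrative examples (added note, assuming numpy as np):
#   _get_matrix(2)          -> 2.0 * np.eye(3)            (isotropic expansion)
#   _get_matrix([1, 2, 3])  -> np.diag([1.0, 2.0, 3.0])   (diagonal matrix)
#   _get_matrix([[1, 1, 0], [-1, 1, 0], [0, 0, 1]])       (already a 3x3 matrix)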
def convert_cell_to_lc(cell):
"""Convert lattice vectors to lattice constants.
Args:
cell: [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]
Returns:
lc: a dictionary like
{"a": ..., "b": ..., "c": ...,
"alpha": ..., "beta": ..., "gamma": ...}
"""
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
alpha = np.dot(cell[1], cell[2]) / (b * c)
beta = np.dot(cell[2], cell[0]) / (c * a)
gamma = np.dot(cell[0], cell[1]) / (a * b)
alpha = np.arccos(alpha) * 180.0 / np.pi
beta = np.arccos(beta) * 180.0 / np.pi
gamma = np.arccos(gamma) * 180.0 / np.pi
b_x_c = np.cross(cell[1], cell[2])
c_x_a = np.cross(cell[2], cell[0])
a_x_b = np.cross(cell[0], cell[1])
volume = np.linalg.det(cell)
lc = {
"volume": volume,
"a": a,
"b": b,
"c": c,
"b/a": b / a,
"c/a": c / a,
"a/b": a / b,
"c/b": c / b,
"a/c": a / c,
"b/c": b / c,
"alpha": alpha,
"beta": beta,
"gamma": gamma,
"b_x_c": np.linalg.norm(b_x_c),
"c_x_a": np.linalg.norm(c_x_a),
"a_x_b": np.linalg.norm(a_x_b),
}
return lc
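# Example (added note): for a simple cubic cell
#   cell = [[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]]
# convert_cell_to_lc(cell) returns a == b == c == 4.0,
# alpha == beta == gamma == 90.0, and volume == 64.0.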
def convert_lc_to_cell(lc, prec=1e-12):
"""Convert lattice constants to lattice vectors.
Args:
lc: a dictionary like
{"a": ..., "b": ..., "c": ...,
"alpha": ..., "beta": ..., "gamma": ...}
Returns:
cell: [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]
"""
a = lc["a"]
b = lc["b"]
c = lc["c"]
alpha = np.radians(lc["alpha"])
beta = np.radians(lc["beta"])
gamma = np.radians(lc["gamma"])
cell = np.zeros((3, 3))
cell[0, 0] = a
cell[1, 0] = b * np.cos(gamma)
cell[1, 1] = b * np.sin(gamma)
cell[2, 0] = c * np.cos(beta)
tmp = np.cos(alpha) - np.cos(gamma) * np.cos(beta)
tmp /= np.sin(gamma)
cell[2, 1] = c * tmp
cell[2, 2] = c * np.sqrt(np.sin(beta) ** 2 - tmp ** 2)
cell[abs(cell) < prec] = 0.0 # Small values are replaced by zero.
return cell
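# Added note: convert_lc_to_cell places the a-axis along x and the b-axis in
# the xy-plane, so convert_cell_to_lc(convert_lc_to_cell(lc)) reproduces the
# lengths and angles in "lc", although the Cartesian orientation of an
# arbitrary original cell is generally not preserved.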
def find_lattice_vectors(supercell_matrix, prec=1e-9):
"""Find the set of latice vectors inside the supercell.
Args:
supercell_matrix (3x3 array):
(a_s, b_s, c_s) = (a_u, b_u, c_u) * supercell_matrix
Returns:
the set of translation vectors (fractional coordinates for SC).
The 1st index moves the fastest.
"""
nexpansion = int(round(abs(np.linalg.det(supercell_matrix))))
def generate_lv_range(i):
low = 0
high = 0
for j in supercell_matrix[i]:
if j > 0:
high += j
else:
low += j
return np.arange(low, high + 1)
range_a = generate_lv_range(0)[:, None] * np.array([1, 0, 0])[None, :]
range_b = generate_lv_range(1)[:, None] * np.array([0, 1, 0])[None, :]
range_c = generate_lv_range(2)[:, None] * np.array([0, 0, 1])[None, :]
# translation vectors in unit cell units
translation_vectors = (range_c[:, None, None] +
range_b[None, :, None] +
range_a[None, None, :])
translation_vectors = translation_vectors.reshape((-1, 3))
translation_vectors = np.dot(
np.linalg.inv(supercell_matrix), translation_vectors.T).T
translation_vectors = translation_vectors[
np.where(np.all(translation_vectors < 1 - prec, axis=1) &
np.all(translation_vectors >= -prec, axis=1))
]
if len(translation_vectors) != nexpansion:
raise ValueError(
"Found {} translation vectors, but abs(det(supercell_matrix)) "
"is {}.".format(len(translation_vectors), nexpansion))
return translation_vectors
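# Example (added note): for supercell_matrix = 2 * np.eye(3), abs(det) == 8 and
# the returned vectors are the 8 combinations with components in {0, 0.5},
# expressed in fractional coordinates of the supercell.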
def transform_scaled_positions(scaled_positions, rotation, translation):
"""
Args:
scaled_positions (nx3 array): Scaled positions.
rotation (3x3 array): Rotation matrix.
translation (3 array): Translation vector.
Returns:
transformed_scaled_positions.
"""
transformed_scaled_positions = np.dot(rotation, scaled_positions.T).T
transformed_scaled_positions += translation
return transformed_scaled_positions
| |
#!/usr/bin/python
# coding=utf-8
##########################################################################
from mock import Mock
from mock import patch, call
from diamond.collector import Collector
from pytest_diamond import get_collector_config, CollectorTestCase
from diamond_redis import RedisCollector
##########################################################################
def test_import():
assert RedisCollector
class TestRedisCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('RedisCollector', {
'interval': '1',
'databases': 1,
})
self.collector = RedisCollector(config, None)
@patch.object(Collector, 'publish')
def test_real_data(self, publish_mock):
data_1 = {'pubsub_channels': 0,
'used_memory_peak_human': '700.71K',
'bgrewriteaof_in_progress': 0,
'connected_slaves': 0,
'uptime_in_days': 0,
'multiplexing_api': 'epoll',
'lru_clock': 954113,
'last_save_time': 1351718385,
'redis_version': '2.4.10',
'redis_git_sha1': 0,
'gcc_version': '4.4.6',
'connected_clients': 1,
'keyspace_misses': 0,
'used_memory': 726144,
'vm_enabled': 0,
'used_cpu_user_children': '0.00',
'used_memory_peak': 717528,
'role': 'master',
'total_commands_processed': 1,
'latest_fork_usec': 0,
'loading': 0,
'used_memory_rss': 7254016,
'total_connections_received': 1,
'pubsub_patterns': 0,
'aof_enabled': 0,
'used_cpu_sys': '0.02',
'used_memory_human': '709.12K',
'used_cpu_sys_children': '0.00',
'blocked_clients': 0,
'used_cpu_user': '0.00',
'client_biggest_input_buf': 0,
'arch_bits': 64,
'mem_fragmentation_ratio': '9.99',
'expired_keys': 0,
'evicted_keys': 0,
'bgsave_in_progress': 0,
'client_longest_output_list': 0,
'mem_allocator': 'jemalloc-2.2.5',
'process_id': 3020,
'uptime_in_seconds': 32,
'changes_since_last_save': 0,
'redis_git_dirty': 0,
'keyspace_hits': 0
}
data_2 = {'pubsub_channels': 1,
'used_memory_peak_human': '1700.71K',
'bgrewriteaof_in_progress': 4,
'connected_slaves': 2,
'master_last_io_seconds_ago': 7,
'uptime_in_days': 1,
'multiplexing_api': 'epoll',
'lru_clock': 5954113,
'last_save_time': 51351718385,
'redis_version': '2.4.10',
'redis_git_sha1': 0,
'gcc_version': '4.4.6',
'connected_clients': 100,
'keyspace_misses': 670,
'used_memory': 1726144,
'vm_enabled': 0,
'used_cpu_user_children': '2.00',
'used_memory_peak': 1717528,
'role': 'master',
'total_commands_processed': 19764,
'latest_fork_usec': 8,
'loading': 0,
'used_memory_rss': 17254016,
'total_connections_received': 18764,
'pubsub_patterns': 0,
'aof_enabled': 0,
'used_cpu_sys': '0.05',
'used_memory_human': '1709.12K',
'used_cpu_sys_children': '0.09',
'blocked_clients': 8,
'used_cpu_user': '0.09',
'client_biggest_input_buf': 40,
'arch_bits': 64,
'mem_fragmentation_ratio': '0.99',
'expired_keys': 0,
'evicted_keys': 0,
'bgsave_in_progress': 0,
'client_longest_output_list': 0,
'mem_allocator': 'jemalloc-2.2.5',
'process_id': 3020,
'uptime_in_seconds': 95732,
'changes_since_last_save': 759,
'redis_git_dirty': 0,
'keyspace_hits': 5700
}
patch_collector = patch.object(RedisCollector, '_get_info',
Mock(return_value=data_1))
patch_time = patch('time.time', Mock(return_value=10))
patch_collector.start()
patch_time.start()
self.collector.collect()
patch_collector.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_collector = patch.object(RedisCollector, '_get_info',
Mock(return_value=data_2))
patch_time = patch('time.time', Mock(return_value=20))
patch_collector.start()
patch_time.start()
self.collector.collect()
patch_collector.stop()
patch_time.stop()
metrics = {'6379.process.uptime': 95732,
'6379.pubsub.channels': 1,
'6379.slaves.connected': 2,
'6379.slaves.last_io': 7,
'6379.process.connections_received': 18764,
'6379.clients.longest_output_list': 0,
'6379.process.commands_processed': 19764,
'6379.last_save.changes_since': 759,
'6379.memory.external_view': 17254016,
'6379.memory.fragmentation_ratio': 0.99,
'6379.last_save.time': 51351718385,
'6379.clients.connected': 100,
'6379.clients.blocked': 8,
'6379.pubsub.patterns': 0,
'6379.cpu.parent.user': 0.09,
'6379.last_save.time_since': -51351718365,
'6379.memory.internal_view': 1726144,
'6379.cpu.parent.sys': 0.05,
'6379.keyspace.misses': 670,
'6379.keys.expired': 0,
'6379.keys.evicted': 0,
'6379.keyspace.hits': 5700,
}
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
@patch.object(Collector, 'publish')
def test_hostport_or_instance_config(self, publish_mock):
testcases = {
'default': {
'config': {}, # test default settings
'calls': [call('6379', 'localhost', 6379, None, None)],
},
'host_set': {
'config': {'host': 'myhost'},
'calls': [call('6379', 'myhost', 6379, None, None)],
},
'port_set': {
'config': {'port': 5005},
'calls': [call('5005', 'localhost', 5005, None, None)],
},
'hostport_set': {
'config': {'host': 'megahost', 'port': 5005},
'calls': [call('5005', 'megahost', 5005, None, None)],
},
'portauth_set': {
'config': {'port': 5005, 'auth': 'pass'},
'calls': [call('5005', 'localhost', 5005, None, 'pass')],
},
'unix_socket_host_set': {
'config': {'host': 'unix:/var/run/redis/myhost.sock'},
'calls': [call('myhost', 'localhost', 6379,
'/var/run/redis/myhost.sock', None)],
},
'instance_1_host': {
'config': {'instances': ['nick@myhost']},
'calls': [call('nick', 'myhost', 6379, None, None)],
},
'unix_socket_instance_1_host': {
'config': {'instances': [
'nick@unix:/var/run/redis/myhost.sock'
]},
'calls': [call('nick', 'localhost', 6379,
'/var/run/redis/myhost.sock', None)],
},
'unix_socket_instance_1_hostauth': {
'config': {'instances': [
'nick@unix:/var/run/redis/myhost.sock:/pass'
]},
'calls': [call('nick', 'localhost', 6379,
'/var/run/redis/myhost.sock', 'pass')],
},
'instance_1_port': {
'config': {'instances': ['nick@:9191']},
'calls': [call('nick', 'localhost', 9191, None, None)],
},
'instance_1_hostport': {
'config': {'instances': ['nick@host1:8765']},
'calls': [call('nick', 'host1', 8765, None, None)],
},
'instance_2': {
'config': {'instances': [
'foo@hostX',
'bar@:1000/pass',
'unix:/var/run/redis/myhost.sock:1/pass'
]},
'calls': [
call('foo', 'hostX', 6379, None, None),
call('bar', 'localhost', 1000, None, 'pass'),
call('myhost', 'localhost', 6379,
'/var/run/redis/myhost.sock', 'pass'),
],
},
'old_and_new': {
'config': {
'host': 'myhost',
'port': 1234,
'instances': [
'foo@hostX',
'bar@:1000',
'hostonly',
':1234'
]
},
'calls': [
call('foo', 'hostX', 6379, None, None),
call('bar', 'localhost', 1000, None, None),
call('6379', 'hostonly', 6379, None, None),
call('1234', 'localhost', 1234, None, None),
],
},
}
for testname, data in testcases.items():
config = get_collector_config('RedisCollector', data['config'])
collector = RedisCollector(config, None)
mock = Mock(return_value={}, name=testname)
patch_c = patch.object(RedisCollector, 'collect_instance', mock)
patch_c.start()
collector.collect()
patch_c.stop()
expected_call_count = len(data['calls'])
self.assertEqual(mock.call_count, expected_call_count,
msg='[%s] mock.calls=%d != expected_calls=%d' %
(testname, mock.call_count, expected_call_count))
for exp_call in data['calls']:
# Check the expected calls one by one, because
# self.instances is a dict, so call order is not deterministic.
mock.assert_has_calls(exp_call)
@patch.object(Collector, 'publish')
def test_key_naming_when_using_instances(self, publish_mock):
config_data = {
'instances': [
'nick1@host1:1111',
'nick2@:2222',
'nick3@host3',
'nick4@host4:3333/@password',
'bla'
]
}
get_info_data = {
'total_connections_received': 200,
'total_commands_processed': 100,
}
expected_calls = [
call('nick1.process.connections_received', 200, precision=0,
metric_type='GAUGE'),
call('nick1.process.commands_processed', 100, precision=0,
metric_type='GAUGE'),
call('nick2.process.connections_received', 200, precision=0,
metric_type='GAUGE'),
call('nick2.process.commands_processed', 100, precision=0,
metric_type='GAUGE'),
call('nick3.process.connections_received', 200, precision=0,
metric_type='GAUGE'),
call('nick3.process.commands_processed', 100, precision=0,
metric_type='GAUGE'),
call('nick4.process.connections_received', 200, precision=0,
metric_type='GAUGE'),
call('nick4.process.commands_processed', 100, precision=0,
metric_type='GAUGE'),
call('6379.process.connections_received', 200, precision=0,
metric_type='GAUGE'),
call('6379.process.commands_processed', 100, precision=0,
metric_type='GAUGE'),
]
config = get_collector_config('RedisCollector', config_data)
collector = RedisCollector(config, None)
patch_c = patch.object(RedisCollector, '_get_info',
Mock(return_value=get_info_data))
patch_c.start()
collector.collect()
patch_c.stop()
self.assertEqual(publish_mock.call_count, len(expected_calls))
for exp_call in expected_calls:
# Check the expected calls one by one, because
# self.instances is a dict, so call order is not deterministic.
publish_mock.assert_has_calls(exp_call)
| |
"""
Custom integration to integrate frigate with Home Assistant.
For more details about this integration, please refer to
https://github.com/blakeblackshear/frigate-hass-integration
"""
from __future__ import annotations
from datetime import timedelta
import logging
import re
from typing import Any, Callable, Final
from awesomeversion import AwesomeVersion
from custom_components.frigate.config_flow import get_config_entry_title
from homeassistant.components.mqtt.models import ReceiveMessage
from homeassistant.components.mqtt.subscription import (
EntitySubscription,
async_subscribe_topics,
async_unsubscribe_topics,
)
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_MODEL, CONF_HOST, CONF_URL
from homeassistant.core import Config, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.loader import async_get_integration
from homeassistant.util import slugify
# TODO(@dermotduffy): This section can be removed some safe distance from the
# official release of 2022.3 (and the contents of the first version of
# `subscribe_topics` can be moved into async_added_to_hass below).
try:
from homeassistant.components.mqtt.subscription import (
async_prepare_subscribe_topics,
)
async def subscribe_topics(
hass: HomeAssistant,
state: dict[str, EntitySubscription] | None,
topics: dict[str, Any],
) -> Any: # pragma: no cover
"""Subscribe to MQTT topic."""
state = async_prepare_subscribe_topics(hass, state, topics)
# pylint: disable=no-value-for-parameter
return await async_subscribe_topics(hass, state)
except ImportError:
async def subscribe_topics(
hass: HomeAssistant,
state: dict[str, EntitySubscription] | None,
topics: dict[str, Any],
) -> Any: # pragma: no cover
"""Subscribe to MQTT topic."""
return await async_subscribe_topics(hass, state, topics)
from .api import FrigateApiClient, FrigateApiClientError
from .const import (
ATTR_CLIENT,
ATTR_CONFIG,
ATTR_COORDINATOR,
CONF_CAMERA_STATIC_IMAGE_HEIGHT,
DOMAIN,
FRIGATE_RELEASES_URL,
FRIGATE_VERSION_ERROR_CUTOFF,
NAME,
PLATFORMS,
STARTUP_MESSAGE,
)
from .views import (
JSMPEGProxyView,
NotificationsProxyView,
SnapshotsProxyView,
VodProxyView,
VodSegmentProxyView,
)
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER: logging.Logger = logging.getLogger(__name__)
# Typing notes:
# - The HomeAssistant library does not provide usable type hints for custom
# components. Certain type checks (e.g. decorators and class inheritance) need
# to be marked as ignored or cast when using the default Home Assistant
# mypy settings. Using the same settings is preferable, to smooth a future
# migration to Home Assistant Core.
def get_frigate_device_identifier(
entry: ConfigEntry, camera_name: str | None = None
) -> tuple[str, str]:
"""Get a device identifier."""
if camera_name:
return (DOMAIN, f"{entry.entry_id}:{slugify(camera_name)}")
else:
return (DOMAIN, entry.entry_id)
def get_frigate_entity_unique_id(
config_entry_id: str, type_name: str, name: str
) -> str:
"""Get the unique_id for a Frigate entity."""
return f"{config_entry_id}:{type_name}:{name}"
def get_friendly_name(name: str) -> str:
"""Get a friendly version of a name."""
return name.replace("_", " ").title()
def get_cameras_and_objects(config: dict[str, Any]) -> set[tuple[str, str]]:
"""Get cameras and tracking object tuples."""
camera_objects = set()
for cam_name, cam_config in config["cameras"].items():
for obj in cam_config["objects"]["track"]:
camera_objects.add((cam_name, obj))
return camera_objects
def get_cameras_zones_and_objects(config: dict[str, Any]) -> set[tuple[str, str]]:
"""Get cameras/zones and tracking object tuples."""
camera_objects = get_cameras_and_objects(config)
zone_objects = set()
for cam_name, obj in camera_objects:
for zone_name in config["cameras"][cam_name]["zones"]:
zone_name_objects = config["cameras"][cam_name]["zones"][zone_name].get(
"objects"
)
if not zone_name_objects or obj in zone_name_objects:
zone_objects.add((zone_name, obj))
return camera_objects.union(zone_objects)
def get_cameras_and_zones(config: dict[str, Any]) -> set[str]:
"""Get cameras and zones."""
cameras_zones = set()
for camera in config.get("cameras", {}).keys():
cameras_zones.add(camera)
for zone in config["cameras"][camera].get("zones", {}).keys():
cameras_zones.add(zone)
return cameras_zones
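# Illustrative sketch (added note, hypothetical config): given
#   config = {"cameras": {"front": {"objects": {"track": ["person"]},
#                                   "zones": {"porch": {}}}}}
# the helpers above return
#   get_cameras_and_objects(config)       -> {("front", "person")}
#   get_cameras_zones_and_objects(config) -> {("front", "person"), ("porch", "person")}
#   get_cameras_and_zones(config)         -> {"front", "porch"}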
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up this integration using YAML is not supported."""
integration = await async_get_integration(hass, DOMAIN)
_LOGGER.info(
STARTUP_MESSAGE.format(
title=NAME,
integration_version=integration.version,
)
)
hass.data.setdefault(DOMAIN, {})
session = async_get_clientsession(hass)
hass.http.register_view(JSMPEGProxyView(session))
hass.http.register_view(NotificationsProxyView(session))
hass.http.register_view(SnapshotsProxyView(session))
hass.http.register_view(VodProxyView(session))
hass.http.register_view(VodSegmentProxyView(session))
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up this integration using UI."""
client = FrigateApiClient(entry.data.get(CONF_URL), async_get_clientsession(hass))
coordinator = FrigateDataUpdateCoordinator(hass, client=client)
await coordinator.async_config_entry_first_refresh()
try:
server_version = await client.async_get_version()
config = await client.async_get_config()
except FrigateApiClientError as exc:
raise ConfigEntryNotReady from exc
if AwesomeVersion(server_version) <= AwesomeVersion(FRIGATE_VERSION_ERROR_CUTOFF):
_LOGGER.error(
"Using a Frigate server (%s) with version %s <= %s which is not "
"compatible -- you must upgrade: %s",
entry.data[CONF_URL],
server_version,
FRIGATE_VERSION_ERROR_CUTOFF,
FRIGATE_RELEASES_URL,
)
return False
model = f"{(await async_get_integration(hass, DOMAIN)).version}/{server_version}"
hass.data[DOMAIN][entry.entry_id] = {
ATTR_COORDINATOR: coordinator,
ATTR_CLIENT: client,
ATTR_CONFIG: config,
ATTR_MODEL: model,
}
# Remove old devices associated with cameras that have since been removed
# from the Frigate server, keeping the 'master' device for this config
# entry.
current_devices: set[tuple[str, str]] = set({get_frigate_device_identifier(entry)})
for item in get_cameras_and_zones(config):
current_devices.add(get_frigate_device_identifier(entry, item))
device_registry = dr.async_get(hass)
for device_entry in dr.async_entries_for_config_entry(
device_registry, entry.entry_id
):
for identifier in device_entry.identifiers:
if identifier in current_devices:
break
else:
device_registry.async_remove_device(device_entry.id)
# Cleanup old clips switch (<v0.9.0) if it exists.
entity_registry = er.async_get(hass)
for camera in config["cameras"].keys():
unique_id = get_frigate_entity_unique_id(
entry.entry_id, SWITCH_DOMAIN, f"{camera}_clips"
)
entity_id = entity_registry.async_get_entity_id(
SWITCH_DOMAIN, DOMAIN, unique_id
)
if entity_id:
entity_registry.async_remove(entity_id)
# Remove old `camera_image_height` option.
if CONF_CAMERA_STATIC_IMAGE_HEIGHT in entry.options:
new_options = entry.options.copy()
new_options.pop(CONF_CAMERA_STATIC_IMAGE_HEIGHT)
hass.config_entries.async_update_entry(entry, options=new_options)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(_async_entry_updated))
return True
class FrigateDataUpdateCoordinator(DataUpdateCoordinator): # type: ignore[misc]
"""Class to manage fetching data from the API."""
def __init__(self, hass: HomeAssistant, client: FrigateApiClient):
"""Initialize."""
self._api = client
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)
async def _async_update_data(self) -> dict[str, Any]:
"""Update data via library."""
try:
return await self._api.async_get_stats()
except FrigateApiClientError as exc:
raise UpdateFailed from exc
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Handle removal of an entry."""
unload_ok = bool(
await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def _async_entry_updated(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle entry updates."""
await hass.config_entries.async_reload(config_entry.entry_id)
async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Migrate from v1 entry."""
if config_entry.version == 1:
_LOGGER.debug("Migrating config entry from version '%s'", config_entry.version)
data = {**config_entry.data}
data[CONF_URL] = data.pop(CONF_HOST)
hass.config_entries.async_update_entry(
config_entry, data=data, title=get_config_entry_title(data[CONF_URL])
)
config_entry.version = 2
@callback # type: ignore[misc]
def update_unique_id(entity_entry: er.RegistryEntry) -> dict[str, str] | None:
"""Update unique ID of entity entry."""
converters: Final[dict[re.Pattern, Callable[[re.Match], list[str]]]] = {
re.compile(rf"^{DOMAIN}_(?P<cam_obj>\S+)_binary_sensor$"): lambda m: [
"motion_sensor",
m.group("cam_obj"),
],
re.compile(rf"^{DOMAIN}_(?P<cam>\S+)_camera$"): lambda m: [
"camera",
m.group("cam"),
],
re.compile(rf"^{DOMAIN}_(?P<cam_obj>\S+)_snapshot$"): lambda m: [
"camera_snapshots",
m.group("cam_obj"),
],
re.compile(rf"^{DOMAIN}_detection_fps$"): lambda m: [
"sensor_fps",
"detection",
],
re.compile(
rf"^{DOMAIN}_(?P<detector>\S+)_inference_speed$"
): lambda m: ["sensor_detector_speed", m.group("detector")],
re.compile(rf"^{DOMAIN}_(?P<cam_fps>\S+)_fps$"): lambda m: [
"sensor_fps",
m.group("cam_fps"),
],
re.compile(rf"^{DOMAIN}_(?P<cam_switch>\S+)_switch$"): lambda m: [
"switch",
m.group("cam_switch"),
],
# Caution: This is a broad but necessary match (keep until last).
re.compile(rf"^{DOMAIN}_(?P<cam_obj>\S+)$"): lambda m: [
"sensor_object_count",
m.group("cam_obj"),
],
}
for regexp, func in converters.items():
match = regexp.match(entity_entry.unique_id)
if match:
args = [config_entry.entry_id] + func(match)
return {"new_unique_id": get_frigate_entity_unique_id(*args)}
return None
await er.async_migrate_entries(hass, config_entry.entry_id, update_unique_id)
_LOGGER.debug(
"Migrating config entry to version '%s' successful", config_entry.version
)
return True
class FrigateEntity(Entity): # type: ignore[misc]
"""Base class for Frigate entities."""
def __init__(self, config_entry: ConfigEntry):
"""Construct a FrigateEntity."""
Entity.__init__(self)
self._config_entry = config_entry
self._available = True
@property
def available(self) -> bool:
"""Return the availability of the entity."""
return self._available
def _get_model(self) -> str:
"""Get the Frigate device model string."""
return str(self.hass.data[DOMAIN][self._config_entry.entry_id][ATTR_MODEL])
class FrigateMQTTEntity(FrigateEntity):
"""Base class for MQTT-based Frigate entities."""
def __init__(
self,
config_entry: ConfigEntry,
frigate_config: dict[str, Any],
state_topic_config: dict[str, Any],
) -> None:
"""Construct a FrigateMQTTEntity."""
super().__init__(config_entry)
self._frigate_config = frigate_config
self._sub_state = None
self._available = False
self._state_topic_config = {
"msg_callback": self._state_message_received,
"qos": 0,
**state_topic_config,
}
async def async_added_to_hass(self) -> None:
"""Subscribe mqtt events."""
self._sub_state = await subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": self._state_topic_config,
"availability_topic": {
"topic": f"{self._frigate_config['mqtt']['topic_prefix']}/available",
"msg_callback": self._availability_message_received,
"qos": 0,
},
},
)
async def async_will_remove_from_hass(self) -> None:
"""Cleanup prior to hass removal."""
await async_unsubscribe_topics(self.hass, self._sub_state)
self._sub_state = None
@callback # type: ignore[misc]
def _state_message_received(self, msg: ReceiveMessage) -> None:
"""State message received."""
self.async_write_ha_state()
@callback # type: ignore[misc]
def _availability_message_received(self, msg: ReceiveMessage) -> None:
"""Handle a new received MQTT availability message."""
self._available = msg.payload == "online"
self.async_write_ha_state()
| |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import threading
import unittest
from thrift.protocol import THeaderProtocol
from thrift.Thrift import (
TApplicationException,
TPriority,
TProcessorEventHandler,
)
from thrift.transport import THeaderTransport
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
from thrift.util.TCppServerTestManager import TCppServerTestManager
from thrift.util.test_service import TestService, PriorityService, SubPriorityService
from thrift.util.test_service.ttypes import UserException2
class BaseTest(unittest.TestCase):
def _perform_rpc(self, server, service, method, *args, **kwargs):
# Default 5s timeout
return self._expiring_rpc(
server, service, method, 5 * 1000, None, *args, **kwargs
)
# Same as _perform_rpc, but with an explicit timeout and optional headers
def _expiring_rpc(self, server, service, method, tm, headers, *args, **kwargs):
host, port = server.addr()
with TSocket.TSocket(host=host, port=port) as sock:
sock.setTimeout(tm)
transport = THeaderTransport.THeaderTransport(sock)
if headers:
for key, val in headers.items():
transport.set_header(key, val)
protocol = THeaderProtocol.THeaderProtocol(transport)
client = service.Client(protocol, protocol)
return getattr(client, method)(*args, **kwargs)
class TestTCppServerTestManager(BaseTest):
class Handler(TestService.Iface):
def __init__(self, data):
self.__data = data
def getDataById(self, id):
return self.__data[id]
def throwUserException(self):
raise UserException2("Some message")
def throwUncaughtException(self, msg):
raise AssertionError(msg)
class HandlerWithRequestContext(TestService.Iface, TProcessorEventHandler):
def __init__(self, exceptions=False):
self.__request_context = None
self._response = "not initialized"
self._exceptions = exceptions
def getMessage(self):
return self._response
def setRequestContext(self, ctx):
self.__request_context = ctx
def getRequestContext(self):
return self.__request_context
def postRead(self, *args):
if self._exceptions:
raise Exception("some failure")
ctx = self.getRequestContext()
headers = ctx.getHeaders()
self._response = "headers: %r" % headers
def _perform_getDataById(self, server, val):
return self._perform_rpc(server, TestService, "getDataById", val)
def test_request_context_order(self):
handler = self.HandlerWithRequestContext()
processor = TestService.Processor(handler)
processor.setEventHandler(handler)
headers = {"fruit": "orange"}
with TCppServerTestManager(processor) as server:
message = self._expiring_rpc(
server, TestService, "getMessage", 1000, headers=headers
)
# make sure we saw the headers in the handler's postRead
self.assertTrue(message.startswith("headers: {b'fruit': b'orange'"))
# make sure they were reset after the method call
self.assertTrue(handler.getRequestContext() is None)
def test_request_context_reset_on_exception(self):
handler = self.HandlerWithRequestContext(exceptions=True)
processor = TestService.Processor(handler)
processor.setEventHandler(handler)
with TCppServerTestManager(processor) as server:
try:
self._perform_getDataById(server, 7)
except TApplicationException:
pass
# make sure they were reset after the failure to readArgs
self.assertTrue(handler.getRequestContext() is None)
def test_with_handler(self):
handler = self.Handler({7: "hello"})
with TCppServerTestManager(handler) as server:
data = self._perform_getDataById(server, 7)
self.assertEquals(data, "hello")
def test_with_processor(self):
handler = self.Handler({7: "hello"})
processor = TestService.Processor(handler)
with TCppServerTestManager(processor) as server:
data = self._perform_getDataById(server, 7)
self.assertEquals(data, "hello")
def test_with_server(self):
handler = self.Handler({7: "hello"})
processor = TestService.Processor(handler)
server = TCppServerTestManager.make_server(processor)
with TCppServerTestManager(server) as server:
data = self._perform_getDataById(server, 7)
self.assertEquals(data, "hello")
def test_throw_populates_headers(self):
handler = self.Handler({7: "hello"})
processor = TestService.Processor(handler)
server = TCppServerTestManager.make_server(processor)
with TCppServerTestManager(server) as server:
host, port = server.addr()
with TSocket.TSocket(host=host, port=port) as sock:
transport = THeaderTransport.THeaderTransport(sock)
protocol = THeaderProtocol.THeaderProtocol(transport)
client = TestService.Client(protocol, protocol)
try:
client.throwUserException()
self.fail("Expect to throw UserException2")
except UserException2:
pass
self.assertEquals(b"UserException2", transport.get_headers()[b"uex"])
self.assertIn(b"Some message", transport.get_headers()[b"uexw"])
try:
client.throwUncaughtException("a message!")
self.fail("Expect to throw TApplicationException")
except TApplicationException:
pass
self.assertEquals(
b"TApplicationException", transport.get_headers()[b"uex"]
)
self.assertIn(b"a message!", transport.get_headers()[b"uexw"])
class TestTCppServerPriorities(BaseTest):
class PriorityHandler(PriorityService.Iface):
event = threading.Event()
stuck = threading.Event()
def bestEffort(self):
return True
def normal(self):
return True
def important(self):
return True
def unspecified(self):
return True
class SubPriorityHandler(PriorityService.Iface):
def child_unspecified(self):
return True
def child_highImportant(self):
return True
def test_processor_priorities(self):
handler = self.PriorityHandler()
processor = PriorityService.Processor(handler)
# Did we parse annotations correctly
self.assertEquals(processor.get_priority("bestEffort"), TPriority.BEST_EFFORT)
self.assertEquals(processor.get_priority("normal"), TPriority.NORMAL)
self.assertEquals(processor.get_priority("important"), TPriority.IMPORTANT)
self.assertEquals(processor.get_priority("unspecified"), TPriority.HIGH)
def test_processor_child_priorities(self):
handler = self.SubPriorityHandler()
processor = SubPriorityService.Processor(handler)
# Parent priorities present in extended services
# Make sure parent service priorities don't leak to child services
self.assertEquals(processor.get_priority("bestEffort"), TPriority.BEST_EFFORT)
self.assertEquals(processor.get_priority("normal"), TPriority.NORMAL)
self.assertEquals(processor.get_priority("important"), TPriority.IMPORTANT)
self.assertEquals(processor.get_priority("unspecified"), TPriority.HIGH)
# Child methods
self.assertEquals(processor.get_priority("child_unspecified"), TPriority.NORMAL)
self.assertEquals(
processor.get_priority("child_highImportant"), TPriority.HIGH_IMPORTANT
)
def test_header_priorities(self):
pass
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
from qpid_dispatch_internal.compat import UNICODE
from system_test import TestCase, Qdrouterd, main_module, unittest
# ------------------------------------------------
# Helper classes for all tests.
# ------------------------------------------------
class Timeout:
"""
A named timeout object lets the parent distinguish multiple
simultaneous timers by reporting which one fired.
"""
def __init__(self, parent, name):
self.parent = parent
self.name = name
def on_timer_task(self, event):
self.parent.timeout(self.name)
class ManagementMessageHelper:
"""
Format management messages.
"""
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def make_router_link_query(self) :
props = {'count': '100',
'operation': 'QUERY',
'entityType': 'org.apache.qpid.dispatch.router.link',
'name': 'self',
'type': 'org.amqp.management'
}
attrs = []
attrs.append(UNICODE('linkType'))
attrs.append(UNICODE('linkDir'))
attrs.append(UNICODE('deliveryCount'))
attrs.append(UNICODE('priority'))
msg_body = {}
msg_body['attributeNames'] = attrs
return Message(body=msg_body, properties=props, reply_to=self.reply_addr)
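# Added note, based on how the reply is consumed in Priority.on_message below:
# the router answers this query with a message whose body carries a 'results'
# list, one row per link, with columns in the same order as the attrs above,
# i.e. [linkType, linkDir, deliveryCount, priority], e.g.
# ['inter-router', 'out', 100, 7].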
# ================================================================
# Setup
# ================================================================
class PriorityTests (TestCase):
@classmethod
def setUpClass(cls):
super(PriorityTests, cls).setUpClass()
def router(name, more_config):
config = [('router', {'mode': 'interior', 'id': name, 'workerThreads': 4}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'})
] \
+ more_config
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
# The sender will send all its messages with magic_message_priority.
# The first router will set target addr priority to magic_address_priority.
# It is important *not* to choose 4 for either of these priorities,
# since that is the default message priority.
cls.magic_message_priority = 3
cls.magic_address_priority = 7
link_cap = 100
A_client_port = cls.tester.get_port()
B_client_port = cls.tester.get_port()
C_client_port = cls.tester.get_port()
A_inter_router_port = cls.tester.get_port()
B_inter_router_port = cls.tester.get_port()
C_inter_router_port = cls.tester.get_port()
A_config = [
('listener',
{'port' : A_client_port,
'role' : 'normal',
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('listener',
{'role' : 'inter-router',
'port' : A_inter_router_port,
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('address',
{'prefix' : 'speedy',
'priority' : cls.magic_address_priority,
'distribution' : 'closest'
}
),
]
cls.B_config = [
('listener',
{'port' : B_client_port,
'role' : 'normal',
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('listener',
{'role' : 'inter-router',
'port' : B_inter_router_port,
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('connector',
{'name' : 'BA_connector',
'role' : 'inter-router',
'port' : A_inter_router_port,
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
)
]
C_config = [
('listener',
{'port' : C_client_port,
'role' : 'normal',
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('listener',
{'role' : 'inter-router',
'port' : C_inter_router_port,
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
),
('connector',
{'name' : 'CB_connector',
'role' : 'inter-router',
'port' : B_inter_router_port,
'linkCapacity' : link_cap,
'stripAnnotations' : 'no'
}
)
]
router('A', A_config)
router('B', cls.B_config)
router('C', C_config)
router_A = cls.routers[0]
router_B = cls.routers[1]
router_C = cls.routers[2]
router_A.wait_router_connected('B')
router_A.wait_router_connected('C')
cls.client_addrs = (router_A.addresses[0],
router_B.addresses[0],
router_C.addresses[0]
)
def test_priority(self):
name = 'test_01'
test = Priority(self,
name,
self.client_addrs,
"speedy/01",
self.magic_message_priority,
self.magic_address_priority
)
test.run()
self.assertIsNone(test.error)
# ================================================================
# Tests
# ================================================================
class Priority (MessagingHandler):
# In this test we will have a linear network of 3 routers.
# The sender attaches at A, and the receiver at C.
#
# receiver <--- C <--- B <--- A <--- sender
#
# Priority -- whether message or address -- only operates
# on inter-router links. The links from A to B will show
# address-priority overriding message-priority. When a
# router does not set any message priority, then messages
# are routed according to their intrinsic priority which
# was assigned by the sender. This will be shown by the
# connection from router B to C.
#
# The address that the clients use has a prefix of 'speedy'.
# Router A will assign a priority of magic_addr_priority to all
# 'speedy' addresses.
# No other routers will assign any address priorities.
#
# The sending client will assign a priority of magic_msg_priority
# to all the messages it sends.
#
# So what should happen is:
#
# 1. at router A, all the 'speedy' messages go out with
# magic_addr_priority, because addr priority takes precedence.
#
# 2. at router B, they all go out with magic_msg_priority,
# because that router has not assigned any addr priority,
# so the intrinsic message priorities are used.
#
# 3. Nothing special happens at router C, because it is sending
# the messages out over a connection to an endpoint, which
# is not an inter-router connection.
#
# In this test we will send a known number of messages and
# then send management queries to A and B to learn at what
# priorities the messages actually travelled.
def __init__(self, parent, test_name, client_addrs, destination, magic_msg_priority, magic_addr_priority):
super(Priority, self).__init__(prefetch=10)
self.parent = parent
self.client_addrs = client_addrs
self.dest = destination
self.magic_msg_priority = magic_msg_priority
self.magic_addr_priority = magic_addr_priority
self.error = None
self.sender = None
self.receiver = None
self.send_timer = None
self.n_messages = 100
self.n_sent = 0
self.send_conn = None
self.recv_conn = None
self.n_received = 0
self.reactor = None
self.timer_count = 0
self.sent_queries = False
self.finishing = False
self.goals = 0
self.n_goals = 2
self.connections = list()
self.A_addr = self.client_addrs[0]
self.B_addr = self.client_addrs[1]
self.C_addr = self.client_addrs[2]
self.routers = {
'A' : dict(),
'B' : dict()
}
# Shut down everything and exit.
def bail(self, text):
self.send_timer.cancel()
self.finishing = True
self.error = text
for conn in self.connections :
conn.close()
def make_connection(self, event, addr) :
cnx = event.container.connect(addr)
self.connections.append(cnx)
return cnx
def on_start(self, event):
self.reactor = event.reactor
self.send_conn = self.make_connection(event, self.A_addr)
self.recv_conn = self.make_connection(event, self.C_addr)
self.sender = event.container.create_sender(self.send_conn, self.dest)
self.receiver = event.container.create_receiver(self.recv_conn, self.dest)
self.receiver.flow(100)
self.routers['A']['mgmt_conn'] = self.make_connection(event, self.A_addr)
self.routers['A']['mgmt_receiver'] = event.container.create_receiver(self.routers['A']['mgmt_conn'], dynamic=True)
self.routers['A']['mgmt_sender'] = event.container.create_sender(self.routers['A']['mgmt_conn'], "$management")
self.routers['B']['mgmt_conn'] = self.make_connection(event, self.B_addr)
self.routers['B']['mgmt_receiver'] = event.container.create_receiver(self.routers['B']['mgmt_conn'], dynamic=True)
self.routers['B']['mgmt_sender'] = event.container.create_sender(self.routers['B']['mgmt_conn'], "$management")
self.send_timer = event.reactor.schedule(2, Timeout(self, "send"))
def timeout(self, name):
if name == 'send':
self.send()
if not self.sent_queries :
self.test_timer = self.reactor.schedule(1, Timeout(self, "send"))
def on_link_opened(self, event) :
# A mgmt link has opened. Create its management helper.
# ( Now we know the address that the management helper should use as
# the "reply-to" in its management message. )
if event.receiver == self.routers['A']['mgmt_receiver'] :
event.receiver.flow(1000)
self.routers['A']['mgmt_helper'] = ManagementMessageHelper(event.receiver.remote_source.address)
elif event.receiver == self.routers['B']['mgmt_receiver'] :
event.receiver.flow(1000)
self.routers['B']['mgmt_helper'] = ManagementMessageHelper(event.receiver.remote_source.address)
def send(self) :
if self.sender.credit <= 0:
self.receiver.flow(100)
return
# First send the payload messages.
if self.n_sent < self.n_messages :
for i in range(50) :
msg = Message(body=self.n_sent)
msg.priority = self.magic_msg_priority
self.sender.send(msg)
self.n_sent += 1
# Then send the management queries.
# But only send them once.
elif not self.sent_queries :
# Query router A.
mgmt_helper = self.routers['A']['mgmt_helper']
mgmt_sender = self.routers['A']['mgmt_sender']
msg = mgmt_helper.make_router_link_query()
mgmt_sender.send(msg)
# Query router B.
mgmt_helper = self.routers['B']['mgmt_helper']
mgmt_sender = self.routers['B']['mgmt_sender']
msg = mgmt_helper.make_router_link_query()
mgmt_sender.send(msg)
self.sent_queries = True
# This test has two goals: get the response from router A
# and from router B. As they come in, we check them. If
# the response is unsatisfactory we bail out
def goal_satisfied(self) :
self.goals += 1
if self.goals >= self.n_goals :
self.bail(None)
def on_message(self, event) :
# Don't take any more messages if 'bail' has been called.
if self.finishing :
return
msg = event.message
if event.receiver == self.routers['A']['mgmt_receiver'] :
# Router A has only one set of outgoing links, and it
# has set a priority for our target address. We should
# see all the messages we sent go out with that priority.
magic = self.magic_addr_priority
if 'results' in msg.body :
results = msg.body['results']
# Do not rely on the results being returned in priority order.
# The link priority was explicitly requested in the management
# query, so loop through all the results and look for the one
# with the desired priority.
for i in range(len(results)) :
result = results[i]
role = result[0]
dir = result[1]
message_count = result[2]
priority = result[3]
if role == "inter-router" and dir == "out" and priority == magic :
if message_count >= self.n_messages :
self.goal_satisfied()
return
else :
self.bail("Router A priority %d had %d messages instead of %d." %
(magic, message_count, self.n_messages))
return
elif event.receiver == self.routers['B']['mgmt_receiver'] :
# Router B has two sets of outgoing links, and it has not
# set a priority for the target address. We should see all
# of our messages going out over the message-intrinsic
# priority that the sending client used -- on one of those
# two sets of outgoing links.
magic = self.magic_msg_priority
if 'results' in msg.body :
message_counts = list()
results = msg.body['results']
for i in range(len(results)) :
result = results[i]
role = result[0]
dir = result[1]
message_count = result[2]
priority = result[3]
if role == "inter-router" and dir == "out" :
if priority == magic :
message_counts.append(message_count)
if self.n_messages in message_counts :
self.goal_satisfied()
else :
self.bail("No outgoing link on router B had %d messages at priority 3" % self.n_messages)
else :
# This is a payload message -- not management. Just count it.
self.n_received += 1
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
| |
# -*- encoding: utf-8 -*-
from django.test import TestCase
from caffe.models import Caffe
from employees.models import Employee
from .forms import (CategoryForm, FullProductForm, ProductForm, ReportForm,
UnitForm)
from .models import Category, Product, Unit
class CategoryFormTest(TestCase):
"""Tests of CategoryForm."""
def setUp(self):
"""Test data setup."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
def test_category(self):
"""Check validation."""
form_incorrect = CategoryForm(
{'name': ''},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_incorrect = CategoryForm(
{'no_such': 'field'},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_correct = CategoryForm(
{'name': 'Category is correct'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_correct = CategoryForm(
{'name': 'This.is.correct123!@#$%"^&"*():?>M'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
# test no caffe
with self.assertRaises(Exception):
CategoryForm({'name': "category"})
def test_name_validation(self):
"""Check name validation."""
Category.objects.create(name='Correct', caffe=self.filtry)
form_correct = CategoryForm(
{'name': 'Correct'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
# invalid name
Category.objects.create(name='Correct', caffe=self.caffe)
form_incorrect = CategoryForm(
{'name': 'Correct'},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
class UnitFormTest(TestCase):
"""UnitForm tets."""
def setUp(self):
"""Test data setup."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
def test_unit_form(self):
"""Validation tests."""
form_correct = UnitForm(
{"name": "correct"},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_incorrect = UnitForm(
{'no_such': 'field'},
caffe=self.caffe
)
# should not pass with a non-existent field
self.assertFalse(form_incorrect.is_valid())
form_correct = UnitForm(
{'name': 'Category is correct'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_correct = UnitForm(
{'name': 'This.is.correct123!@#$%"^&"*():?>M'},
caffe=self.caffe
)
# should pass with ascii characters
self.assertTrue(form_correct.is_valid())
# test no caffe
with self.assertRaises(Exception):
UnitForm({'name': 'Unit'})
def test_unit_same_name(self):
"""Check if Unit with same name is properly handled."""
Unit.objects.create(name='Correct', caffe=self.filtry)
form_correct = UnitForm(
{'name': 'Correct'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
# invalid name
Unit.objects.create(name='Correct', caffe=self.caffe)
form_incorrect = UnitForm(
{'name': 'Correct'},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
class ProductFormTest(TestCase):
"""ProductForm tests."""
def setUp(self):
"""Test data setup."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
self.cat_first = Category.objects.create(
name="first",
caffe=self.caffe
)
self.cat_second = Category.objects.create(
name="second",
caffe=self.caffe
)
self.cat_first_f = Category.objects.create(
name="second",
caffe=self.filtry
)
self.gram = Unit.objects.create(name="gram", caffe=self.caffe)
self.liter = Unit.objects.create(name="liter", caffe=self.caffe)
self.liter_f = Unit.objects.create(name="liter", caffe=self.filtry)
def test_product_form(self):
"""Check validation."""
form_correct = ProductForm(
{
'name': 'Correct',
'category': self.cat_first.id,
'unit': self.gram.id
},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_correct = ProductForm(
{
'name': 'Correct',
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_incorrect = ProductForm(
{
'name': "This is to long name!@dg#d!%#@fd%$f1c@%!#$!"
"#!@$#@%@#%$@%!#FaSDCARADASFVXT#Q%#$@!$!@$"
"#FWB THRYu%#$^u6uyj6#$Tga5%@4rFEtwGEQWEFZ"
"eQEQWvgrtuT(;p8O8olkTU8Uyhdasa213r63634e5",
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
# too long name
self.assertFalse(form_incorrect.is_valid())
form_incorrect = ProductForm(
{
'name': '',
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_incorrect = ProductForm(
{'name': 'name', 'category': -1, 'unit': self.liter.id},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_incorrect = ProductForm(
{'name': 'name', 'category': self.cat_second.id, 'unit': 100},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
self.cat_second.delete()
form_incorrect = ProductForm(
{
'name': 'Correct',
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
self.gram.delete()
form_incorrect = ProductForm(
{
'name': 'Correct',
'category': self.cat_first.id,
'unit': self.gram.id
},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
# test no caffe
with self.assertRaises(Exception):
ProductForm({
'name': 'Pr',
'category': self.cat_second.id,
'unit': self.liter.id
})
def test_product_same_name(self):
"""Check if product with same name is properly handled."""
Product.objects.create(
name='Correct',
category=self.cat_first_f,
unit=self.liter_f,
caffe=self.filtry
)
form_correct = ProductForm(
{
'name': 'Correct',
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
Product.objects.create(
name='Correct',
category=self.cat_second,
unit=self.liter,
caffe=self.caffe
)
form_incorrect = ProductForm(
{
'name': 'Correct',
'category': self.cat_second.id,
'unit': self.liter.id
},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
class FullProductFormTest(TestCase):
"""FullProductForm tests."""
def setUp(self):
"""Initialize data for further FullProductForm tests."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
first_cat = Category.objects.create(name="first", caffe=self.caffe)
second_cat = Category.objects.create(name="second", caffe=self.caffe)
gram = Unit.objects.create(name="gram", caffe=self.caffe)
liter = Unit.objects.create(name="liter", caffe=self.caffe)
Product.objects.create(
name="product1",
category=first_cat,
unit=gram,
caffe=self.caffe
)
Product.objects.create(
name="product2",
category=second_cat,
unit=liter,
caffe=self.caffe
)
def test_full_product(self):
"""Check validation and adding/deleting products."""
product1 = Product.objects.get(name="product1")
product2 = Product.objects.get(name="product2")
form_correct = FullProductForm(
{'product': product1.id, 'amount': 10},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
form_correct = FullProductForm(
{'product': product2.id, 'amount': 10000000},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
product2.delete()
form_incorrect = FullProductForm(
{'product': product2.id, 'amount': 10},
caffe=self.caffe
)
# should not pass with deleted product
self.assertFalse(form_incorrect.is_valid())
form_incorrect = FullProductForm(
{'product': '', 'amount': 10},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_incorrect = FullProductForm(
{'product': product1.id, 'amount': -10},
caffe=self.caffe
)
# amount should not be negative
self.assertFalse(form_incorrect.is_valid())
form_incorrect = FullProductForm(
{'no_such': 'field'},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
form_incorrect = FullProductForm(
{'product': 1231, 'amount': 100},
caffe=self.caffe
)
# non-existent product id
self.assertFalse(form_incorrect.is_valid())
# test no caffe
with self.assertRaises(Exception):
FullProductForm({'product': product1.id, 'amount': 100})
class ReportFormTest(TestCase):
"""Report tests."""
def setUp(self):
"""Initialize data for further Report tests."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.user = Employee.objects.create(
username='admin',
password='admin',
caffe=self.caffe
)
def test_report(self):
"""Check validation on ReportForm."""
# check no caffe
with self.assertRaises(Exception):
ReportForm({}, creator=self.user)
# check no employee
with self.assertRaises(Exception):
ReportForm({}, caffe=self.caffe)
# validate okay
form = ReportForm({}, caffe=self.caffe, creator=self.user)
self.assertTrue(form.is_valid())
| |
#!/usr/bin/python -u
# Debug
# 1 - print interesting stuff
# 2 - print counters as written to stats file
# 4 - print lines as read from log
# 8 - cat vs tail logfile and exit when done, sleeping 0.01 secs
# between samples
# source /opt/stack/venvs/openstack/bin/activate
# the api log is an apache log and only contains countable records, BUT...
# Ceilometer log record counting must contain (after making sure DEBUG record)
# one of the following patterns:
# agent-notification: pipeline
# alarm-notifier: alarm.service
# alarm-evaluator: alarm.service
# collector: dispatcher.database (plus details on type)
import os
import re
import sys
import shlex
import signal
import socket
import subprocess
import syslog
import time
from optparse import OptionParser, OptionGroup
counters = {}
lastsize = 0
counted = False
taildone = False
parser = OptionParser()
parser.add_option('-d', dest='debug', default='0')
parser.add_option('-D', dest='daemon', help='run as daemon', action='store_true')
parser.add_option('-t', dest='type', default='')
try:
(options, args) = parser.parse_args(sys.argv[1:])
except:
print 'invalid option'
sys.exit(1)
debug = int(options.debug)
type = options.type
if type == '':
print 'required: -t'
sys.exit(1)
logdir = '/var/log/ceilometer'
if type == 'agent-notification':
match = 'pipeline'
logtype = 1
logname = '/var/log/ceilometer/ceilometer-agent-notification.log'
extract = 0
elif type == 'alarm-notifier':
match = 'alarm.service'
logtype = 1
logname = '/var/log/ceilometer/ceilometer-alarm-notifier.log'
extract = 0
elif type == 'alarm-evaluator':
match = 'alarm.service'
logtype = 1
logname = '/var/log/ceilometer/ceilometer-alarm-evaluator.log'
extract = 0
elif type == 'collector':
match = 'dispatcher'
logtype = 1
logname = '/var/log/ceilometer/ceilometer-collector.log'
extract = 8
elif type == 'api':
match = ''
logtype = 2
logname = '/var/log/apache2/ceilometer_access.log'
extract = 5
else:
print "Unknownm type:", type
sys.exit(1)
if debug & 1:
print "LogFile: %s Match: %s" % (logname, match)
# stolen from swift-statstee
def logmsg(severity, text):
timestamp = time.strftime("%Y%m", time.gmtime())
logfile = '%s/%s-%s-ceiltail.log' % \
(logdir, timestamp,
socket.gethostname().split('.')[0])
msg = '%s %s %s' % \
(time.strftime("%Y%m%d-%H:%M:%S", time.gmtime()), severity, text)
if debug or re.match('[EF]', severity) and not options.daemon:
print text
try:
log = open(logfile, 'a+')
log.write('%s\n' % msg)
log.close()
except:
print "Couldn't open", logfile
syslog.syslog("couldn't open %s for appending" % logfile)
sys.exit()
if re.match('E|F', severity):
syslog.syslog(text)
if severity == 'F':
sys.exit()
def error(text):
print text
sys.exit(1)
def run_as_daemon():
if debug != 0:
error("No debugging in daemon mode")
myname = os.path.basename(__file__)
runlog = '/var/run/ceiltail-%s.pid' % type
if os.path.exists(runlog):
f = open(runlog, 'r')
pid = f.read()[:-1]
f.close()
# cmdline contains the command that started us BUT spaces have been
# changed to nulls. Since the command should contain -t type,
# we can just see if our command line contains that string
proc_path = '/proc/%s/cmdline' % pid
if os.path.exists(proc_path):
f = open('/proc/%s/cmdline' % pid)
pname = f.read()[:-1]
f.close()
if re.search('%s' % type, pname):
error("a daemonized %s already running" % myname)
# there seem to be differing opinions on whether or
# not to disable I/O right before we fork/exit, but it
# certainly can't hurt. I also discovered I need to use
# dup2(), as 3 opens cause hangs over ssh?!? no explanation
# was ever found
sys.stdin = open('/dev/null', 'r+')
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
# fork a new copy and exit the parent
pid = os.fork()
if pid > 0:
sys.exit()
# decouple from the parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# and disable all I/O
sys.stdin = open('/dev/null', 'r+')
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
# finally write our new PID to the run file
f = open(runlog, 'w')
f.write('%s\n' % os.getpid())
f.close()
def alarm(signum, frame):
global counted, lastsize, taildone
size = os.path.getsize(logname)
if size < lastsize:
taildone = True
logmsg('W', 'Log rolled: %s' % logname)
os.kill(tail.pid, signal.SIGTERM)
lastsize = size
if counted:
if debug & 2:
print "Counters:", counters
statslog.seek(0)
statslog.write('%s\n' % counters)
statslog.flush()
counted = False
if options.daemon:
if debug & 1:
print "Starting daemon..."
run_as_daemon()
logmsg('I', "ceiltail beginning execution: %s" % type)
statsfile = '/var/log/ceilometer/stats-%s' % type
# NOTE - with this mechanism, if the file rolls and its size doesn't change
# (most unlikely), we won't know it.
devnull = open(os.devnull, 'w')
command = 'tail -n1 -f %s' % logname
if debug & 8:
command = 'cat %s' % logname
if os.path.exists(statsfile) and os.path.getsize(statsfile) > 0:
statslog = open(statsfile, 'r+')
line = statslog.readline()
line = re.sub('{|}', '', line)
for stat in re.split(', ', line):
name, value = stat.split(':')
name = re.sub("'", '', name)
counters[name] = int(value)
else:
statslog = open(statsfile, 'w')
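# (Illustrative note, sample values are hypothetical.) The stats file holds a
# single repr()-style dict line written by alarm(), e.g. {'count': 42} or
# {'image': 3, 'cpu_util': 17}; the parsing above strips the braces and quotes
# to rebuild the counters dict from it.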
secs = interval = 1
signal.signal(signal.SIGALRM, alarm)
signal.setitimer(signal.ITIMER_REAL, secs, interval)
while 1:
if debug & 1:
print "Command:", command
tail = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=devnull)
taildone = False
while not taildone:
line = tail.stdout.readline()[:-1]
# ignore blank records unless we're doing a cat in which case
# we just want to quit
if line == '':
if debug & 8:
sys.exit()
continue
fields = line.split(' ')
# ceilometer logs contain a lot we don't care about
if logtype == 1:
try:
if fields[3] != 'DEBUG' or fields[3] == 'ERROR':
continue
except:
logmsg('E', 'Malformed Record [%s]: >%s<' % (type, line))
continue
if not re.search(match, fields[4]):
continue
if debug & 4:
print line
# if a field to extract, do so we can count it by name,
# otherwise use generic name 'count'
if extract:
ltype = fields[extract]
ltype = re.sub('"', '', ltype)
else:
ltype = 'count'
if debug & 8:
time.sleep(0.01)
ltype = ltype.translate(None, ":")  # translate returns a new string, so reassign
counted = True
if ltype not in counters:
counters[ltype] = 0
counters[ltype] += 1
| |
# engine/default.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import re
import random
from . import reflection, interfaces, result
from ..sql import compiler, expression, schema
from .. import types as sqltypes
from .. import exc, util, pool, processors
import codecs
import weakref
from .. import event
AUTOCOMMIT_REGEXP = re.compile(
r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)',
re.I | re.UNICODE)
# When we're handed literal SQL, ensure it's a SELECT query
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
supports_right_nested_joins = True
supports_native_enum = False
supports_native_boolean = False
supports_simple_order_by_label = True
engine_config_types = util.immutabledict([
('convert_unicode', util.bool_or_str('force')),
('pool_timeout', util.asint),
('echo', util.bool_or_str('debug')),
('echo_pool', util.bool_or_str('debug')),
('pool_recycle', util.asint),
('pool_size', util.asint),
('max_overflow', util.asint),
('pool_threadlocal', util.asbool),
])
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = 'use_encoding'
name = 'default'
# length at which to truncate
# any identifier.
max_identifier_length = 9999
# length at which to truncate
# the name of an index.
# Usually None to indicate
# 'use max_identifier_length'.
# thanks to MySQL, sigh
max_index_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
dbapi_type_map = {}
colspecs = {}
default_paramstyle = 'named'
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
supports_server_side_cursors = False
server_version_info = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the PostgreSQL dialect,
the :class:`.Index` construct will now accept the keyword arguments
``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
dbapi_exception_translation_map = util.immutabledict()
"""mapping used in the extremely unusual case that a DBAPI's
published exceptions don't actually have the __name__ that they
are linked towards.
.. versionadded:: 1.0.5
"""
def __init__(self, convert_unicode=False,
encoding='utf-8', paramstyle=None, dbapi=None,
implicit_returning=None,
supports_right_nested_joins=None,
case_sensitive=True,
supports_native_boolean=None,
label_length=None, **kwargs):
if not getattr(self, 'ported_sqla_06', True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format" %
self.name)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_right_nested_joins is not None:
self.supports_right_nested_joins = supports_right_nested_joins
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
if label_length and label_length > self.max_identifier_length:
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d" %
(label_length, self.max_identifier_length))
self.label_length = label_length
if self.description_encoding == 'use_encoding':
self._description_decoder = \
processors.to_unicode_processor_factory(
encoding
)
elif self.description_encoding is not None:
self._description_decoder = \
processors.to_unicode_processor_factory(
self.description_encoding
)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@classmethod
def get_pool_class(cls, url):
return getattr(cls, 'poolclass', pool.QueuePool)
def initialize(self, connection):
try:
self.server_version_info = \
self._get_server_version_info(connection)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = \
self._get_default_schema_name(connection)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = \
self.get_isolation_level(connection.connection)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if self.description_encoding is not None and \
self._check_unicode_description(connection):
self._description_decoder = self.description_encoding = None
self.do_rollback(connection.connection)
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
This is used to set dialect-wide per-connection options such as
isolation modes, unicode modes, etc.
If a callable is returned, it will be assembled into a pool listener
that receives the direct DBAPI connection, with all wrappers removed.
If None is returned, no listener will be generated.
"""
return None
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(
expression.select([test]).compile(dialect=self))
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn("Exception attempting to "
"detect unicode returns: %r" % de)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60)
),
# detect if there's an NVARCHAR type with different behavior
# available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60)
),
]
if additional_tests:
tests += additional_tests
results = set([check_unicode(test) for test in tests])
if results.issuperset([True, False]):
return "conditional"
else:
return results == set([True])
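# (Descriptive note, not from the original source.) The value computed above
# feeds dialect.returns_unicode_strings in initialize(): True, False, or the
# string "conditional" when the VARCHAR and Unicode probes disagree.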
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded,
# until pypy2.1beta2 with sqlite, so let's just check it -
# it's likely others will start doing this too in Py2k.
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select([
expression.literal_column("'x'").label("some_label")
]).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`.types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def reflecttable(
self, connection, table, include_columns, exclude_columns, **opts):
insp = reflection.Inspector.from_engine(connection)
return insp.reflecttable(
table, include_columns, exclude_columns, **opts)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
"""Compatibility method, adapts the result of get_primary_keys()
for those dialects which don't implement get_pk_constraint().
"""
return {
'constrained_columns':
self.get_primary_keys(conn, table_name,
schema=schema, **kw)
}
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters" %
(ident, self.max_identifier_length)
)
def connect(self, *cargs, **cparams):
return self.dbapi.connect(*cargs, **cparams)
def create_connect_args(self, url):
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
def set_engine_execution_options(self, engine, opts):
if 'isolation_level' in opts:
isolation_level = opts['isolation_level']
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
if 'schema_translate_map' in opts:
getter = schema._schema_getter(opts['schema_translate_map'])
engine.schema_for_object = getter
@event.listens_for(engine, "engine_connect")
def set_schema_translate_map(connection, branch):
connection.schema_for_object = getter
def set_connection_execution_options(self, connection, opts):
if 'isolation_level' in opts:
self._set_connection_isolation(connection, opts['isolation_level'])
if 'schema_translate_map' in opts:
getter = schema._schema_getter(opts['schema_translate_map'])
connection.schema_for_object = getter
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until "
"next transaction")
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.\
finalize_callback.append(self.reset_isolation_level)
def do_begin(self, dbapi_connection):
pass
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def create_xid(self):
"""Create a random two-phase transaction ID.
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
connection.execute(expression.RollbackToSavepointClause(name))
def do_release_savepoint(self, connection, name):
connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters)
def do_execute_no_params(self, cursor, statement, context=None):
cursor.execute(statement)
def is_disconnect(self, e, connection, cursor):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
class StrCompileDialect(DefaultDialect):
statement_compiler = compiler.StrSQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.StrSQLTypeCompiler
preparer = compiler.IdentifierPreparer
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = False
implicit_returning = False
supports_native_boolean = True
supports_simple_order_by_label = True
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
executemany = False
compiled = None
statement = None
result_column_struct = None
returned_defaults = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
if not dialect.supports_unicode_statements:
self.unicode_statement = util.text_type(compiled)
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(cls, dialect, connection, dbapi_connection,
compiled, parameters):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options)
self.result_column_struct = (
compiled._result_columns, compiled._ordered_columns,
compiled._textual_ordered_columns)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = \
[compiled.construct_params(m, _group_number=grp) for
grp, m in enumerate(parameters)]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if dialect.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in self.compiled.positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
@classmethod
def _init_statement(cls, dialect, connection, dbapi_connection,
statement, parameters):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
dict((dialect._encoder(k)[0], d[k]) for k in d)
for d in parameters
] or [{}]
else:
self.parameters = [dialect.execute_sequence_format(p)
for p in parameters]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and \
isinstance(statement, util.text_type):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def engine(self):
return self.root_connection.engine
@util.memoized_property
def postfetch_cols(self):
return self.compiled.postfetch
@util.memoized_property
def prefetch_cols(self):
if self.isinsert:
return self.compiled.insert_prefetch
elif self.isupdate:
return self.compiled.update_prefetch
else:
return ()
@util.memoized_property
def returning_cols(self):
return self.compiled.returning
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get('autocommit',
not self.compiled and
self.statement and
expression.PARSE_AUTOCOMMIT
or False)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if isinstance(stmt, util.text_type) and \
not self.dialect.supports_unicode_statements:
stmt = self.dialect._encoder(stmt)[0]
if self.dialect.positional:
default_params = self.dialect.execute_sequence_format()
else:
default_params = {}
conn._cursor_execute(self.cursor, stmt, default_params, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect,
self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
use_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement,
expression.Selectable)
or
(
(not self.compiled or
isinstance(self.compiled.statement,
expression.TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(
self.statement))
)
)
else:
use_server_side = \
self.execution_options.get('stream_results', False)
return use_server_side
def create_cursor(self):
if self._use_server_side_cursor():
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self._dbapi_connection.cursor()
def create_server_side_cursor(self):
raise NotImplementedError()
def pre_exec(self):
pass
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions,
issuing a new SELECT on the cursor (or a new one),
or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects
which support "implicit" primary key generation,
keep preexecute_autoincrement_sequences set to False,
and when no explicit id value was bound to the
statement.
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
value is used in place of calling get_lastrowid().
Note that this method is *not* equivalent to the
``lastrowid`` method on ``ResultProxy``, which is a
direct proxy to the DBAPI ``lastrowid`` accessor
in all cases.
"""
return self.cursor.lastrowid
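# Hedged sketch (names are illustrative) of how a dialect can bypass this call:
#     class MyExecutionContext(DefaultExecutionContext):
#         def post_exec(self):
#             self._lastrowid = self._fetch_generated_id()  # hypothetical helper
# When post_exec() assigns self._lastrowid, get_lastrowid() is not consulted,
# as described in the docstring above.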
def handle_dbapi_exception(self, e):
pass
def get_result_proxy(self):
if self._is_server_side:
return result.BufferedRowResultProxy(self)
else:
return result.ResultProxy(self)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def _setup_crud_result_proxy(self):
if self.isinsert and \
not self.executemany:
if not self._is_implicit_returning and \
not self.compiled.inline and \
self.dialect.postfetch_lastrowid:
self._setup_ins_pk_from_lastrowid()
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
result = self.get_result_proxy()
if self.isinsert:
if self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
result._soft_close(_autoclose_connection=False)
result._metadata = None
elif not self._is_explicit_returning:
result._soft_close(_autoclose_connection=False)
result._metadata = None
elif self.isupdate and self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
result._soft_close(_autoclose_connection=False)
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires open cursor on some drivers
# such as kintersbasdb, mxodbc)
result.rowcount
result._soft_close(_autoclose_connection=False)
return result
def _setup_ins_pk_from_lastrowid(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
lastrowid = self.get_lastrowid()
if lastrowid is not None:
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid if c is autoinc_col else
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
else:
# don't have a usable lastrowid, so
# do the same as _setup_ins_pk_from_empty
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_empty(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_implicit_returning(self, row):
if row is None:
self.inserted_primary_key = None
return
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
row[col] if value is None else value
for col, value in [
(col, compiled_params.get(key_getter(col), None))
for col in table.primary_key
]
]
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and \
bool(self.compiled.postfetch)
def set_input_sizes(self, translate=None, exclude_types=None):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
This method is only called by those dialects which require it,
currently cx_oracle.
"""
if not hasattr(self.compiled, 'bind_names'):
return
types = dict(
(self.compiled.bind_names[bindparam], bindparam.type)
for bindparam in self.compiled.bind_names)
if self.dialect.positional:
inputsizes = []
for key in self.compiled.positiontup:
typeengine = types[key]
dbtype = typeengine.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if dbtype is not None and \
(not exclude_types or dbtype not in exclude_types):
inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self)
else:
inputsizes = {}
for key in self.compiled.bind_names.values():
typeengine = types[key]
dbtype = typeengine.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if dbtype is not None and \
(not exclude_types or dbtype not in exclude_types):
if translate:
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self)
def _exec_default(self, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# pulled into _exec_scalar()
conn = self.connection
c = expression.select([default.arg]).compile(bind=conn)
return conn._execute_compiled(c, (), {}).scalar()
else:
return default.arg
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column.onupdate, column.type)
def _process_executemany_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
scalar_defaults = {}
insert_prefetch = self.compiled.insert_prefetch
update_prefetch = self.compiled.update_prefetch
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in insert_prefetch:
if c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
for c in update_prefetch:
if c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in insert_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_insert_default(c)
if val is not None:
param[key_getter(c)] = val
for c in update_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
def _process_executesingle_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
self.current_parameters = compiled_parameters = \
self.compiled_parameters[0]
for c in self.compiled.insert_prefetch:
if c.default and \
not c.default.is_sequence and c.default.is_scalar:
val = c.default.arg
else:
val = self.get_insert_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
for c in self.compiled.update_prefetch:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
| |
#!/usr/bin/env python
#
# Copyright (c) 2016 Alexander Lokhman <alex.lokhman@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, with_statement
import itertools
from pydbal.platforms import BasePlatform
from pydbal.types import BaseType
class MySQLPlatform(BasePlatform):
_KEYWORDS = (
"ADD", "ALL", "ALTER", "ANALYZE", "AND", "AS", "ASC", "ASENSITIVE", "BEFORE", "BETWEEN", "BIGINT", "BINARY",
"BLOB", "BOTH", "BY", "CALL", "CASCADE", "CASE", "CHANGE", "CHAR", "CHARACTER", "CHECK", "COLLATE", "COLUMN",
"CONDITION", "CONNECTION", "CONSTRAINT", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT_DATE",
"CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "DATABASE", "DATABASES", "DAY_HOUR",
"DAY_MICROSECOND", "DAY_MINUTE", "DAY_SECOND", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELAYED", "DELETE",
"DESC", "DESCRIBE", "DETERMINISTIC", "DISTINCT", "DISTINCTROW", "DIV", "DOUBLE", "DROP", "DUAL", "EACH", "ELSE",
"ELSEIF", "ENCLOSED", "ESCAPED", "EXISTS", "EXIT", "EXPLAIN", "FALSE", "FETCH", "FLOAT", "FLOAT4", "FLOAT8",
"FOR", "FORCE", "FOREIGN", "FROM", "FULLTEXT", "GOTO", "GRANT", "GROUP", "HAVING", "HIGH_PRIORITY",
"HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "IF", "IGNORE", "IN", "INDEX", "INFILE", "INNER", "INOUT",
"INSENSITIVE", "INSERT", "INT", "INT1", "INT2", "INT3", "INT4", "INT8", "INTEGER", "INTERVAL", "INTO", "IS",
"ITERATE", "JOIN", "KEY", "KEYS", "KILL", "LABEL", "LEADING", "LEAVE", "LEFT", "LIKE", "LIMIT", "LINES", "LOAD",
"LOCALTIME", "LOCALTIMESTAMP", "LOCK", "LONG", "LONGBLOB", "LONGTEXT", "LOOP", "LOW_PRIORITY", "MATCH",
"MEDIUMBLOB", "MEDIUMINT", "MEDIUMTEXT", "MIDDLEINT", "MINUTE_MICROSECOND", "MINUTE_SECOND", "MOD", "MODIFIES",
"NATURAL", "NOT", "NO_WRITE_TO_BINLOG", "NULL", "NUMERIC", "ON", "OPTIMIZE", "OPTION", "OPTIONALLY", "OR",
"ORDER", "OUT", "OUTER", "OUTFILE", "PRECISION", "PRIMARY", "PROCEDURE", "PURGE", "RAID0", "RANGE", "READ",
"READS", "REAL", "REFERENCES", "REGEXP", "RELEASE", "RENAME", "REPEAT", "REPLACE", "REQUIRE", "RESTRICT",
"RETURN", "REVOKE", "RIGHT", "RLIKE", "SCHEMA", "SCHEMAS", "SECOND_MICROSECOND", "SELECT", "SENSITIVE",
"SEPARATOR", "SET", "SHOW", "SMALLINT", "SONAME", "SPATIAL", "SPECIFIC", "SQL", "SQLEXCEPTION", "SQLSTATE",
"SQLWARNING", "SQL_BIG_RESULT", "SQL_CALC_FOUND_ROWS", "SQL_SMALL_RESULT", "SSL", "STARTING", "STRAIGHT_JOIN",
"TABLE", "TERMINATED", "THEN", "TINYBLOB", "TINYINT", "TINYTEXT", "TO", "TRAILING", "TRIGGER", "TRUE", "UNDO",
"UNION", "UNIQUE", "UNLOCK", "UNSIGNED", "UPDATE", "USAGE", "USE", "USING", "UTC_DATE", "UTC_TIME",
"UTC_TIMESTAMP", "VALUES", "VARBINARY", "VARCHAR", "VARCHARACTER", "VARYING", "WHEN", "WHERE", "WHILE", "WITH",
"WRITE", "X509", "XOR", "YEAR_MONTH", "ZEROFILL"
)
_KEYWORDS57 = (
"ACCESSIBLE", "ADD", "ALL", "ALTER", "ANALYZE", "AND", "AS", "ASC", "ASENSITIVE", "BEFORE", "BETWEEN", "BIGINT",
"BINARY", "BLOB", "BOTH", "BY", "CALL", "CASCADE", "CASE", "CHANGE", "CHAR", "CHARACTER", "CHECK", "COLLATE",
"COLUMN", "CONDITION", "CONSTRAINT", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT_DATE", "CURRENT_TIME",
"CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "DATABASE", "DATABASES", "DAY_HOUR", "DAY_MICROSECOND",
"DAY_MINUTE", "DAY_SECOND", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELAYED", "DELETE", "DESC", "DESCRIBE",
"DETERMINISTIC", "DISTINCT", "DISTINCTROW", "DIV", "DOUBLE", "DROP", "DUAL", "EACH", "ELSE", "ELSEIF",
"ENCLOSED", "ESCAPED", "EXISTS", "EXIT", "EXPLAIN", "FALSE", "FETCH", "FLOAT", "FLOAT4", "FLOAT8", "FOR",
"FORCE", "FOREIGN", "FROM", "FULLTEXT", "GET", "GRANT", "GROUP", "HAVING", "HIGH_PRIORITY", "HOUR_MICROSECOND",
"HOUR_MINUTE", "HOUR_SECOND", "IF", "IGNORE", "IN", "INDEX", "INFILE", "INNER", "INOUT", "INSENSITIVE",
"INSERT", "INT", "INT1", "INT2", "INT3", "INT4", "INT8", "INTEGER", "INTERVAL", "INTO", "IO_AFTER_GTIDS",
"IO_BEFORE_GTIDS", "IS", "ITERATE", "JOIN", "KEY", "KEYS", "KILL", "LEADING", "LEAVE", "LEFT", "LIKE", "LIMIT",
"LINEAR", "LINES", "LOAD", "LOCALTIME", "LOCALTIMESTAMP", "LOCK", "LONG", "LONGBLOB", "LONGTEXT", "LOOP",
"LOW_PRIORITY", "MASTER_BIND", "MASTER_SSL_VERIFY_SERVER_CERT", "MATCH", "MAXVALUE", "MEDIUMBLOB", "MEDIUMINT",
"MEDIUMTEXT", "MIDDLEINT", "MINUTE_MICROSECOND", "MINUTE_SECOND", "MOD", "MODIFIES", "NATURAL",
"NO_WRITE_TO_BINLOG", "NONBLOCKING", "NOT", "NULL", "NUMERIC", "ON", "OPTIMIZE", "OPTION", "OPTIONALLY", "OR",
"ORDER", "OUT", "OUTER", "OUTFILE", "PARTITION", "PRECISION", "PRIMARY", "PROCEDURE", "PURGE", "RANGE", "READ",
"READ_WRITE", "READS", "REAL", "REFERENCES", "REGEXP", "RELEASE", "RENAME", "REPEAT", "REPLACE", "REQUIRE",
"RESIGNAL", "RESTRICT", "RETURN", "REVOKE", "RIGHT", "RLIKE", "SCHEMA", "SCHEMAS", "SECOND_MICROSECOND",
"SELECT", "SENSITIVE", "SEPARATOR", "SET", "SHOW", "SIGNAL", "SMALLINT", "SPATIAL", "SPECIFIC", "SQL",
"SQL_BIG_RESULT", "SQL_CALC_FOUND_ROWS", "SQL_SMALL_RESULT", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SSL",
"STARTING", "STRAIGHT_JOIN", "TABLE", "TERMINATED", "THEN", "TINYBLOB", "TINYINT", "TINYTEXT", "TO", "TRAILING",
"TRIGGER", "TRUE", "UNDO", "UNION", "UNIQUE", "UNLOCK", "UNSIGNED", "UPDATE", "USAGE", "USE", "USING",
"UTC_DATE", "UTC_TIME", "UTC_TIMESTAMP", "VALUES", "VARBINARY", "VARCHAR", "VARCHARACTER", "VARYING", "WHEN",
"WHERE", "WHILE", "WITH", "WRITE", "XOR", "YEAR_MONTH", "ZEROFILL"
)
_TYPE_MAPPINGS = {
"tinyint": BaseType.BOOLEAN,
"smallint": BaseType.SMALLINT,
"mediumint": BaseType.INTEGER,
"int": BaseType.INTEGER,
"integer": BaseType.INTEGER,
"bigint": BaseType.BIGINT,
"tinytext": BaseType.TEXT,
"mediumtext": BaseType.TEXT,
"longtext": BaseType.TEXT,
"text": BaseType.TEXT,
"varchar": BaseType.STRING,
"string": BaseType.STRING,
"char": BaseType.STRING,
"date": BaseType.DATE,
"datetime": BaseType.DATETIME,
"timestamp": BaseType.DATETIME,
"time": BaseType.TIME,
"float": BaseType.FLOAT,
"double": BaseType.FLOAT,
"real": BaseType.FLOAT,
"decimal": BaseType.DECIMAL,
"numeric": BaseType.DECIMAL,
"year": BaseType.DATE,
"longblob": BaseType.BLOB,
"blob": BaseType.BLOB,
"mediumblob": BaseType.BLOB,
"tinyblob": BaseType.BLOB,
"binary": BaseType.BINARY,
"varbinary": BaseType.BINARY,
"set": BaseType.ARRAY
}
LENGTH_LIMIT_TINYTEXT = 255
LENGTH_LIMIT_TEXT = 65535
LENGTH_LIMIT_MEDIUMTEXT = 16777215
LENGTH_LIMIT_TINYBLOB = 255
LENGTH_LIMIT_BLOB = 65535
LENGTH_LIMIT_MEDIUMBLOB = 16777215
def _get_keywords(self):
if self._driver.get_server_version() >= (5, 7):
return MySQLPlatform._KEYWORDS57
return MySQLPlatform._KEYWORDS
def _get_type_mappings(self):
return MySQLPlatform._TYPE_MAPPINGS
def get_identifier_quote_character(self):
return "`"
def _modify_limit_sql(self, sql, limit, offset):
if limit is not None:
sql += " LIMIT " + str(limit)
if offset is not None:
sql += " OFFSET " + str(offset)
elif offset is not None:
sql += " LIMIT 18446744073709551615 OFFSET " + str(offset)
return sql
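# Worked examples (illustrative) for sql = "SELECT * FROM t":
#   limit=10, offset=None -> "SELECT * FROM t LIMIT 10"
#   limit=10, offset=5    -> "SELECT * FROM t LIMIT 10 OFFSET 5"
#   limit=None, offset=5  -> "SELECT * FROM t LIMIT 18446744073709551615 OFFSET 5"
# MySQL has no OFFSET without LIMIT, hence the huge sentinel limit value.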
def set_transaction_isolation(self, level):
self._driver.execute_and_clear(
"SET SESSION TRANSACTION ISOLATION LEVEL " + self._get_transaction_isolation_sql(level))
def get_databases(self):
for row in self._fetch("SHOW DATABASES"):
yield row[0][1] # {"Database": ...}
def get_views(self, database=None):
database = "DATABASE()" if database is None else "'" + database + "'"
sql = "SELECT TABLE_NAME, VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = " + database + ""
for row in self._fetch(sql): # [{"TABLE_NAME": ..., "VIEW_DEFINITION": ...}, ...]
yield row[0][1], row[1][1]
def get_tables(self, database=None):
for row in self._fetch("SHOW FULL TABLES WHERE Table_type = 'BASE TABLE'"):
yield row[0][1]
def get_table_columns(self, table, database=None):
database = "DATABASE()" if database is None else "'" + database + "'"
sql = "SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_DEFAULT, EXTRA, COLUMN_COMMENT, COLLATION_NAME " \
"FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = " + database + " AND TABLE_NAME = '" + table + "'"
for row in self._fetch(sql):
row = dict(row)
type_match = BasePlatform._re_table_column_type.match(row["COLUMN_TYPE"])
length = type_match.group("length")
options = {}
type_ = type_match.group("type")
if type_ in ("char", "binary"):
options["fixed"] = True
elif type_ in ("float", "double", "real", "numeric", "decimal") and length is not None:
decimal = (length + ",0").split(",")
options["precision"] = int(decimal[0])
options["scale"] = int(decimal[1])
length = None
elif type_ == "tinytext":
length = MySQLPlatform.LENGTH_LIMIT_TINYTEXT
elif type_ == "text":
length = MySQLPlatform.LENGTH_LIMIT_TEXT
elif type_ == "mediumtext":
length = MySQLPlatform.LENGTH_LIMIT_MEDIUMTEXT
elif type_ == "tinyblob":
length = MySQLPlatform.LENGTH_LIMIT_TINYBLOB
elif type_ == "blob":
length = MySQLPlatform.LENGTH_LIMIT_BLOB
elif type_ == "mediumblob":
length = MySQLPlatform.LENGTH_LIMIT_MEDIUMBLOB
elif type_ in ("tinyint", "smallint", "mediumint", "int", "integer", "bigint", "year"):
length = None
if length:
options["length"] = int(length)
if "unsigned" in row["COLUMN_TYPE"]:
options["unsigned"] = True
if row["IS_NULLABLE"] == "YES":
options["notnull"] = False
if row["COLUMN_DEFAULT"] is not None:
options["default"] = row["COLUMN_DEFAULT"]
if "auto_increment" in row["EXTRA"]:
options["autoincrement"] = True
if row["COLLATION_NAME"] is not None:
options["platform_options"] = {"collation": row["COLLATION_NAME"]}
type_ = self.get_type_mapping(type_)
if row["COLUMN_COMMENT"]:
comment, c_type = BasePlatform.get_type_from_comment(row["COLUMN_COMMENT"])
if comment:
options["comment"] = row["COLUMN_COMMENT"]
if c_type:
type_ = c_type
yield row["COLUMN_NAME"], type_, options
def get_table_indexes(self, table, database=None):
database = "DATABASE()" if database is None else "'" + database + "'"
sql = "SELECT INDEX_NAME, COLUMN_NAME, INDEX_TYPE, NON_UNIQUE FROM INFORMATION_SCHEMA.STATISTICS " \
"WHERE TABLE_SCHEMA = " + database + " AND TABLE_NAME = '" + table + "'"
indexes = []
for row in self._fetch(sql):
row = dict(row)
options = {}
if not row["NON_UNIQUE"]:
options["unique"] = True
if row["INDEX_NAME"] == "PRIMARY":
options["primary"] = True
if "FULLTEXT" in row["INDEX_TYPE"]:
options["flags"] = ("FULLTEXT", )
elif "SPATIAL" in row["INDEX_TYPE"]:
options["flags"] = ("SPATIAL", )
indexes.append((row["INDEX_NAME"], row["COLUMN_NAME"], options))
for group, generator in itertools.groupby(indexes, lambda x: (x[0], x[2])):
yield group[0], tuple(x[1] for x in generator), group[1]
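# Illustrative grouping (hypothetical rows): two STATISTICS rows
#   ("idx_name_email", "name", {"unique": True}) and
#   ("idx_name_email", "email", {"unique": True})
# are merged by the groupby above into a single yielded index:
#   ("idx_name_email", ("name", "email"), {"unique": True})
# This relies on the rows for one index arriving consecutively, since
# itertools.groupby only merges adjacent items.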
def get_table_foreign_keys(self, table, database=None):
database = "DATABASE()" if database is None else "'" + database + "'"
sql = "SELECT DISTINCT k.CONSTRAINT_NAME, k.COLUMN_NAME, k.REFERENCED_TABLE_NAME, k.REFERENCED_COLUMN_NAME " \
"/*!50116 , c.UPDATE_RULE, c.DELETE_RULE */ FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE k " \
"/*!50116 INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS c " \
"ON c.CONSTRAINT_NAME = k.CONSTRAINT_NAME AND c.TABLE_NAME = '" + table + "' */ " \
"WHERE k.TABLE_NAME = '" + table + "' AND k.TABLE_SCHEMA = " + database + " " \
"/*!50116 AND c.CONSTRAINT_SCHEMA = " + database + " */ AND k.REFERENCED_COLUMN_NAME IS NOT NULL"
foreign_keys = []
for row in self._fetch(sql):
row = dict(row)
options = {}
delete_rule = row.get("DELETE_RULE")
if delete_rule not in (None, "RESTRICT"):
options["on_delete"] = delete_rule
update_rule = row.get("UPDATE_RULE")
if update_rule not in (None, "RESTRICT"):
options["on_update"] = update_rule
foreign_keys.append((
row["CONSTRAINT_NAME"],
row["COLUMN_NAME"],
row["REFERENCED_TABLE_NAME"],
row["REFERENCED_COLUMN_NAME"],
options
))
for group, generator in itertools.groupby(foreign_keys, lambda x: (x[0], x[2], x[4])):
gen1, gen2 = itertools.tee(generator)
yield group[0], tuple(x[1] for x in gen1), group[1], tuple(x[3] for x in gen2), group[2]
| |
from tornado import netutil
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase, get_unused_port
from tornado.util import b
from tornado.web import RequestHandler, Application
import socket
import time
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello")
class TestIOStream(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/', HelloHandler)])
def make_iostream_pair(self, **kwargs):
port = get_unused_port()
[listener] = netutil.bind_sockets(port, '127.0.0.1',
family=socket.AF_INET)
streams = [None, None]
def accept_callback(connection, address):
streams[0] = IOStream(connection, io_loop=self.io_loop, **kwargs)
self.stop()
def connect_callback():
streams[1] = client_stream
self.stop()
netutil.add_accept_handler(listener, accept_callback,
io_loop=self.io_loop)
client_stream = IOStream(socket.socket(), io_loop=self.io_loop,
**kwargs)
client_stream.connect(('127.0.0.1', port),
callback=connect_callback)
self.wait(condition=lambda: all(streams))
self.io_loop.remove_handler(listener.fileno())
listener.close()
return streams
def test_read_zero_bytes(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
self.stream = IOStream(s, io_loop=self.io_loop)
self.stream.write(b("GET / HTTP/1.0\r\n\r\n"))
# normal read
self.stream.read_bytes(9, self.stop)
data = self.wait()
self.assertEqual(data, b("HTTP/1.0 "))
# zero bytes
self.stream.read_bytes(0, self.stop)
data = self.wait()
self.assertEqual(data, b(""))
# another normal read
self.stream.read_bytes(3, self.stop)
data = self.wait()
self.assertEqual(data, b("200"))
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
server, client = self.make_iostream_pair()
server.write(b(''), callback=self.stop)
self.wait()
# As a side effect, the stream is now listening for connection
# close (if it wasn't already), but is not listening for writes
self.assertEqual(server._state, IOLoop.READ|IOLoop.ERROR)
def test_connection_refused(self):
# When a connection is refused, the connect callback should not
# be run. (The kqueue IOLoop used to behave differently from the
# epoll IOLoop in this respect)
port = get_unused_port()
stream = IOStream(socket.socket(), self.io_loop)
self.connect_called = False
def connect_callback():
self.connect_called = True
stream.set_close_callback(self.stop)
stream.connect(("localhost", port), connect_callback)
self.wait()
self.assertFalse(self.connect_called)
def test_connection_closed(self):
# When a server sends a response and then closes the connection,
# the client must be allowed to read the data before the IOStream
# closes itself. Epoll reports closed connections with a separate
# EPOLLRDHUP event delivered at the same time as the read event,
# while kqueue reports them as a second read/write event with an EOF
# flag.
response = self.fetch("/", headers={"Connection": "close"})
response.rethrow()
def test_read_until_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
stream = IOStream(s, io_loop=self.io_loop)
stream.write(b("GET / HTTP/1.0\r\n\r\n"))
stream.read_until_close(self.stop)
data = self.wait()
self.assertTrue(data.startswith(b("HTTP/1.0 200")))
self.assertTrue(data.endswith(b("Hello")))
def test_streaming_callback(self):
server, client = self.make_iostream_pair()
try:
chunks = []
final_called = []
def streaming_callback(data):
chunks.append(data)
self.stop()
def final_callback(data):
assert not data
final_called.append(True)
self.stop()
server.read_bytes(6, callback=final_callback,
streaming_callback=streaming_callback)
client.write(b("1234"))
self.wait(condition=lambda: chunks)
client.write(b("5678"))
self.wait(condition=lambda: final_called)
self.assertEqual(chunks, [b("1234"), b("56")])
# the rest of the last chunk is still in the buffer
server.read_bytes(2, callback=self.stop)
data = self.wait()
self.assertEqual(data, b("78"))
finally:
server.close()
client.close()
def test_streaming_until_close(self):
server, client = self.make_iostream_pair()
try:
chunks = []
def callback(data):
chunks.append(data)
self.stop()
client.read_until_close(callback=callback,
streaming_callback=callback)
server.write(b("1234"))
self.wait()
server.write(b("5678"))
self.wait()
server.close()
self.wait()
self.assertEqual(chunks, [b("1234"), b("5678"), b("")])
finally:
server.close()
client.close()
def test_delayed_close_callback(self):
# The scenario: Server closes the connection while there is a pending
# read that can be served out of buffered data. The client does not
# run the close_callback as soon as it detects the close, but rather
# defers it until after the buffered read has finished.
server, client = self.make_iostream_pair()
try:
client.set_close_callback(self.stop)
server.write(b("12"))
chunks = []
def callback1(data):
chunks.append(data)
client.read_bytes(1, callback2)
server.close()
def callback2(data):
chunks.append(data)
client.read_bytes(1, callback1)
self.wait() # stopped by close_callback
self.assertEqual(chunks, [b("1"), b("2")])
finally:
server.close()
client.close()
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the IOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the IOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
server, client = self.make_iostream_pair(read_chunk_size=256)
try:
server.write(b("A") * 512)
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b("A") * 256, data)
server.close()
# Allow the close to propagate to the client side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
self.io_loop.add_timeout(time.time() + 0.01, self.stop)
self.wait()
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b("A") * 256, data)
finally:
server.close()
client.close()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments, too-many-locals, too-many-instance-attributes
"""`SequentialModule` is a container module that chains a number of modules together."""
import logging
import copy
from ..initializer import Uniform
from .base_module import BaseModule
class SequentialModule(BaseModule):
"""A SequentialModule is a container module that can chain multiple modules together.
.. note::
Building a computation graph with this kind of imperative container is less
flexible and less efficient than the symbolic graph. So, this should be only used as a
handy utility.
"""
META_TAKE_LABELS = 'take_labels'
META_AUTO_WIRING = 'auto_wiring'
def __init__(self, logger=logging):
super(SequentialModule, self).__init__(logger=logger)
self._modules = []
self._metas = []
self._label_shapes = None
self._data_shapes = None
self._meta_keys = set([getattr(SequentialModule, x)
for x in dir(SequentialModule)
if x.startswith('META_')])
def add(self, module, **kwargs):
"""Add a module to the chain.
Parameters
----------
module : BaseModule
The new module to add.
kwargs : ``**keywords``
All the keyword arguments are saved as meta information
for the added module. The currently known meta includes
- `take_labels`: indicating whether the module expects to
take labels when doing computation. Note any module in
the chain can take labels (not necessarily only the top
most one), and they all take the same labels passed
from the original data batch for the `SequentialModule`.
Returns
-------
self
This function returns `self` to allow us to easily chain a
series of `add` calls.
Examples
--------
        >>> # An example of adding two modules to a chain.
>>> seq_mod = mx.mod.SequentialModule()
>>> seq_mod.add(mod1)
>>> seq_mod.add(mod2)
"""
self._modules.append(module)
# a sanity check to avoid typo
for key in kwargs:
assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key)
self._metas.append(kwargs)
# after adding new modules, we are reset back to raw states, needs
# to bind, init_params, etc.
self.binded = False
self.params_initialized = False
self.optimizer_initialized = False
return self # for easier chaining
@property
def data_names(self):
"""A list of names for data required by this module."""
if len(self._modules) > 0:
return self._modules[0].data_names
return []
@property
def output_names(self):
"""A list of names for the outputs of this module."""
if len(self._modules) > 0:
return self._modules[-1].output_names
return []
@property
def data_shapes(self):
"""Gets data shapes.
Returns
-------
list
            A list of `(name, shape)` pairs. The data shapes of the first module
            are the data shapes of the `SequentialModule`.
"""
assert self.binded
return self._modules[0].data_shapes
@property
def label_shapes(self):
"""Gets label shapes.
Returns
-------
list
A list of `(name, shape)` pairs. The return value could be `None` if
the module does not need labels, or if the module is not bound for
training (in this case, label information is not available).
"""
assert self.binded
return self._label_shapes
@property
def output_shapes(self):
"""Gets output shapes.
Returns
-------
list
            A list of `(name, shape)` pairs. The output shapes of the last
            module are the output shapes of the `SequentialModule`.
"""
assert self.binded
return self._modules[-1].output_shapes
def get_params(self):
"""Gets current parameters.
Returns
-------
(arg_params, aux_params)
A pair of dictionaries each mapping parameter names to NDArray values. This
is a merged dictionary of all the parameters in the modules.
"""
assert self.binded and self.params_initialized
arg_params = dict()
aux_params = dict()
for module in self._modules:
arg, aux = module.get_params()
arg_params.update(arg)
aux_params.update(aux)
return (arg_params, aux_params)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
            Whether to allow extra parameters that are not needed by the symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that are not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
for module in self._modules:
module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# make sure we do not have duplicated parameter names
def _check_name(known_names, new_names, modules, i):
"""Internal function to help checking duplicated names."""
for name in new_names:
assert not name in known_names, "Duplicated parameter names: " + \
('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \
('used in layer %d (%s).' % (known_names[name],
type(modules[known_names[name]])))
known_names[name] = i
arg_names = dict()
aux_names = dict()
for i_layer, module in enumerate(self._modules):
arg_params, aux_params = module.get_params()
_check_name(arg_names, arg_params.keys(), self._modules, i_layer)
_check_name(aux_names, aux_params.keys(), self._modules, i_layer)
self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. Currently shared module is not supported for `SequentialModule`.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
if inputs_need_grad:
assert for_training is True
assert shared_module is None, 'Shared module is not supported'
assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule'
self.binded = True
# the same label shapes are used for all chained modules
self._label_shapes = label_shapes
my_data_shapes = data_shapes
anybody_ever_needs_label = False
for i_layer, module in enumerate(self._modules):
meta = self._metas[i_layer]
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
my_label_shapes = label_shapes
anybody_ever_needs_label = True
else:
my_label_shapes = None
my_inputs_need_grad = bool(inputs_need_grad or
(for_training and i_layer > 0))
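            # auto wiring: rename the incoming data shapes to the names this
            # module expects, so the previous module's outputs line up with it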
if meta.get(SequentialModule.META_AUTO_WIRING, False):
data_names = module.data_names
assert len(data_names) == len(my_data_shapes)
my_data_shapes = [(new_name, shape) for (new_name, (_, shape))
in zip(data_names, my_data_shapes)]
module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes,
for_training=for_training, inputs_need_grad=my_inputs_need_grad,
force_rebind=force_rebind, shared_module=None, grad_req=grad_req)
# the output of the previous module is the data of the next module
my_data_shapes = module.output_shapes
if not anybody_ever_needs_label:
# then I do not need label either
self._label_shapes = None
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
for module in self._modules:
module.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params, force_init=force_init)
self.optimizer_initialized = True
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
            Default is ``None``, in which case `is_train` is taken as ``self.for_training``.
"""
assert self.binded and self.params_initialized
# make a shallow copy, just to maintain necessary properties (if any) like
# bucket_key, pad, etc.
data_batch = copy.copy(data_batch)
for i_layer, module in enumerate(self._modules):
module.forward(data_batch, is_train=is_train)
if i_layer+1 == len(self._modules):
                # the last layer, no need to do the following steps
break
data_batch.data = module.get_outputs()
if hasattr(data_batch, 'provide_data'):
# need to update this, in case the internal module is using bucketing
# or whatever
data_names = [x[0] for x in module.output_shapes]
assert len(data_names) == len(data_batch.data)
data_batch.provide_data = [(name, x.shape) for name, x in
zip(data_names, data_batch.data)]
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
for i_layer, module in reversed(list(zip(range(len(self._modules)), self._modules))):
module.backward(out_grads=out_grads)
if i_layer == 0:
break
out_grads = module.get_input_grads()
def update(self):
"""Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
for module in self._modules:
module.update()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
            Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like they came from a
            single executor.
Returns
-------
list of NDArray or list of list of NDArray
If `merge_multi_context` is ``True``, it is like ``[out1,
out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1,
            out2_dev2]]``. All the output elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
            Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like they came from a
            single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._modules[0].get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels, pre_sliced=False, label_pads=None):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
label_pads : pad size if `pre_sliced` parameter is set to `False`,
list of pad sizes otherwise. Typically `data_batch.pad` (default: None).
"""
assert self.binded and self.params_initialized
for meta, module in zip(self._metas, self._modules):
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
module.update_metric(eval_metric, labels, pre_sliced, label_pads)
def install_monitor(self, mon):
"""Installs monitor on all executors."""
assert self.binded
for module in self._modules:
module.install_monitor(mon)
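# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows the usual
# add -> bind -> init_params -> init_optimizer flow of a SequentialModule.
# The symbols, shapes and names below (fc1, softmax, the (32, 100) batch) are
# illustrative assumptions, and the snippet assumes mxnet is importable.
if __name__ == '__main__':
    import mxnet as mx
    # first sub-network: a fully connected layer fed by 'data'
    net1 = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=64, name='fc1')
    # second sub-network: a softmax head consuming the first module's output
    net2 = mx.sym.SoftmaxOutput(mx.sym.Variable('fc1_output'), name='softmax')
    mod1 = mx.mod.Module(net1, label_names=[])
    mod2 = mx.mod.Module(net2, data_names=['fc1_output'],
                         label_names=['softmax_label'])
    seq = SequentialModule()
    # take_labels: mod2 receives the label shapes at bind time;
    # auto_wiring: mod1's output names are rewired to mod2's data_names
    seq.add(mod1).add(mod2, take_labels=True, auto_wiring=True)
    seq.bind(data_shapes=[('data', (32, 100))],
             label_shapes=[('softmax_label', (32,))])
    seq.init_params()
    seq.init_optimizer()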
| |
#=========================================================================
# Sparse Memory Image
#=========================================================================
# This module contains a class for representing an ELF binary which can be
# loaded into a TestMemory object.
from __future__ import print_function
import subprocess
import tempfile
from pymtl import *
#-------------------------------------------------------------------------------
# Global Variables
#-------------------------------------------------------------------------------
# Command to compile using maven-gcc
compile_cmd = "maven-gcc -Wall -MMD -MP -nostartfiles "
compile_cmd += "{asm_file} -o {target} -T {linker_script} "
# Command to execute maven-objdump
objdump_cmd = "maven-objdump -DC --disassemble-zeroes --section=.text "
objdump_cmd += "--section=.data --section=.sdata --section=.xcpthandler "
objdump_cmd += "--section=.init --section=.fini --section=.ctors "
objdump_cmd += "--section=.dtors --section=.eh_frame --section=.jcr "
objdump_cmd += "--section=.sbss --section=.bss --section=.rodata "
objdump_cmd += "{filename} > {dump}"
# Test linker script
test_ld = """
OUTPUT_ARCH( "mips:maven" )
ENTRY( _test )
SECTIONS
{
. = 0x00000004;
.xcpthandler :
{
*(.xcpthandler)
}
. = 0x00000400;
.text :
{
*(.text)
}
.data :
{
*(.data)
}
_end = .;
}
"""
#-------------------------------------------------------------------------------
# Utility Functions
#-------------------------------------------------------------------------------
def execute( cmd ):
# Throws a CalledProcessError if the command is not available; don't catch,
# let this propagate up to the user!
subprocess.check_call( cmd, shell=True )
#-------------------------------------------------------------------------------
# Sparse Memory Image Class
#-------------------------------------------------------------------------------
class SparseMemoryImage:
#-----------------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------------
#
  # By default the constructor expects an assembly string to create a
  # SparseMemoryImage instance. We can also pass a labels list, a vmh
  # filename, a binary filename, or a binary file handle.
def __init__( s, asm_str = None, labels_list = None,
bin_filename = None, bin_filehandle = None, vmh_filename = None,
dump_asm = None, dump_bin = None ):
# sparse memory : list of list of lists
# a single entry in the class represents a label along with
# the bytes of memory stored in the label.
# [ label_addr, [list of bytes] ]
s.sparse_memory_img = [ ]
if labels_list is not None:
# assign the obtained list of list to the sparse memory image data
# structure
s.sparse_memory_img.extend( labels_list )
elif vmh_filename is not None:
vmh_fd = open(vmh_filename)
addr = None
current_list = None
for line in vmh_fd:
# Search for addr labels
if line[0] == '@':
hex_str, slash, label = line[1:].split()
addr = int(hex_str, 16)
current_list = []
#print( hex(addr) )
s.sparse_memory_img.append( [ addr, current_list ] )
# We have an addr label and the line is not blank, get the data
elif addr and line.strip():
hex_str = line.split()[0]
value = int(hex_str, 16)
current_list.append( value )
#print( s.sparse_memory_img )
elif bin_filename is not None:
# Create a temporary file for capturing objdump output and pass the
# filename to objdump command and execute the objdump command
dump = tempfile.NamedTemporaryFile( mode = 'w+' )
cmd = objdump_cmd.format( filename = bin_filename, dump = dump.name )
execute( cmd )
# move to the start of the temp file before parsing
dump.seek( 0 )
# Based on objdump2vmh.py
# fill the sparse memory labels
bytes_list = []
in_label = False
for line in dump:
split_line = line.split()
# start of a label
if( line.find( ">:\n" ) >= 0 ):
in_label = True
          # Determine if the virtual address is a halfword or fullword.
          # Only write out this line if it's word aligned; ignore every
          # other .half declaration.
vaddr = int( split_line[0], 16 )
if ( vaddr % 4 ) == 0:
label_addr = int( split_line[0], 16 )
# label_name = split_line[1][:-1]
elif in_label:
if line == "\n":
# end of a label
s.sparse_memory_img.append( [ label_addr, bytes_list ] )
bytes_list = []
in_label = False
else:
            # append bytes inside a label using a Bits object
inst_bits = Bits( 32, int( split_line[1], 16) )
            # deconstruct the 32-bit instruction into its constituent bytes
            # since we need to extend a list of bytes.
for i in range( 4 ):
bytes_list.append( inst_bits[i*8:i*8+8].uint() )
# we iterate through all the lines in the file and the for loop
# exits the loop when it reaches the EOF. The last label that was
# detected is not yet appended to the sparse memory image.
# Append the last label
s.sparse_memory_img.append( [ label_addr, bytes_list ] )
# close temporary file
dump.close()
elif bin_filehandle is not None:
# actions when a binary file handle is passed in
pass
elif asm_str is not None:
# actions when an assembly string is passed
# assembly test
asm_test = asm_str
# create a temporary assembly test file
asm_file = tempfile.NamedTemporaryFile( mode = 'w+t', suffix = '.s' )
asm_file.write( asm_test )
# need to move the file pointer to the start of the file for reading
# the file from start
asm_file.seek( 0 )
# temporary binary file
target = tempfile.NamedTemporaryFile( mode = 'w+b' )
# create a temporary linker script
ld_script = tempfile.NamedTemporaryFile( mode = 'w+t', suffix = '.ld' )
ld_script.write( test_ld )
ld_script.seek( 0 )
# compile the assembly test
cmd = compile_cmd.format( asm_file = asm_file.name,
target = target.name,
linker_script = ld_script.name )
execute( cmd )
# Create a temporary file for capturing objdump output and pass the
# filename to objdump command and execute the objdump command
dump = tempfile.NamedTemporaryFile( mode = 'w+' )
target.seek( 0 )
cmd = objdump_cmd.format( filename = target.name, dump = dump.name )
execute( cmd )
# move to the start of the temp file before parsing
dump.seek( 0 )
# Based on objdump2vmh.py
# fill the sparse memory labels
bytes_list = []
in_label = False
for line in dump:
split_line = line.split()
# start of a label
if( line.find( ">:\n" ) >= 0 ):
in_label = True
          # Determine if the virtual address is a halfword or fullword.
          # Only write out this line if it's word aligned; ignore every
          # other .half declaration.
vaddr = int( split_line[0], 16 )
if ( vaddr % 4 ) == 0:
label_addr = int( split_line[0], 16 )
# label_name = split_line[1][:-1]
elif in_label:
if line == "\n":
# end of a label
s.sparse_memory_img.append( [ label_addr, bytes_list ] )
bytes_list = []
in_label = False
else:
            # append bytes inside a label using a Bits object
inst_bits = Bits( 32, int( split_line[1], 16) )
            # deconstruct the 32-bit instruction into its constituent bytes
            # since we need to extend a list of bytes.
for i in range( 4 ):
bytes_list.append( inst_bits[i*8:i*8+8].uint() )
# we iterate through all the lines in the file and the for loop
# exits the loop when it reaches the EOF. The last label that was
# detected is not yet appended to the sparse memory image.
# Append the last label
s.sparse_memory_img.append( [ label_addr, bytes_list ] )
# Dump the assembly file created
if dump_asm is not None:
asm_dump_file = open( dump_asm, 'w' )
# seek to the start and dump
asm_file.seek( 0 )
asm_dump_file.write( asm_file.read() )
asm_dump_file.close()
# Dump the binary file
if dump_bin is not None:
cmd = compile_cmd.format( asm_file = asm_file.name,
target = dump_bin,
linker_script = ld_script.name )
execute( cmd )
# close temporary files
asm_file.close()
target.close()
ld_script.close()
dump.close()
#-----------------------------------------------------------------------------
# overload equal comparison
#-----------------------------------------------------------------------------
def __eq__( s, other ):
    assert isinstance( other, SparseMemoryImage )
return s.sparse_memory_img == other.sparse_memory_img
#-----------------------------------------------------------------------------
# load label method
#-----------------------------------------------------------------------------
def load_label( s, label_list ):
s.sparse_memory_img.append( label_list )
#-----------------------------------------------------------------------------
# read label method
#-----------------------------------------------------------------------------
def read_label( s, label_addr ):
return s.sparse_memory_img[ label_addr ]
#-----------------------------------------------------------------------------
# num labels method
#-----------------------------------------------------------------------------
def num_labels( s ):
return len( s.sparse_memory_img )
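#-------------------------------------------------------------------------------
# Hedged usage sketch
#-------------------------------------------------------------------------------
# Illustrative only, not part of the original file. The labels_list
# constructor path needs no maven toolchain, so it is the easiest way to
# build and inspect a SparseMemoryImage; the addresses and byte values below
# are made-up examples.
if __name__ == '__main__':
  labels = [
    [ 0x0400, [ 0x13, 0x00, 0x00, 0x00 ] ],  # one word at the .text base
    [ 0x1000, [ 0xef, 0xbe, 0xad, 0xde ] ],  # one word of data
  ]
  img_a = SparseMemoryImage( labels_list = labels )
  img_b = SparseMemoryImage( labels_list = list( labels ) )
  assert img_a == img_b             # __eq__ compares the sparse images
  assert img_a.num_labels() == 2
  print( img_a.read_label( 0 ) )    # read_label indexes by position, not address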
| |
import math
from sqf.common_expressions import TryCatchExpression, ForEachExpression, \
WhileDoExpression, ForFromToDoExpression, ForSpecDoExpression, SwitchDoExpression, \
IfThenSpecExpression, IfThenElseExpression, IfThenExpression, IfThenExitWithExpression
from sqf.types import Keyword, Namespace, Number, Array, Code, Type, Boolean, String, Nothing, Variable
from sqf.exceptions import SQFParserError
from sqf.keywords import OP_ARITHMETIC, OP_COMPARISON, OP_LOGICAL
from sqf.expressions import BinaryExpression, UnaryExpression
from sqf.interpreter_types import SwitchType
OP_OPERATIONS = {
# Arithmetic
Keyword('+'): lambda x, y: x + y,
Keyword('-'): lambda x, y: x - y,
Keyword('*'): lambda x, y: x * y,
Keyword('/'): lambda x, y: x / y,
Keyword('%'): lambda x, y: x % y,
Keyword('mod'): lambda x, y: x % y,
Keyword('^'): lambda x, y: x ** y,
Keyword('max'): lambda x, y: max(x, y),
Keyword('floor'): lambda x: math.floor(x),
# Comparison
Keyword('=='): lambda x, y: x == y,
Keyword('!='): lambda x, y: x != y,
Keyword('<'): lambda x, y: x < y,
    Keyword('>'): lambda x, y: x > y,
Keyword('<='): lambda x, y: x <= y,
Keyword('>='): lambda x, y: x >= y,
# Logical
Keyword('&&'): lambda x, y: x and y,
Keyword('and'): lambda x, y: x and y,
Keyword('||'): lambda x, y: x or y,
Keyword('or'): lambda x, y: x or y,
}
class ComparisonExpression(BinaryExpression):
def __init__(self, op, lhs_rhs_type):
assert(op in OP_COMPARISON)
assert (issubclass(lhs_rhs_type, Type))
super().__init__(lhs_rhs_type, op, lhs_rhs_type, Boolean, self._action)
def _action(self, lhs, rhs, _):
return OP_OPERATIONS[self.keyword](lhs.value, rhs.value)
class ArithmeticExpression(BinaryExpression):
def __init__(self, op):
assert (op in OP_ARITHMETIC)
super().__init__(Number, op, Number, Number, self._action)
def _action(self, lhs, rhs, _):
return OP_OPERATIONS[self.keyword](lhs.value, rhs.value)
class LogicalExpression(BinaryExpression):
def __init__(self, op, rhs_type):
assert (op in OP_LOGICAL)
assert (rhs_type in (Boolean, Code))
super().__init__(Boolean, op, rhs_type, Boolean, self._action)
def _action(self, lhs, rhs, interpreter):
if isinstance(rhs, Code):
result = interpreter.execute_code(rhs)
if type(result) not in (Boolean, Nothing):
interpreter.exception(SQFParserError(rhs.position, 'code return must be a Boolean (returns %s)' % type(result).__name__))
return None
else:
result = rhs
return OP_OPERATIONS[self.keyword](lhs.value, result.value)
class Action:
def __init__(self, action):
self.action = action
def __call__(self, *args):
        # the last positional argument is the interpreter, which the wrapped
        # callable does not use, so strip it before delegating
all_args = args[:-1]
return self.action(*all_args)
def _select(lhs, rhs, interpreter):
index = int(round(rhs.value))
try:
return lhs[index]
except IndexError:
interpreter.exception(SQFParserError(
lhs.position, 'selecting element %d of array of size %d' % (index, len(lhs))))
def _select_array(lhs, rhs, interpreter):
start = rhs.value[0].value
count = rhs.value[1].value
if start > len(lhs.value):
interpreter.exception(SQFParserError(lhs.position, 'Selecting element past size'))
return lhs.value[start:start + count]
def _subtract_arrays(lhs, rhs):
rhs_set = set([rhs_i.value for rhs_i in rhs.value])
return [lhs_i for lhs_i in lhs if lhs_i.value not in rhs_set]
def _find(lhs_v, rhs_v):
try:
index = next(i for i, v in enumerate(lhs_v.value) if v == rhs_v)
except StopIteration:
index = -1
return index
def _pushBack(lhs_v, rhs_v):
lhs_v.append(rhs_v)
return len(lhs_v.value) - 1
def _pushBackUnique(lhs_v, rhs_v):
if rhs_v in lhs_v.value:
return -1
else:
lhs_v.append(rhs_v)
return len(lhs_v.value) - 1
def _setVariable(lhs_v, rhs_v, interpreter):
namespace_name = lhs_v.value
assert(isinstance(rhs_v, Array))
if len(rhs_v) not in [2, 3]:
interpreter.exception(SQFParserError(
rhs_v.position, 'setVariable requires array of 2-3 elements (has %d)' % (len(rhs_v))))
# get the variable name
if not isinstance(rhs_v.value[0], (String, Nothing)):
interpreter.exception(SQFParserError(
rhs_v.value[0].position, 'setVariable array first element must be a string (is %s)' % type(rhs_v.value[0]).__name__))
variable_name = rhs_v.value[0].value
# get the value
rhs_assignment = rhs_v.value[1]
scope = interpreter.get_scope(variable_name, namespace_name)
scope[variable_name] = rhs_assignment
def _getVariableString(lhs_v, rhs_v, interpreter):
variable = Variable(rhs_v.value)
variable.position = rhs_v.position
return interpreter.value(variable, lhs_v.value)
def _getVariableArray(lhs_v, rhs_v, interpreter):
# get the variable name
if len(rhs_v) != 2:
interpreter.exception(SQFParserError(
rhs_v.position, 'getVariable requires array of 2 elements (has %d)' % (len(rhs_v))))
if not isinstance(rhs_v.value[0], (String, Nothing)):
interpreter.exception(SQFParserError(
rhs_v.value[0].position, 'getVariable array first element must be a string (is %s)' % type(rhs_v.value[0]).__name__))
variable = Variable(rhs_v.value[0].value)
variable.position = rhs_v.value[0].position
outcome = interpreter.value(variable, lhs_v.value)
if outcome == Nothing():
outcome = rhs_v.value[1]
return outcome
def _addPublicVariableEventHandler(lhs_v, rhs_v, interpreter):
interpreter.client.add_listening(lhs_v.value, rhs_v)
def _if_then_else_code(interpreter, condition, then, else_=None):
"""
The equivalent Python code for a if-then-else SQF statement
"""
assert(isinstance(condition, bool) and isinstance(then, Code))
if condition:
result = interpreter.execute_code(then)
else:
if else_ is not None:
result = interpreter.execute_code(else_)
else:
result = Nothing()
return result
def _if_then_else(if_instance, then_or_else, interpreter):
condition = if_instance.condition.value
if isinstance(then_or_else, Code):
then = then_or_else
else_ = None
else:
then = then_or_else.then
else_ = then_or_else.else_
return _if_then_else_code(interpreter, condition, then, else_)
def parse_switch(interpreter, code):
conditions = []
default_used = False
for statement in code.base_tokens:
base_tokens = statement.base_tokens
# evaluate all the base_tokens, trying to obtain their values
values = []
for token in base_tokens:
v = interpreter.value(token)
values.append(v)
if type(values[0]) != SwitchType:
interpreter.exception(SQFParserError(
statement.position, 'Switch code can only start with "case" or "default"'))
if values[0].keyword == Keyword('default'):
if default_used:
interpreter.exception(SQFParserError(code.position, 'Switch code contains more than 1 `default`'))
default_used = True
assert(isinstance(values[0].result, Code))
conditions.append(('default', values[0].result))
else:
case_condition = values[0].result
if len(values) == 1:
conditions.append((case_condition, None))
else:
assert (len(values) == 3 and values[1] == Keyword(':'))
outcome_statement = values[2]
conditions.append((case_condition, outcome_statement))
return conditions
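# parse_switch (above) returns a list of (case value, Code-or-None) pairs plus
# an optional ('default', Code) entry; execute_switch (below) walks that list,
# falling through cases with no code until it finds an outcome to run.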
def execute_switch(interpreter, result, conditions):
try:
default = next(o for c, o in conditions if c == 'default')
except StopIteration:
default = None
final_outcome = None
execute_next = False
for condition, outcome in conditions:
if condition == 'default':
continue
condition_outcome = interpreter.value(condition)
if outcome is not None and execute_next:
final_outcome = interpreter.execute_code(outcome)
break
elif condition_outcome == result:
if outcome is not None:
final_outcome = interpreter.execute_code(outcome)
break
else:
execute_next = True
if final_outcome is None:
if default is not None:
final_outcome = interpreter.execute_code(default)
else:
final_outcome = Boolean(True)
return final_outcome
def _foreach_loop(interpreter, code, elements):
outcome = Nothing()
for i, x in enumerate(elements):
outcome = interpreter.execute_code(code, extra_scope={'_x': x, '_forEachIndex': Number(i)})
return outcome
def _forvar_loop_code(interpreter, token_name, start, stop, step, code):
outcome = Nothing()
outcome.position = code.position
for i in range(start, stop + 1, step):
outcome = interpreter.execute_code(code, extra_scope={token_name: Number(i)})
return outcome
def _forvar_loop(for_instance, code, interpreter):
return _forvar_loop_code(interpreter,
for_instance.variable.value,
for_instance.from_.value,
for_instance.to.value, for_instance.step.value, code)
def _forspecs_loop_code(interpreter, start_code, stop_code, increment_code, do_code):
outcome = Nothing()
outcome.position = start_code.position
interpreter.execute_code(start_code)
while True:
condition_outcome = interpreter.execute_code(stop_code)
if condition_outcome.value is False:
break
outcome = interpreter.execute_code(do_code)
interpreter.execute_code(increment_code)
return outcome
def _forspecs_loop(forspec_type, do_code, interpreter):
return _forspecs_loop_code(interpreter, forspec_type.array[0],
forspec_type.array[1], forspec_type.array[2], do_code)
def _while_loop(interpreter, condition_code, do_code):
outcome = Nothing()
while True:
condition_outcome = interpreter.execute_code(condition_code)
if condition_outcome.value is False:
break
outcome = interpreter.execute_code(do_code)
return outcome
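# Registry of every expression the interpreter understands. Each entry couples
# an lhs/keyword/rhs type signature with the callable that produces its value,
# and the interpreter dispatches statements by matching against this list.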
INTERPRETER_EXPRESSIONS = [
TryCatchExpression(),
ForEachExpression(lambda lhs, rhs, i: _foreach_loop(i, lhs, rhs.value)),
WhileDoExpression(lambda lhs, rhs, i: _while_loop(i, lhs.condition, rhs)),
ForFromToDoExpression(_forvar_loop),
ForSpecDoExpression(_forspecs_loop),
SwitchDoExpression(lambda lhs, rhs, i: execute_switch(i, lhs.result, parse_switch(i, rhs))),
IfThenSpecExpression(lambda if_type, array, i: _if_then_else_code(i, if_type.condition.value, array.value[0], array.value[1])),
IfThenElseExpression(_if_then_else),
IfThenExpression(_if_then_else),
IfThenExitWithExpression(),
# params
UnaryExpression(Keyword('params'), Array, Nothing, lambda rhs_v, i: i.add_params(rhs_v)),
BinaryExpression(Type, Keyword('params'), Array, Nothing, lambda lhs_v, rhs_v, i: i.add_params(rhs_v)),
# Unary
UnaryExpression(Keyword('-'), Number, Number, Action(lambda x: -x.value)),
UnaryExpression(Keyword('floor'), Number, Number, Action(lambda x: math.floor(x.value))),
UnaryExpression(Keyword('reverse'), Array, Nothing, Action(lambda rhs_v: rhs_v.reverse())),
# Binary
BinaryExpression(Array, Keyword('set'), Array,
Nothing, Action(lambda lhs_v, rhs_v: lhs_v.set(rhs_v))),
# Array related
BinaryExpression(Array, Keyword('resize'), Number,
Nothing, Action(lambda lhs_v, rhs_v: lhs_v.resize(rhs_v.value))),
UnaryExpression(Keyword('count'), Array, Number, Action(lambda x: len(x.value))),
BinaryExpression(Type, Keyword('in'), Array, Boolean, Action(lambda x, array: x in array.value)),
BinaryExpression(Array, Keyword('select'), Number, None, _select),
BinaryExpression(Array, Keyword('select'), Boolean, None, _select),
BinaryExpression(Array, Keyword('select'), Array, Array, _select_array),
BinaryExpression(Array, Keyword('find'), Type, Number, Action(_find)),
BinaryExpression(String, Keyword('find'), String, Number,
Action(lambda lhs_v, rhs_v: lhs_v.value.find(rhs_v.value))),
BinaryExpression(Array, Keyword('pushBack'), Type, Number, Action(_pushBack)),
BinaryExpression(Array, Keyword('pushBackUnique'), Type, Number, Action(_pushBackUnique)),
BinaryExpression(Array, Keyword('append'), Array, Nothing, Action(lambda lhs_v, rhs_v: lhs_v.add(rhs_v.value))),
UnaryExpression(Keyword('toArray'), String, Array,
Action(lambda rhs_v: [Number(ord(s)) for s in rhs_v.value])),
UnaryExpression(Keyword('toString'), Array, String,
Action(lambda rhs_v: '"'+''.join(chr(s.value) for s in rhs_v.value)+'"')),
# code and namespaces
UnaryExpression(Keyword('call'), Code, None, lambda rhs_v, i: i.execute_code(rhs_v)),
BinaryExpression(Type, Keyword('call'), Code, None, lambda lhs_v, rhs_v, i: i.execute_code(rhs_v, extra_scope={"_this": lhs_v})),
BinaryExpression(Namespace, Keyword('setVariable'), Array, Nothing, _setVariable),
BinaryExpression(Namespace, Keyword('getVariable'), String, None, _getVariableString),
BinaryExpression(Namespace, Keyword('getVariable'), Array, None, _getVariableArray),
BinaryExpression(String, Keyword('addPublicVariableEventHandler'), Code, None, _addPublicVariableEventHandler),
BinaryExpression(Array, Keyword('+'), Array, Array, Action(lambda lhs_v, rhs_v: lhs_v.value + rhs_v.value)),
BinaryExpression(Array, Keyword('-'), Array, Array, Action(_subtract_arrays)),
BinaryExpression(String, Keyword('+'), String, String, Action(lambda lhs, rhs: lhs.container + lhs.value + rhs.value + lhs.container)),
]
for op in OP_COMPARISON:
for lhs_rhs_type in [Number, String]:
        if lhs_rhs_type == Number or (lhs_rhs_type == String and op in [Keyword('=='), Keyword('!=')]):
INTERPRETER_EXPRESSIONS.append(ComparisonExpression(op, lhs_rhs_type))
for op in OP_ARITHMETIC:
INTERPRETER_EXPRESSIONS.append(ArithmeticExpression(op))
for op in OP_LOGICAL:
for rhs_type in (Boolean, Code):
INTERPRETER_EXPRESSIONS.append(LogicalExpression(op, rhs_type))
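# Hedged usage sketch (illustrative, not part of the original module). The
# OP_OPERATIONS table maps SQF keywords to plain Python callables operating on
# raw .value payloads, which is how the expression classes above evaluate
# their operands; this assumes Keyword hashes by its literal, as the lookups
# in the _action methods already rely on.
if __name__ == '__main__':
    assert OP_OPERATIONS[Keyword('+')](2, 3) == 5
    assert OP_OPERATIONS[Keyword('mod')](7, 4) == 3
    assert OP_OPERATIONS[Keyword('<')](1, 2) is True
    assert OP_OPERATIONS[Keyword('&&')](True, False) is False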
| |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.file import backup_pb2
from google3.cloud.graphite.mmv2.services.google.file import backup_pb2_grpc
from typing import List
class Backup(object):
def __init__(
self,
name: str = None,
description: str = None,
state: str = None,
create_time: str = None,
labels: dict = None,
capacity_gb: int = None,
storage_bytes: int = None,
source_instance: str = None,
source_file_share: str = None,
source_instance_tier: str = None,
download_bytes: int = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.labels = labels
self.source_instance = source_instance
self.source_file_share = source_file_share
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = backup_pb2_grpc.FileBetaBackupServiceStub(channel.Channel())
request = backup_pb2.ApplyFileBetaBackupRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.source_instance):
request.resource.source_instance = Primitive.to_proto(self.source_instance)
if Primitive.to_proto(self.source_file_share):
request.resource.source_file_share = Primitive.to_proto(
self.source_file_share
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyFileBetaBackup(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.state = BackupStateEnum.from_proto(response.state)
self.create_time = Primitive.from_proto(response.create_time)
self.labels = Primitive.from_proto(response.labels)
self.capacity_gb = Primitive.from_proto(response.capacity_gb)
self.storage_bytes = Primitive.from_proto(response.storage_bytes)
self.source_instance = Primitive.from_proto(response.source_instance)
self.source_file_share = Primitive.from_proto(response.source_file_share)
self.source_instance_tier = BackupSourceInstanceTierEnum.from_proto(
response.source_instance_tier
)
self.download_bytes = Primitive.from_proto(response.download_bytes)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = backup_pb2_grpc.FileBetaBackupServiceStub(channel.Channel())
request = backup_pb2.DeleteFileBetaBackupRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.source_instance):
request.resource.source_instance = Primitive.to_proto(self.source_instance)
if Primitive.to_proto(self.source_file_share):
request.resource.source_file_share = Primitive.to_proto(
self.source_file_share
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteFileBetaBackup(request)
def list(self):
stub = backup_pb2_grpc.FileBetaBackupServiceStub(channel.Channel())
request = backup_pb2.ListFileBetaBackupRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.source_instance):
request.resource.source_instance = Primitive.to_proto(self.source_instance)
if Primitive.to_proto(self.source_file_share):
request.resource.source_file_share = Primitive.to_proto(
self.source_file_share
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
return stub.ListFileBetaBackup(request).items
def to_proto(self):
resource = backup_pb2.FileBetaBackup()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.source_instance):
resource.source_instance = Primitive.to_proto(self.source_instance)
if Primitive.to_proto(self.source_file_share):
resource.source_file_share = Primitive.to_proto(self.source_file_share)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class BackupStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return backup_pb2.FileBetaBackupStateEnum.Value(
"FileBetaBackupStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return backup_pb2.FileBetaBackupStateEnum.Name(resource)[
len("FileBetaBackupStateEnum") :
]
class BackupSourceInstanceTierEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return backup_pb2.FileBetaBackupSourceInstanceTierEnum.Value(
"FileBetaBackupSourceInstanceTierEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return backup_pb2.FileBetaBackupSourceInstanceTierEnum.Name(resource)[
len("FileBetaBackupSourceInstanceTierEnum") :
]
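# Primitive is a small passthrough helper for the generated code above: falsy
# values are sent to the proto layer as the empty-string default, and values
# coming back from the proto layer are returned unchanged.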
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
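# Hedged usage sketch (illustrative, not part of the generated client). The
# resource names, project, location and credentials path are placeholders;
# apply() only succeeds where the `connector` channel can reach the service
# with valid credentials.
if __name__ == "__main__":
    backup = Backup(
        name="projects/my-project/locations/us-central1/backups/my-backup",
        description="nightly share backup",
        source_instance="projects/my-project/locations/us-central1/instances/my-instance",
        source_file_share="share1",
        project="my-project",
        location="us-central1",
        service_account_file="/path/to/service-account.json",
    )
    backup.apply()                             # create or update the backup
    print(backup.state, backup.storage_bytes)  # output-only fields filled in by apply()
    backup.delete()                            # clean up the resource again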
| |
import logging
import ldap3
try:
from flask import _app_ctx_stack as stack
except ImportError: # pragma: no cover
from flask import _request_ctx_stack as stack
from enum import Enum
log = logging.getLogger(__name__)
AuthenticationResponseStatus = Enum(
'AuthenticationResponseStatus', 'fail success')
class AuthenticationResponse(object):
"""
A response object when authenticating. Lets us pass status codes around
and also user data.
Args:
status (AuthenticationResponseStatus): The status of the result.
user_info (dict): User info dictionary obtained from LDAP.
user_id (str): User id used to authenticate to LDAP with.
user_dn (str): User DN found from LDAP.
user_groups (list): A list containing a dicts of group info.
"""
def __init__(self, status=AuthenticationResponseStatus.fail,
user_info=None, user_id=None, user_dn=None, user_groups=[]):
        self.user_info = user_info
        self.user_id = user_id
        self.user_dn = user_dn
self.user_groups = user_groups
self.status = status
class LDAP3LoginManager(object):
"""
Initialise a LDAP3LoginManager. If app is passed, init_app is called
within this call.
Args:
app (flask.Flask): The flask app to initialise with
"""
def __init__(self, app=None):
self._save_user = None
self.config = {}
self._server_pool = ldap3.ServerPool(
[],
ldap3.FIRST,
active=1, # Loop through all servers once.
exhaust=10, # Remove unreachable servers for 10 seconds.
)
if app is not None:
self.init_app(app)
def init_app(self, app):
'''
        Configures this extension with the given app. This registers a
        ``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``
to it as ``app.ldap3_login_manager``.
Args:
app (flask.Flask): The flask app to initialise with
'''
app.ldap3_login_manager = self
servers = list(self._server_pool)
for s in servers:
self._server_pool.remove(s)
self.init_config(app.config)
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else: # pragma: no cover
app.teardown_request(self.teardown)
self.app = app
def init_config(self, config):
'''
Configures this extension with a given configuration dictionary.
This allows use of this extension without a flask app.
Args:
config (dict): A dictionary with configuration keys
'''
self.config.update(config)
self.config.setdefault('LDAP_PORT', 389)
self.config.setdefault('LDAP_HOST', None)
self.config.setdefault('LDAP_USE_SSL', False)
self.config.setdefault('LDAP_READONLY', True)
self.config.setdefault('LDAP_BIND_DIRECT_CREDENTIALS', False)
self.config.setdefault('LDAP_BIND_DIRECT_SUFFIX', '')
self.config.setdefault('LDAP_BIND_DIRECT_GET_USER_INFO', True)
self.config.setdefault('LDAP_ALWAYS_SEARCH_BIND', False)
self.config.setdefault('LDAP_BASE_DN', '')
self.config.setdefault('LDAP_BIND_USER_DN', None)
self.config.setdefault('LDAP_BIND_USER_PASSWORD', None)
self.config.setdefault('LDAP_SEARCH_FOR_GROUPS', True)
self.config.setdefault('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND', False)
# Prepended to the Base DN to limit scope when searching for
# Users/Groups.
self.config.setdefault('LDAP_USER_DN', '')
self.config.setdefault('LDAP_GROUP_DN', '')
self.config.setdefault('LDAP_BIND_AUTHENTICATION_TYPE', 'SIMPLE')
# Ldap Filters
self.config.setdefault('LDAP_USER_SEARCH_SCOPE',
'LEVEL')
self.config.setdefault('LDAP_USER_OBJECT_FILTER',
'(objectclass=person)')
self.config.setdefault('LDAP_USER_LOGIN_ATTR', 'uid')
self.config.setdefault('LDAP_USER_RDN_ATTR', 'uid')
self.config.setdefault(
'LDAP_GET_USER_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)
self.config.setdefault('LDAP_GROUP_SEARCH_SCOPE',
'LEVEL')
self.config.setdefault(
'LDAP_GROUP_OBJECT_FILTER', '(objectclass=group)')
self.config.setdefault('LDAP_GROUP_MEMBERS_ATTR', 'uniqueMember')
self.config.setdefault(
'LDAP_GET_GROUP_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)
if self.config.setdefault('LDAP_ADD_SERVER', True):
self.add_server(
hostname=self.config.get('LDAP_HOST'),
port=self.config.get('LDAP_PORT'),
use_ssl=self.config.get('LDAP_USE_SSL')
)
def add_server(self, hostname, port, use_ssl, tls_ctx=None):
"""
Add an additional server to the server pool and return the
freshly created server.
Args:
hostname (str): Hostname of the server
port (int): Port of the server
use_ssl (bool): True if SSL is to be used when connecting.
tls_ctx (ldap3.Tls): An optional TLS context object to use
when connecting.
Returns:
ldap3.Server: The freshly created server object.
"""
if not use_ssl and tls_ctx:
raise ValueError("Cannot specify a TLS context and not use SSL!")
server = ldap3.Server(
hostname,
port=port,
use_ssl=use_ssl,
tls=tls_ctx
)
self._server_pool.add(server)
return server
def _contextualise_connection(self, connection):
"""
Add a connection to the appcontext so it can be freed/unbound at
        a later time if an exception occurred and it was not freed.
Args:
connection (ldap3.Connection): Connection to add to the appcontext
"""
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'ldap3_manager_connections'):
ctx.ldap3_manager_connections = [connection]
else:
ctx.ldap3_manager_connections.append(connection)
def _decontextualise_connection(self, connection):
"""
Remove a connection from the appcontext.
Args:
connection (ldap3.Connection): connection to remove from the
appcontext
"""
ctx = stack.top
if ctx is not None and connection in ctx.ldap3_manager_connections:
ctx.ldap3_manager_connections.remove(connection)
def teardown(self, exception):
"""
Cleanup after a request. Close any open connections.
"""
ctx = stack.top
if ctx is not None:
if hasattr(ctx, 'ldap3_manager_connections'):
for connection in ctx.ldap3_manager_connections:
self.destroy_connection(connection)
if hasattr(ctx, 'ldap3_manager_main_connection'):
log.debug(
"Unbinding a connection used within the request context.")
ctx.ldap3_manager_main_connection.unbind()
ctx.ldap3_manager_main_connection = None
def save_user(self, callback):
'''
        This sets the callback for saving a user that has been looked up
        from ldap.
The function you set should take a user dn (unicode), username
(unicode) and userdata (dict), and memberships (list).
::
@ldap3_manager.save_user
def save_user(dn, username, userdata, memberships):
return User(username=username, data=userdata)
Your callback function MUST return the user object in your ORM
        (or similar), as this is used within the LoginForm and placed
        at ``form.user``.
Args:
callback (function): The function to be used as the save user
callback.
'''
self._save_user = callback
return callback
def authenticate(self, username, password):
"""
An abstracted authentication method. Decides whether to perform a
direct bind or a search bind based upon the login attribute configured
in the config.
Args:
username (str): Username of the user to bind
password (str): User's password to bind with.
Returns:
AuthenticationResponse
"""
if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):
result = self.authenticate_direct_credentials(username, password)
elif not self.config.get('LDAP_ALWAYS_SEARCH_BIND') and \
self.config.get('LDAP_USER_RDN_ATTR') == \
self.config.get('LDAP_USER_LOGIN_ATTR'):
# Since the user's RDN is the same as the login field,
# we can do a direct bind.
result = self.authenticate_direct_bind(username, password)
else:
# We need to search the User's DN to find who the user is (and
# their DN) so we can try bind with their password.
result = self.authenticate_search_bind(username, password)
return result
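    # Hedged usage sketch (illustrative, not part of the original class). A
    # manager configured from a plain dict can authenticate without a Flask
    # app; every host, DN and credential below is a placeholder:
    #
    #     manager = LDAP3LoginManager()
    #     manager.init_config({
    #         'LDAP_HOST': 'ldap.example.com',
    #         'LDAP_BASE_DN': 'dc=example,dc=com',
    #         'LDAP_USER_DN': 'ou=users',
    #         'LDAP_BIND_USER_DN': 'cn=service,dc=example,dc=com',
    #         'LDAP_BIND_USER_PASSWORD': 'secret',
    #     })
    #     response = manager.authenticate('jsmith', 'password123')
    #     if response.status == AuthenticationResponseStatus.success:
    #         print(response.user_dn, response.user_groups)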
def authenticate_direct_credentials(self, username, password):
"""
Performs a direct bind, however using direct credentials. Can be used
if interfacing with an Active Directory domain controller which
authenticates using username@domain.com directly.
Performing this kind of lookup limits the information we can get from
ldap. Instead we can only deduce whether or not their bind was
successful. Do not use this method if you require more user info.
Args:
            username (str): username for the user to bind with.
                LDAP_BIND_DIRECT_SUFFIX will be appended.
password (str): User's password to bind with.
Returns:
AuthenticationResponse
"""
connection = self._make_connection(
bind_user=username + self.config.get('LDAP_BIND_DIRECT_SUFFIX'),
bind_password=password,
)
response = AuthenticationResponse()
try:
connection.bind()
response.status = AuthenticationResponseStatus.success
response.user_id = username
log.debug(
"Authentication was successful for user '{0}'".format(username))
if self.config.get('LDAP_BIND_DIRECT_GET_USER_INFO'):
# User wants extra info about the bind
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{0}{1})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'),
)
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
# Don't allow them to log in.
log.error(
"Could not gather extra info for user '{0}'".format(username))
else:
user = connection.response[0]
user['attributes']['dn'] = user['dn']
response.user_info = user['attributes']
response.user_dn = user['dn']
except ldap3.core.exceptions.LDAPInvalidCredentialsResult as e:
log.debug(
"Authentication was not successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response
def authenticate_direct_bind(self, username, password):
"""
Performs a direct bind. We can do this since the RDN is the same
as the login attribute. Hence we just string together a dn to find
this user with.
Args:
            username (str): Username of the user to bind (the field specified
                as LDAP_USER_RDN_ATTR)
password (str): User's password to bind with.
Returns:
AuthenticationResponse
"""
bind_user = '{rdn}={username},{user_search_dn}'.format(
rdn=self.config.get('LDAP_USER_RDN_ATTR'),
username=username,
user_search_dn=self.full_user_search_dn,
)
connection = self._make_connection(
bind_user=bind_user,
bind_password=password,
)
response = AuthenticationResponse()
try:
connection.bind()
log.debug(
"Authentication was successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.success
# Get user info here.
user_info = self.get_user_info(
dn=bind_user, _connection=connection)
response.user_dn = bind_user
response.user_id = username
response.user_info = user_info
if self.config.get('LDAP_SEARCH_FOR_GROUPS'):
response.user_groups = self.get_user_groups(
dn=bind_user, _connection=connection)
except ldap3.core.exceptions.LDAPInvalidCredentialsResult as e:
log.debug(
"Authentication was not successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response
def authenticate_search_bind(self, username, password):
"""
Performs a search bind to authenticate a user. This is
        required when the login attribute is not the same
        as the RDN, since we cannot string together their DN on
        the fly. Instead we have to find it in the LDAP, then attempt
        to bind with their credentials.
Args:
            username (str): Username of the user to bind (the field specified
                as LDAP_USER_LOGIN_ATTR)
password (str): User's password to bind with when we find their dn.
Returns:
AuthenticationResponse
"""
connection = self._make_connection(
bind_user=self.config.get('LDAP_BIND_USER_DN'),
bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'),
)
try:
connection.bind()
log.debug("Successfully bound to LDAP as '{0}' for search_bind method".format(
self.config.get('LDAP_BIND_USER_DN') or 'Anonymous'
))
except Exception as e:
self.destroy_connection(connection)
log.error(e)
return AuthenticationResponse()
# Find the user in the search path.
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{0}{1})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
log.debug("Performing an LDAP Search using filter '{0}', base '{1}', "
"and scope '{2}'".format(
search_filter,
self.full_user_search_dn,
self.config.get('LDAP_USER_SEARCH_SCOPE')
))
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES')
)
response = AuthenticationResponse()
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
# Don't allow them to log in.
log.debug(
"Authentication was not successful for user '{0}'".format(username))
else:
for user in connection.response:
# Attempt to bind with each user we find until we can find
# one that works.
if 'type' not in user or user.get('type') != 'searchResEntry':
# Issue #13 - Don't return non-entry results.
continue
user_connection = self._make_connection(
bind_user=user['dn'],
bind_password=password
)
log.debug(
"Directly binding a connection to a server with "
"user:'{0}'".format(user['dn']))
try:
user_connection.bind()
log.debug(
"Authentication was successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.success
# Populate User Data
user['attributes']['dn'] = user['dn']
response.user_info = user['attributes']
response.user_id = username
response.user_dn = user['dn']
if self.config.get('LDAP_SEARCH_FOR_GROUPS'):
response.user_groups = self.get_user_groups(
dn=user['dn'], _connection=connection)
self.destroy_connection(user_connection)
break
except ldap3.core.exceptions.LDAPInvalidCredentialsResult as e:
log.debug(
"Authentication was not successful for "
"user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e: # pragma: no cover
# This should never happen, however in case ldap3 does ever
# throw an error here, we catch it and log it
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(user_connection)
self.destroy_connection(connection)
return response
def get_user_groups(self, dn, group_search_dn=None, _connection=None):
"""
Gets a list of groups a user at dn is a member of
Args:
dn (str): The dn of the user to find memberships for.
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
group_search_dn (str): The search dn for groups. Defaults to
``'{LDAP_GROUP_DN},{LDAP_BASE_DN}'``.
Returns:
list: A list of LDAP groups the user is a member of.
"""
connection = _connection
if not connection:
connection = self._make_connection(
bind_user=self.config.get('LDAP_BIND_USER_DN'),
bind_password=self.config.get('LDAP_BIND_USER_PASSWORD')
)
connection.bind()
search_filter = '(&{group_filter}({members_attr}={user_dn}))'.format(
group_filter=self.config.get('LDAP_GROUP_OBJECT_FILTER'),
members_attr=self.config.get('LDAP_GROUP_MEMBERS_ATTR'),
user_dn=dn
)
log.debug("Searching for groups for specific user with filter '{0}' "
", base '{1}' and scope '{2}'".format(
search_filter,
group_search_dn or self.full_group_search_dn,
self.config.get('LDAP_GROUP_SEARCH_SCOPE')
))
connection.search(
search_base=group_search_dn or self.full_group_search_dn,
search_filter=search_filter,
attributes=self.config.get('LDAP_GET_GROUP_ATTRIBUTES'),
search_scope=getattr(
ldap3, self.config.get('LDAP_GROUP_SEARCH_SCOPE'))
)
results = []
for item in connection.response:
if 'type' not in item or item.get('type') != 'searchResEntry':
# Issue #13 - Don't return non-entry results.
continue
group_data = item['attributes']
group_data['dn'] = item['dn']
results.append(group_data)
if not _connection:
# We made a connection, so we need to kill it.
self.destroy_connection(connection)
return results
def get_user_info(self, dn, _connection=None):
"""
Gets info about a user specified at dn.
Args:
dn (str): The dn of the user to find
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
Returns:
dict: A dictionary of the user info from LDAP
"""
return self.get_object(
dn=dn,
filter=self.config.get('LDAP_USER_OBJECT_FILTER'),
attributes=self.config.get("LDAP_GET_USER_ATTRIBUTES"),
_connection=_connection,
)
def get_user_info_for_username(self, username, _connection=None):
"""
Gets info about a user at a specified username by searching the
Users DN. Username attribute is the same as specified as
LDAP_USER_LOGIN_ATTR.
Args:
username (str): Username of the user to search for.
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
Returns:
dict: A dictionary of the user info from LDAP
"""
ldap_filter = '(&({0}={1}){2})'.format(
self.config.get('LDAP_USER_LOGIN_ATTR'),
username,
self.config.get('LDAP_USER_OBJECT_FILTER')
)
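# For illustration only: with LDAP_USER_LOGIN_ATTR='uid',
# LDAP_USER_OBJECT_FILTER='(objectclass=person)' and username='jsmith',
# ldap_filter evaluates to '(&(uid=jsmith)(objectclass=person))'
# (hypothetical config values; the real ones come from the app config).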
return self.get_object(
dn=self.full_user_search_dn,
filter=ldap_filter,
attributes=self.config.get("LDAP_GET_USER_ATTRIBUTES"),
_connection=_connection,
)
def get_group_info(self, dn, _connection=None):
"""
Gets info about a group specified at dn.
Args:
dn (str): The dn of the group to find
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
Returns:
dict: A dictionary of the group info from LDAP
"""
return self.get_object(
dn=dn,
filter=self.config.get('LDAP_GROUP_OBJECT_FILTER'),
attributes=self.config.get("LDAP_GET_GROUP_ATTRIBUTES"),
_connection=_connection,
)
def get_object(self, dn, filter, attributes, _connection=None):
"""
Gets an object at the specified dn and returns it.
Args:
dn (str): The dn of the object to find.
filter (str): The LDAP syntax search filter.
attributes (list): A list of LDAP attributes to get when searching.
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be created,
and destroyed after use.
Returns:
dict: A dictionary of the object info from LDAP
"""
connection = _connection
if not connection:
connection = self._make_connection(
bind_user=self.config.get('LDAP_BIND_USER_DN'),
bind_password=self.config.get('LDAP_BIND_USER_PASSWORD')
)
connection.bind()
connection.search(
search_base=dn,
search_filter=filter,
attributes=attributes,
)
data = None
if len(connection.response) > 0:
data = connection.response[0]['attributes']
data['dn'] = connection.response[0]['dn']
if not _connection:
# We made a connection, so we need to kill it.
self.destroy_connection(connection)
return data
@property
def connection(self):
"""
Convenience property for externally accessing an authenticated
connection to the server. This connection is automatically
handled by the appcontext, so you do not have to perform an unbind.
Returns:
ldap3.Connection: A bound ldap3.Connection
Raises:
ldap3.core.exceptions.LDAPException: Raised because this method
performs a bind on behalf of the caller. You should handle this
case, e.g. invalid service credentials.
"""
ctx = stack.top
if ctx is None:
raise Exception("Working outside of the Flask application "
"context. If you wish to make a connection outside of a flask"
" application context, please handle your connections "
"and use manager.make_connection()")
if hasattr(ctx, 'ldap3_manager_main_connection'):
return ctx.ldap3_manager_main_connection
else:
connection = self._make_connection(
bind_user=self.config.get('LDAP_BIND_USER_DN'),
bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'),
contextualise=False
)
connection.bind()
if ctx is not None:
ctx.ldap3_manager_main_connection = connection
return connection
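# Example (hypothetical; inside a Flask request/app context with an
# initialised ``ldap_manager``):
#
#     conn = ldap_manager.connection
#     conn.search(search_base='dc=example,dc=com',
#                 search_filter='(objectclass=person)',
#                 attributes=['cn'])
#     # no explicit unbind needed; torn down with the app context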
def make_connection(self, bind_user=None, bind_password=None, **kwargs):
"""
Make a connection to the LDAP Directory.
Args:
bind_user (str): User to bind with. If `None`, AUTH_ANONYMOUS is
used, otherwise authentication specified with
config['LDAP_BIND_AUTHENTICATION_TYPE'] is used.
bind_password (str): Password to bind to the directory with
**kwargs (dict): Additional arguments to pass to the
``ldap3.Connection``
Returns:
ldap3.Connection: An unbound ldap3.Connection. You should handle
exceptions upon bind when using this method.
"""
return self._make_connection(bind_user, bind_password,
contextualise=False, **kwargs)
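# Sketch of manual connection handling (hypothetical bind credentials;
# the connection is not tied to the app context, so clean up yourself):
#
#     conn = ldap_manager.make_connection(
#         bind_user='cn=service,dc=example,dc=com',
#         bind_password='secret')
#     try:
#         conn.bind()          # may raise ldap3 exceptions
#         # ... use the connection ...
#     finally:
#         conn.unbind()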
def _make_connection(self, bind_user=None, bind_password=None,
contextualise=True, **kwargs):
"""
Make a connection.
Args:
bind_user (str): User to bind with. If `None`, AUTH_ANONYMOUS is
used, otherwise authentication specified with
config['LDAP_BIND_AUTHENTICATION_TYPE'] is used.
bind_password (str): Password to bind to the directory with
contextualise (bool): If true (default), will add this connection to the
appcontext so it can be unbound upon app_teardown.
Returns:
ldap3.Connection: An unbound ldap3.Connection. You should handle exceptions
upon bind if you use this internal method.
"""
authentication = ldap3.ANONYMOUS
if bind_user:
authentication = getattr(ldap3, self.config.get(
'LDAP_BIND_AUTHENTICATION_TYPE'))
log.debug("Opening connection with bind user '{0}'".format(
bind_user or 'Anonymous'))
connection = ldap3.Connection(
server=self._server_pool,
read_only=self.config.get('LDAP_READONLY'),
user=bind_user,
password=bind_password,
client_strategy=ldap3.SYNC,
authentication=authentication,
check_names=True,
raise_exceptions=True,
**kwargs
)
if contextualise:
self._contextualise_connection(connection)
return connection
def destroy_connection(self, connection):
"""
Destroys a connection. Removes the connection from the appcontext, and
unbinds it.
Args:
connection (ldap3.Connection): The connection to destroy
"""
log.debug("Destroying connection at <{0}>".format(hex(id(connection))))
self._decontextualise_connection(connection)
connection.unbind()
@property
def full_user_search_dn(self):
"""
Returns the base search DN with the user search DN prepended.
Returns:
str: Full user search dn
"""
return self.compiled_sub_dn(self.config.get('LDAP_USER_DN'))
@property
def full_group_search_dn(self):
"""
Returns the base search DN with the group search DN prepended.
Returns:
str: Full group search dn
"""
return self.compiled_sub_dn(self.config.get('LDAP_GROUP_DN'))
def compiled_sub_dn(self, prepend):
"""
Args:
prepend (str): The dn to prepend to the base.
Returns:
str: A DN with the base DN appended to the end.
"""
prepend = prepend.strip()
if prepend == '':
return self.config.get('LDAP_BASE_DN')
return '{prepend},{base}'.format(
prepend=prepend,
base=self.config.get('LDAP_BASE_DN')
)
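# For example (illustrative values): with LDAP_BASE_DN='dc=example,dc=com',
# compiled_sub_dn('ou=users') returns 'ou=users,dc=example,dc=com' and
# compiled_sub_dn('') returns 'dc=example,dc=com'.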
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
test_abstract_ingester.py - tests for the top level ingestion algorithm
"""
import re
import os
import logging
import unittest
import subprocess
import dbutil
from cube_util import DatasetError
from abstract_ingester import AbstractIngester
#
# Set up logger.
#
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
#
# Constants
#
TEMP_TILE_DIR = 'temp_tile_dir'
DATASET_PATH_DICT = {
'single_path': ['path1'],
'multi_path': ['path1', 'path2', 'path3'],
'skip_one': ['skip1'],
'skip_two': ['path1', 'skip2', 'path3'],
'skip_three': ['path1', 'path2', 'skip3'],
'skip_four': ['skip1', 'skip2', 'path3', 'path4'],
'rollback_one': ['rollback1'],
'rollback_two': ['path1', 'rollback2', 'path3'],
'rollback_three': ['path1', 'path2', 'rollback3'],
'rollback_four': ['path1', 'path2', 'rollback3', 'rollback4'],
'mixed_ops': ['rollback1', 'rollback2', 'path3', 'path4',
'skip5', 'skip6'],
'no_paths': ['rollback1', 'skip2'],
'empty': []
}
DATASET_DICT = {
'path1': 'dataset1',
'path2': 'dataset2',
'path3': 'dataset3',
'path4': 'dataset4',
}
TILE_TYPE_DICT = {
'dataset1': [1],
'dataset2': [1, 2],
'dataset3': [1, 2, 3],
'dataset4': [4]
}
BANDS_DICT = {
('dataset1', 1): 'bands1.1',
('dataset2', 1): 'bands2.1',
('dataset2', 2): 'bands2.2',
('dataset3', 1): 'bands3.1',
('dataset3', 2): 'bands3.2',
('dataset3', 3): 'bands3.3',
('dataset4', 4): 'bands4.4'
}
COVERAGE_DICT = {
('dataset1', 1): ['tile1', 'empty2', 'tile3'],
('dataset2', 1): ['tile4'],
('dataset2', 2): ['tile5', 'tile6'],
('dataset3', 1): ['tile1', 'tile2', 'tile3', 'tile4', 'empty5', 'empty6'],
('dataset3', 2): ['tile7', 'empty8'],
('dataset3', 3): ['empty9'],
('dataset4', 4): ['tile4']
}
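# Naming convention for the fixture data above (enforced by the dummy
# classes below): dataset paths starting with 'skip' raise a DatasetError
# in check_metadata(), paths starting with 'rollback' raise in
# create_dataset_record(), and tile footprints starting with 'empty'
# report has_data() == False and are therefore not recorded.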
#
# Database Classes
#
# pylint: disable = missing-docstring
#
# Many of the methods are simple and self documenting and so do not need
# docstrings.
#
class DummyCollection(object):
"""Dummy collection class for testing."""
def __init__(self):
self.tiles = []
# pylint: disable = no-self-use
#
# These methods do not use object data because this is a dummy
# class for testing, but the methods in a real implementation will,
# so these take self as a parameter for consistency.
def check_metadata(self, dataset):
"""Raise a DatasetError if the dataset path starts with 'skip'."""
LOGGER.info("Check metadata.")
if re.match(r'^skip', dataset.dataset_path):
raise DatasetError("Testing skip dataset.")
def get_temp_tile_directory(self):
LOGGER.info("Get temporary tile directory.")
LOGGER.info(" returning: '%s'", TEMP_TILE_DIR)
return TEMP_TILE_DIR
def begin_transaction(self):
LOGGER.info("Begin transaction.")
def commit_transaction(self):
LOGGER.info("Commit transaction.")
def rollback_transaction(self):
LOGGER.info("Rollback transaction.")
def create_acquisition_record(self, dataset):
LOGGER.info("Create acquistion record:")
LOGGER.info(" dataset = %s", dataset)
acquisition_record = DummyAcquisitionRecord(self, dataset)
LOGGER.info(" returning: %s", acquisition_record)
return acquisition_record
def create_tile_contents(self, tile_type_id, tile_footprint, band_stack):
LOGGER.info("Create tile contents:")
LOGGER.info(" tile_type_id = %s", tile_type_id)
LOGGER.info(" tile_footprint = %s", tile_footprint)
LOGGER.info(" band_stack = %s", band_stack)
tile_contents = DummyTileContents(tile_type_id,
tile_footprint,
band_stack)
LOGGER.info(" returning: %s", tile_contents)
return tile_contents
def print_tiles(self):
"""Print the final tile list to the log file."""
print_tiles("output tiles", self.tiles)
# pylint: enable = no-self-use
class DummyAcquisitionRecord(object):
"""Dummy aquisition record class for testing."""
def __init__(self, collection, dataset):
self.collection = collection
self.dataset = dataset
def __str__(self):
return "[AcquisitionRecord %s]" % self.dataset
def create_dataset_record(self, dataset):
"""Raise a DatasetError if the dataset path starts with 'rollback'."""
LOGGER.info("Create dataset record:")
LOGGER.info(" dataset = %s", dataset)
if re.match(r'^rollback', dataset.dataset_path):
raise DatasetError("Testing transaction rollback.")
assert self.dataset is dataset, \
"Mismatched datasets in acquisition record."
dataset_record = DummyDatasetRecord(self.collection, self.dataset)
LOGGER.info(" returning: %s", dataset_record)
return dataset_record
class DummyDatasetRecord(object):
"""Dummy dataset record class for testing."""
def __init__(self, collection, dataset):
self.collection = collection
self.dataset_id = DATASET_DICT[dataset.dataset_path]
def __str__(self):
return "[DatasetRecord %s]" % self.dataset_id
def mark_as_tiled(self):
LOGGER.info("%s: mark as tiled.", self)
def list_tile_types(self):
LOGGER.info("%s: list tile types.", self)
tile_types = TILE_TYPE_DICT[self.dataset_id]
LOGGER.info(" returning: %s", tile_types)
return tile_types
def get_tile_bands(self, tile_type_id):
LOGGER.info("%s: get tile bands:", self)
LOGGER.info(" tile_type_id = %s", tile_type_id)
tile_bands = BANDS_DICT[(self.dataset_id, tile_type_id)]
LOGGER.info(" returning: %s", tile_bands)
return tile_bands
def get_coverage(self, tile_type_id):
LOGGER.info("%s: get_coverage:", self)
LOGGER.info(" tile_type_id = %s", tile_type_id)
coverage = COVERAGE_DICT[(self.dataset_id, tile_type_id)]
LOGGER.info(" returning: %s", coverage)
return coverage
def create_tile_record(self, tile_contents):
LOGGER.info("%s: create tile record:", self)
LOGGER.info(" tile_contents = %s", tile_contents)
return DummyTileRecord(self.collection,
self.dataset_id,
tile_contents)
class DummyTileRecord(object):
"""Dummy tile record class for testing."""
def __init__(self, collection, dataset_id, tile_contents):
"""Creates a dummy tile record, and adds the tile to the
collection tile list."""
self.collection = collection
self.dataset_id = dataset_id
self.tile_footprint = tile_contents.tile_footprint
self.band_list = tile_contents.band_stack.band_list
assert tile_contents.reprojected, \
"Expected tile_contents to have been reprojected."
tile_tuple = (self.dataset_id, self.tile_footprint, self.band_list)
self.collection.tiles.append(tile_tuple)
def __str__(self):
return "[TileRecord %s %s %s]" % \
(self.dataset_id, self.tile_footprint, self.band_list)
def make_mosaics(self):
LOGGER.info("%s: make mosaics", self)
class DummyTileContents(object):
"""Dummy tile contents class for testing."""
def __init__(self, tile_type_id, tile_footprint, band_stack):
self.tile_type_id = tile_type_id
self.tile_footprint = tile_footprint
self.band_stack = band_stack
self.reprojected = False
self.removed = False
assert band_stack.vrt_built, \
"Expected band_stack to have had a vrt built."
def __str__(self):
return ("[TileContents %s %s %s]" %
(self.tile_type_id, self.tile_footprint, self.band_stack))
def reproject(self):
LOGGER.info("%s: reproject", self)
self.reprojected = True
def has_data(self):
"""Returns False if the tile footprint starts with 'empty',
True otherwise."""
LOGGER.info("%s: has_data", self)
assert not self.removed, "%s: has been removed." % self
result = bool(not re.match(r'^empty', self.tile_footprint))
LOGGER.info(" returning: %s", result)
return result
def remove(self):
LOGGER.info("%s: remove", self)
self.removed = True
#
# Dataset Classes
#
class DummyDataset(object):
"""Dummy dataset class for testing."""
def __init__(self, dataset_path):
self.dataset_path = dataset_path
def __str__(self):
return "[Dataset %s]" % self.dataset_path
#pylint:disable=no-self-use
def get_x_ref(self):
return None
def get_y_ref(self):
return None
def get_start_datetime(self):
return None
#pylint:enable=no-self-use
def stack_bands(self, band_list):
LOGGER.info("%s: stack_bands:", self)
LOGGER.info(" band_list = %s", band_list)
band_stack = DummyBandStack(band_list)
LOGGER.info(" returning: %s", band_stack)
return band_stack
class DummyBandStack(object):
"""Dummy band stack class for testing."""
def __init__(self, band_list):
self.band_list = band_list
self.vrt_built = False
def __str__(self):
return "[BandStack %s]" % self.band_list
def buildvrt(self, temp_dir):
LOGGER.info("%s: buildvrt:", self)
LOGGER.info(" temp_dir = '%s'", temp_dir)
assert temp_dir == TEMP_TILE_DIR, \
"Unexpected temp_dir, should be '%s'." % TEMP_TILE_DIR
self.vrt_built = True
# pylint: enable = missing-docstring
#
# DummyIngester class
#
class DummyIngester(AbstractIngester):
"""Dummy Ingester subclass for testing."""
def __init__(self, collection):
"""Initialise the source_dir cache then call Ingester init"""
self.source_dir = None
AbstractIngester.__init__(self, collection=collection)
def find_datasets(self, source_dir):
"""Cache source directory then return dummy dataset paths."""
LOGGER.info("Ingester: find datasets")
LOGGER.info(" source_dir = %s", source_dir)
self.source_dir = source_dir
dataset_list = DATASET_PATH_DICT[source_dir]
LOGGER.info(" returning: %s", dataset_list)
return dataset_list
def open_dataset(self, dataset_path):
"""Check dataset_path then return dummy dataset object."""
LOGGER.info("Ingester: open dataset")
LOGGER.info(" dataset_path = %s", dataset_path)
assert dataset_path in DATASET_PATH_DICT[self.source_dir], \
"Unexpected dataset path while opening dataset."
dataset = DummyDataset(dataset_path)
LOGGER.info(" returning: %s", dataset)
return dataset
#
# Utility functions
#
def print_tiles(title, tiles):
"""Print a list of tiles to the log file."""
LOGGER.info("")
LOGGER.info("%s:", title)
for tile in tiles:
LOGGER.info(" %s", tile)
#
# Test suite
#
# pylint: disable=too-many-public-methods
#
# Disabled to avoid complaints about the unittest.TestCase class (which has too
# many public methods according to pylint).
#
class TestIngester(unittest.TestCase):
"""Unit test for the AbstractIngester class.
This is a partially abstract class, so the DummyIngester subclass
(defined above) is actually under test here."""
MODULE = 'abstract_ingester'
SUITE = 'TestIngester'
OUTPUT_DIR = dbutil.output_directory(MODULE, SUITE)
EXPECTED_DIR = dbutil.expected_directory(MODULE, SUITE)
def setUp(self):
#
# Parse out the name of the test case and use it to name a logfile
#
match = re.search(r'\.([^\.]+)$', self.id())
if match:
name = match.group(1)
else:
name = 'TestIngester'
logfile_name = "%s.log" % name
self.logfile_path = os.path.join(self.OUTPUT_DIR, logfile_name)
self.expected_path = os.path.join(self.EXPECTED_DIR, logfile_name)
#
# Set up a handler to log to the logfile, and attach it to the
# root logger.
#
self.handler = logging.FileHandler(self.logfile_path, mode='w')
self.handler.setLevel(logging.INFO)
self.handler.setFormatter(logging.Formatter('%(message)s'))
root_logger = logging.getLogger()
root_logger.addHandler(self.handler)
root_logger.setLevel(logging.DEBUG)
#
# Create the collection and ingester
#
self.collection = DummyCollection()
self.ingester = DummyIngester(self.collection)
def tearDown(self):
#
# Flush the handler and remove it from the root logger.
#
self.handler.flush()
root_logger = logging.getLogger()
root_logger.removeHandler(self.handler)
def check_log_file(self):
"""If an expected logfile exists, check to see if it matches."""
self.handler.flush()
if not os.path.isfile(self.expected_path):
self.skipTest("Expected log file not found.")
else:
try:
subprocess.check_output(['diff',
self.logfile_path,
self.expected_path])
except subprocess.CalledProcessError as err:
self.fail("Log file does not match the expected log file:\n" +
err.output)
def remove_log_file(self):
"""Remove the logfile from the output directory."""
os.remove(self.logfile_path)
def check_tiles(self, source_dir):
"""Check the tiles recorded in the collection against expectations."""
output_tiles = self.collection.tiles
expected_tiles = self.generate_tiles(source_dir)
self.assertEqual(set(output_tiles), set(expected_tiles))
@staticmethod
def generate_tiles(source_dir):
"""Generate the expected tiles for a given source directory.
This replicates the ingest algorithm, only it is much simpler
because it only has to deal with the test data."""
tiles = []
for dataset_path in DATASET_PATH_DICT[source_dir]:
if not re.match(r'(skip)|(rollback)', dataset_path):
dataset_id = DATASET_DICT[dataset_path]
for tile_type_id in TILE_TYPE_DICT[dataset_id]:
tup = (dataset_id, tile_type_id)
bands = BANDS_DICT[tup]
for tile_footprint in COVERAGE_DICT[tup]:
if not re.match(r'empty', tile_footprint):
tiles.append((dataset_id, tile_footprint, bands))
return tiles
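# Worked example from the fixture data above: for source_dir 'single_path'
# the only dataset path is 'path1' -> 'dataset1' with tile type 1 and
# bands 'bands1.1'; of the footprints ['tile1', 'empty2', 'tile3'] the
# 'empty*' one is dropped, so the expected tiles are
# [('dataset1', 'tile1', 'bands1.1'), ('dataset1', 'tile3', 'bands1.1')].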
def test_single_path_tiles(self):
"""Test for a single dataset path: check tiles."""
self.ingester.ingest('single_path')
self.check_tiles('single_path')
self.remove_log_file()
def test_multi_path_tiles(self):
"""Test for multiple dataset paths: check tiles."""
self.ingester.ingest('multi_path')
self.check_tiles('multi_path')
self.remove_log_file()
def test_skip_one_tiles(self):
"""Test for skipped datasets, test one: check tiles."""
self.ingester.ingest('skip_one')
self.check_tiles('skip_one')
self.remove_log_file()
def test_skip_two_tiles(self):
"""Test for skipped datasets, test two: check tiles."""
self.ingester.ingest('skip_two')
self.check_tiles('skip_two')
self.remove_log_file()
def test_skip_three_tiles(self):
"""Test for skipped datasets, test three: check tiles."""
self.ingester.ingest('skip_three')
self.check_tiles('skip_three')
self.remove_log_file()
def test_skip_four_tiles(self):
"""Test for skipped datasets, test four: check tiles."""
self.ingester.ingest('skip_four')
self.check_tiles('skip_four')
self.remove_log_file()
def test_rollback_one_tiles(self):
"""Test for transaction rollback, test one: check tiles."""
self.ingester.ingest('rollback_one')
self.check_tiles('rollback_one')
self.remove_log_file()
def test_rollback_two_tiles(self):
"""Test for transaction rollback, test two: check tiles."""
self.ingester.ingest('rollback_two')
self.check_tiles('rollback_two')
self.remove_log_file()
def test_rollback_three_tiles(self):
"""Test for transaction rollback, test three: check tiles."""
self.ingester.ingest('rollback_three')
self.check_tiles('rollback_three')
self.remove_log_file()
def test_rollback_four_tiles(self):
"""Test for transaction rollback, test four: check tiles."""
self.ingester.ingest('rollback_four')
self.check_tiles('rollback_four')
self.remove_log_file()
def test_mixed_ops_tiles(self):
"""Test for mixed dataset operations: check tiles."""
self.ingester.ingest('mixed_ops')
self.check_tiles('mixed_ops')
self.remove_log_file()
def test_no_paths_tiles(self):
"""Test for source directory with no valid datasets: check tiles."""
self.ingester.ingest('no_paths')
self.check_tiles('no_paths')
self.remove_log_file()
def test_empty_tiles(self):
"""Test for source directory with no datasets: check tiles."""
self.ingester.ingest('empty')
self.check_tiles('empty')
self.remove_log_file()
def test_single_path_log(self):
"""Test for a single dataset path: check tiles."""
self.ingester.ingest('single_path')
self.collection.print_tiles()
self.check_log_file()
def test_multi_path_log(self):
"""Test for multiple dataset paths: check log file."""
self.ingester.ingest('multi_path')
self.collection.print_tiles()
self.check_log_file()
def test_skip_one_log(self):
"""Test for skipped datasets, test one: check log file."""
self.ingester.ingest('skip_one')
self.collection.print_tiles()
self.check_log_file()
def test_skip_two_log(self):
"""Test for skipped datasets, test two: check log file."""
self.ingester.ingest('skip_two')
self.collection.print_tiles()
self.check_log_file()
def test_skip_three_log(self):
"""Test for skipped datasets, test three: check log file."""
self.ingester.ingest('skip_three')
self.collection.print_tiles()
self.check_log_file()
def test_skip_four_log(self):
"""Test for skipped datasets, test four: check log file."""
self.ingester.ingest('skip_four')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_one_log(self):
"""Test for transaction rollback, test one: check log file."""
self.ingester.ingest('rollback_one')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_two_log(self):
"""Test for transaction rollback, test two: check log file."""
self.ingester.ingest('rollback_two')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_three_log(self):
"""Test for transaction rollback, test three: check log file."""
self.ingester.ingest('rollback_three')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_four_log(self):
"""Test for transaction rollback, test four: check log file."""
self.ingester.ingest('rollback_four')
self.collection.print_tiles()
self.check_log_file()
def test_mixed_ops_log(self):
"""Test for mixed dataset operations: check log file."""
self.ingester.ingest('mixed_ops')
self.collection.print_tiles()
self.check_log_file()
def test_no_paths_log(self):
"""Test for source directory with no valid datasets: check log file."""
self.ingester.ingest('no_paths')
self.collection.print_tiles()
self.check_log_file()
def test_empty_log(self):
"""Test for source directory with no datasets: check log file."""
self.ingester.ingest('empty')
self.collection.print_tiles()
self.check_log_file()
#
# Define test suites
#
def the_suite():
"""Returns a test suite of all the tests in this module."""
test_classes = [TestIngester]
suite_list = map(unittest.defaultTestLoader.loadTestsFromTestCase,
test_classes)
suite = unittest.TestSuite(suite_list)
return suite
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
# -*- Mode: Python -*-
# Author: Sam Rushing <rushing@nightmare.com>
# stealing from myself. taken from amk's medusa-0.5.4 distribution,
# then hacked about ten years into the future, buncha stuff ripped
# out.
#
# mostly useful for debugging, should not be distributed with the final client!
#
#
# python REPL channel.
#
import socket
import string
import sys
import time
import asyncore
import asynchat
class monitor_channel (asynchat.async_chat):
try_linemode = 1
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
# local bindings specific to this channel
self.local_env = sys.modules['__main__'].__dict__.copy()
self.push ('Python ' + sys.version + '\r\n')
self.push (sys.copyright+'\r\n')
self.push ('Welcome to the Monitor. You are %r\r\n' % (self.addr,))
self.prompt()
self.number = server.total_sessions
self.line_counter = 0
self.multi_line = []
def handle_connect (self):
# send IAC DO LINEMODE
self.push ('\377\375\"')
def close (self):
self.server.closed_sessions += 1
asynchat.async_chat.close(self)
def prompt (self):
self.push ('>>> ')
def collect_incoming_data (self, data):
self.data = self.data + data
if len(self.data) > 1024:
# denial of service.
self.push ('BCNU\r\n')
self.close_when_done()
def found_terminator (self):
line = self.clean_line (self.data)
self.data = ''
self.line_counter += 1
# check for special case inputs...
if not line and not self.multi_line:
self.prompt()
return
if line in ['\004', 'exit']:
self.push ('BCNU\r\n')
self.close_when_done()
return
oldout = sys.stdout
olderr = sys.stderr
try:
p = output_producer(self, olderr)
sys.stdout = p
sys.stderr = p
try:
# this is, of course, a blocking operation.
# if you wanted to thread this, you would have
# to synchronize, etc... and treat the output
# like a pipe. Not Fun.
#
# try eval first. If that fails, try exec. If that fails,
# hurl.
try:
if self.multi_line:
# oh, this is horrible...
raise SyntaxError
co = compile (line, repr(self), 'eval')
result = eval (co, self.local_env)
method = 'eval'
if result is not None:
print repr(result)
self.local_env['_'] = result
except SyntaxError:
try:
if self.multi_line:
if line and line[0] in [' ','\t']:
self.multi_line.append (line)
self.push ('... ')
return
else:
self.multi_line.append (line)
line = string.join (self.multi_line, '\n')
co = compile (line, repr(self), 'exec')
self.multi_line = []
else:
co = compile (line, repr(self), 'exec')
except SyntaxError, why:
if why[0] == 'unexpected EOF while parsing':
self.push ('... ')
self.multi_line.append (line)
return
else:
t,v,tb = sys.exc_info()
del tb
raise t,v
exec co in self.local_env
method = 'exec'
except:
method = 'exception'
self.multi_line = []
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
finally:
sys.stdout = oldout
sys.stderr = olderr
self.push_with_producer (p)
self.prompt()
# for now, we ignore any telnet option stuff sent to
# us, and we process the backspace key ourselves.
# gee, it would be fun to write a full-blown line-editing
# environment, etc...
def clean_line (self, line):
chars = []
for ch in line:
oc = ord(ch)
if oc < 127:
if oc in [8,177]:
# backspace
chars = chars[:-1]
else:
chars.append (ch)
return string.join (chars, '')
class monitor_server (asyncore.dispatcher):
SERVER_IDENT = 'Bitcoin Monitor Server'
channel_class = monitor_channel
def __init__ (self, hostname='127.0.0.1', port=8023):
asyncore.dispatcher.__init__ (self, socket.socket (socket.AF_INET, socket.SOCK_STREAM))
self.hostname = hostname
self.port = port
self.set_reuse_addr()
self.bind ((hostname, port))
self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
self.listen (5)
self.closed = 0
self.failed_auths = 0
self.total_sessions = 0
self.closed_sessions = 0
def writable (self):
return 0
def handle_accept (self):
conn, addr = self.accept()
self.log_info ('Incoming monitor connection from %s:%d' % addr)
self.channel_class (self, conn, addr)
self.total_sessions += 1
# don't try to print from within any of the methods
# of this object. 8^)
class output_producer:
def __init__ (self, channel, real_stderr):
self.channel = channel
self.data = ''
# use _this_ for debug output
self.stderr = real_stderr
def check_data (self):
if len(self.data) > 1<<16:
# runaway output, close it.
self.channel.close()
def write (self, data):
lines = string.splitfields (data, '\n')
data = string.join (lines, '\r\n')
self.data = self.data + data
self.check_data()
def writeline (self, line):
self.data = self.data + line + '\r\n'
self.check_data()
def writelines (self, lines):
self.data = self.data + string.joinfields (
lines,
'\r\n'
) + '\r\n'
self.check_data()
def flush (self):
pass
def softspace (self, *args):
pass
def more (self):
if self.data:
result = self.data[:512]
self.data = self.data[512:]
return result
else:
return ''
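# Minimal usage sketch (illustrative; assumes this module is importable as
# ``monitor`` and that no other asyncore loop is already running):
#
#     import asyncore
#     import monitor
#
#     monitor.monitor_server(hostname='127.0.0.1', port=8023)
#     asyncore.loop()   # then: telnet 127.0.0.1 8023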
# engine/default.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import codecs
import random
import re
import weakref
from . import interfaces
from . import result as _result
from .. import event
from .. import exc
from .. import pool
from .. import processors
from .. import types as sqltypes
from .. import util
from ..sql import compiler
from ..sql import expression
from ..sql.elements import quoted_name
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
# When we're handed literal SQL, ensure it's a SELECT query
SERVER_SIDE_CURSOR_RE = re.compile(r"\s*SELECT", re.I | re.UNICODE)
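# For example, AUTOCOMMIT_REGEXP.match("UPDATE t SET x=1") is truthy while
# AUTOCOMMIT_REGEXP.match("SELECT x FROM t") is not; SERVER_SIDE_CURSOR_RE
# only matches statements that start with SELECT (ignoring leading
# whitespace and case).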
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
supports_comments = False
inline_comments = False
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
cte_follows_insert = False
supports_native_enum = False
supports_native_boolean = False
non_native_boolean_check_constraint = True
supports_simple_order_by_label = True
tuple_in_values = False
engine_config_types = util.immutabledict(
[
("convert_unicode", util.bool_or_str("force")),
("pool_timeout", util.asint),
("echo", util.bool_or_str("debug")),
("echo_pool", util.bool_or_str("debug")),
("pool_recycle", util.asint),
("pool_size", util.asint),
("max_overflow", util.asint),
]
)
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = "use_encoding"
name = "default"
# length at which to truncate
# any identifier.
max_identifier_length = 9999
_user_defined_max_identifier_length = None
# length at which to truncate
# the name of an index.
# Usually None to indicate
# 'use max_identifier_length'.
# thanks to MySQL, sigh
max_index_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
colspecs = {}
default_paramstyle = "named"
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
supports_is_distinct_from = True
supports_server_side_cursors = False
server_version_info = None
default_schema_name = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the PostgreSQL dialect,
the :class:`.Index` construct will now accept the keyword arguments
``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
dbapi_exception_translation_map = util.immutabledict()
"""mapping used in the extremely unusual case that a DBAPI's
published exceptions don't actually have the __name__ that they
are linked towards.
.. versionadded:: 1.0.5
"""
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`.create_engine.convert_unicode` parameter "
"and corresponding dialect-level parameters are deprecated, "
"and will be removed in a future release. Modern DBAPIs support "
"Python Unicode natively and this parameter is unnecessary.",
),
empty_in_strategy=(
"1.4",
"The :paramref:`.create_engine.empty_in_strategy` keyword is "
"deprecated, and no longer has any effect. All IN expressions "
"are now rendered using "
'the "expanding parameter" strategy which renders a set of bound'
'expressions, or an "empty set" SELECT, at statement execution'
"time.",
),
case_sensitive=(
"1.4",
"The :paramref:`.create_engine.case_sensitive` parameter "
"is deprecated and will be removed in a future release. "
"Applications should work with result column names in a case "
"sensitive fashion.",
),
)
def __init__(
self,
convert_unicode=False,
encoding="utf-8",
paramstyle=None,
dbapi=None,
implicit_returning=None,
case_sensitive=True,
supports_native_boolean=None,
max_identifier_length=None,
label_length=None,
# int() is because the @deprecated_params decorator cannot accommodate
# the direct reference to the "NO_LINTING" object
compiler_linting=int(compiler.NO_LINTING),
**kwargs
):
if not getattr(self, "ported_sqla_06", True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format"
% self.name
)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ("qmark", "format", "numeric")
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
self._user_defined_max_identifier_length = max_identifier_length
if self._user_defined_max_identifier_length:
self.max_identifier_length = (
self._user_defined_max_identifier_length
)
self.label_length = label_length
self.compiler_linting = compiler_linting
if self.description_encoding == "use_encoding":
self._description_decoder = (
processors.to_unicode_processor_factory
)(encoding)
elif self.description_encoding is not None:
self._description_decoder = (
processors.to_unicode_processor_factory
)(self.description_encoding)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@property
def supports_sane_rowcount_returning(self):
"""True if this dialect supports sane rowcount even if RETURNING is
in use.
For dialects that don't support RETURNING, this is synonymous
with supports_sane_rowcount.
"""
return self.supports_sane_rowcount
@classmethod
def get_pool_class(cls, url):
return getattr(cls, "poolclass", pool.QueuePool)
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
def initialize(self, connection):
try:
self.server_version_info = self._get_server_version_info(
connection
)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = self._get_default_schema_name(
connection
)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = self.get_isolation_level(
connection.connection
)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if (
self.description_encoding is not None
and self._check_unicode_description(connection)
):
self._description_decoder = self.description_encoding = None
if not self._user_defined_max_identifier_length:
max_ident_length = self._check_max_identifier_length(connection)
if max_ident_length:
self.max_identifier_length = max_ident_length
if (
self.label_length
and self.label_length > self.max_identifier_length
):
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d"
% (self.label_length, self.max_identifier_length)
)
def on_connect(self):
# inherits the docstring from interfaces.Dialect.on_connect
return None
def _check_max_identifier_length(self, connection):
"""Perform a connection / server version specific check to determine
the max_identifier_length.
If the dialect's class level max_identifier_length should be used,
can return None.
.. versionadded:: 1.3.9
"""
return None
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(
expression.select([test]).compile(dialect=self)
)
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn(
"Exception attempting to "
"detect unicode returns: %r" % de
)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60),
),
# detect if there's an NVARCHAR type with different behavior
# available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60),
),
]
if additional_tests:
tests += additional_tests
results = {check_unicode(test) for test in tests}
if results.issuperset([True, False]):
return "conditional"
else:
return results == {True}
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select(
[expression.literal_column("'x'").label("some_label")]
).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`.types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def has_index(self, connection, table_name, index_name, schema=None):
if not self.has_table(connection, table_name, schema=schema):
return False
for idx in self.get_indexes(connection, table_name, schema=schema):
if idx["name"] == index_name:
return True
else:
return False
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters"
% (ident, self.max_identifier_length)
)
def connect(self, *cargs, **cparams):
# inherits the docstring from interfaces.Dialect.connect
return self.dbapi.connect(*cargs, **cparams)
def create_connect_args(self, url):
# inherits the docstring from interfaces.Dialect.create_connect_args
opts = url.translate_connect_args()
opts.update(url.query)
return [[], opts]
def set_engine_execution_options(self, engine, opts):
if "isolation_level" in opts:
isolation_level = opts["isolation_level"]
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
if "schema_translate_map" in opts:
engine._schema_translate_map = map_ = opts["schema_translate_map"]
@event.listens_for(engine, "engine_connect")
def set_schema_translate_map(connection, branch):
connection._schema_translate_map = map_
def set_connection_execution_options(self, connection, opts):
if "isolation_level" in opts:
self._set_connection_isolation(connection, opts["isolation_level"])
if "schema_translate_map" in opts:
connection._schema_translate_map = opts["schema_translate_map"]
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until "
"next transaction"
)
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.finalize_callback.append(
self.reset_isolation_level
)
def do_begin(self, dbapi_connection):
pass
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
@util.memoized_property
def _dialect_specific_select_one(self):
return str(expression.select([1]).compile(dialect=self))
def do_ping(self, dbapi_connection):
cursor = None
try:
cursor = dbapi_connection.cursor()
try:
cursor.execute(self._dialect_specific_select_one)
finally:
cursor.close()
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, cursor):
return False
else:
raise
else:
return True
def create_xid(self):
"""Create a random two-phase transaction ID.
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
connection.execute(expression.RollbackToSavepointClause(name))
def do_release_savepoint(self, connection, name):
connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters)
def do_execute_no_params(self, cursor, statement, context=None):
cursor.execute(statement)
def is_disconnect(self, e, connection, cursor):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
def normalize_name(self, name):
if name is None:
return None
if util.py2k:
if isinstance(name, str):
name = name.decode(self.encoding)
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
# name has no upper/lower conversion, e.g. non-european characters.
# return unchanged
return name
elif name_upper == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
# name is all uppercase and doesn't require quoting; normalize
# to all lower case
return name_lower
elif name_lower == name:
# name is all lower case, which if denormalized means we need to
# force quoting on it
return quoted_name(name, quote=True)
else:
# name is mixed case, meaning it will be quoted in SQL when used
# later; no normalization needed
return name
def denormalize_name(self, name):
if name is None:
return None
name_lower = name.lower()
name_upper = name.upper()
if name_upper == name_lower:
# name has no upper/lower conversion, e.g. non-european characters.
# return unchanged
return name
elif name_lower == name and not (
self.identifier_preparer._requires_quotes
)(name_lower):
name = name_upper
if util.py2k:
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name) # noqa
return name
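# Illustration (hypothetical, for a dialect with requires_name_normalize
# set, e.g. Oracle-style backends): normalize_name('MYTABLE') -> 'mytable',
# normalize_name('MyTable') -> 'MyTable' (left as-is, quoted when used),
# and denormalize_name('mytable') -> 'MYTABLE'.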
class _RendersLiteral(object):
def literal_processor(self, dialect):
def process(value):
return "'%s'" % value
return process
class _StrDateTime(_RendersLiteral, sqltypes.DateTime):
pass
class _StrDate(_RendersLiteral, sqltypes.Date):
pass
class _StrTime(_RendersLiteral, sqltypes.Time):
pass
class StrCompileDialect(DefaultDialect):
statement_compiler = compiler.StrSQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.StrSQLTypeCompiler
preparer = compiler.IdentifierPreparer
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = False
implicit_returning = False
supports_native_boolean = True
supports_simple_order_by_label = True
colspecs = {
sqltypes.DateTime: _StrDateTime,
sqltypes.Date: _StrDate,
sqltypes.Time: _StrTime,
}
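# Sketch of how StrCompileDialect is typically exercised (illustrative;
# relies only on the public stmt.compile() API):
#
#     from sqlalchemy import table, column, select
#
#     t = table('t', column('x'))
#     print(select([t.c.x]).compile(dialect=StrCompileDialect()))
#     # renders roughly: SELECT t.x FROM t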
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
executemany = False
compiled = None
statement = None
result_column_struct = None
returned_defaults = None
_is_implicit_returning = False
_is_explicit_returning = False
_is_future_result = False
_is_server_side = False
# a hook for SQLite's translation of
# result column names
# NOTE: pyhive is using this hook, can't remove it :(
_translate_colname = None
_expanded_parameters = util.immutabledict()
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
self.unicode_statement = util.text_type(compiled)
if compiled.schema_translate_map:
rst = compiled.preparer._render_schema_translates
self.unicode_statement = rst(
self.unicode_statement, connection._schema_translate_map
)
if not dialect.supports_unicode_statements:
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(
cls,
dialect,
connection,
dbapi_connection,
compiled,
parameters,
invoked_statement,
extracted_parameters,
):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.extracted_parameters = extracted_parameters
self.invoked_statement = invoked_statement
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self._is_future_result = connection._execution_options.get(
"future_result", False
)
self.execution_options = compiled.execution_options.union(
connection._execution_options
)
self.result_column_struct = (
compiled._result_columns,
compiled._ordered_columns,
compiled._textual_ordered_columns,
compiled._loose_column_name_matching,
)
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning
)
if not parameters:
self.compiled_parameters = [
compiled.construct_params(
extracted_parameters=extracted_parameters
)
]
else:
self.compiled_parameters = [
compiled.construct_params(
m,
_group_number=grp,
extracted_parameters=extracted_parameters,
)
for grp, m in enumerate(parameters)
]
self.executemany = len(parameters) > 1
# this must occur before create_cursor() since the statement
# has to be regexed in some cases for server side cursor
self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
if compiled.literal_execute_params or compiled.post_compile_params:
if self.executemany:
raise exc.InvalidRequestError(
"'literal_execute' or 'expanding' parameters can't be "
"used with executemany()"
)
expanded_state = compiled._process_parameters_for_postcompile(
self.compiled_parameters[0]
)
# re-assign self.unicode_statement
self.unicode_statement = expanded_state.statement
# used by set_input_sizes() which is needed for Oracle
self._expanded_parameters = expanded_state.parameter_expansion
processors = dict(processors)
processors.update(expanded_state.processors)
positiontup = expanded_state.positiontup
elif compiled.positional:
positiontup = self.compiled.positiontup
if compiled.schema_translate_map:
rst = compiled.preparer._render_schema_translates
self.unicode_statement = rst(
self.unicode_statement, connection._schema_translate_map
)
# final self.unicode_statement is now assigned, encode if needed
# by dialect
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding
)
else:
self.statement = self.unicode_statement
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if compiled.positional:
for compiled_params in self.compiled_parameters:
param = [
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
for key in positiontup
]
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
@classmethod
def _init_statement(
cls, dialect, connection, dbapi_connection, statement, parameters
):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
self._is_future_result = connection._execution_options.get(
"future_result", False
)
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
{dialect._encoder(k)[0]: d[k] for k in d}
for d in parameters
] or [{}]
else:
self.parameters = [
dialect.execute_sequence_format(p) for p in parameters
]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and isinstance(
statement, util.text_type
):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def engine(self):
return self.root_connection.engine
@util.memoized_property
def postfetch_cols(self):
return self.compiled.postfetch
@util.memoized_property
def prefetch_cols(self):
if self.isinsert:
return self.compiled.insert_prefetch
elif self.isupdate:
return self.compiled.update_prefetch
else:
return ()
@util.memoized_property
def returning_cols(self):
return self.compiled.returning
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get(
"autocommit",
not self.compiled
and self.statement
and expression.PARSE_AUTOCOMMIT
or False,
)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_, parameters=None):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if (
isinstance(stmt, util.text_type)
and not self.dialect.supports_unicode_statements
):
stmt = self.dialect._encoder(stmt)[0]
if not parameters:
if self.dialect.positional:
parameters = self.dialect.execute_sequence_format()
else:
parameters = {}
conn._cursor_execute(self.cursor, stmt, parameters, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect, self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
use_server_side = self.execution_options.get(
"stream_results", True
) and (
(
self.compiled
and isinstance(
self.compiled.statement, expression.Selectable
)
or (
(
not self.compiled
or isinstance(
self.compiled.statement, expression.TextClause
)
)
and self.unicode_statement
and SERVER_SIDE_CURSOR_RE.match(self.unicode_statement)
)
)
)
else:
use_server_side = self.execution_options.get(
"stream_results", False
)
return use_server_side
def create_cursor(self):
if self._use_server_side_cursor():
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self._dbapi_connection.cursor()
def create_server_side_cursor(self):
raise NotImplementedError()
def pre_exec(self):
pass
def get_out_parameter_values(self, names):
raise NotImplementedError(
"This dialect does not support OUT parameters"
)
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions, issuing a new SELECT
on the cursor (or a new one), or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects which support "implicit"
primary key generation, keep preexecute_autoincrement_sequences set to
False, and when no explicit id value was bound to the statement.
The function is called once for an INSERT statement that would need to
return the last inserted primary key for those dialects that make use
of the lastrowid concept. In these cases, it is called directly after
:meth:`.ExecutionContext.post_exec`.
"""
return self.cursor.lastrowid
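    # Illustrative sketch (an assumption, not part of this module): a dialect
    # whose DBAPI lacks a usable cursor.lastrowid might override this hook
    # with a follow-up query, e.g.:
    #
    #     def get_lastrowid(self):
    #         self.cursor.execute("SELECT LAST_INSERT_ID()")
    #         return self.cursor.fetchone()[0]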
def handle_dbapi_exception(self, e):
pass
def get_result_cursor_strategy(self, result):
if self._is_server_side:
strat_cls = _result.BufferedRowCursorFetchStrategy
else:
strat_cls = _result.DefaultCursorFetchStrategy
return strat_cls.create(result)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def _setup_result_proxy(self):
if self.is_crud or self.is_text:
result = self._setup_crud_result_proxy()
else:
result = _result.ResultProxy._create_for_context(self)
if (
self.compiled
and not self.isddl
and self.compiled.has_out_parameters
):
self._setup_out_parameters(result)
return result
def _setup_out_parameters(self, result):
out_bindparams = [
(param, name)
for param, name in self.compiled.bind_names.items()
if param.isoutparam
]
out_parameters = {}
for bindparam, raw_value in zip(
[param for param, name in out_bindparams],
self.get_out_parameter_values(
[name for param, name in out_bindparams]
),
):
type_ = bindparam.type
impl_type = type_.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
result_processor = impl_type.result_processor(
self.dialect, dbapi_type
)
if result_processor is not None:
raw_value = result_processor(raw_value)
out_parameters[bindparam.key] = raw_value
result.out_parameters = out_parameters
def _setup_crud_result_proxy(self):
if self.isinsert and not self.executemany:
if (
not self._is_implicit_returning
and not self.compiled.inline
and self.dialect.postfetch_lastrowid
):
self._setup_ins_pk_from_lastrowid()
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
result = _result.ResultProxy._create_for_context(self)
if self.isinsert:
if self._is_implicit_returning:
row = result._onerow()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
result._soft_close()
result._metadata = None
elif not self._is_explicit_returning:
result._soft_close()
result._metadata = None
elif self.isupdate and self._is_implicit_returning:
row = result._onerow()
self.returned_defaults = row
result._soft_close()
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires open cursor on some drivers
            # such as kinterbasdb, mxodbc)
result.rowcount
result._soft_close()
return result
def _setup_ins_pk_from_lastrowid(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
lastrowid = self.get_lastrowid()
if lastrowid is not None:
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None
)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid
if c is autoinc_col
else compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
else:
# don't have a usable lastrowid, so
# do the same as _setup_ins_pk_from_empty
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_empty(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None) for c in table.primary_key
]
def _setup_ins_pk_from_implicit_returning(self, row):
if row is None:
self.inserted_primary_key = None
return
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
# TODO: why are we using keyed index here? can't we get the ints?
# can compiler build up the structure here as far as what was
# explicit and what comes back in returning?
row_mapping = row._mapping
self.inserted_primary_key = [
row_mapping[col] if value is None else value
for col, value in [
(col, compiled_params.get(key_getter(col), None))
for col in table.primary_key
]
]
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and bool(
self.compiled.postfetch
)
def set_input_sizes(
self, translate=None, include_types=None, exclude_types=None
):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
        This method is only called by those dialects which require it,
        currently cx_oracle.
"""
if not hasattr(self.compiled, "bind_names"):
return
inputsizes = {}
for bindparam in self.compiled.bind_names:
if bindparam in self.compiled.literal_execute_params:
continue
dialect_impl = bindparam.type._unwrapped_dialect_impl(self.dialect)
dialect_impl_cls = type(dialect_impl)
dbtype = dialect_impl.get_dbapi_type(self.dialect.dbapi)
if (
dbtype is not None
and (
not exclude_types
or dbtype not in exclude_types
and dialect_impl_cls not in exclude_types
)
and (
not include_types
or dbtype in include_types
or dialect_impl_cls in include_types
)
):
inputsizes[bindparam] = dbtype
else:
inputsizes[bindparam] = None
if self.dialect._has_events:
self.dialect.dispatch.do_setinputsizes(
inputsizes, self.cursor, self.statement, self.parameters, self
)
if self.dialect.positional:
positional_inputsizes = []
for key in self.compiled.positiontup:
bindparam = self.compiled.binds[key]
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if key in self._expanded_parameters:
positional_inputsizes.extend(
[dbtype] * len(self._expanded_parameters[key])
)
else:
positional_inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*positional_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
else:
keyword_inputsizes = {}
for bindparam, key in self.compiled.bind_names.items():
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if translate:
# TODO: this part won't work w/ the
# expanded_parameters feature, e.g. for cx_oracle
# quoted bound names
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
if key in self._expanded_parameters:
keyword_inputsizes.update(
(expand_key, dbtype)
for expand_key in self._expanded_parameters[key]
)
else:
keyword_inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**keyword_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
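    # Illustrative sketch (an assumption, not taken from this module): a
    # dialect that requires setinputsizes() would typically call this from
    # pre_exec(), excluding types the driver handles better on its own:
    #
    #     def pre_exec(self):
    #         self.set_input_sizes(exclude_types={self.dialect.dbapi.CLOB})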
def _exec_default(self, column, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
self.current_column = column
return default.arg(self)
elif default.is_clause_element:
return self._exec_default_clause_element(column, default, type_)
else:
return default.arg
def _exec_default_clause_element(self, column, default, type_):
# execute a default that's a complete clause element. Here, we have
# to re-implement a miniature version of the compile->parameters->
# cursor.execute() sequence, since we don't want to modify the state
# of the connection / result in progress or create new connection/
# result objects etc.
# .. versionchanged:: 1.4
if not default._arg_is_typed:
default_arg = expression.type_coerce(default.arg, type_)
else:
default_arg = default.arg
compiled = expression.select([default_arg]).compile(
dialect=self.dialect
)
compiled_params = compiled.construct_params()
processors = compiled._bind_processors
if compiled.positional:
positiontup = compiled.positiontup
parameters = self.dialect.execute_sequence_format(
[
processors[key](compiled_params[key])
if key in processors
else compiled_params[key]
for key in positiontup
]
)
else:
parameters = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
return self._execute_scalar(
util.text_type(compiled), type_, parameters=parameters
)
current_parameters = None
"""A dictionary of parameters applied to the current row.
This attribute is only available in the context of a user-defined default
generation function, e.g. as described at :ref:`context_default_functions`.
It consists of a dictionary which includes entries for each column/value
pair that is to be part of the INSERT or UPDATE statement. The keys of the
dictionary will be the key value of each :class:`.Column`, which is usually
synonymous with the name.
Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute
does not accommodate for the "multi-values" feature of the
:meth:`.Insert.values` method. The
:meth:`.DefaultExecutionContext.get_current_parameters` method should be
preferred.
.. seealso::
:meth:`.DefaultExecutionContext.get_current_parameters`
:ref:`context_default_functions`
"""
def get_current_parameters(self, isolate_multiinsert_groups=True):
"""Return a dictionary of parameters applied to the current row.
This method can only be used in the context of a user-defined default
generation function, e.g. as described at
:ref:`context_default_functions`. When invoked, a dictionary is
returned which includes entries for each column/value pair that is part
of the INSERT or UPDATE statement. The keys of the dictionary will be
the key value of each :class:`.Column`, which is usually synonymous
with the name.
:param isolate_multiinsert_groups=True: indicates that multi-valued
INSERT constructs created using :meth:`.Insert.values` should be
handled by returning only the subset of parameters that are local
to the current column default invocation. When ``False``, the
raw parameters of the statement are returned including the
naming convention used in the case of multi-valued INSERT.
.. versionadded:: 1.2 added
:meth:`.DefaultExecutionContext.get_current_parameters`
which provides more functionality over the existing
:attr:`.DefaultExecutionContext.current_parameters`
attribute.
.. seealso::
:attr:`.DefaultExecutionContext.current_parameters`
:ref:`context_default_functions`
"""
try:
parameters = self.current_parameters
column = self.current_column
except AttributeError:
raise exc.InvalidRequestError(
"get_current_parameters() can only be invoked in the "
"context of a Python side column default function"
)
compile_state = self.compiled.compile_state
if (
isolate_multiinsert_groups
and self.isinsert
and compile_state._has_multi_parameters
):
if column._is_multiparam_column:
index = column.index + 1
d = {column.original.key: parameters[column.key]}
else:
d = {column.key: parameters[column.key]}
index = 0
keys = compile_state._dict_parameters.keys()
d.update(
(key, parameters["%s_m%d" % (key, index)]) for key in keys
)
return d
else:
return parameters
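    # Illustrative sketch (an assumption, not part of this module): a
    # Python-side column default that reads sibling parameters through the
    # execution context, for a hypothetical "accounts" table:
    #
    #     from sqlalchemy import Table, Column, Integer, String, MetaData
    #
    #     def derive_code(context):
    #         params = context.get_current_parameters()
    #         return params["name"][:3].upper()
    #
    #     accounts = Table(
    #         "accounts", MetaData(),
    #         Column("id", Integer, primary_key=True),
    #         Column("name", String(50)),
    #         Column("code", String(50), default=derive_code),
    #     )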
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column, column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column, column.onupdate, column.type)
def _process_executemany_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
scalar_defaults = {}
insert_prefetch = self.compiled.insert_prefetch
update_prefetch = self.compiled.update_prefetch
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in insert_prefetch:
if c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
for c in update_prefetch:
if c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in insert_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_insert_default(c)
if val is not None:
param[key_getter(c)] = val
for c in update_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
def _process_executesingle_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
self.current_parameters = (
compiled_parameters
) = self.compiled_parameters[0]
for c in self.compiled.insert_prefetch:
if c.default and not c.default.is_sequence and c.default.is_scalar:
val = c.default.arg
else:
val = self.get_insert_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
for c in self.compiled.update_prefetch:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
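# Illustrative sketch (an assumption, not part of this module): a concrete
# dialect customizes execution by subclassing the context and pointing its
# execution_ctx_cls at the subclass, e.g.:
#
#     class MyExecutionContext(DefaultExecutionContext):
#         def create_server_side_cursor(self):
#             return self._dbapi_connection.cursor(name="ss_cursor")
#
#     class MyDialect(DefaultDialect):
#         supports_server_side_cursors = True
#         execution_ctx_cls = MyExecutionContext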
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift.common.splice`'''
import os
import errno
import ctypes
import logging
import tempfile
import unittest
import contextlib
import re
import mock
import nose
import six
from swift.common.splice import splice, tee
LOGGER = logging.getLogger(__name__)
def NamedTemporaryFile():
    '''Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base.
'''
if six.PY3:
return tempfile.NamedTemporaryFile(buffering=0)
else:
return tempfile.NamedTemporaryFile(bufsize=0)
def safe_close(fd):
'''Close a file descriptor, ignoring any exceptions'''
try:
os.close(fd)
except Exception:
LOGGER.exception('Error while closing FD')
@contextlib.contextmanager
def pipe():
'''Context-manager providing 2 ends of a pipe, closing them at exit'''
fds = os.pipe()
try:
yield fds
finally:
safe_close(fds[0])
safe_close(fds[1])
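# Minimal usage sketch for the helper above:
#
#     with pipe() as (rfd, wfd):
#         os.write(wfd, b'ping')
#         assert os.read(rfd, 4) == b'ping'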
class TestSplice(unittest.TestCase):
'''Tests for `splice`'''
def setUp(self):
if not splice.available:
raise nose.SkipTest('splice not available')
def test_flags(self):
'''Test flag attribute availability'''
self.assertTrue(hasattr(splice, 'SPLICE_F_MOVE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_NONBLOCK'))
self.assertTrue(hasattr(splice, 'SPLICE_F_MORE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_GIFT'))
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(splice.available)
def test_splice_pipe_to_pipe(self):
'''Test `splice` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = splice(p1a, None, p2b, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 3), b'def')
def test_splice_file_to_pipe(self):
'''Test `splice` from a file to a pipe'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
fd.write(b'abcdef')
fd.seek(0, os.SEEK_SET)
res = splice(fd, None, pb, None, 3, 0)
self.assertEqual(res, (3, None, None))
# `fd.tell()` isn't updated...
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 3)
fd.seek(0, os.SEEK_SET)
res = splice(fd, 3, pb, None, 3, 0)
self.assertEqual(res, (3, 6, None))
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 0)
self.assertEqual(os.read(pa, 6), b'abcdef')
def test_splice_pipe_to_file(self):
'''Test `splice` from a pipe to a file'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
os.write(pb, b'abcdef')
res = splice(pa, None, fd, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(fd.tell(), 3)
fd.seek(0, os.SEEK_SET)
res = splice(pa, None, fd, 3, 3, 0)
self.assertEqual(res, (3, None, 6))
self.assertEqual(fd.tell(), 0)
self.assertEqual(fd.read(6), b'abcdef')
@mock.patch.object(splice, '_c_splice')
def test_fileno(self, mock_splice):
'''Test handling of file-descriptors'''
splice(1, None, 2, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
mock_splice.reset_mock()
with open('/dev/zero', 'r') as fd:
splice(fd, None, fd, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((fd.fileno(), None, fd.fileno(), None, 3, 0),
{}))
@mock.patch.object(splice, '_c_splice')
def test_flags_list(self, mock_splice):
'''Test handling of flag lists'''
splice(1, None, 2, None, 3,
[splice.SPLICE_F_MOVE, splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, flags), {}))
mock_splice.reset_mock()
splice(1, None, 2, None, 3, [])
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] splice: %s' % (err, os.strerror(err))
try:
splice(fd, None, fd, None, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, splice, 1, None, 2, None, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `splice` support'''
class LibC(object):
'''A fake `libc` object tracking `splice` attribute access'''
def __init__(self):
self.splice_retrieved = False
@property
def splice(self):
self.splice_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Splice` instance
# Something you're not supposed to do in actual code
new_splice = type(splice)()
self.assertFalse(new_splice.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.splice_retrieved)
class TestTee(unittest.TestCase):
'''Tests for `tee`'''
def setUp(self):
if not tee.available:
raise nose.SkipTest('tee not available')
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(tee.available)
def test_tee_pipe_to_pipe(self):
'''Test `tee` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = tee(p1a, p2b, 3, 0)
self.assertEqual(res, 3)
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 6), b'abcdef')
@mock.patch.object(tee, '_c_tee')
def test_fileno(self, mock_tee):
'''Test handling of file-descriptors'''
with pipe() as (pa, pb):
tee(pa, pb, 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
mock_tee.reset_mock()
tee(os.fdopen(pa, 'r'), os.fdopen(pb, 'w'), 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
@mock.patch.object(tee, '_c_tee')
def test_flags_list(self, mock_tee):
'''Test handling of flag lists'''
tee(1, 2, 3, [splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_tee.call_args, ((1, 2, 3, flags), {}))
mock_tee.reset_mock()
tee(1, 2, 3, [])
self.assertEqual(mock_tee.call_args, ((1, 2, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] tee: %s' % (err, os.strerror(err))
try:
tee(fd, fd, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, tee, 1, 2, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `tee` support'''
class LibC(object):
'''A fake `libc` object tracking `tee` attribute access'''
def __init__(self):
self.tee_retrieved = False
@property
def tee(self):
self.tee_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Tee` instance
# Something you're not supposed to do in actual code
new_tee = type(tee)()
self.assertFalse(new_tee.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.tee_retrieved)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for remote diagnostics of system VMs
"""
import urllib
from marvin.cloudstackAPI import (runDiagnostics, getDiagnosticsData)
from marvin.cloudstackTestCase import cloudstackTestCase
# Import Local Modules
from marvin.codes import FAILED
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine)
from marvin.lib.common import (get_domain,
get_zone,
get_test_template,
list_ssvms,
list_routers)
from nose.plugins.attrib import attr
class TestRemoteDiagnostics(cloudstackTestCase):
"""
Test remote diagnostics with system VMs and VR as root admin
"""
@classmethod
def setUpClass(cls):
testClient = super(TestRemoteDiagnostics, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.hypervisor = testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
template = get_test_template(
cls.apiclient,
cls.zone.id,
cls.hypervisor
)
if template == FAILED:
cls.fail("get_test_template() failed to return template")
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
# Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls._cleanup.append(cls.service_offering)
cls.vm_1 = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls._cleanup.append(cls.vm_1)
@classmethod
def tearDownClass(cls):
super(TestRemoteDiagnostics,cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
self.cleanup = []
def tearDown(self):
super(TestRemoteDiagnostics,self).tearDown()
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_01_ping_in_vr_success(self):
'''
Test Ping command execution in VR
'''
# Validate the following:
# 1. Ping command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_02_ping_in_vr_failure(self):
'''
Test Ping command execution in VR
'''
# Validate the following:
# 1. Ping command is executed remotely on VR
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
'Check diagnostics command returns a non-zero exit code')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_03_ping_in_ssvm_success(self):
'''
Test Ping command execution in SSVM
'''
# Validate the following:
# 1. Ping command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_04_ping_in_ssvm_failure(self):
'''
Test Ping command execution in SSVM
'''
# Validate the following:
# 1. Ping command is executed remotely on SSVM
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
            'Check diagnostics command returns a non-zero exit code'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_05_ping_in_cpvm_success(self):
'''
Test Ping command execution in CPVM
'''
# Validate the following:
# 1. Ping command is executed remotely on CPVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_ssvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '8.8.8.8'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Ping in CPVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_06_ping_in_cpvm_failure(self):
'''
Test Ping command execution in CPVM
'''
# Validate the following:
# 1. Ping command is executed remotely on CPVM
        # 2. Validate Ping command execution with a non-existent/non-pingable IP address
if self.hypervisor.lower() == 'simulator':
raise self.skipTest("Skipping negative test case for Simulator hypervisor")
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_ssvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '192.0.2.2'
cmd.type = 'ping'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertNotEqual(
'0',
cmd_response.exitcode,
'Check diagnostics command returns a non-zero exit code'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_07_arping_in_vr(self):
'''
Test Arping command execution in VR
'''
# Validate the following:
# 1. Arping command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = router.gateway
cmd.type = 'arping'
cmd.params = "-I eth2"
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_08_arping_in_ssvm(self):
'''
Test Arping command execution in SSVM
'''
# Validate the following:
# 1. Arping command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = ssvm.gateway
cmd.type = 'arping'
cmd.params = '-I eth2'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_09_arping_in_cpvm(self):
'''
Test Arping command execution in CPVM
'''
# Validate the following:
# 1. Arping command is executed remotely on CPVM
list_cpvm_response = list_ssvms(
self.apiclient,
            systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = cpvm.gateway
cmd.type = 'arping'
cmd.params = '-I eth2'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Arping in CPVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_10_traceroute_in_vr(self):
'''
        Test Traceroute command execution in VR
        '''
        # Validate the following:
        # 1. Traceroute command is executed remotely on VR
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Starting the router with ID: %s' % router.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = router.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = "-m 10"
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
            'Failed to run remote Traceroute in VR')
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_11_traceroute_in_ssvm(self):
'''
Test Traceroute command execution in SSVM
'''
# Validate the following:
# 1. Traceroute command is executed remotely on SSVM
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = ssvm.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = '-m 10'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Traceroute in SSVM'
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_12_traceroute_in_cpvm(self):
'''
        Test Traceroute command execution in CPVM
'''
# Validate the following:
# 1. Traceroute command is executed remotely on CPVM
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
        self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = runDiagnostics.runDiagnosticsCmd()
cmd.targetid = cpvm.id
cmd.ipaddress = '8.8.4.4'
cmd.type = 'traceroute'
cmd.params = '-m 10'
cmd_response = self.apiclient.runDiagnostics(cmd)
self.assertEqual(
'0',
cmd_response.exitcode,
'Failed to run remote Traceroute in CPVM'
)
    # BVT tests for the getDiagnosticsData API
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_13_retrieve_vr_default_files(self):
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Setting up VR with ID %s' % router.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = router.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
    def check_url(self, url):
        '''Return True if the URL answers with HTTP 200, False otherwise'''
        import urllib2
        try:
            return urllib2.urlopen(url).getcode() == 200
        except (urllib2.HTTPError, urllib2.URLError):
            return False
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_14_retrieve_vr_one_file(self):
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
self.debug('Setting up VR with ID %s' % router.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = router.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_15_retrieve_ssvm_default_files(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = ssvm.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_16_retrieve_ssvm_single_file(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
'Check list response returns a valid list'
)
ssvm = list_ssvm_response[0]
self.debug('Setting up SSVM with ID %s' % ssvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = ssvm.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_17_retrieve_cpvm_default_files(self):
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = cpvm.id
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="true")
def test_18_retrieve_cpvm_single_file(self):
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
state='Running',
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
'Check list response returns a valid list'
)
cpvm = list_cpvm_response[0]
self.debug('Setting up CPVM with ID %s' % cpvm.id)
cmd = getDiagnosticsData.getDiagnosticsDataCmd()
cmd.targetid = cpvm.id
cmd.type = "/var/log/cloud.log"
response = self.apiclient.getDiagnosticsData(cmd)
is_valid_url = self.check_url(response.url)
self.assertEqual(
True,
is_valid_url,
msg="Failed to create valid download url response"
)
from libs.graph.DLinkedList import Queue, DoubledLinkedList as List
from libs.graph.PriorityQueue import PriorityQueueBinary as PriorityQueue
from libs.graph.Tree import *
#it is better to use a DoubledLinkedList so that the adjacency lists used in
#the graph representation can be operated on efficiently
class Node:
def __init__(self, elem, index, weight = None):
"""
this class represents a graph node
:param elem: an object stored into the node
:param index: int, the index by which the node may be identified
:param weight: int, the weight of the node and of his object - may not be used
"""
self._elem = elem
self._index = index
self._weight = weight
self._token = None #used to mark each node during a generic visit
self._distance = 0 #used to set and retrieve the distance of the node in the visit
self._knights = 0 #used to keep trace of the knights in the node
self._knights_arrived = []
def get_elem(self):
"""
:return: object stored in the node
"""
return self._elem
def get_index(self):
"""
:return: int, the index of the node
"""
return self._index
def get_weight(self):
"""
:return: int, the weight of the node
"""
return self._weight
def get_token(self):
"""
:return: int, the token of the node
"""
return self._token
def set_token(self, token):
"""
:param token: int, the validation token
:return: int, the token of the node
"""
self._token = token
def get_node(self):
"""
        :return: tuple, (elem, weight)
"""
return self.get_elem(), self.get_weight()
def set_distance(self, dist):
"""
        this function accumulates a distance onto the node and counts one more
        arriving knight; it provides a convenient interface for the BFS and
        Dijkstra shortest-path algorithms
        :param dist: int, distance to add
:return: None
"""
self._distance += dist
self._knights += 1
def get_distance(self):
"""
:return: int, the distance calculated for the node
"""
return self._distance
def get_count(self):
"""
:return: int, the number of knights
"""
return self._knights
#I'll use an adjacency-list graph because all the arcs have unit weight
class GraphAdjacenceList:
def __init__(self):
"""
this class represents a graph using an adjacency list style
"""
self._nodes = dict() #to store the nodes
self._adjacency = dict() #to link the nodes to their adjacence list
        self._nextId = 0 #next index to assign to an inserted node - ids start at 0
self._nodes_elems = dict() #it will be used to store the elems inserted
def getNodes(self):
"""
this function is used as an interface to retrieve graph's nodes
:return: (dictionary, dictionary) the nodes and their adjacency lists
"""
return self._nodes, self._adjacency
def insertNode(self, elem, weight = None):
"""
this function allows the user to insert a node into the graph
:param elem: the elem to be stored into the node
:param weight: the weight of the node
:return: Node, the node already inserted or just inserted
"""
if elem in self._nodes_elems:
            #if the node has already been inserted it will be returned
            #assuming the computational cost of this check, as implemented in
            #python, to be a dictionary access -> O(1)
return self._nodes_elems[elem]
newNode = Node(elem, self._nextId, weight)
self._nodes[newNode.get_index()] = newNode
self._adjacency[newNode.get_index()] = List()
self._nextId += 1
#storing the elem just inserted
self._nodes_elems[elem] = newNode
return newNode
def linkNode(self, tail, head):
"""
this function links two nodes in a direct connection
:param tail: Node, the tail node
:param head: Node, the head node
:return: None
"""
adj = self._adjacency[tail.get_index()]
if head not in adj.getLastAddedList():
#assuming direct memory access... (see previous method)
adj.addAsLast(head)
def printGraph(self):
"""
this function builds a well formatted visualization of the nodes
:return: list, a list of nodes visual formatted
"""
print("Adjacency Lists:")
for identifier in self._nodes:
print("node", self._nodes[identifier].get_elem(), self._nodes[identifier].get_weight())
self._adjacency[identifier].printList()
print("")
#The chessboard graph has unit-weight arcs, so a Breadth First Search is enough to return the list of all the
#minimum-path trees, each one rooted at a knight
def validateNodes(self, token):
"""
        this function validates all nodes with a token value in order to accomplish the visit
:param token: int, the token value to validate the node. 0 if not visited, 21 if explored and 42 (for Douglas) if closed
:return: None
"""
nodes = self.getNodes()[0]
for node in nodes.itervalues():
node.set_token(token)
def visitBFS(self, node):
"""
        this is a Breadth First Search starting from a vertex. Please note that all the operations are done on the leaves
        to keep the algorithm modular (this doesn't seem to affect the computational time, which remains proportional
        to the size of the graph)
:param node: Node, the starting vertex
:return: Tree, representing the visit path
"""
#initializing some useful constants (funny constants too)
unexplored = 0
explored = 21
closed = 42 #So long and thanks for all the fish!
#validating all the nodes as unexplored and starting from the vertex
self.validateNodes(unexplored)
node.set_token(explored)
#initializing the tree containing the only vertex
T_root = Leaf(node)
T_root.setDistance(0.0) #using the float - it is not a counter value
T = Tree(T_root)
#initializing the fringe of the visit
F = Queue()
F.enqueue(T_root)
while not F.isEmpty():
u = F.dequeue()
n = u.getElem()
n.set_token(closed)
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == unexplored:
v.set_token(explored)
l = Leaf(v)
F.enqueue(l)
T.insertLeaf(l, u)
return T
def visitNodesBFS(self, Nodes):
"""
this is a simple implementation of a Breadth First Search algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.visitBFS(node)
T_list.append(tree)
return T_list
#it is interesting to achieve the same result using Dijkstra's minimum path algorithm
def Dijkstra(self, node):
"""
        this is a Dijkstra shortest-path algorithm implementation starting from a vertex
:param node: Node, the starting vertex
:return: Tree, the shortest paths tree
"""
INF = float('inf')
self.validateNodes(INF)
#we will use the nodes' tokens to store the distance info!
node.set_token(0.0) #0-distance from itself!
#initializing the tree
T_root = Leaf(node)
T_root.setDistance(node.get_token())
T = Tree(T_root)
#initializing a dictionary to keep trace of the leaves
leaves = dict()
leaves[node] = T_root
        #initializing the priority queue to maintain the fringe
PQ = PriorityQueue()
PQ.insert(T_root, node.get_token())
while not PQ.isEmpty():
            u = PQ.deleteMin() #retrieving the leaf with minimum distance
n = u.getElem()
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == INF:
l = Leaf(v)
leaves[v] = l #updating the leaves' dictionary
PQ.insert(l, n.get_token() + 1.0) #each edge will be unitary-cost
v.set_token(n.get_token() + 1.0)
T.insertLeaf(l, u)
elif n.get_token() + 1.0 < v.get_token():
relaxed = n.get_token() + 1.0
leaves[v].setDistance(relaxed)
#updating the tree... (we are now saving in the priority queue the leaves)
leaves[v].setFather(u)
leaves[n].addSon(leaves[v])
#updating the priority queue
PQ.decreaseKey(leaves[v], relaxed)
v.set_token(relaxed)
return T
def visitDijkstra(self, Nodes):
"""
this is an implementation of the Dijkstra algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.Dijkstra(node)
T_list.append(tree)
return T_list
    #Pay attention!
    # -Bellman condition to decide a shortest path -> for each node it is O(k*n) where k is the node's degree
    # -save all the available paths in a tree instead of a list of lists -> O(n) (when possible...)
    # -the chessboard graph is a directed graph with all the arcs costing a single unit
    #  (please note that each knight's own k-value must be considered in order to calculate
    #  the number of moves!!)
    # -general purpose: in python2.7 infinity is... INF = float('inf') -> comparisons using floats
def FloydWarshall(self):
"""
        this is a simple implementation of the Floyd-Warshall algorithm using O(n^2) space
        but O(n^3) computational complexity. Please note that in our case the chessboard graph
        is built with unit-weight arcs
:return: list of lists, matrix of the distances between two vertices
"""
INF = float('inf')
nodes, adjacency = self.getNodes() #getting the dictionaries
indexes = nodes.keys() #it is the same to access the two dictionaries
dim = len(indexes)
#initializing the matrix
dist = [[INF for m in range(dim)] for n in range(dim)]
for i in range(dim):
ind = indexes[i]
dist[ind][ind] = 0.0
adj_nodes = adjacency[ind].getLastAddedList()
for adj in adj_nodes:
to_ind = adj.get_index()
dist[ind][to_ind] = 1.0
        #executing the dynamic programming algorithm
for k in range(dim):
for i in range(dim):
for j in range(dim):
if dist[i][k] != INF and dist[k][j] != INF and dist[i][k] + dist[k][j] < dist[i][j]:
dist[i][j] = dist[i][k] + dist[k][j]
return dist
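#Illustrative usage sketch (assumes the libs.graph modules are importable):
#
#   g = GraphAdjacenceList()
#   a = g.insertNode('a1')
#   b = g.insertNode('b3')
#   g.linkNode(a, b)                #direct arc a -> b
#   tree = g.visitBFS(a)            #BFS tree rooted at a
#   paths = g.Dijkstra(a)           #shortest-path tree rooted at a
#   dist = g.FloydWarshall()        #all-pairs distance matrix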
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
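# Minimal usage sketch (an illustration, not one of the module's doctests):
#
#     X = np.array([[0.0, 0.0], [1.0, 1.0]])
#     means = np.zeros((1, 2))
#     covars = np.ones((1, 2))            # 'diag' covariances
#     lpr = log_multivariate_normal_density(X, means, covars)  # shape (2, 1)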
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
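# Minimal usage sketch: five draws from a 2-D standard normal; note that the
# returned array has shape (n_features, n_samples):
#
#     X = sample_gaussian(np.zeros(2), np.eye(2), 'full', n_samples=5,
#                         random_state=0)   # X.shape == (2, 5)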
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
            (n_components, n_features) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
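    # Note: ``lpr`` holds log(weight_k) + log N(x_i | mu_k, Sigma_k) for every
    # sample/component pair, so ``logsumexp`` over axis=1 gives the per-sample
    # log-likelihood without underflow, and the responsibilities are recovered
    # as exp(lpr - logprob) rather than from the raw densities.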
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
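    # Note: component selection above is inverse-CDF sampling --
    # ``weight_cdf.searchsorted(rand)`` maps each uniform draw to the mixture
    # component whose cumulative-weight interval contains it, and a Gaussian
    # draw is then made per selected component.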
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
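    # Worked example: with n_components=2, n_features=3 and
    # covariance_type='full',
    #   cov_params  = 2 * 3 * (3 + 1) / 2 = 12  (two symmetric 3x3 matrices)
    #   mean_params = 3 * 2               = 6
    #   weights     = 2 - 1               = 1   (weights sum to one)
    # for a total of 19 free parameters, the count used by bic() and aic().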
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
        return -2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated and "
"will be removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
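# The vectorized expression above expands the diagonal-Gaussian log-density
#   log N(x | mu, diag(cv)) = -0.5 * (D*log(2*pi) + sum(log cv)
#                                     + sum((x - mu)**2 / cv))
# into matrix products so all sample/component pairs are evaluated at once.
# The helper below is an illustrative sketch (not part of the public API) that
# checks the identity against a naive per-pair loop on random data.
def _check_diag_logdensity_sketch(n_samples=5, n_components=3, n_dim=4, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_dim)
    means = rng.randn(n_components, n_dim)
    covars = rng.rand(n_components, n_dim) + 0.1
    fast = _log_multivariate_normal_density_diag(X, means, covars)
    slow = np.empty((n_samples, n_components))
    for i in range(n_samples):
        for k in range(n_components):
            diff = X[i] - means[k]
            slow[i, k] = -0.5 * (n_dim * np.log(2 * np.pi)
                                 + np.sum(np.log(covars[k]))
                                 + np.sum(diff ** 2 / covars[k]))
    assert np.allclose(fast, slow)
    return fast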
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; regularize this component's covariance
            # and retry the factorization.
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
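# Note on the computation above: the Cholesky factor L (cv = L L^T) gives the
# log-determinant as 2 * sum(log(diag(L))), and solving the triangular system
# L z = (x - mu)^T yields ||z||^2 equal to the Mahalanobis distance
# (x - mu)^T cv^{-1} (x - mu); those are exactly the two terms assembled into
# log_prob for each component.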
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
        elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be positive")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
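# Illustrative sketch (helper name is hypothetical, not part of the module):
# starting from a single (n_features, n_features) template, the function above
# returns arrays of shape (n_components, n_features) for 'spherical' and
# 'diag', (n_features, n_features) for 'tied', and
# (n_components, n_features, n_features) for 'full'.
def _covar_shapes_sketch(n_components=3, n_features=2):
    import numpy as np
    template = np.eye(n_features)
    return dict(
        (ct, distribute_covar_matrix_to_match_covariance_type(
            template, ct, n_components).shape)
        for ct in ('spherical', 'tied', 'diag', 'full'))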
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
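# Usage sketch (illustrative only; the helper name and input data are
# hypothetical): the deprecated GMM estimator above is typically paired with
# bic() or aic() to choose the number of components.
def _select_n_components_by_bic_sketch(X, max_components=5):
    import numpy as np
    best_model, best_bic = None, np.inf
    for k in range(1, max_components + 1):
        model = GMM(n_components=k, covariance_type='full',
                    random_state=0).fit(X)
        bic = model.bic(X)
        if bic < best_bic:
            best_model, best_bic = model, bic
    return best_model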
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import timeutils
from nova.compute import claims
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.tests.unit.compute import test_compute
from nova.tests.unit.image import fake as fake_image
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return resources
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
CONF.set_override('shelved_offload_time', shelved_offload_time)
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
image_id = 'fake_image_id'
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
instance.task_state = task_states.SHELVING
instance.save()
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
if clean_shutdown:
self.compute.driver.power_off(instance,
CONF.shutdown_timeout,
self.compute.SHUTDOWN_RETRY_INTERVAL)
else:
self.compute.driver.power_off(instance, 0, 0)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
if CONF.shelved_offload_time == 0:
self.compute.network_api.cleanup_instance_network_on_host(
self.context, instance, instance.host)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
mox.IgnoreArg())
tracking = {'last_state': instance.vm_state}
def check_save(expected_task_state=None):
self.assertEqual(123, instance.power_state)
if tracking['last_state'] == vm_states.ACTIVE:
if CONF.shelved_offload_time == 0:
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
else:
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED, instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
expected_task_state)
self.assertIn('shelved_at', instance.system_metadata)
self.assertEqual(image_id,
instance.system_metadata['shelved_image_id'])
self.assertEqual(host,
instance.system_metadata['shelved_host'])
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED_OFFLOADED,
instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_OFFLOADING],
expected_task_state)
tracking['last_state'] = instance.vm_state
else:
self.fail('Unexpected save!')
self.compute._notify_about_instance_usage(self.context,
instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance, 0, 0)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.shelve_instance(self.context, instance,
image_id=image_id, clean_shutdown=clean_shutdown)
def test_shelve(self):
self._shelve_instance(-1)
def test_shelve_forced_shutdown(self):
self._shelve_instance(-1, clean_shutdown=False)
def test_shelve_and_offload(self):
self._shelve_instance(0)
def _shelve_offload(self, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
instance.save()
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
if clean_shutdown:
self.compute.driver.power_off(instance,
CONF.shutdown_timeout,
self.compute.SHUTDOWN_RETRY_INTERVAL)
else:
self.compute.driver.power_off(instance, 0, 0)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, instance, instance.host)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save'):
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_shelve_offload(self):
self._shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._shelve_offload(clean_shutdown=False)
def test_unshelve(self):
instance = self._create_fake_instance_obj()
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': 'fake_id'}
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
def fake_claim(context, instance, limits):
instance.host = self.compute.host
return claims.Claim(context, instance,
self.rt, _fake_resources())
tracking = {
'last_state': instance.task_state,
'spawned': False,
}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
if tracking['spawned']:
self.assertIsNone(instance.task_state)
else:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['spawned'] = True
                tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
                tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, self.compute.host)
self.compute.driver.spawn(self.context, instance, image,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
with mock.patch.object(self.rt, 'instance_claim',
side_effect=fake_claim), \
mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(
self.context, instance, image=image,
filter_properties=filter_properties,
node=node)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertEqual(self.compute.host, instance.host)
self.assertFalse(instance.auto_disk_config)
def test_unshelve_volume_backed(self):
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
tracking = {'last_state': instance.task_state}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertFalse(instance.auto_disk_config)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, self.compute.host)
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(self.context, instance, self.rt,
_fake_resources()))
self.compute.driver.spawn(self.context, instance, None,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
def test_shelved_poll_none_exist(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(timeutils, 'is_older_than')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_not_timedout(self):
instance = self._create_fake_instance_obj()
sys_meta = instance.system_metadata
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_timedout(self):
instance = self._create_fake_instance_obj()
sys_meta = instance.system_metadata
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
(old, instance) = db.instance_update_and_get_original(self.context,
instance['uuid'], {'vm_state': vm_states.SHELVED,
'system_metadata': sys_meta})
def fake_destroy(inst, nw_info, bdm):
# NOTE(alaski) There are too many differences between an instance
# as returned by instance_update_and_get_original and
# instance_get_all_by_filters so just compare the uuid.
self.assertEqual(instance['uuid'], inst['uuid'])
self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
self.compute._poll_shelved_instances(self.context)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def test_shelve(self):
# Ensure instance can be shelved.
fake_instance = self._create_fake_instance_obj(
{'display_name': 'vm01'})
instance = fake_instance
self.assertIsNone(instance['task_state'])
def fake_init(self2):
            # In the original _FakeImageService.__init__(), some fake images
            # are created. To verify only the snapshot name in this test,
            # replace the initializer with this fake method.
self2.images = {}
def fake_create(self2, ctxt, metadata, data=None):
self.assertEqual(metadata['name'], 'vm01-shelved')
metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
return metadata
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
self.compute_api.shelve(self.context, instance)
self.assertEqual(instance.task_state, task_states.SHELVING)
db.instance_destroy(self.context, instance['uuid'])
def test_unshelve(self):
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj()
self.assertIsNone(instance['task_state'])
self.compute_api.shelve(self.context, instance)
instance.task_state = None
instance.vm_state = vm_states.SHELVED
instance.save()
self.compute_api.unshelve(self.context, instance)
self.assertEqual(instance.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Job.strength'
db.delete_column('main_job', 'strength')
# Deleting field 'Job.agility'
db.delete_column('main_job', 'agility')
# Deleting field 'Job.speed'
db.delete_column('main_job', 'speed')
# Deleting field 'Job.vitality'
db.delete_column('main_job', 'vitality')
# Deleting field 'Job.magic'
db.delete_column('main_job', 'magic')
# Deleting field 'Job.spirit'
db.delete_column('main_job', 'spirit')
# Adding field 'Job.maxStrength'
db.add_column('main_job', 'maxStrength',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
# Adding field 'Job.maxVitality'
db.add_column('main_job', 'maxVitality',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
# Adding field 'Job.maxAgility'
db.add_column('main_job', 'maxAgility',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
# Adding field 'Job.maxSpeed'
db.add_column('main_job', 'maxSpeed',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
# Adding field 'Job.maxMagic'
db.add_column('main_job', 'maxMagic',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
# Adding field 'Job.maxSpirit'
db.add_column('main_job', 'maxSpirit',
self.gf('django.db.models.fields.IntegerField')(max_length=3, default=1),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Job.strength'
raise RuntimeError("Cannot reverse this migration. 'Job.strength' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.strength'
db.add_column('main_job', 'strength',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Job.agility'
raise RuntimeError("Cannot reverse this migration. 'Job.agility' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.agility'
db.add_column('main_job', 'agility',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Job.speed'
raise RuntimeError("Cannot reverse this migration. 'Job.speed' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.speed'
db.add_column('main_job', 'speed',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Job.vitality'
raise RuntimeError("Cannot reverse this migration. 'Job.vitality' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.vitality'
db.add_column('main_job', 'vitality',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Job.magic'
raise RuntimeError("Cannot reverse this migration. 'Job.magic' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.magic'
db.add_column('main_job', 'magic',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Job.spirit'
raise RuntimeError("Cannot reverse this migration. 'Job.spirit' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Job.spirit'
db.add_column('main_job', 'spirit',
self.gf('django.db.models.fields.IntegerField')(max_length=3),
keep_default=False)
# Deleting field 'Job.maxStrength'
db.delete_column('main_job', 'maxStrength')
# Deleting field 'Job.maxVitality'
db.delete_column('main_job', 'maxVitality')
# Deleting field 'Job.maxAgility'
db.delete_column('main_job', 'maxAgility')
# Deleting field 'Job.maxSpeed'
db.delete_column('main_job', 'maxSpeed')
# Deleting field 'Job.maxMagic'
db.delete_column('main_job', 'maxMagic')
# Deleting field 'Job.maxSpirit'
db.delete_column('main_job', 'maxSpirit')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']"})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Group']", 'related_name': "'user_set'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']", 'related_name': "'user_set'"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'object_name': 'ContentType', 'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.aptitude': {
'Meta': {'object_name': 'Aptitude'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'main.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'armour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'availability': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'damageDieCount': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'damageDieSize': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'damageScale': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'evasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemSlot': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'itemType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ItemCategory']"}),
'magicalArmour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'magicalEvasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.baseitemability': {
'Meta': {'object_name': 'BaseItemAbility'},
'baseItem': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.BaseItem']", 'related_name': "'abilities'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.baseskill': {
'Meta': {'object_name': 'BaseSkill'},
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']"}),
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'halfRate': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'skillType': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'specialized': ('django.db.models.fields.BooleanField', [], {})
},
'main.character': {
'Meta': {'object_name': 'Character'},
'accessorySlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedAccessories'"}),
'age': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'agility': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'baseHP': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'baseMP': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'blurb': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'bodySlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedBodies'"}),
'gil': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'handSlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedHands'"}),
'headSlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedHeads'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Job']", 'related_name': "'characters'"}),
'level': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'magic': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Race']", 'related_name': "'characters'"}),
'secondWeaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedSecondaryWeapons'"}),
'speed': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'spirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'strength': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['auth.User']", 'related_name': "'characters'"}),
'vitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'weaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Item']", 'related_name': "'equippedWeapons'"}),
'xp': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'main.item': {
'Meta': {'object_name': 'Item'},
'baseItem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseItem']"}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'items'"}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.itemcategory': {
'Meta': {'object_name': 'ItemCategory'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.BaseSkill']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subCategory': ('django.db.models.fields.IntegerField', [], {'max_length': '2'})
},
'main.job': {
'Meta': {'object_name': 'Job'},
'accuracyBonus': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Aptitude']", 'blank': 'True'}),
'expertiseAttribute': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'expertiseSkill': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.BaseSkill']", 'blank': 'True'}),
'hasMP': ('django.db.models.fields.BooleanField', [], {}),
'hpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.ItemCategory']"}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'mpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'skillPoints': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.overviewbox': {
'Meta': {'object_name': 'OverviewBox'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'viewName': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.overviewboxsetting': {
'Meta': {'object_name': 'OverviewBoxSetting'},
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'overviewBoxSettings'"}),
'enabled': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overviewBox': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.OverviewBox']"}),
'sortOrder': ('django.db.models.fields.IntegerField', [], {}),
'spanFull': ('django.db.models.fields.BooleanField', [], {})
},
'main.race': {
'Meta': {'object_name': 'Race'},
'dayVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'hearing': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifeSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'magicSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'nightVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'smell': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.skill': {
'Meta': {'object_name': 'Skill'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']"}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'skills'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'specialization': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'currentCharacter': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['main.Character']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['main']
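# Usage note (annotation, not generated by South): applying this migration,
# e.g. with ``python manage.py migrate main``, drops the Job stat columns and
# adds the max* columns with a default of 1. Reversing it is intentionally
# blocked: backwards() raises RuntimeError before reaching the add_column
# calls, because the dropped values cannot be restored.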
| |
import platform
import sys
import os
import os.path
import re
import errno
import stat
from subprocess import Popen, PIPE, STDOUT
from shutil import rmtree, copy, Error as ShError
#######################################################################################################################
SYSNAME = platform.system()
if SYSNAME == 'Linux':
if platform.machine() == 'x86_64':
TURBULENZOS = 'linux64'
else:
TURBULENZOS = 'linux32'
elif SYSNAME == 'Windows':
TURBULENZOS = 'win32'
elif SYSNAME == 'Darwin':
TURBULENZOS = 'macosx'
else:
    # echo() is defined further down in this file, so report the failure
    # directly before bailing out.
    print('unknown os')
    exit(1)
PYTHON = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
ENV = 'env'
TURBULENZROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Required to get the git commands working on Windows
if not 'HOME' in os.environ:
os.environ['HOME'] = '%s%s' % (os.environ['HOMEDRIVE'], os.environ['HOMEPATH'])
#######################################################################################################################
def echo(message=''):
print message
def log(message):
echo(' >> ' + message)
COLORED_OUTPUT = sys.stdout.isatty() and SYSNAME != 'Windows'
def error(message):
if COLORED_OUTPUT:
log('\033[31m[ERROR]\033[0m - %s' % message)
else:
log('[ERROR] - %s' % message)
# pylint: disable=C0103
def ok(message):
if COLORED_OUTPUT:
log('\033[32m[OK]\033[0m - %s' % message)
else:
log('[OK] - %s' % message)
# pylint: enable=C0103
def warning(message):
if COLORED_OUTPUT:
log('\033[1m\033[33m[WARNING]\033[0m - %s' % message)
else:
log('[WARNING] - %s' % message)
#######################################################################################################################
# pylint: disable=C0103
def cp(src, dst, verbose=True):
if verbose:
echo('Copying: %s -> %s' % (os.path.basename(src), os.path.basename(dst)))
try:
copy(src, dst)
except (ShError, IOError) as e:
error(str(e))
# pylint: enable=C0103
# pylint: disable=C0103
def rm(filename, verbose=True):
if verbose:
echo('Removing: %s' % filename)
try:
os.remove(filename)
except OSError as _:
pass
# pylint: enable=C0103
def mkdir(path, verbose=True):
if verbose:
echo('Creating: %s' % path)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def rmdir(path, verbose=True):
def _handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
func(path)
else:
raise
if verbose:
echo('Removing: %s' % path)
try:
rmtree(path, onerror=_handle_remove_readonly)
except OSError:
pass
#######################################################################################################################
# pylint: disable=W0231
class CalledProcessError(Exception):
def __init__(self, retcode, cmd, output=None):
self.retcode = retcode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.retcode)
# pylint: enable=W0231
# pylint: disable=C0103
def sh(command, cwd=None, env=None, verbose=True, console=False, ignore=False, shell=False, wait=True):
if isinstance(command, list):
command_list = command
command_string = ' '.join(command)
else:
command_list = command.split()
command_string = command
if verbose:
echo('Executing: %s' % command_string)
if wait:
if console:
process = Popen(command_list, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
else:
process = Popen(command_list, stdout=PIPE, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
output, _ = process.communicate()
output = str(output)
retcode = process.poll()
if retcode:
if ignore is False:
raise CalledProcessError(retcode, command_list, output=output)
if output is not None:
output = output.rstrip()
return output
else:
if SYSNAME == 'Windows':
DETACHED_PROCESS = 0x00000008
return Popen(command_list, creationflags=DETACHED_PROCESS, cwd=cwd, shell=shell, env=env)
else:
return Popen(command_list, stdout=PIPE, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
# pylint: enable=C0103
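# Illustrative usage sketch for sh() (the git invocation is hypothetical and
# requires git on PATH): capture a command's combined stdout/stderr, with
# ignore=True suppressing CalledProcessError on a non-zero exit status.
def _sh_usage_example():
    version = sh(['git', '--version'], verbose=False, ignore=True)
    echo('git reports: %s' % version)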
#######################################################################################################################
def command_no_arguments(fn):
def new(arguments=None):
return fn()
return new
def command_with_arguments(fn):
    def new(arguments=None, *args, **kwargs):
return fn(arguments or [], *args, **kwargs)
return new
def command_requires_env(fn):
virtual_env = os.environ.get('VIRTUAL_ENV', None)
if virtual_env:
def new(*args, **kwargs):
return fn(*args, **kwargs)
else:
def new(*args, **kwargs):
error('Virtualenv not activated, required for: %s' % sys.argv[1])
return new
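# Sketch of how the decorators above compose (the command itself is
# hypothetical): the outer decorator refuses to run outside an activated
# virtualenv, while the inner one normalises the optional argument list.
@command_requires_env
@command_with_arguments
def _example_command(arguments):
    echo('example command called with: %s' % ' '.join(arguments))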
#######################################################################################################################
def check_documentation_links(build_path):
bad_link_regex = [re.compile('.*<em class="xref std std-ref">.*<\/em>.*'),
re.compile('.*?:ref:?.*')]
result = 0
for (dirpath, _, filenames) in os.walk(build_path):
for f in filenames:
if os.path.splitext(f)[1] == '.html':
file_path = os.path.join(dirpath, f)
html_file = open(file_path, 'rt')
html = html_file.read()
for regex in bad_link_regex:
match = regex.search(html)
if match:
result += 1
warning(file_path)
error('Broken or malformed link with contents "%s"' % match.group(0))
html_file.close()
if result > 0:
error('%d broken or malformed link%s' % (result, 's' if result > 1 else ''))
return result
#######################################################################################################################
if platform.system() == "Windows":
# pylint: disable=W0404
# pylint: disable=F0401, E0602
def _get_reg_software_value(store, path, key):
import _winreg
value = None
try:
install_key = _winreg.OpenKey(store, 'SOFTWARE\%s' % path)
(value, _) = _winreg.QueryValueEx(install_key, key)
except WindowsError:
try:
install_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\Wow6432Node\%s' % path)
(value, _) = _winreg.QueryValueEx(install_key, key)
except WindowsError:
pass
return value
# pylint: enable=F0401, E0602
# pylint: disable=F0401, E0602
def find_devenv():
from _winreg import HKEY_LOCAL_MACHINE
devenv_path = _get_reg_software_value(HKEY_LOCAL_MACHINE, 'Microsoft\VisualStudio\9.0', 'InstallDir')
if devenv_path is not None:
devenv_path = os.path.join(devenv_path, 'devenv.com')
return devenv_path
# pylint: enable=F0401, E0602
# pylint: enable=W0404
else:
def find_devenv():
return None
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import timeutils
import requests
import six
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db import api as db_api
from heat.engine import api
from heat.objects import software_config as software_config_object
from heat.objects import software_deployment as software_deployment_object
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class SoftwareConfigService(service.Service):
def show_software_config(self, cnxt, config_id):
sc = software_config_object.SoftwareConfig.get_by_id(cnxt, config_id)
return api.format_software_config(sc)
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
scs = software_config_object.SoftwareConfig.get_all(
cnxt,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
result = [api.format_software_config(sc, detail=False) for sc in scs]
return result
def create_software_config(self, cnxt, group, name, config,
inputs, outputs, options):
sc = software_config_object.SoftwareConfig.create(cnxt, {
'group': group,
'name': name,
'config': {
'inputs': inputs,
'outputs': outputs,
'options': options,
'config': config
},
'tenant': cnxt.tenant_id})
return api.format_software_config(sc)
def delete_software_config(self, cnxt, config_id):
software_config_object.SoftwareConfig.delete(cnxt, config_id)
def list_software_deployments(self, cnxt, server_id):
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
result = [api.format_software_deployment(sd) for sd in all_sd]
return result
def metadata_software_deployments(self, cnxt, server_id):
if not server_id:
raise ValueError(_('server_id must be specified'))
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
all_sd_s = sorted(all_sd, key=lambda sd: sd.config.name)
result = [api.format_software_config(sd.config) for sd in all_sd_s]
return result
@oslo_db_api.wrap_db_retry(max_retries=10, retry_on_request=True)
def _push_metadata_software_deployments(self, cnxt, server_id, sd):
rs = db_api.resource_get_by_physical_resource_id(cnxt, server_id)
if not rs:
return
deployments = self.metadata_software_deployments(cnxt, server_id)
md = rs.rsrc_metadata or {}
md['deployments'] = deployments
rows_updated = db_api.resource_update(
cnxt, rs.id, {'rsrc_metadata': md}, rs.atomic_key)
if not rows_updated:
raise db_exc.RetryRequest(
exception.DeploymentConcurrentTransaction(server=server_id))
metadata_put_url = None
metadata_queue_id = None
for rd in rs.data:
if rd.key == 'metadata_put_url':
metadata_put_url = rd.value
break
elif rd.key == 'metadata_queue_id':
metadata_queue_id = rd.value
break
if metadata_put_url:
json_md = jsonutils.dumps(md)
requests.put(metadata_put_url, json_md)
elif metadata_queue_id:
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(metadata_queue_id)
queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
container, object_name = urlparse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
try:
headers = swift.head_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
lm = headers.get('last-modified')
last_modified = swift_plugin.parse_last_modified(lm)
prev_last_modified = sd.updated_at
if prev_last_modified:
# assume stored as utc, convert to offset-naive datetime
prev_last_modified = prev_last_modified.replace(tzinfo=None)
if prev_last_modified and (last_modified <= prev_last_modified):
return sd
try:
(headers, obj) = swift.get_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
if obj:
self.signal_software_deployment(
cnxt, sd.id, jsonutils.loads(obj),
last_modified.isoformat())
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def _refresh_zaqar_software_deployment(self, cnxt, sd, deploy_queue_id):
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(deploy_queue_id)
messages = list(queue.pop())
if messages:
self.signal_software_deployment(
cnxt, sd.id, messages[0].body, None)
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def show_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
if sd.status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
c = sd.config.config
input_values = dict((i['name'], i['value']) for i in c['inputs'])
transport = input_values.get('deploy_signal_transport')
if transport == 'TEMP_URL_SIGNAL':
sd = self._refresh_swift_software_deployment(
cnxt, sd, input_values.get('deploy_signal_id'))
elif transport == 'ZAQAR_SIGNAL':
sd = self._refresh_zaqar_software_deployment(
cnxt, sd, input_values.get('deploy_queue_id'))
return api.format_software_deployment(sd)
def create_software_deployment(self, cnxt, server_id, config_id,
input_values, action, status,
status_reason, stack_user_project_id):
sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
'config_id': config_id,
'server_id': server_id,
'input_values': input_values,
'tenant': cnxt.tenant_id,
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
'status_reason': status_reason})
self._push_metadata_software_deployments(cnxt, server_id, sd)
return api.format_software_deployment(sd)
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at):
if not deployment_id:
raise ValueError(_('deployment_id must be specified'))
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
status = sd.status
        if status != rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
# output values are only expected when in an IN_PROGRESS state
return
details = details or {}
output_status_code = rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
ov = sd.output_values or {}
status = None
status_reasons = {}
status_code = details.get(output_status_code)
if status_code and str(status_code) != '0':
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[output_status_code] = _(
'Deployment exited with non-zero status code: %s'
) % details.get(output_status_code)
event_reason = 'deployment failed (%s)' % status_code
else:
event_reason = 'deployment succeeded'
for output in sd.config.config['outputs'] or []:
out_key = output['name']
if out_key in details:
ov[out_key] = details[out_key]
if output.get('error_output', False):
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[out_key] = details[out_key]
event_reason = 'deployment failed'
for out_key in rpc_api.SOFTWARE_DEPLOYMENT_OUTPUTS:
ov[out_key] = details.get(out_key)
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
status = rpc_api.SOFTWARE_DEPLOYMENT_COMPLETE
status_reason = _('Outputs received')
self.update_software_deployment(
cnxt, deployment_id=deployment_id,
output_values=ov, status=status, status_reason=status_reason,
config_id=None, input_values=None, action=None,
updated_at=updated_at)
# Return a string describing the outcome of handling the signal data
return event_reason
def update_software_deployment(self, cnxt, deployment_id, config_id,
input_values, output_values, action,
status, status_reason, updated_at):
update_data = {}
if config_id:
update_data['config_id'] = config_id
if input_values:
update_data['input_values'] = input_values
if output_values:
update_data['output_values'] = output_values
if action:
update_data['action'] = action
if status:
update_data['status'] = status
if status_reason:
update_data['status_reason'] = status_reason
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
else:
update_data['updated_at'] = timeutils.utcnow()
sd = software_deployment_object.SoftwareDeployment.update_by_id(
cnxt, deployment_id, update_data)
# only push metadata if this update resulted in the config_id
# changing, since metadata is just a list of configs
if config_id:
self._push_metadata_software_deployments(cnxt, sd.server_id, sd)
return api.format_software_deployment(sd)
def delete_software_deployment(self, cnxt, deployment_id):
software_deployment_object.SoftwareDeployment.delete(
cnxt, deployment_id)
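# --- Illustrative sketch (not part of the service) ---------------------------
# Assumed shape of the `details` dict that signal_software_deployment receives
# from the in-instance agent. The deploy_* key names are assumed to match the
# rpc_api output constants; 'result' stands in for a config-defined output.
_EXAMPLE_SIGNAL_DETAILS = {
    'deploy_status_code': 0,
    'deploy_stdout': 'Applied config',
    'deploy_stderr': '',
    'result': 'ok',
}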
| |
from __future__ import absolute_import, division, print_function
import logging
from inspect import getmro
from collections import defaultdict
from .message import Message
from .exceptions import InvalidSubscriber, InvalidMessage
__all__ = ['Hub', 'HubListener']
class Hub(object):
"""The hub manages communication between subscribers.
Objects :func:`subscribe` to receive specific message types. When
a message is passed to :func:`broadcast`, the hub observes the
following protocol:
* For each subscriber, it looks for a message class
subscription that is a superclass of the input message type
(if several are found, the most-subclassed one is chosen)
    * If one is found, it calls the subscription's filter(message)
      function (if provided)
* If filter(message) == True, it calls handler(message)
(or notify(message) if handler wasn't provided).
"""
def __init__(self, *args):
"""
Any arguments that are passed to Hub will be registered
to the new hub object.
"""
# Dictionary of subscriptions
self._subscriptions = defaultdict(dict)
from .data import Data
from .subset import Subset
from .data_collection import DataCollection
listeners = set(filter(lambda x: isinstance(x, HubListener), args))
data = set(filter(lambda x: isinstance(x, Data), args))
subsets = set(filter(lambda x: isinstance(x, Subset), args))
dcs = set(filter(lambda x: isinstance(x, DataCollection), args))
listeners -= (data | subsets | dcs)
if set(listeners | data | subsets | dcs) != set(args):
raise TypeError("Inputs must be HubListener, data, subset, or "
"data collection objects")
for l in listeners:
l.register_to_hub(self)
for d in data:
d.register_to_hub(self)
for dc in dcs:
dc.register_to_hub(self)
for s in subsets:
s.register()
def subscribe(self, subscriber, message_class,
handler=None,
filter=lambda x: True):
"""Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:type handler: Callable
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
:type filter: Callable
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
"""
if not isinstance(subscriber, HubListener):
raise InvalidSubscriber("Subscriber must be a HubListener: %s" %
type(subscriber))
if not isinstance(message_class, type) or \
not issubclass(message_class, Message):
raise InvalidMessage("message class must be a subclass of "
"glue.Message: %s" % type(message_class))
logging.getLogger(__name__).info("Subscribing %s to %s",
subscriber, message_class.__name__)
if not handler:
handler = subscriber.notify
self._subscriptions[subscriber][message_class] = (filter, handler)
def is_subscribed(self, subscriber, message):
"""
        Test whether the subscriber has subscribed to a given message class
:param subscriber: The subscriber to test
:param message: The message class to test
Returns:
True if the subscriber/message pair have been subscribed to the hub
"""
return subscriber in self._subscriptions and \
message in self._subscriptions[subscriber]
def get_handler(self, subscriber, message):
try:
return self._subscriptions[subscriber][message][1]
except KeyError:
return None
def unsubscribe(self, subscriber, message):
"""
Remove a (subscriber,message) pair from subscription list.
The handler originally attached to the subscription will
no longer be called when broadcasting messages of type message
"""
if subscriber not in self._subscriptions:
return
if message in self._subscriptions[subscriber]:
self._subscriptions[subscriber].pop(message)
def unsubscribe_all(self, subscriber):
"""
Unsubscribe the object from any subscriptions.
"""
if subscriber in self._subscriptions:
self._subscriptions.pop(subscriber)
def _find_handlers(self, message):
"""Yields all (subscriber, handler) pairs that should receive a message
"""
# self._subscriptions:
# subscriber => { message type => (filter, handler)}
# loop over subscribed objects
for subscriber, subscriptions in list(self._subscriptions.items()):
# subscriptions to message or its superclasses
messages = [msg for msg in subscriptions.keys() if
issubclass(type(message), msg)]
if len(messages) == 0:
continue
# narrow to the most-specific message
candidate = max(messages, key=_mro_count)
test, handler = subscriptions[candidate]
if test(message):
yield subscriber, handler
def broadcast(self, message):
"""Broadcasts a message to all subscribed objects.
:param message: The message to broadcast
:type message: :class:`~glue.core.message.Message`
"""
logging.getLogger(__name__).info("Broadcasting %s", message)
for subscriber, handler in self._find_handlers(message):
handler(message)
def __getstate__(self):
""" Return a picklable representation of the hub
Note: Only objects in glue.core are currently supported
as pickleable. Thus, any subscriptions from objects outside
        glue.core will not be saved or restored
"""
result = self.__dict__.copy()
result['_subscriptions'] = self._subscriptions.copy()
for s in self._subscriptions:
try:
module = s.__module__
except AttributeError:
module = ''
if not module.startswith('glue.core'):
print('Pickle warning: Hub removing subscription to %s' % s)
result['_subscriptions'].pop(s)
return result
class HubListener(object):
"""
The base class for any object that subscribes to hub messages.
This interface defines a single method, notify, that receives
messages
"""
def register_to_hub(self, hub):
raise NotImplementedError
def unregister(self, hub):
""" Default unregistration action. Calls hub.unsubscribe_all on self"""
hub.unsubscribe_all(self)
def notify(self, message):
raise NotImplementedError("Message has no handler: %s" % message)
def _mro_count(obj):
return len(getmro(obj))
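# --- Illustrative usage sketch (not part of glue) -----------------------------
# Shows the subscribe/broadcast protocol described in the Hub docstring.
# It assumes a bare Message(sender) can be constructed directly; real code
# would normally broadcast one of the concrete Message subclasses.
def _example_hub_usage():
    class EchoListener(HubListener):
        def register_to_hub(self, hub):
            # subscribe to every Message subclass
            hub.subscribe(self, Message)

        def notify(self, message):
            print('received %r' % message)

    hub = Hub()
    listener = EchoListener()
    listener.register_to_hub(hub)
    hub.broadcast(Message(listener))  # EchoListener.notify is called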
| |
# This file is mainly for detecting the graphics hardware used and storing
# this information in strings that are later used for the report
import json
import re
import logging
from django.db import models
LOG = logging.getLogger(__name__)
class UserReport(models.Model):
uploader = models.GenericIPAddressField(editable=False)
# Hex SHA-1 digest of user's reported ID
# (The hashing means that publishing the database won't let people upload
# faked reports under someone else's user ID, and also ensures a simple
# consistent structure)
user_id_hash = models.CharField(max_length=40, db_index=True, editable=False)
# When the server received the upload
upload_date = models.DateTimeField(auto_now_add=True, db_index=True, editable=False)
# When the user claims to have generated the report
generation_date = models.DateTimeField(editable=False)
data_type = models.CharField(max_length=16, db_index=True, editable=False)
data_version = models.IntegerField(editable=False)
data = models.TextField(editable=False)
def get_data_json(self, cache=True):
"""
        Get the parsed JSON data
        :param cache: flag indicating whether to cache the parsed JSON
        :return: the parsed JSON as a dict
"""
def get_json(data):
try:
return json.loads(data)
except ValueError:
LOG.warning("The data_json is invalid for id = %d" % self.id)
return {}
# Cache the json
if cache and not hasattr(self, 'cached_json'):
self.cached_json = get_json(self.data)
return self.cached_json
return get_json(self.data)
def has_data(self):
return bool(self.get_data_json())
    def clear_cache(self):
        if hasattr(self, 'cached_json'):
            delattr(self, 'cached_json')
def downcast(self):
if self.data_type == 'hwdetect':
return UserReport_hwdetect.objects.get(id=self.id)
return self
class UserReport_hwdetect(UserReport):
pattern_device_identifier = re.compile(
r'^(?:AMD |ATI |NVIDIA |Mesa DRI )?(.*?)\s*(?:GEM 20100328 2010Q1|GEM 20100330 DEVELOPMENT|GEM 20091221 2009Q4|20090101|Series)?\s*(?:x86|/AGP|/PCI|/MMX|/MMX\+|/SSE|/SSE2|/3DNOW!|/3DNow!|/3DNow!\+)*(?: TCL| NO-TCL)?(?: DRI2)?(?: \(Microsoft Corporation - WDDM\))?(?: OpenGL Engine)?\s*$')
pattern_gl_version = re.compile(r'^(\d+\.\d+).*')
pattern_gl_driver_mesa_git = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? (Mesa \d+\.\d+)-devel \(git-([a-f0-9]+)')
pattern_gl_driver_mesa_normal = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? (Mesa .*)$')
pattern_gl_driver_nvidia = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? NVIDIA (.*)$')
pattern_gl_driver_amd_direct = re.compile(r'^OpenGL (\d+\.\d+\.\d+) Compatibility Profile Context(?: FireGL)?$')
pattern_gl_driver_amd_indirect = re.compile(
r'^OpenGL 1\.4 \((\d+\.\d+\.\d+) Compatibility Profile Context(?: FireGL)?\)$')
class Meta:
proxy = True
def get_os(self):
""":return the operating system"""
data_json = self.get_data_json()
if data_json:
if data_json.get('os_win'):
return 'Windows'
elif data_json.get('os_linux'):
return 'Linux'
elif data_json.get('os_macosx'):
return 'OS X'
elif data_json.get('os_unix'):
return 'Other Unix'
return 'Unknown'
def gl_renderer(self):
data_json = self.get_data_json()
if 'GL_RENDERER' not in data_json:
return ""
# The renderer string should typically be interpreted as UTF-8
try:
return data_json['GL_RENDERER'].encode('iso-8859-1').decode('utf-8').strip()
except UnicodeError:
return data_json['GL_RENDERER'].strip()
def gl_extensions(self):
data_json = self.get_data_json()
if 'GL_EXTENSIONS' not in data_json:
LOG.warning("The GL_EXTENSIONS does not exist for id = %d" % self.id)
return None
values = re.split(r'\s+', data_json['GL_EXTENSIONS'])
# skip empty strings (e.g. no extensions at all, or leading/trailing space)
return frozenset(v for v in values if v)
def gl_limits(self):
data_json = self.get_data_json()
limits = {}
for (k, v) in data_json.items():
if not k.startswith('GL_'):
continue
if k == 'GL_VERSION':
m = re.match(self.pattern_gl_version, v)
if m:
limits[k] = '%s [...]' % m.group(1)
limits['GL_VERSION' + '_COMPLETE'] = v # non standard
continue
if k in ('GL_RENDERER', 'GL_EXTENSIONS'):
continue
# Hide some values that got deleted from the report in r8953, for consistency
if k in ('GL_MAX_COLOR_MATRIX_STACK_DEPTH', 'GL_FRAGMENT_PROGRAM_ARB.GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB',
'GL_FRAGMENT_PROGRAM_ARB.GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB'):
continue
# Hide some pixel depth values that are not really correlated with device
if k in ('GL_RED_BITS', 'GL_GREEN_BITS', 'GL_BLUE_BITS', 'GL_ALPHA_BITS', 'GL_INDEX_BITS', 'GL_DEPTH_BITS',
'GL_STENCIL_BITS',
'GL_ACCUM_RED_BITS', 'GL_ACCUM_GREEN_BITS', 'GL_ACCUM_BLUE_BITS', 'GL_ACCUM_ALPHA_BITS'):
continue
limits[k] = v
return limits
def gl_device_identifier(self):
"""
Construct a nice-looking concise graphics device identifier
(skipping boring hardware/driver details)
"""
renderer = self.gl_renderer()
m = re.match(self.pattern_device_identifier, renderer)
if m:
renderer = m.group(1)
return renderer.strip()
def gl_vendor(self):
return self.get_data_json().get('GL_VENDOR', '').strip()
def gl_driver(self):
"""
Construct a nice string identifying the driver
It tries all the known possibilities for drivers to find the used one
"""
data_json = self.get_data_json()
if 'gfx_drv_ver' not in data_json or 'GL_VENDOR' not in data_json:
return ''
gfx_drv_ver = data_json['gfx_drv_ver']
# Try the Mesa git style first
m = re.match(self.pattern_gl_driver_mesa_git, gfx_drv_ver)
if m:
return '%s-git-%s' % (m.group(1), m.group(2))
# Try the normal Mesa style
m = re.match(self.pattern_gl_driver_mesa_normal, gfx_drv_ver)
if m:
return m.group(1)
# Try the NVIDIA Linux style
m = re.match(self.pattern_gl_driver_nvidia, gfx_drv_ver)
if m:
return m.group(1)
# Try the ATI Catalyst Linux style
m = re.match(self.pattern_gl_driver_amd_direct, gfx_drv_ver)
if m:
return m.group(1)
# Try the non-direct-rendering ATI Catalyst Linux style
m = re.match(self.pattern_gl_driver_amd_indirect, gfx_drv_ver)
if m:
return '%s (indirect)' % m.group(1)
        possibilities = []  # stays empty, so the loop below is a no-op if the vendor is not recognised
# Try to guess the relevant Windows driver
# (These are the ones listed in lib/sysdep/os/win/wgfx.cpp in the 0 AD code)
if data_json['GL_VENDOR'] == 'NVIDIA Corporation':
possibilities = [
# Assume 64-bit takes precedence
r'nvoglv64.dll \((.*?)\)',
r'nvoglv32.dll \((.*?)\)',
r'nvoglnt.dll \((.*?)\)'
]
if data_json['GL_VENDOR'] in ('ATI Technologies Inc.', 'Advanced Micro Devices, Inc.'):
possibilities = [
r'atioglxx.dll \((.*?)\)',
r'atioglx2.dll \((.*?)\)',
r'atioglaa.dll \((.*?)\)'
]
if data_json['GL_VENDOR'] == 'Intel':
possibilities = [
# Assume 64-bit takes precedence
r'ig4icd64.dll \((.*?)\)',
r'ig4icd32.dll \((.*?)\)',
# Legacy 32-bit
r'iglicd32.dll \((.*?)\)',
r'ialmgicd32.dll \((.*?)\)',
r'ialmgicd.dll \((.*?)\)'
]
for i in possibilities:
m = re.search(i, gfx_drv_ver)
if m:
return m.group(1)
return gfx_drv_ver
class GraphicsDevice(models.Model):
device_name = models.CharField(max_length=128, db_index=True)
vendor = models.CharField(max_length=64)
renderer = models.CharField(max_length=128)
os = models.CharField(max_length=16)
driver = models.CharField(max_length=128)
usercount = models.IntegerField()
def __str__(self):
return 'GraphicsDevice<name = "{0}", vendor = "{1}", renderer = "{2}", OS = "{3}", driver = "{4}", ' \
'usercount = {5}>'.format(self.device_name, self.vendor, self.renderer, self.os, self.driver,
self.usercount)
class GraphicsExtension(models.Model):
device = models.ForeignKey(GraphicsDevice)
name = models.CharField(max_length=128, db_index=True)
def __str__(self):
return 'GraphicsExtension<device_id = "{0}", name = "{1}">'.format(self.device_id, self.name)
class GraphicsLimit(models.Model):
device = models.ForeignKey(GraphicsDevice)
name = models.CharField(max_length=128, db_index=True)
value = models.CharField(max_length=64)
def __str__(self):
return 'GraphicsLimit<device_id = "{0}", name = "{1}", value = "{2}">'.format(self.device_id, self.name,
self.value)
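# --- Illustrative sketch (not part of the models) -----------------------------
# Shows what pattern_device_identifier is intended to strip from a raw
# GL_RENDERER string. The sample renderer strings below are made up.
def _example_device_identifier():
    samples = [
        'NVIDIA GeForce GTX 560/PCI/SSE2',
        'Mesa DRI Intel(R) Ivybridge Mobile x86/MMX/SSE2',
    ]
    for renderer in samples:
        m = UserReport_hwdetect.pattern_device_identifier.match(renderer)
        # falls back to the raw string when the pattern does not match
        print(m.group(1) if m else renderer)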
| |
#analysis files
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from analysis_gui import Ui_Analysis
import numpy as np
import matplotlib,math,csv
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None):
fig = Figure()
self.axes = fig.add_subplot(111)
self.compute_initial_figure()
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class Radar(FigureCanvas):
def __init__(self, titles, rect=None, parent=None):
fig = Figure()
if rect is None:
rect = [0.05, 0.05, 0.8, 0.8]
self.n = len(titles)
self.angles = np.arange(90, 90 + 360, 360.0 / self.n)
self.angles = [a % 360 for a in self.angles]
self.axes = [fig.add_axes(rect, projection="polar", label="axes%d" % i)
for i in range(self.n)]
#FigureCanvas.setSizePolicy(self,
#QtWidgets.QSizePolicy.Expanding,
#QtWidgets.QSizePolicy.Expanding)
#FigureCanvas.updateGeometry(self)
self.ax = self.axes[0]
self.ax.set_thetagrids(self.angles,labels=titles, fontsize=14)
for ax in self.axes[1:]:
ax.patch.set_visible(False)
            ax.grid(False)  # the string "off" is truthy and would enable the grid
ax.xaxis.set_visible(False)
for ax, angle in zip(self.axes, self.angles):
ax.set_rgrids([0.2,0.4,0.6,0.8,1.0], angle=angle)
ax.spines["polar"].set_visible(False)
ax.set_ylim(auto=True)
ax.set_xlim(auto=True)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
def plot(self, values, *args, **kw):
angle = np.deg2rad(np.r_[self.angles, self.angles[0]])
values = np.r_[values, values[0]]
self.ax.plot(angle, values, *args, **kw)
class Analysis(QtWidgets.QMainWindow, Ui_Analysis):
def __init__(self,parent = None):
super(Analysis,self).__init__(parent)
self.setupUi(self)
self.XY_widget = QtWidgets.QWidget(self.tab_XY)
self.Radar_widget = QtWidgets.QWidget(self.tab_Radar)
self.Box_widget = QtWidgets.QWidget(self.tab_Box)
self.Table_widget = QtWidgets.QWidget(self.tab_Table)
self.XY_Layout = QtWidgets.QVBoxLayout(self.XY_widget)
self.XY = MyMplCanvas(self.XY_widget)
self.XY_Layout.addWidget(self.XY)
self.mpl_toolbar = NavigationToolbar(self.XY, self.XY_widget)
self.XY_Layout.addWidget(self.mpl_toolbar)
self.Box_Layout = QtWidgets.QVBoxLayout(self.Box_widget)
self.box = MyMplCanvas(self.Box_widget)
self.Box_Layout.addWidget(self.box)
self.box_toolbar = NavigationToolbar(self.box, self.Box_widget)
self.Box_Layout.addWidget(self.box_toolbar)
#self.tabWidget.setFocus()
#self.setCentralWidget(self.tabWidget)
#self.XY_widget.setFocus()
#self.Radar_widget.setFocus()
#self.Box_widget.setFocus()
#self.tabWidget.setFocus()
#self.setCentralWidget(self.tabWidget)
self.actionOpen.triggered.connect(self.open)
self.actionMax_min.triggered.connect(self.max_min)
self.actionStandardization_M_0_S_1.triggered.connect(self.standardization)
self.actionBaseline_Correction.triggered.connect(self.baseline)
self.actionPeak_Detection.triggered.connect(self.peak_detection)
self.actionFWHM.triggered.connect(self.FWHM)
self.actionRise_Time.triggered.connect(self.rise_time)
self.actionFall_Time.triggered.connect(self.fall_time)
self.sensor_name = []
self.sensor_sn = []
self.time = []
self.s1, self.s2, self.s3, self.s4, self.s5 = [], [], [], [], []
self.s6, self.s7, self.s8, self.s9, self.s10 = [], [], [], [], []
self.s11, self.s12, self.s13, self.s14, self.s15 = [], [], [], [], []
self.s16, self.s17, self.s18 = [], [], []
def open(self):
self.data = []
self.sensor_name = []
self.sensor_sn = []
self.time = []
self.s1, self.s2, self.s3, self.s4, self.s5 = [], [], [], [], []
self.s6, self.s7, self.s8, self.s9, self.s10 = [], [], [], [], []
self.s11, self.s12, self.s13, self.s14, self.s15 = [], [], [], [], []
self.s16, self.s17, self.s18 = [], [], []
self.s1_normalized = []
self.s2_normalized = []
self.s3_normalized = []
self.s4_normalized = []
self.s5_normalized = []
self.s6_normalized = []
self.s7_normalized = []
self.s8_normalized = []
self.s9_normalized = []
self.s10_normalized = []
self.s11_normalized = []
self.s12_normalized = []
self.s13_normalized = []
self.s14_normalized = []
self.s15_normalized = []
self.s16_normalized = []
self.s17_normalized = []
self.s18_normalized = []
filename = QFileDialog.getOpenFileName(self, 'Open',filter="CSV Files (*.csv);;FOX Files (*.txt)",
initialFilter= "CSV Files (*.csv)")
if filename[0]=='':
print("Cancel")
elif filename[1]=='FOX Files (*.txt)':
file = open(filename[0])
lines = file.readlines()
for i in range(len(lines)):
if lines[i].startswith("[SENSOR NAME]"):
i += 1
self.sensor_name = lines[i].split()
if lines[i].startswith("[SENSOR SN]"):
i += 1
self.sensor_sn = lines[i].split()
if lines[i].startswith("[SENSOR DATA]"):
j = i + 1
self.data = []
for i in range(121):
self.data.append(lines[j].split())
j += 1
print(self.sensor_name)
print(self.sensor_sn)
print(self.data)
            for row in self.data:
                self.time.append(row[0])
                for idx in range(1, 19):
                    getattr(self, 's%d' % idx).append(float(row[idx]))
self.XY.axes.cla()
            for idx in range(1, 19):
                self.XY.axes.plot(self.time, getattr(self, 's%d' % idx),
                                  label=self.sensor_name[idx - 1])
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
self.menuNormalization.setEnabled(True)
            # Min-max normalise each sensor channel into its *_normalized list
            for idx in range(1, 19):
                raw = getattr(self, 's%d' % idx)
                normalized = getattr(self, 's%d_normalized' % idx)
                lo, hi = min(raw), max(raw)
                for item in raw:
                    normalized.append((item - lo) / (hi - lo))
self.radar_plot()
self.box_plot()
elif filename[1] == "CSV Files (*.csv)":
with open(filename[0], 'r') as csvfile:
lines = csv.reader(csvfile)
data = list(lines)
self.tableWidget.setRowCount(len(data))
self.tableWidget.setColumnCount(64)
for i in range(3):
for j in range(2):
self.tableWidget.setItem(i,j,QtWidgets.QTableWidgetItem(data[i][j]))
for i in range(3,len(data)):
for j in range(64):
self.tableWidget.setItem(i, j, QtWidgets.QTableWidgetItem(data[i][j]))
def max_min(self):
self.XY.axes.cla()
self.XY.axes.plot(self.time, self.s1_normalized, label=self.sensor_name[0])
'''
self.sc.axes.plot(self.time, self.s2_normalized, label=self.sensor_name[1])
self.sc.axes.plot(self.time, self.s3_normalized, label=self.sensor_name[2])
self.sc.axes.plot(self.time, self.s4_normalized, label=self.sensor_name[3])
self.sc.axes.plot(self.time, self.s5_normalized, label=self.sensor_name[4])
self.sc.axes.plot(self.time, self.s6_normalized, label=self.sensor_name[5])
self.sc.axes.plot(self.time, self.s7_normalized, label=self.sensor_name[6])
self.sc.axes.plot(self.time, self.s8_normalized, label=self.sensor_name[7])
self.sc.axes.plot(self.time, self.s9_normalized, label=self.sensor_name[8])
self.sc.axes.plot(self.time, self.s10_normalized, label=self.sensor_name[9])
self.sc.axes.plot(self.time, self.s11_normalized, label=self.sensor_name[10])
self.sc.axes.plot(self.time, self.s12_normalized, label=self.sensor_name[11])
self.sc.axes.plot(self.time, self.s13_normalized, label=self.sensor_name[12])
self.sc.axes.plot(self.time, self.s14_normalized, label=self.sensor_name[13])
self.sc.axes.plot(self.time, self.s15_normalized, label=self.sensor_name[14])
self.sc.axes.plot(self.time, self.s16_normalized, label=self.sensor_name[15])
self.sc.axes.plot(self.time, self.s17_normalized, label=self.sensor_name[16])
self.sc.axes.plot(self.time, self.s18_normalized, label=self.sensor_name[17])
'''
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
self.actionPeak_Detection.setEnabled(True)
self.actionRise_Time.setEnabled(True)
self.actionFall_Time.setEnabled(True)
self.actionFWHM.setEnabled(True)
def standardization(self):
        # Z-score standardise each sensor channel: (x - mean) / sd
        zs = []
        for idx in range(1, 19):
            raw = getattr(self, 's%d' % idx)
            mean = sum(raw) / len(raw)
            sd = self.calculate_sd(raw, mean)
            zs.append([(item - mean) / sd for item in raw])
        (z1, z2, z3, z4, z5, z6, z7, z8, z9,
         z10, z11, z12, z13, z14, z15, z16, z17, z18) = zs
'''
mz1 = sum(z1) / len(z1)
mz2 = sum(z2) / len(z2)
mz3 = sum(z3) / len(z3)
mz4 = sum(z4) / len(z4)
mz5 = sum(z5) / len(z5)
mz6 = sum(z6) / len(z6)
mz7 = sum(z7) / len(z7)
mz8 = sum(z8) / len(z8)
mz9 = sum(z9) / len(z9)
mz10 = sum(z10) / len(z10)
mz11 = sum(z11) / len(z11)
mz12 = sum(z12) / len(z12)
mz13 = sum(z13) / len(z13)
mz14 = sum(z14) / len(z14)
mz15 = sum(z15) / len(z15)
mz16 = sum(z16) / len(z16)
mz17 = sum(z17) / len(z17)
mz18 = sum(z18) / len(z18)
sdz1 = self.calculate_sd(z1, mz1)
sdz2 = self.calculate_sd(z2, mz2)
sdz3 = self.calculate_sd(z3, mz3)
sdz4 = self.calculate_sd(z4, mz4)
sdz5 = self.calculate_sd(z5, mz5)
sdz6 = self.calculate_sd(z6, mz6)
sdz7 = self.calculate_sd(z7, mz7)
sdz8 = self.calculate_sd(z8, mz8)
sdz9 = self.calculate_sd(z9, mz9)
sdz10 = self.calculate_sd(z10, mz10)
sdz11 = self.calculate_sd(z11, mz11)
sdz12 = self.calculate_sd(z12, mz12)
sdz13 = self.calculate_sd(z13, mz13)
sdz14 = self.calculate_sd(z14, mz14)
sdz15 = self.calculate_sd(z15, mz15)
sdz16 = self.calculate_sd(z16, mz16)
sdz17 = self.calculate_sd(z17, mz17)
sdz18 = self.calculate_sd(z18, mz18)
print(mz1,sdz1)
print(mz2, sdz2)
print(mz3, sdz3)
print(mz4, sdz4)
print(mz5, sdz5)
print(mz6, sdz6)
print(mz7, sdz7)
print(mz8, sdz8)
print(mz9, sdz9)
print(mz10, sdz10)
print(mz11, sdz11)
print(mz12, sdz12)
print(mz13, sdz13)
print(mz14, sdz14)
print(mz15, sdz15)
print(mz16, sdz16)
print(mz17, sdz17)
print(mz18, sdz18)
'''
self.XY.axes.cla()
self.XY.axes.plot(self.time, z1, label=self.sensor_name[0])
'''
self.sc.axes.plot(self.time, z2, label=self.sensor_name[1])
self.sc.axes.plot(self.time, z3, label=self.sensor_name[2])
self.sc.axes.plot(self.time, z4, label=self.sensor_name[3])
self.sc.axes.plot(self.time, z5, label=self.sensor_name[4])
self.sc.axes.plot(self.time, z6, label=self.sensor_name[5])
self.sc.axes.plot(self.time, z7, label=self.sensor_name[6])
self.sc.axes.plot(self.time, z8, label=self.sensor_name[7])
self.sc.axes.plot(self.time, z9, label=self.sensor_name[8])
self.sc.axes.plot(self.time, z10, label=self.sensor_name[9])
self.sc.axes.plot(self.time, z11, label=self.sensor_name[10])
self.sc.axes.plot(self.time, z12, label=self.sensor_name[11])
self.sc.axes.plot(self.time, z13, label=self.sensor_name[12])
self.sc.axes.plot(self.time, z14, label=self.sensor_name[13])
self.sc.axes.plot(self.time, z15, label=self.sensor_name[14])
self.sc.axes.plot(self.time, z16, label=self.sensor_name[15])
self.sc.axes.plot(self.time, z17, label=self.sensor_name[16])
self.sc.axes.plot(self.time, z18, label=self.sensor_name[17])
'''
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
    def calculate_sd(self, values, mean):
        # Sample standard deviation (divides by n - 1)
        sd = 0.0
        for item in values:
            sd += (item - mean) ** 2
        sd = sd / (len(values) - 1)
        sd = sd ** 0.5
        return sd
def baseline(self):
'''
s1 = np.array(self.s1)
base = peakutils.baseline(s1, deg=3, max_it=100, tol=0.001)
#self.sc.axes.cla()
self.sc.axes.plot(self.time, base, label="baseline",c='red')
self.sc.axes.legend(loc='best')
self.sc.draw()
'''
def peak_detection(self):
s1_diff = []
self.s1_indexes = []
for i in range(len(self.s1_normalized)-1):
s1_diff.append(self.s1_normalized[i+1]-self.s1_normalized[i])
print("diff=" + str(s1_diff))
print(len(s1_diff))
for i in range(len(s1_diff)-1):
if s1_diff[i]>0 and s1_diff[i+1]<0:
self.s1_indexes.append(i+1)
print(self.s1_indexes)
for i in range(len(self.s1_indexes)-1):
if self.s1_normalized[self.s1_indexes[i]]>0.5 and (self.s1_indexes[i+1]-self.s1_indexes[i])>=5:
self.XY.axes.scatter(self.time[self.s1_indexes[i]], self.s1_normalized[self.s1_indexes[i]],c='red')
self.XY.draw()
self.actionRise_Time.setEnabled(True)
def rise_time(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.05
abs_tol = 0.1
peak_values = []
#for i in range(len(self.s1_indexes)):
#peak_values.append(self.s1_normalized[self.s1_indexes[i]])
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i]==max(self.s1_normalized):
max_index = i
print("max index=" + str(max_index))
for i in range(max_index):
#if math.isclose(self.s1_normalized[i],0.9*self.s1_normalized[peak_index],rel_tol=0.05):
if abs(self.s1_normalized[i]-0.9*max(self.s1_normalized)) <= abs_tol:
upper_limit = i
#if math.isclose(self.s1_normalized[i], 0.1*self.s1_normalized[peak_index], rel_tol=0.05):
if abs(self.s1_normalized[i]-0.1*max(self.s1_normalized)) <= abs_tol:
lower_limit = i
print(upper_limit)
print(lower_limit)
self.XY.axes.text(100,0.9,"Rise Time = " + str(upper_limit-lower_limit)+'s')
self.XY.draw()
def fall_time(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.05
abs_tol = 0.1
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i]==max(self.s1_normalized):
max_index = i
print("max index="+ str(max_index))
for i in range(max_index,len(self.s1_normalized)):
if abs(self.s1_normalized[i] - 0.9 * max(self.s1_normalized)) <= abs_tol:
lower_limit = i
if abs(self.s1_normalized[i] - 0.1 * max(self.s1_normalized)) <= abs_tol:
upper_limit = i
break
print(upper_limit)
print(lower_limit)
self.XY.axes.text(100,0.8,"Fall Time = " + str(upper_limit - lower_limit) + 's')
self.XY.draw()
def FWHM(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.15
abs_tol = 0.1
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i] == max(self.s1_normalized):
max_index = i
print("max index=" + str(max_index))
for i in range(max_index):
if abs(self.s1_normalized[i] - 0.5 * max(self.s1_normalized)) <= abs_tol:
lower_limit = i
for i in range(max_index, len(self.s1_normalized)):
if abs(self.s1_normalized[i] - 0.5 * max(self.s1_normalized)) <= abs_tol:
upper_limit = i
break
print(upper_limit)
print(lower_limit)
x = [lower_limit,upper_limit]
y = [self.s1_normalized[lower_limit],self.s1_normalized[upper_limit]]
self.XY.axes.plot(x,y,c='red')
self.XY.axes.text(100,0.7, "FWHM = " + str(upper_limit - lower_limit) + 's')
self.XY.draw()
def radar_plot(self):
titles = self.sensor_name
self.Radar_Layout = QtWidgets.QVBoxLayout(self.Radar_widget)
self.radar = Radar(titles, rect=None, parent=self.Radar_widget)
self.Radar_Layout.addWidget(self.radar)
self.radar_toolbar = NavigationToolbar(self.radar, self.Radar_widget)
self.Radar_Layout.addWidget(self.radar_toolbar)
for i in range(121):
self.radar.plot([self.s1_normalized[i],self.s2_normalized[i],self.s3_normalized[i],self.s4_normalized[i],self.s5_normalized[i],self.s6_normalized[i],self.s7_normalized[i],self.s8_normalized[i],self.s9_normalized[i],self.s10_normalized[i],self.s11_normalized[i],self.s12_normalized[i],self.s13_normalized[i],self.s14_normalized[i],self.s15_normalized[i],self.s16_normalized[i],self.s17_normalized[i],self.s18_normalized[i]])
self.radar.draw()
self.actionRadar_Plot.setEnabled(False)
def box_plot(self):
labels = self.sensor_name
data = [self.s1_normalized,self.s2_normalized,self.s3_normalized,self.s4_normalized,self.s5_normalized,self.s6_normalized,self.s7_normalized,self.s8_normalized,self.s9_normalized,self.s10_normalized,self.s11_normalized,self.s12_normalized,self.s13_normalized,self.s14_normalized,self.s15_normalized,self.s16_normalized,self.s17_normalized,self.s18_normalized]
self.box.axes.cla()
self.box.axes.boxplot(data,labels=labels)
self.box.axes.set_ylabel("Impedance")
self.box.draw()
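# --- Illustrative sketch (not part of the GUI) ---------------------------------
# Pure-Python illustration of the min-max normalisation and the 10%-90%
# rise-time estimate used by Analysis.rise_time(), on a made-up series.
# The 0.1 tolerance mirrors the abs_tol used above; the result is in samples.
def _example_rise_time():
    series = [0.0, 1.0, 3.0, 6.0, 9.0, 10.0, 9.5, 7.0, 4.0, 1.0]
    lo, hi = min(series), max(series)
    normalized = [(v - lo) / (hi - lo) for v in series]
    abs_tol = 0.1
    max_index = normalized.index(max(normalized))
    upper_limit = lower_limit = 0
    for i in range(max_index):
        if abs(normalized[i] - 0.9 * max(normalized)) <= abs_tol:
            upper_limit = i
        if abs(normalized[i] - 0.1 * max(normalized)) <= abs_tol:
            lower_limit = i
    return upper_limit - lower_limit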
| |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class RollbackAddedLoyaltyPointsEffectProps(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'program_id': 'int',
'sub_ledger_id': 'str',
'value': 'float',
'recipient_integration_id': 'str',
'transaction_uuid': 'str'
}
attribute_map = {
'program_id': 'programId',
'sub_ledger_id': 'subLedgerId',
'value': 'value',
'recipient_integration_id': 'recipientIntegrationId',
'transaction_uuid': 'transactionUUID'
}
def __init__(self, program_id=None, sub_ledger_id=None, value=None, recipient_integration_id=None, transaction_uuid=None, local_vars_configuration=None): # noqa: E501
"""RollbackAddedLoyaltyPointsEffectProps - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._program_id = None
self._sub_ledger_id = None
self._value = None
self._recipient_integration_id = None
self._transaction_uuid = None
self.discriminator = None
self.program_id = program_id
self.sub_ledger_id = sub_ledger_id
self.value = value
self.recipient_integration_id = recipient_integration_id
self.transaction_uuid = transaction_uuid
@property
def program_id(self):
"""Gets the program_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
The ID of the loyalty program where the points were originally added # noqa: E501
:return: The program_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:rtype: int
"""
return self._program_id
@program_id.setter
def program_id(self, program_id):
"""Sets the program_id of this RollbackAddedLoyaltyPointsEffectProps.
The ID of the loyalty program where the points were originally added # noqa: E501
:param program_id: The program_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and program_id is None: # noqa: E501
raise ValueError("Invalid value for `program_id`, must not be `None`") # noqa: E501
self._program_id = program_id
@property
def sub_ledger_id(self):
"""Gets the sub_ledger_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
The ID of the subledger within the loyalty program where these points were originally added # noqa: E501
:return: The sub_ledger_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._sub_ledger_id
@sub_ledger_id.setter
def sub_ledger_id(self, sub_ledger_id):
"""Sets the sub_ledger_id of this RollbackAddedLoyaltyPointsEffectProps.
The ID of the subledger within the loyalty program where these points were originally added # noqa: E501
:param sub_ledger_id: The sub_ledger_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and sub_ledger_id is None: # noqa: E501
raise ValueError("Invalid value for `sub_ledger_id`, must not be `None`") # noqa: E501
self._sub_ledger_id = sub_ledger_id
@property
def value(self):
"""Gets the value of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
The amount of points that were rolled back # noqa: E501
:return: The value of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:rtype: float
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this RollbackAddedLoyaltyPointsEffectProps.
The amount of points that were rolled back # noqa: E501
:param value: The value of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:type: float
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
@property
def recipient_integration_id(self):
"""Gets the recipient_integration_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
The user for whom these points were originally added # noqa: E501
:return: The recipient_integration_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._recipient_integration_id
@recipient_integration_id.setter
def recipient_integration_id(self, recipient_integration_id):
"""Sets the recipient_integration_id of this RollbackAddedLoyaltyPointsEffectProps.
The user for whom these points were originally added # noqa: E501
:param recipient_integration_id: The recipient_integration_id of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and recipient_integration_id is None: # noqa: E501
raise ValueError("Invalid value for `recipient_integration_id`, must not be `None`") # noqa: E501
self._recipient_integration_id = recipient_integration_id
@property
def transaction_uuid(self):
"""Gets the transaction_uuid of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
        The identifier of the 'deduction' entry added to the ledger as the `addLoyaltyPoints` effect is rolled back.  # noqa: E501
:return: The transaction_uuid of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:rtype: str
"""
return self._transaction_uuid
@transaction_uuid.setter
def transaction_uuid(self, transaction_uuid):
"""Sets the transaction_uuid of this RollbackAddedLoyaltyPointsEffectProps.
        The identifier of the 'deduction' entry added to the ledger as the `addLoyaltyPoints` effect is rolled back.  # noqa: E501
:param transaction_uuid: The transaction_uuid of this RollbackAddedLoyaltyPointsEffectProps. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and transaction_uuid is None: # noqa: E501
raise ValueError("Invalid value for `transaction_uuid`, must not be `None`") # noqa: E501
self._transaction_uuid = transaction_uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RollbackAddedLoyaltyPointsEffectProps):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RollbackAddedLoyaltyPointsEffectProps):
return True
return self.to_dict() != other.to_dict()
| |
from sqlalchemy import select, cast, Date, Float, desc, func, or_
from ecorelevesensor.models import DBSession, Individual
from ecorelevesensor.models.data import (
V_Individuals_LatLonDate,
V_Individuals_History
)
from ecorelevesensor.models.views import V_SearchIndiv
from pyramid.security import NO_PERMISSION_REQUIRED
from pyramid.view import view_config
from collections import OrderedDict
from datetime import datetime
import json
route_prefix = 'core/individuals/'
@view_config(route_name=route_prefix + 'search/values', renderer='json')
def core_individuals_values(request):
''' Get the different values of the field_name given in parameter.
If a parameter limit is passed, then limit the number of values returned.
'''
column = request.params['field_name']
limit = int(request.params.get('limit', 0))
if column in V_SearchIndiv.columns:
query = select([V_SearchIndiv.c[column]]
).where(V_SearchIndiv.columns[column]!=None
).order_by(V_SearchIndiv.columns[column]
).distinct()
if limit > 0:
query = query.limit(limit)
return [str(item[column]) for item in DBSession.execute(query).fetchall()]
else:
return []
@view_config(route_name=route_prefix + 'search', renderer='json', request_method='POST')
def core_individuals_search(request):
'''Search individuals by posting a JSON object containing the fields :
- criteria : dict object with column_name:value fields
- order_by : dict object with column_name:'asc' or column_name:'desc' fields
- offset : int
- limit : int
'''
query = select(V_SearchIndiv.c)
# Look over the criteria list
criteria = json.loads(request.POST.get('criteria', '{}'))
for column_name, obj in criteria.items():
if column_name in V_SearchIndiv.c:
column = V_SearchIndiv.c[column_name]
value = obj['value']
op = obj.get('op', 'is')
if op in ['is', 'null']:
query = query.where(column == value)
elif op == 'is not':
                # Python `or` cannot combine SQL expressions; use or_() so
                # NULL values are also matched
                query = query.where(or_(column != value, column == None))
elif op == 'not null':
query = query.where(column != value)
elif op == 'begin with':
query = query.where(column.like(value + '%'))
elif op == 'not begin with':
query = query.where(column.notlike(value + '%'))
# Set sorting columns and order
order_by = json.loads(request.POST.get('order_by', '[]'))
order_by_clause = []
for obj in order_by:
column, order = obj.split(':')
if column in V_SearchIndiv.columns:
if order == 'asc':
order_by_clause.append(V_SearchIndiv.columns[column].asc())
elif order == 'desc':
order_by_clause.append(V_SearchIndiv.columns[column].desc())
if len(order_by_clause) > 0:
query = query.order_by(*order_by_clause)
# Run query
total = DBSession.execute(select([func.count()]).select_from(query.alias())).scalar()
# Define the limit and offset if exist
offset = int(request.POST.get('offset', 0))
limit = int(request.POST.get('per_page', 0))
if limit > 0:
query = query.limit(limit)
if offset > 0:
query = query.offset(offset)
result = [{'total_entries':total}]
data = DBSession.execute(query).fetchall()
result.append([OrderedDict(row) for row in data])
return result
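# --- Illustrative sketch (not part of the views) -------------------------------
# Example of the form-encoded payload core_individuals_search expects; the
# column names used in `criteria` and `order_by` are hypothetical and must
# exist in V_SearchIndiv.
_EXAMPLE_SEARCH_PAYLOAD = {
    'criteria': json.dumps({'species': {'op': 'is', 'value': 'Gyps fulvus'},
                            'ptt': {'op': 'not null', 'value': None}}),
    'order_by': json.dumps(['species:asc']),
    'offset': 0,
    'per_page': 20,
}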
@view_config(
permission=NO_PERMISSION_REQUIRED,
route_name=route_prefix + 'search/export',
renderer='csv',
request_method='POST'
)
def core_individuals_search_export(request):
'''Search individuals by posting a JSON object containing the fields :
- criteria : dict object with column_name:value fields
- order_by : dict object with column_name:'asc' or column_name:'desc' fields
- offset : int
- limit : int
Return search results as CSV.
'''
query = select(V_SearchIndiv.c)
# Look over the criteria list
criteria = request.json_body.get('criteria', {})
for column_name, obj in criteria.items():
if column_name in V_SearchIndiv.c:
column = V_SearchIndiv.c[column_name]
value = obj['value']
op = obj.get('op', 'is')
if op in ['is', 'null']:
query = query.where(column == value)
elif op == 'is not':
                # Python `or` cannot combine SQL expressions; use or_() so
                # NULL values are also matched
                query = query.where(or_(column != value, column == None))
elif op == 'not null':
query = query.where(column != value)
elif op == 'begin with':
query = query.where(column.like(value + '%'))
elif op == 'not begin with':
query = query.where(column.notlike(value + '%'))
# Run query
data = DBSession.execute(query).fetchall()
header = [col.name for col in V_SearchIndiv.c]
rows = [[val for val in row] for row in data]
# override attributes of response
filename = 'individual_search_export.csv'
request.response.content_disposition = 'attachment;filename=' + filename
return {
'header': header,
'rows': rows,
}
@view_config(route_name=route_prefix + 'stations', renderer='json')
def core_individuals_stations(request):
''' Get the stations of an identified individual. Parameter is : id (int)'''
try:
id = int(request.params['id'])
# Query
query = select([cast(V_Individuals_LatLonDate.c.lat, Float), cast(V_Individuals_LatLonDate.c.lon, Float), V_Individuals_LatLonDate.c.date]
).where(V_Individuals_LatLonDate.c.ind_id == id).order_by(desc(V_Individuals_LatLonDate.c.date))
# Create list of features from query result
epoch = datetime.utcfromtimestamp(0)
features = [
{
'type':'Feature',
'properties':{'date':(date - epoch).total_seconds()},
'geometry':{'type':'Point', 'coordinates':[lon,lat]}
}
for lat, lon, date in reversed(DBSession.execute(query).fetchall())]
result = {'type':'FeatureCollection', 'features':features}
return result
except Exception:
return []
@view_config(route_name=route_prefix + 'history', renderer='json')
def core_individuals_history(request):
''' Get the history of an identified individual. Parameter: id (int)'''
try:
id = int(request.params['id'])
# Query for characteristic history list
query = select([V_Individuals_History.c.label, V_Individuals_History.c.value, cast(V_Individuals_History.c.begin_date, Date), cast(V_Individuals_History.c.end_date, Date)]
).where(V_Individuals_History.c.id == id
).order_by(V_Individuals_History.c.carac, desc(V_Individuals_History.c.begin_date))
# Create list of characteristic history
null_date_filter = lambda date: None if date is None else str(date)
history = [OrderedDict([('characteristic',label), ('value',value), ('from',str(begin_date)), ('to',null_date_filter(end_date))]) for label, value, begin_date, end_date in DBSession.execute(query).fetchall()]
result = {'history':history}
# Get current value from the list, preventing a new connection to the database
result['Age'] = next((item['value'] for item in history if item['characteristic'] == 'Age'), None)
result['Sex'] = next((item['value'] for item in history if item['characteristic'] == 'Sex'), None)
result['PTT'] = next((item['value'] for item in history if item['characteristic'] == 'PTT'), None)
result['Species'] = next((item['value'] for item in history if item['characteristic'] == 'Species'), None)
result['Origin'] = next((item['value'] for item in history if item['characteristic'] == 'Origin'), None)
return result
except Exception:
return []
@view_config(route_name=(route_prefix + 'count'), renderer='json')
def core_individuals_count(request):
return {'count':DBSession.execute(select([func.count(Individual.id).label('nb')])).scalar()}
@view_config(route_name='core/individual', renderer='json')
def core_individual(request):
''' Get the attributes of an identified individual.
'''
id = int(request.matchdict['id'])
indiv = DBSession.query(Individual).filter(Individual.id==id).one()
query = select([V_Individuals_History.c.label, V_Individuals_History.c.value, cast(V_Individuals_History.c.begin_date, Date), cast(V_Individuals_History.c.end_date, Date)]
).where(V_Individuals_History.c.id == id
).order_by(V_Individuals_History.c.carac, desc(V_Individuals_History.c.begin_date))
carac = DBSession.execute(query).fetchall()
null_date_filter = lambda date: None if date is None else str(date)
indiv.history = [OrderedDict([('name',label), ('value',value), ('from',str(begin_date)), ('to',null_date_filter(end_date))]) for label, value, begin_date, end_date in carac]
return indiv
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Migrate dashboard position_json data from V1 to V2
Revision ID: bebcf3fed1fe
Revises: fc480c87706c
Create Date: 2018-07-22 11:59:07.025119
"""
# revision identifiers, used by Alembic.
import collections
from functools import reduce
import json
import sys
import uuid
from alembic import op
from sqlalchemy import (
Column,
ForeignKey,
Integer,
String,
Table,
Text,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from superset import db
revision = 'bebcf3fed1fe'
down_revision = 'fc480c87706c'
Base = declarative_base()
BACKGROUND_TRANSPARENT = 'BACKGROUND_TRANSPARENT'
CHART_TYPE = 'DASHBOARD_CHART_TYPE'
COLUMN_TYPE = 'DASHBOARD_COLUMN_TYPE'
DASHBOARD_GRID_ID = 'DASHBOARD_GRID_ID'
DASHBOARD_GRID_TYPE = 'DASHBOARD_GRID_TYPE'
DASHBOARD_HEADER_ID = 'DASHBOARD_HEADER_ID'
DASHBOARD_HEADER_TYPE = 'DASHBOARD_HEADER_TYPE'
DASHBOARD_ROOT_ID = 'DASHBOARD_ROOT_ID'
DASHBOARD_ROOT_TYPE = 'DASHBOARD_ROOT_TYPE'
DASHBOARD_VERSION_KEY = 'DASHBOARD_VERSION_KEY'
MARKDOWN_TYPE = 'DASHBOARD_MARKDOWN_TYPE'
ROW_TYPE = 'DASHBOARD_ROW_TYPE'
GRID_COLUMN_COUNT = 12
GRID_MIN_COLUMN_COUNT = 1
GRID_MIN_ROW_UNITS = 5
GRID_RATIO = 4.0
NUMBER_OF_CHARTS_PER_ROW = 3
MAX_RECURSIVE_LEVEL = 6
ROW_HEIGHT = 8
TOTAL_COLUMNS = 48
DEFAULT_CHART_WIDTH = int(TOTAL_COLUMNS / NUMBER_OF_CHARTS_PER_ROW)
MAX_VALUE = sys.maxsize
class Slice(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
params = Column(Text)
viz_type = Column(String(250))
dashboard_slices = Table(
'dashboard_slices', Base.metadata,
Column('id', Integer, primary_key=True),
Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
Column('slice_id', Integer, ForeignKey('slices.id')),
)
class Dashboard(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = 'dashboards'
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
slices = relationship(
'Slice', secondary=dashboard_slices, backref='dashboards')
def is_v2_dash(positions):
return (
isinstance(positions, dict) and
positions.get(DASHBOARD_VERSION_KEY) == 'v2'
)
def get_boundary(positions):
top = MAX_VALUE
left = MAX_VALUE
bottom = 0
right = 0
for position in positions:
top = min(position['row'], top)
left = min(position['col'], left)
bottom = max(position['row'] + position['size_y'], bottom)
right = max(position['col'] + position['size_x'], right)
return {
'top': top,
'bottom': bottom,
'left': left,
'right': right,
}
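# Illustrative example (made-up positions): for
# [{'row': 0, 'col': 1, 'size_x': 16, 'size_y': 8},
#  {'row': 8, 'col': 1, 'size_x': 32, 'size_y': 8}]
# get_boundary returns {'top': 0, 'bottom': 16, 'left': 1, 'right': 33}.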
def generate_id():
return uuid.uuid4().hex[:8]
def has_overlap(positions, xAxis=True):
sorted_positions = \
sorted(positions[:], key=lambda pos: pos['col']) \
if xAxis else sorted(positions[:], key=lambda pos: pos['row'])
result = False
for idx, position in enumerate(sorted_positions):
if idx < len(sorted_positions) - 1:
if xAxis:
result = position['col'] + position['size_x'] > \
sorted_positions[idx + 1]['col']
else:
result = position['row'] + position['size_y'] > \
sorted_positions[idx + 1]['row']
if result:
break
return result
def get_empty_layout():
return {
DASHBOARD_VERSION_KEY: 'v2',
DASHBOARD_ROOT_ID: {
'type': DASHBOARD_ROOT_TYPE,
'id': DASHBOARD_ROOT_ID,
'children': [DASHBOARD_GRID_ID],
},
DASHBOARD_GRID_ID: {
'type': DASHBOARD_GRID_TYPE,
'id': DASHBOARD_GRID_ID,
'children': [],
},
}
def get_header_component(title):
return {
'id': DASHBOARD_HEADER_ID,
'type': DASHBOARD_HEADER_TYPE,
'meta': {
'text': title,
},
}
def get_row_container():
return {
'type': ROW_TYPE,
'id': 'DASHBOARD_ROW_TYPE-{}'.format(generate_id()),
'children': [],
'meta': {
'background': BACKGROUND_TRANSPARENT,
},
}
def get_col_container():
return {
'type': COLUMN_TYPE,
'id': 'DASHBOARD_COLUMN_TYPE-{}'.format(generate_id()),
'children': [],
'meta': {
'background': BACKGROUND_TRANSPARENT,
},
}
def get_chart_holder(position):
size_x = position['size_x']
size_y = position['size_y']
slice_id = position['slice_id']
slice_name = position.get('slice_name')
code = position.get('code')
width = max(
GRID_MIN_COLUMN_COUNT,
int(round(size_x / GRID_RATIO)),
)
height = max(
GRID_MIN_ROW_UNITS,
int(round(((size_y / GRID_RATIO) * 100) / ROW_HEIGHT)),
)
if code is not None:
markdown_content = ' ' # white-space markdown
if len(code):
markdown_content = code
elif slice_name.strip():
markdown_content = '##### {}'.format(slice_name)
return {
'type': MARKDOWN_TYPE,
'id': 'DASHBOARD_MARKDOWN_TYPE-{}'.format(generate_id()),
'children': [],
'meta': {
'width': width,
'height': height,
'code': markdown_content,
},
}
return {
'type': CHART_TYPE,
'id': 'DASHBOARD_CHART_TYPE-{}'.format(generate_id()),
'children': [],
'meta': {
'width': width,
'height': height,
'chartId': int(slice_id),
},
}
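# Sizing example (made-up values): a v1 chart with size_x = 16 and size_y = 16
# becomes width = max(1, round(16 / 4.0)) = 4 grid columns and
# height = max(5, round((16 / 4.0) * 100 / 8)) = 50 row units.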
def get_children_max(children, attr, root):
return max([root[childId]['meta'][attr] for childId in children])
def get_children_sum(children, attr, root):
return reduce(
(lambda sum, childId: sum + root[childId]['meta'][attr]),
children,
0,
)
# find column that: width > 2 and
# each row has at least 1 chart can reduce width
def get_wide_column_ids(children, root):
return list(
filter(
lambda childId: can_reduce_column_width(root[childId], root),
children,
),
)
def is_wide_leaf_component(component):
return (
component['type'] in [CHART_TYPE, MARKDOWN_TYPE] and
component['meta']['width'] > GRID_MIN_COLUMN_COUNT
)
def can_reduce_column_width(column_component, root):
return (
column_component['type'] == COLUMN_TYPE and
column_component['meta']['width'] > GRID_MIN_COLUMN_COUNT and
all([
is_wide_leaf_component(root[childId]) or (
root[childId]['type'] == ROW_TYPE and
all([
is_wide_leaf_component(root[id]) for id in root[childId]['children']
])
) for childId in column_component['children']
])
)
def reduce_row_width(row_component, root):
wide_leaf_component_ids = list(
filter(
lambda childId: is_wide_leaf_component(root[childId]),
row_component['children'],
),
)
widest_chart_id = None
widest_width = 0
for component_id in wide_leaf_component_ids:
if root[component_id]['meta']['width'] > widest_width:
widest_width = root[component_id]['meta']['width']
widest_chart_id = component_id
if widest_chart_id:
root[widest_chart_id]['meta']['width'] -= 1
return get_children_sum(row_component['children'], 'width', root)
def reduce_component_width(component):
if is_wide_leaf_component(component):
component['meta']['width'] -= 1
return component['meta']['width']
def convert(positions, level, parent, root):
if len(positions) == 0:
return
if len(positions) == 1 or level >= MAX_RECURSIVE_LEVEL:
# special treatment for single chart dash:
# always wrap chart inside a row
if parent['type'] == DASHBOARD_GRID_TYPE:
row_container = get_row_container()
root[row_container['id']] = row_container
parent['children'].append(row_container['id'])
parent = row_container
chart_holder = get_chart_holder(positions[0])
root[chart_holder['id']] = chart_holder
parent['children'].append(chart_holder['id'])
return
current_positions = positions[:]
boundary = get_boundary(current_positions)
top = boundary['top']
bottom = boundary['bottom']
left = boundary['left']
right = boundary['right']
# find row dividers
layers = []
current_row = top + 1
while len(current_positions) and current_row <= bottom:
upper = []
lower = []
is_row_divider = True
for position in current_positions:
row = position['row']
size_y = position['size_y']
if row + size_y <= current_row:
lower.append(position)
continue
elif row >= current_row:
upper.append(position)
continue
is_row_divider = False
break
if is_row_divider:
current_positions = upper[:]
layers.append(lower)
current_row += 1
# Each layer is a list of positions belonging to the same row section;
# they can be a list of charts, arranged in columns, or mixed
for layer in layers:
if len(layer) == 0:
continue
if len(layer) == 1 and parent['type'] == COLUMN_TYPE:
chart_holder = get_chart_holder(layer[0])
root[chart_holder['id']] = chart_holder
parent['children'].append(chart_holder['id'])
continue
# create a new row
row_container = get_row_container()
root[row_container['id']] = row_container
parent['children'].append(row_container['id'])
current_positions = layer[:]
if not has_overlap(current_positions):
# this is a list of charts in the same row
sorted_by_col = sorted(
current_positions,
key=lambda pos: pos['col'],
)
for position in sorted_by_col:
chart_holder = get_chart_holder(position)
root[chart_holder['id']] = chart_holder
row_container['children'].append(chart_holder['id'])
else:
# this row has columns, find col dividers
current_col = left + 1
while len(current_positions) and current_col <= right:
upper = []
lower = []
is_col_divider = True
for position in current_positions:
col = position['col']
size_x = position['size_x']
if col + size_x <= current_col:
lower.append(position)
continue
elif col >= current_col:
upper.append(position)
continue
is_col_divider = False
break
if is_col_divider:
# a single chart in the column:
# add to parent container without creating a new column container
if len(lower) == 1:
chart_holder = get_chart_holder(lower[0])
root[chart_holder['id']] = chart_holder
row_container['children'].append(chart_holder['id'])
else:
# create new col container
col_container = get_col_container()
root[col_container['id']] = col_container
if not has_overlap(lower, False):
sorted_by_row = sorted(
lower,
key=lambda pos: pos['row'],
)
for position in sorted_by_row:
chart_holder = get_chart_holder(position)
root[chart_holder['id']] = chart_holder
col_container['children'].append(chart_holder['id'])
else:
convert(lower, level + 2, col_container, root)
# add col meta
if len(col_container['children']):
row_container['children'].append(col_container['id'])
col_container['meta']['width'] = get_children_max(
col_container['children'],
'width',
root,
)
current_positions = upper[:]
current_col += 1
# add row meta
row_container['meta']['width'] = get_children_sum(
row_container['children'],
'width',
root,
)
def convert_to_layout(positions):
root = get_empty_layout()
convert(positions, 0, root[DASHBOARD_GRID_ID], root)
# remove row's width, height and col's height from its meta data
# and make sure every row's width <= GRID_COLUMN_COUNT
# Each item is a dashboard component:
# row_container, or col_container, or chart_holder
for item in root.values():
if not isinstance(item, dict):
continue
if ROW_TYPE == item['type']:
meta = item['meta']
if meta.get('width', 0) > GRID_COLUMN_COUNT:
current_width = meta['width']
while (
current_width > GRID_COLUMN_COUNT and
len(list(filter(
lambda childId: is_wide_leaf_component(root[childId]),
item['children'],
)))
):
current_width = reduce_row_width(item, root)
# because we round v1 chart sizes to the nearest v2 grid count, the
# overall row width might end up > GRID_COLUMN_COUNT.
# So here is an extra step to check row width, and reduce chart
# or column width if needed and if possible.
if current_width > GRID_COLUMN_COUNT:
has_wide_columns = True
while has_wide_columns:
col_ids = get_wide_column_ids(item['children'], root)
idx = 0
# need 2nd loop since same column may reduce multiple times
while idx < len(col_ids) and current_width > GRID_COLUMN_COUNT:
current_column = col_ids[idx]
for childId in root[current_column]['children']:
if root[childId]['type'] == ROW_TYPE:
root[childId]['meta']['width'] = reduce_row_width(
root[childId], root,
)
else:
root[childId]['meta']['width'] = \
reduce_component_width(root[childId])
root[current_column]['meta']['width'] = get_children_max(
root[current_column]['children'],
'width',
root,
)
current_width = get_children_sum(
item['children'],
'width',
root,
)
idx += 1
has_wide_columns = (
len(get_wide_column_ids(item['children'], root)) and
current_width > GRID_COLUMN_COUNT
)
meta.pop('width', None)
return root
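# Illustrative example (hypothetical positions): a v1 list such as
# [{'slice_id': 1, 'col': 1, 'row': 0, 'size_x': 16, 'size_y': 16},
#  {'slice_id': 2, 'col': 17, 'row': 0, 'size_x': 16, 'size_y': 16}]
# converts to a layout with one ROW_TYPE container holding two CHART_TYPE
# children, since the two charts share a row and do not overlap.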
def merge_position(position, bottom_line, last_column_start):
col = position['col']
size_x = position['size_x']
size_y = position['size_y']
end_column = len(bottom_line) \
if col + size_x > last_column_start \
else col + size_x
# finding index where index >= col and bottom_line value > bottom_line[col]
taller_indexes = [i for i, value in enumerate(bottom_line)
if (i >= col and value > bottom_line[col])]
current_row_value = bottom_line[col]
# if there is not enough space to fit the current position, start from the taller row value
if len(taller_indexes) > 0 and (taller_indexes[0] - col + 1) < size_x:
current_row_value = max(bottom_line[col:col + size_x])
# add current row value with size_y of this position
for i in range(col, end_column):
bottom_line[i] = current_row_value + size_y
# In the original position data, many positions' row attributes are
# problematic; for example, the same position is assigned to more than 1 chart.
# The convert function depends on row and col ids to split the whole dashboard into
# nested rows and columns. Bad row ids lead to many empty spaces, or a few charts
# overlapping in the same row.
# This function reads positions row by row.
# Then, based on the previous col id, width and height attributes,
# it re-calculates the next position's row id.
def scan_dashboard_positions_data(positions):
positions_by_row_id = {}
for position in positions:
row = position['row']
position['col'] = min(position['col'], TOTAL_COLUMNS)
if not positions_by_row_id.get(row):
positions_by_row_id[row] = []
positions_by_row_id[row].append(position)
bottom_line = [0] * (TOTAL_COLUMNS + 1)
# col index always starts from 1, set a large number for [0] as placeholder
bottom_line[0] = MAX_VALUE
last_column_start = max([position['col'] for position in positions])
# ordered_raw_positions are arrays of raw positions data sorted by row id
ordered_raw_positions = []
row_ids = sorted(positions_by_row_id.keys())
for row_id in row_ids:
ordered_raw_positions.append(positions_by_row_id[row_id])
updated_positions = []
while len(ordered_raw_positions):
next_row = ordered_raw_positions.pop(0)
next_col = 1
while len(next_row):
# special treatment for same (row, col) assigned to more than 1 chart:
# add one additional row and display wider chart first
available_columns_index = [i for i, e in enumerate(
list(filter(lambda x: x['col'] == next_col, next_row)))]
if len(available_columns_index):
idx = available_columns_index[0]
if len(available_columns_index) > 1:
idx = sorted(
available_columns_index,
key=lambda x: next_row[x]['size_x'],
reverse=True,
)[0]
next_position = next_row.pop(idx)
merge_position(next_position, bottom_line, last_column_start + 1)
next_position['row'] = \
bottom_line[next_position['col']] - next_position['size_y']
updated_positions.append(next_position)
next_col += next_position['size_x']
else:
next_col = next_row[0]['col']
return updated_positions
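# Illustrative example (hypothetical duplicate): if two charts are both
# assigned (row=0, col=1), the wider chart is intended to be placed first and
# merge_position raises bottom_line for those columns, so the second chart's
# recomputed 'row' places it below instead of on top of the first.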
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
dashboards = session.query(Dashboard).all()
for i, dashboard in enumerate(dashboards):
print('scanning dashboard ({}/{}) >>>>'.format(i + 1, len(dashboards)))
position_json = json.loads(dashboard.position_json or '[]')
if not is_v2_dash(position_json):
print('Converting dashboard... dash_id: {}'.format(dashboard.id))
position_dict = {}
positions = []
slices = dashboard.slices
if position_json:
# scan and fix positions data: extra spaces, dup rows, etc.
position_json = scan_dashboard_positions_data(position_json)
position_dict = \
{str(position['slice_id']): position for position in position_json}
last_row_id = max([pos['row'] + pos['size_y'] for pos in position_json]) \
if position_json else 0
new_slice_counter = 0
for slice in slices:
position = position_dict.get(str(slice.id))
# some dashboards didn't have position_json;
# place 3 charts in a row
if not position:
position = {
'col': (
new_slice_counter % NUMBER_OF_CHARTS_PER_ROW *
DEFAULT_CHART_WIDTH + 1
),
'row': (
last_row_id +
int(new_slice_counter / NUMBER_OF_CHARTS_PER_ROW) *
DEFAULT_CHART_WIDTH
),
'size_x': DEFAULT_CHART_WIDTH,
'size_y': DEFAULT_CHART_WIDTH,
'slice_id': str(slice.id),
}
new_slice_counter += 1
# attach additional parameters to position dict,
# prepare to replace markup and separator viz_type
# to dashboard UI component
form_data = json.loads(slice.params or '{}')
viz_type = slice.viz_type
if form_data and viz_type in ['markup', 'separator']:
position['code'] = form_data.get('code')
position['slice_name'] = slice.slice_name
positions.append(position)
v2_layout = convert_to_layout(positions)
v2_layout[DASHBOARD_HEADER_ID] = get_header_component(
dashboard.dashboard_title)
sorted_by_key = collections.OrderedDict(sorted(v2_layout.items()))
dashboard.position_json = json.dumps(sorted_by_key, indent=2)
session.merge(dashboard)
session.commit()
else:
print('Skip converted dash_id: {}'.format(dashboard.id))
session.close()
def downgrade():
print('downgrade is done')
#!/usr/bin/env python3
import tensorflow as tf
from tfx.bricks import embedding, dense_to_one_hot, linear, dropout, reduce_max, batch_norm_lin, conv2d_bn, \
pow_1, softmax_2d
from model import ModelW2TArgs
class Model(ModelW2TArgs):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
conv_mul = 2
histories_embedding_size = 16
histories_vocabulary_length = len(data.idx2word_history)
history_length = data.train_set['histories'].shape[1]
action_templates_vocabulary_length = len(data.idx2word_action_template)
action_templates_embedding_size = 8
num_actions_arguments = data.batch_actions_arguments.shape[2]
actions_arguments_vocabulary_length = len(data.idx2word_action_arguments)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
with tf.name_scope('model'):
encoder_embedding = embedding(
input=histories,
length=histories_vocabulary_length,
size=histories_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
conv3 = encoder_embedding
# conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[1, 3, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_utt_size_3_layer_1'
)
encoded_utterances = reduce_max(conv3, [2], keep_dims=True)
with tf.name_scope("HistoryEncoder"):
conv3 = encoded_utterances
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_1'
)
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_2'
)
encoded_history = reduce_max(conv3, [1, 2])
with tf.name_scope("Decoder"):
second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :]
last_system_utterance = encoded_utterances[:, history_length - 2, 0, :]
last_user_utterance = encoded_utterances[:, history_length - 1, 0, :]
dialogue_state = tf.concat(
1,
[
encoded_history,
last_user_utterance,
last_system_utterance,
second_to_last_user_utterance,
],
name='dialogue_state'
)
dialogue_state_size = conv3.size + \
3 * histories_embedding_size * conv_mul
dialogue_state = tf.nn.relu(dialogue_state)
dialogue_state = dropout(dialogue_state, self.dropout_keep_prob)
# action prediction
projection = linear(
input=dialogue_state,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_1'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_1_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_2'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_2_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=action_templates_vocabulary_length,
name='linear_projection_3_predictions_action'
)
self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action")
# argument prediction
# first encode the decoded action template and the true action template
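# scheduled sampling: `choice` below is 1 with probability use_inputs_prob,
# in which case the true action template embedding is fed to the argument
# decoder; otherwise the model's own predicted embedding is used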
choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
predicted_action_templates_embedding = embedding(
input=prediction_action_argmax,
length=action_templates_vocabulary_length,
size=action_templates_embedding_size,
name='action_templates_embedding'
)
true_action_template_embedding = tf.gather(predicted_action_templates_embedding.embedding_table, actions_template)
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)
action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding
dialogue_state_action_template = tf.concat(
1,
[
dialogue_state,
action_templates_embedding
],
name='dialogue_state_action_template'
)
dialogue_state_action_template_size = (
dialogue_state_size +
action_templates_embedding_size
)
# condition on the dialogue state and the decoded template
projection = linear(
input=dialogue_state_action_template,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_1_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_1_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_2_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_2_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=num_actions_arguments * actions_arguments_vocabulary_length,
name='linear_projection_3_predictions_arguments'
)
self.predictions_arguments = softmax_2d(
input=projection,
n_classifiers=num_actions_arguments,
n_classes=actions_arguments_vocabulary_length,
name="softmax_2d_predictions_arguments")
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length)
one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length)
loss_action = tf.reduce_mean(
- one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
name='loss'
)
loss_arguments = tf.reduce_mean(
- one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
name='loss'
)
self.loss = loss_action + loss_arguments
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction_action = tf.equal(
tf.argmax(one_hot_labels_action, 1),
tf.argmax(self.predictions_action, 1)
)
self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
tf.scalar_summary('accuracy_action', self.accuracy_action)
correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
tf.argmax(self.predictions_arguments, 2))
self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
# coding=utf-8
import random
from copy import deepcopy, copy
from itertools import product
from simpleai.search.utils import argmin
MOST_CONSTRAINED_VARIABLE = 'mcv'
HIGHEST_DEGREE_VARIABLE = 'degree'
LEAST_CONSTRAINING_VALUE = 'lvc'
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True):
'''
Backtracking search.
variable_heuristic is the heuristic for variable choosing, can be
MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple
ordered choosing.
value_heuristic is the heuristic for value choosing, can be
LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing.
'''
assignment = {}
domains = deepcopy(problem.domains)
if variable_heuristic == MOST_CONSTRAINED_VARIABLE:
variable_chooser = _most_constrained_variable_chooser
elif variable_heuristic == HIGHEST_DEGREE_VARIABLE:
variable_chooser = _highest_degree_variable_chooser
else:
variable_chooser = _basic_variable_chooser
if value_heuristic == LEAST_CONSTRAINING_VALUE:
values_sorter = _least_constraining_values_sorter
else:
values_sorter = _basic_values_sorter
return _backtracking(problem,
assignment,
domains,
variable_chooser,
values_sorter,
inference=inference)
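# A minimal usage sketch (illustrative; assumes simpleai's CspProblem wrapper):
#
# from simpleai.search import CspProblem, backtrack
#
# variables = ('WA', 'NT', 'SA')
# domains = {v: ['red', 'green', 'blue'] for v in variables}
#
# def different(variables, values):
#     return values[0] != values[1]
#
# constraints = [(('WA', 'NT'), different),
#                (('WA', 'SA'), different),
#                (('NT', 'SA'), different)]
#
# problem = CspProblem(variables, domains, constraints)
# result = backtrack(problem, variable_heuristic=MOST_CONSTRAINED_VARIABLE)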
def _basic_variable_chooser(problem, variables, domains):
'''
Choose the next variable in order.
'''
return variables[0]
def _most_constrained_variable_chooser(problem, variables, domains):
'''
Choose the variable that has the fewest available values.
'''
# the variable with the fewest values available
return sorted(variables, key=lambda v: len(domains[v]))[0]
def _highest_degree_variable_chooser(problem, variables, domains):
'''
Choose the variable that is involved in the most constraints.
'''
# the variable involved in the most constraints
return sorted(variables, key=lambda v: problem.var_degrees[v], reverse=True)[0]
def _count_conflicts(problem, assignment, variable=None, value=None):
'''
Count the number of violated constraints on a given assignment.
'''
return len(_find_conflicts(problem, assignment, variable, value))
def _call_constraint(assignment, neighbors, constraint):
variables, values = zip(*[(n, assignment[n])
for n in neighbors])
return constraint(variables, values)
def _find_conflicts(problem, assignment, variable=None, value=None):
'''
Find violated constraints on a given assignment, with the possibility
of specifying a new variable and value to add to the assignment before
checking.
'''
if variable is not None and value is not None:
assignment = deepcopy(assignment)
assignment[variable] = value
conflicts = []
for neighbors, constraint in problem.constraints:
# if all the neighbors on the constraint have values, check if conflict
if all(n in assignment for n in neighbors):
if not _call_constraint(assignment, neighbors, constraint):
conflicts.append((neighbors, constraint))
return conflicts
def _basic_values_sorter(problem, assignment, variable, domains):
'''
Sort values in the same original order.
'''
return domains[variable][:]
def _least_constraining_values_sorter(problem, assignment, variable, domains):
'''
Sort values based on how many conflicts they generate if assigned.
'''
# prefer values that generate fewer conflicts
values = sorted(domains[variable][:],
key=lambda v: _count_conflicts(problem, assignment,
variable, v))
return values
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True):
'''
Internal recursive backtracking algorithm.
'''
from simpleai.search.arc import arc_consistency_3
if len(assignment) == len(problem.variables):
return assignment
pending = [v for v in problem.variables
if v not in assignment]
variable = variable_chooser(problem, pending, domains)
values = values_sorter(problem, assignment, variable, domains)
for value in values:
new_assignment = deepcopy(assignment)
new_assignment[variable] = value
if not _count_conflicts(problem, new_assignment): # TODO on aima also checks if using fc
new_domains = deepcopy(domains)
new_domains[variable] = [value]
if not inference or arc_consistency_3(new_domains, problem.constraints):
result = _backtracking(problem,
new_assignment,
new_domains,
variable_chooser,
values_sorter,
inference=inference)
if result:
return result
return None
def _min_conflicts_value(problem, assignment, variable):
'''
Return the value that generates the fewest conflicts.
In case of a tie, a random value is selected among the tied values.
'''
return argmin(problem.domains[variable], lambda x: _count_conflicts(problem, assignment, variable, x))
def min_conflicts(problem, initial_assignment=None, iterations_limit=0):
"""
Min conflicts search.
initial_assignment the initial assignment, or None to generate a random
one.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it finds an assignment
that doesn't generate conflicts (a solution).
"""
assignment = {}
if initial_assignment:
assignment.update(initial_assignment)
else:
for variable in problem.variables:
value = _min_conflicts_value(problem, assignment, variable)
assignment[variable] = value
iteration = 0
run = True
while run:
conflicts = _find_conflicts(problem, assignment)
conflict_variables = [v for v in problem.variables
if any(v in conflict[0] for conflict in conflicts)]
if conflict_variables:
variable = random.choice(conflict_variables)
value = _min_conflicts_value(problem, assignment, variable)
assignment[variable] = value
iteration += 1
if iterations_limit and iteration >= iterations_limit:
run = False
elif not _count_conflicts(problem, assignment):
run = False
return assignment
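# Usage sketch (same hypothetical problem as the backtrack example above):
# min_conflicts starts from a greedy initial assignment and repairs one
# conflicted variable per iteration, e.g.
# result = min_conflicts(problem, iterations_limit=100)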
def convert_to_binary(variables, domains, constraints):
"""
Returns new constraint list, all binary, using hidden variables.
You can use it as previous step when creating a problem.
"""
def wdiff(vars_):
def diff(variables, values):
hidden, other = variables
if hidden.startswith('hidden'):
idx = vars_.index(other)
return values[1] == values[0][idx]
else:
idx = vars_.index(hidden)
return values[0] == values[1][idx]
diff.no_wrap = True # so it's not wrapped to swap values
return diff
new_constraints = []
new_domains = copy(domains)
new_variables = list(variables)
last = 0
for vars_, const in constraints:
if len(vars_) == 2:
new_constraints.append((vars_, const))
continue
hidden = 'hidden%d' % last
new_variables.append(hidden)
last += 1
new_domains[hidden] = [t for t in product(*map(domains.get, vars_)) if const(vars_, t)]
for var in vars_:
new_constraints.append(((hidden, var), wdiff(vars_)))
return new_variables, new_domains, new_constraints
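# Illustrative sketch (hypothetical ternary constraint): converting
# [(('A', 'B', 'C'), all_different)]
# adds a hidden variable 'hidden0' whose domain is every (a, b, c) tuple
# consistent with the constraint, plus three binary constraints linking
# 'hidden0' to 'A', 'B' and 'C' through wdiff.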
"""
Support for the Amazon Polly text to speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts.amazon_polly/
"""
import logging
import voluptuous as vol
from homeassistant.components.tts import PLATFORM_SCHEMA, Provider
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['boto3==1.9.16']
_LOGGER = logging.getLogger(__name__)
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
CONF_PROFILE_NAME = 'profile_name'
ATTR_CREDENTIALS = 'credentials'
DEFAULT_REGION = 'us-east-1'
SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2',
'eu-west-3', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-2', 'ap-northeast-1', 'ap-south-1',
'sa-east-1']
CONF_VOICE = 'voice'
CONF_OUTPUT_FORMAT = 'output_format'
CONF_SAMPLE_RATE = 'sample_rate'
CONF_TEXT_TYPE = 'text_type'
SUPPORTED_VOICES = [
'Zhiyu', # Chinese
'Mads', 'Naja', # Danish
'Ruben', 'Lotte', # Dutch
'Russell', 'Nicole', # English Australian
'Brian', 'Amy', 'Emma', # English
'Aditi', 'Raveena', # English, Indian
'Joey', 'Justin', 'Matthew', 'Ivy', 'Joanna', 'Kendra', 'Kimberly',
'Salli', # English
'Geraint', # English Welsh
'Mathieu', 'Celine', 'Lea', # French
'Chantal', # French Canadian
'Hans', 'Marlene', 'Vicki', # German
'Aditi', # Hindi
'Karl', 'Dora', # Icelandic
'Giorgio', 'Carla', 'Bianca', # Italian
'Takumi', 'Mizuki', # Japanese
'Seoyeon', # Korean
'Liv', # Norwegian
'Jacek', 'Jan', 'Ewa', 'Maja', # Polish
'Ricardo', 'Vitoria', # Portuguese, Brazilian
'Cristiano', 'Ines', # Portuguese, European
'Carmen', # Romanian
'Maxim', 'Tatyana', # Russian
'Enrique', 'Conchita', 'Lucia', # Spanish European
'Mia', # Spanish Mexican
'Miguel', 'Penelope', # Spanish US
'Astrid', # Swedish
'Filiz', # Turkish
'Gwyneth', # Welsh
]
SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm']
SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050']
SUPPORTED_SAMPLE_RATES_MAP = {
'mp3': ['8000', '16000', '22050'],
'ogg_vorbis': ['8000', '16000', '22050'],
'pcm': ['8000', '16000'],
}
SUPPORTED_TEXT_TYPES = ['text', 'ssml']
CONTENT_TYPE_EXTENSIONS = {
'audio/mpeg': 'mp3',
'audio/ogg': 'ogg',
'audio/pcm': 'pcm',
}
DEFAULT_VOICE = 'Joanna'
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'
DEFAULT_SAMPLE_RATES = {
'mp3': '22050',
'ogg_vorbis': '22050',
'pcm': '16000',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_REGION, default=DEFAULT_REGION):
vol.In(SUPPORTED_REGIONS),
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT):
vol.In(SUPPORTED_OUTPUT_FORMATS),
vol.Optional(CONF_SAMPLE_RATE):
vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE):
vol.In(SUPPORTED_TEXT_TYPES),
})
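# Example configuration.yaml entry (illustrative values; see the component
# documentation linked in the module docstring for the full reference):
#
# tts:
#   - platform: amazon_polly
#     aws_access_key_id: YOUR_ACCESS_KEY_ID
#     aws_secret_access_key: YOUR_SECRET_ACCESS_KEY
#     region_name: us-east-1
#     voice: Joanna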
def get_engine(hass, config):
"""Set up Amazon Polly speech component."""
output_format = config.get(CONF_OUTPUT_FORMAT)
sample_rate = config.get(
CONF_SAMPLE_RATE, DEFAULT_SAMPLE_RATES[output_format])
if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
_LOGGER.error("%s is not a valid sample rate for %s",
sample_rate, output_format)
return None
config[CONF_SAMPLE_RATE] = sample_rate
import boto3
profile = config.get(CONF_PROFILE_NAME)
if profile is not None:
boto3.setup_default_session(profile_name=profile)
aws_config = {
CONF_REGION: config.get(CONF_REGION),
CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID),
CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY),
}
del config[CONF_REGION]
del config[CONF_ACCESS_KEY_ID]
del config[CONF_SECRET_ACCESS_KEY]
polly_client = boto3.client('polly', **aws_config)
supported_languages = []
all_voices = {}
all_voices_req = polly_client.describe_voices()
for voice in all_voices_req.get('Voices'):
all_voices[voice.get('Id')] = voice
if voice.get('LanguageCode') not in supported_languages:
supported_languages.append(voice.get('LanguageCode'))
return AmazonPollyProvider(
polly_client, config, supported_languages, all_voices)
class AmazonPollyProvider(Provider):
"""Amazon Polly speech api provider."""
def __init__(self, polly_client, config, supported_languages,
all_voices):
"""Initialize Amazon Polly provider for TTS."""
self.client = polly_client
self.config = config
self.supported_langs = supported_languages
self.all_voices = all_voices
self.default_voice = self.config.get(CONF_VOICE)
self.name = 'Amazon Polly'
@property
def supported_languages(self):
"""Return a list of supported languages."""
return self.supported_langs
@property
def default_language(self):
"""Return the default language."""
return self.all_voices.get(self.default_voice).get('LanguageCode')
@property
def default_options(self):
"""Return dict include default options."""
return {CONF_VOICE: self.default_voice}
@property
def supported_options(self):
"""Return a list of supported options."""
return [CONF_VOICE]
def get_tts_audio(self, message, language=None, options=None):
"""Request TTS file from Polly."""
voice_id = options.get(CONF_VOICE, self.default_voice)
voice_in_dict = self.all_voices.get(voice_id)
if language != voice_in_dict.get('LanguageCode'):
_LOGGER.error("%s does not support the %s language",
voice_id, language)
return None, None
resp = self.client.synthesize_speech(
OutputFormat=self.config[CONF_OUTPUT_FORMAT],
SampleRate=self.config[CONF_SAMPLE_RATE],
Text=message,
TextType=self.config[CONF_TEXT_TYPE],
VoiceId=voice_id
)
return (CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')],
resp.get('AudioStream').read())
from rapidsms.tests.scripted import TestScript
from form.models import *
from reporters.models import *
import reporters.app as reporter_app
import bednets.app as nigeria_app
import form.app as form_app
from app import App
from models import *
from django.core.management.commands.dumpdata import Command
import random
import time
import os
from datetime import datetime, timedelta
class TestApp (TestScript):
apps = (reporter_app.App, App, form_app.App, nigeria_app.App)
# the test_backend script does the loading of the dummy backend that allows reporters
# to work properly in tests
fixtures = ['nigeria_llin', 'kano_locations', 'test_backend']
def setUp(self):
TestScript.setUp(self)
def testFixture(self):
""""This isn't actually a test. It just takes advantage
of the test harness to spam a bunch of messages to the
supply app and spit out the data in a format that can
be sucked into a fixture"""
# this is the number of transactions that will be generated
transaction_count = 0
# these are the locations that will be the origins, chosen randomly
# from this list
# the destinations will be chosen randomly from the origins' children
originating_locations = [20, 2001, 2002, 2003,2004]
stock_levels = dict([[loc, random.randint(1, 10000) * 10 + 50000] for loc in originating_locations])
# the sender will always be the same, for now
phone = "55555"
all_txns = []
# these are the percentages these items will match
waybill_match_percent = .9
amount_match_percent = .9
loc_match_percent = .95
num_locs = len(Location.objects.all())
# allow specifying the minimum and maximum dates for message generation
min_date = datetime(2009,4,1)
max_date = datetime(2009,4,30)
min_time = time.mktime(min_date.timetuple())
max_time = time.mktime(max_date.timetuple())
# generate the array of dates we're going to use at the start. This is so we can order
# our transactions
iss_dates = []
for i in range(transaction_count):
iss_dates.append(datetime.fromtimestamp(random.randint(min_time, max_time)))
iss_dates.sort()
rec_dates = []
for i in range(transaction_count):
# make the date from a min and max timestamp
rec_dates.append(datetime.fromtimestamp(
random.randint(
# the min is the shipment date
time.mktime(iss_dates[i].timetuple()),
#the max is the shipment date + 0 to 4 days
time.mktime((iss_dates[i] + timedelta(random.randint(0,4))).timetuple()))))
for i in range(transaction_count):
# get some random data based on the parameters we've set above
origin = Location.objects.get(code=random.choice(originating_locations ))
destination = random.choice(origin.children.all())
waybill = random.randint(10000,99999)
amount = random.randint(1, 500) * 10
diff = stock_levels[int(origin.code)] - amount
if diff > 0:
stock = diff
else:
stock = random.randint(1, 10000) * 10
stock_levels[int(origin.code)] = stock
issue_string = "%s@%s > llin issue from %s to %s %s %s %s" % (phone, iss_dates[i].strftime("%Y%m%d%H%M"), origin.code, destination.code, waybill, amount, stock)
all_txns.append(issue_string)
# create a waybill number based on the likelihood of match
if random.random() < waybill_match_percent:
ret_waybill = waybill
else:
ret_waybill = random.randint(10000,99999)
# create an amount based on the likelihood of match
if random.random() < amount_match_percent:
ret_amount = amount
else:
ret_amount = random.randint(1, 500) * 10
# create an origin and destination based on the likelihood of match
if random.random() < loc_match_percent:
ret_orig = origin
else:
ret_orig = Location.objects.get(pk=random.randint(1,num_locs))
if random.random() < loc_match_percent:
ret_dest = destination
else:
ret_dest = Location.objects.get(pk=random.randint(1, num_locs))
if int(ret_dest.code) in stock_levels:
ret_stock = stock_levels[int(ret_dest.code)] + amount
else:
# make sure the stock at the receiver is higher than the amount of the bill
ret_stock = random.randint(1, 2000) * 10 + ret_amount
stock_levels[int(ret_dest.code)] = ret_stock
# make sure the date received is after the date sent
receive_string = "%s@%s > llin receive from %s to %s %s %s %s" % (phone, rec_dates[i].strftime("%Y%m%d%H%M"), ret_orig.code, ret_dest.code, ret_waybill, ret_amount, ret_stock)
all_txns.append(receive_string)
script = "\n".join(all_txns)
self.runScript(script)
dumpdata = Command()
filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"fixtures/test_transactions_stock.json"))
options = { "indent" : 2 }
datadump = dumpdata.handle("supply", **options)
# uncomment these lines to save the fixture
#file = open(filename, "w")
#file.write(datadump)
#file.close()
#print "=== Successfully wrote fixtures to %s ===" % filename
testIssueFormats = """
t_i_formats > llin register 20 sm mister sender
t_i_formats < Hello mister! You are now registered as Stock manager at KANO State.
# base case
t_i_formats > llin issue from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# casing
t_i_formats > llin ISSUE from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# other spellings
t_i_formats > llin issued from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_i_formats > llin ishew from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_i_formats > llin is from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# spaces
t_i_formats > llin issue from 20 to 2027 11111 200 1800
t_i_formats < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# fail
t_i_formats > llin send from 20 to 2027 11111 200 1800
t_i_formats < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testReceiveFormats = """
t_r_formats > llin register 20 sm mister sender
t_r_formats < Hello mister! You are now registered as Stock manager at KANO State.
# base case
t_r_formats > llin receive from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# casing
t_r_formats > llin RECEIVE from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# other spellings
t_r_formats > llin receeved from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_r_formats > llin recieve from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_r_formats > llin recv from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_r_formats > llin rec from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
t_r_formats > llin receeev from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# spaces
t_r_formats > llin receive from 20 to 2027 11111 200 1800
t_r_formats < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
# fail
# should this one fail??
t_r_formats > llin rcv from 20 to 2027 11111 200 1800
t_r_formats < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
t_r_formats > llin get from 20 to 2027 11111 200 1800
t_r_formats < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
def testScript(self):
mismatched_amounts = """
8005552222 > llin register 20 sm mister sender
8005552222 < Hello mister! You are now registered as Stock manager at KANO State.
8005551111 > llin register 2027 sm mister recipient
8005551111 < Hello mister! You are now registered as Stock manager at KURA LGA.
8005552222 > llin issue from 20 to 2027 11111 200 1800
8005552222 < Thank you mister. Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
8005551111 > llin receive from 20 to 2027 11111 150 500
8005551111 < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=150, stock=500
"""
self.runScript(mismatched_amounts)
sender = Reporter.objects.get(alias="msender")
recipient = Reporter.objects.get(alias="mrecipient")
issue = PartialTransaction.objects.get(origin__name__iexact="KANO",\
destination__name__iexact="KURA", shipment_id="11111",\
domain__code__abbreviation__iexact="LLIN", type="I", reporter__pk=sender.pk)
receipt = PartialTransaction.objects.get(origin__name__iexact="KANO",\
destination__name__iexact="KURA", shipment_id="11111",\
domain__code__abbreviation__iexact="LLIN", type="R", reporter__pk=recipient.pk)
origin_stock = Stock.objects.get(location__name__iexact="KANO",\
domain__code__abbreviation__iexact="LLIN")
dest_stock = Stock.objects.get(location__name__iexact="KURA",\
domain__code__abbreviation__iexact="LLIN")
# everything in its right place
self.assertEqual(sender.location, issue.origin)
self.assertEqual(recipient.location, issue.destination)
self.assertEqual(sender.location, receipt.origin)
self.assertEqual(recipient.location, receipt.destination)
# stocks created with correct balance
self.assertEqual(issue.stock, origin_stock.balance)
self.assertEqual(receipt.stock, dest_stock.balance)
# issue and receipt have been matched into a transaction
self.assertEqual(issue.status, 'C')
self.assertEqual(issue.status, receipt.status)
first_transaction = Transaction.objects.get(domain__code__abbreviation__iexact="LLIN",\
amount_sent=issue.amount, amount_received=receipt.amount,\
issue=issue, receipt=receipt)
# mister recipient received 50 fewer nets than were sent by mister sender
self.assertNotEqual(issue.amount, receipt.amount)
self.assertNotEqual(first_transaction.amount_sent, first_transaction.amount_received)
self.assertEqual(first_transaction.flag, 'A')
# mister recipient realizes his error and resends with correct amount
amendment = """
8005551111 > llin receive from 20 to 2027 11111 200 500
8005551111 < Thank you mister. Received report for LLIN RECEIVE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=500
"""
self.runScript(amendment)
# pick fresh sprouts of these from the database
receipt = PartialTransaction.objects.get(pk=receipt.pk)
first_transaction = Transaction.objects.get(pk=first_transaction.pk)
# mister recipient's original receipt should now be amended and
# the first transaction should be flagged as incorrect
self.assertEqual(receipt.status, 'A')
self.assertEqual(first_transaction.flag, 'I')
# mister recipient's amendment
second_receipt = PartialTransaction.objects.get(origin__name__iexact="KANO",\
destination__name__iexact="KURA", shipment_id="11111",\
domain__code__abbreviation__iexact="LLIN", type="R", reporter=recipient, status="C")
# make sure this is a new one
self.assertNotEqual(second_receipt.pk, receipt.pk)
# make sure a new transaction was matched
second_transaction = Transaction.objects.get(domain__code__abbreviation__iexact="LLIN",\
amount_sent=issue.amount, amount_received=second_receipt.amount,\
issue=issue, receipt=second_receipt)
# make sure this is a new one
self.assertNotEqual(first_transaction.pk, second_transaction.pk)
# the new transaction should not be flagged with either of these
self.assertNotEqual(second_transaction.flag, 'I')
self.assertNotEqual(second_transaction.flag, 'A')
# new figures should add up
self.assertEqual(issue.amount, second_receipt.amount)
self.assertEqual(second_transaction.amount_sent, second_transaction.amount_received)
def testUnregisteredSubmissions(self):
# send a form from an unregistered user and assure it is accepted
unregistered_submission = """
supply_tus_1 > llin issue from 20 to 2027 11111 200 1800
supply_tus_1 < Received report for LLIN ISSUE: origin=KANO, dest=KURA, waybill=11111, amount=200, stock=1800
supply_tus_1 < Please register your phone.
"""
self.runScript(unregistered_submission)
# check that the connection object in the transaction is set properly
connection = PersistantConnection.objects.get(identity="supply_tus_1")
issue = PartialTransaction.objects.get(origin__name__iexact="KANO",\
destination__name__iexact="KURA", shipment_id="11111",\
domain__code__abbreviation__iexact="LLIN", type="I", connection=connection)
# check that the reporter is empty
self.assertFalse(issue.reporter)
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2018 Preferred Infrastructure, Inc.
# Copyright (c) 2018 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
def _get_bounds(p, limit):
if p < -1 or p > limit:
# out of range, so it is empty
return None, None, None
if p <= 0:
p = 0
low = int(numpy.floor(p))
if low >= limit - 1:
high = low = limit - 1
p = float(low)
else:
high = low + 1
return p, low, high
def _get_bilinear_interp_params(y, x, y_low, x_low, y_high, x_high):
ly = y - y_low
lx = x - x_low
hy = 1. - ly
hx = 1. - lx
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
return w1, w2, w3, w4
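# Illustrative check: for y = 1.25, x = 2.5 with y_low = 1 and x_low = 2,
# ly = 0.25 and lx = 0.5, giving w1 = 0.375, w2 = 0.375, w3 = 0.125,
# w4 = 0.125; the four weights always sum to 1.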
_GET_BILINEAR_INTERP_KERNEL = '''
__device__
bool get_bounds(
T &p, const int limit, int &low, int &high) {
if (p < -1. || p > limit) {
// empty
return false;
}
if (p <= 0) {
p = 0;
}
low = (int)p;
if (low >= limit - 1) {
high = low = limit - 1;
p = (T)low;
} else {
high = low + 1;
}
return true;
}
__device__
void get_bilinear_interp_params(
T y, T x, int y_low, int x_low, int y_high, int x_high,
T &w1, T &w2, T &w3, T &w4) {
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly;
T hx = 1. - lx;
w1 = hy * hx;
w2 = hy * lx;
w3 = ly * hx;
w4 = ly * lx;
}
'''
class ROIAverageAlign2D(function.Function):
"""ROI average align over a set of 2d planes."""
def __init__(self, outsize, spatial_scale, sampling_ratio=None):
outh, outw = _pair(outsize)
if not (isinstance(outh, int) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, int) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, int):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, float) and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, int) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
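# Usage sketch (illustrative arguments): pooling ROIs to 7x7 bins from a
# feature map downscaled 16x, with 2x2 sampling points per bin:
# roi_align = ROIAverageAlign2D((7, 7), spatial_scale=1. / 16,
#                               sampling_ratio=2)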
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4,
roi_type.dtype == numpy.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
pooled_width, pooled_height = self.outw, self.outh
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
output_val = 0.
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
output_val += w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
# }}
output_val /= count
top_data[n, c, ph, pw] = output_val
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'T top_data',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int bottom_data_offset =
(roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
            // bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T v1 = bottom_data[bottom_data_offset +
y_low * width + x_low];
T v2 = bottom_data[bottom_data_offset +
y_low * width + x_high];
T v3 = bottom_data[bottom_data_offset +
y_high * width + x_low];
T v4 = bottom_data[bottom_data_offset +
y_high * width + x_high];
output_val += (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
// }}
}
}
output_val /= count;
top_data = output_val;
''',
'roi_average_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, top_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
spatial_scale = self.spatial_scale
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
top_diff_this_bin = top_diff[n, c, ph, pw]
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
g1 = top_diff_this_bin * w1 / count
g2 = top_diff_this_bin * w2 / count
g3 = top_diff_this_bin * w3 / count
g4 = top_diff_this_bin * w4 / count
if (x_low >= 0 and x_high >= 0 and
y_low >= 0 and y_high >= 0):
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, gy[0].dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff,
int32 num_rois, T spatial_scale,
int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
            // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) /
static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) /
static_cast<T>(pooled_width);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 &&
y_low >= 0 && y_high >= 0) {
atomicAdd(&bottom_diff[bottom_diff_offset +
y_low * width + x_low], g1);
atomicAdd(&bottom_diff[bottom_diff_offset +
y_low * width + x_high], g2);
atomicAdd(&bottom_diff[bottom_diff_offset +
y_high * width + x_low], g3);
atomicAdd(&bottom_diff[bottom_diff_offset +
y_high * width + x_high], g4);
}
// }}
}
}
''',
'roi_average_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], bottom_rois.shape[0],
self.spatial_scale, channels, height, width, self.outh, self.outw,
sampling_ratio_h, sampling_ratio_w, bottom_rois, bottom_roi_indices,
bottom_diff, size=gy[0].size)
return bottom_diff, None, None
def roi_average_align_2d(
x, rois, roi_indices, outsize, spatial_scale, sampling_ratio=None
):
"""Spatial Region of Interest (ROI) average align function.
This function acts similarly to
    :func:`~chainer.functions.roi_average_pooling_2d`, but it computes the
    average of the input spatial patch for each channel with bilinear
    interpolation over the region of interest.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: ``(n: batch, c: channel, h: height, w: width)``.
rois (~chainer.Variable): Input roi variable. The shape is expected to
be ``(n: data size, 4)``, and each datum is set as below:
``(y_min, x_min, y_max, x_max)``.
        roi_indices (~chainer.Variable): Input roi index variable. The shape is
            expected to be ``(n: data size, )``.
        outsize ((int, int) or int): Expected output size after pooling as
            ``(height, width)``. ``outsize=o`` and ``outsize=(o, o)``
            are equivalent.
        spatial_scale (float): Scale factor applied to the roi coordinates to
            map them onto the feature map.
        sampling_ratio ((int, int) or int): Number of sampling points per
            output bin along each axis used for the alignment. It must be an
            integer of at least :math:`1` or :obj:`None`; when :obj:`None` is
            passed, the value is decided automatically from the roi size.
            Different ratios for the height and width axes are supported by
            passing a tuple of ints as ``(sampling_ratio_h, sampling_ratio_w)``.
            ``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return ROIAverageAlign2D(outsize, spatial_scale, sampling_ratio)(
x, rois, roi_indices)
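# A minimal usage sketch (illustration only, not part of the original module,
# assuming chainer and numpy are available as imported above): pools a single
# roi covering a 1x1x8x8 feature map down to a 2x2 output on the CPU path.
if __name__ == '__main__':
    feat = numpy.arange(64, dtype=numpy.float32).reshape(1, 1, 8, 8)
    rois = numpy.array([[0., 0., 7., 7.]], dtype=numpy.float32)  # (y_min, x_min, y_max, x_max)
    roi_indices = numpy.array([0], dtype=numpy.int32)  # roi 0 belongs to batch item 0
    out = roi_average_align_2d(feat, rois, roi_indices,
                               outsize=2, spatial_scale=1.0, sampling_ratio=2)
    print(out.shape)  # (1, 1, 2, 2)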
| |
import itertools
import numpy as np
import pandas as pd
from graphysio.utils import truncatevecs
def findPressureFeet(curve):
series = curve.series
samplerate = curve.samplerate
fstderiv = series.diff().shift(-1)
sndderiv = fstderiv.diff().shift(-1)
# Remove deceleration peaks
sndderiv = sndderiv * (fstderiv > 0)
def performWindowing(sumcoef=4, quantcoef=3):
# Find pulse rising edge
winsum = int(samplerate / sumcoef)
winquant = int(samplerate * quantcoef)
sndderivsq = sndderiv ** 2
integral = sndderivsq.rolling(window=winsum, center=True).sum()
thres = integral.rolling(window=winquant).quantile(0.7)
thres = thres.fillna(method='backfill')
risings = (integral > thres).astype(int)
risingvar = risings.diff()
(risingStarts,) = (risingvar > 0).to_numpy().nonzero()
(risingStops,) = (risingvar < 0).to_numpy().nonzero()
return (risingStarts, risingStops)
found = False
for quantcoef in [3, 2, 1]:
# Try again with smaller window if we find nothing
risingStarts, risingStops = performWindowing(quantcoef=quantcoef)
try:
risingStops = risingStops[risingStops > risingStarts[0]]
found = True
break
except IndexError:
continue
# Last resort: find one foot on the whole series
if not found:
risingStarts = [0]
risingStops = [len(sndderiv) - 1]
def locateMaxima():
for start, stop in zip(risingStarts, risingStops):
idxstart = sndderiv.index[start]
idxstop = sndderiv.index[stop]
try:
maximum = sndderiv.loc[idxstart:idxstop].idxmax()
except ValueError:
continue
else:
yield maximum
cycleStarts = pd.Index(list(locateMaxima()))
return cycleStarts
def findFlowCycles(curve):
series = curve.series
bincycles = (series > series.min()).astype(int)
(idxstarts,) = (bincycles.diff().shift(-1) > 0).to_numpy().nonzero()
(idxstops,) = (bincycles.diff() < 0).to_numpy().nonzero()
cycleStarts = series.iloc[idxstarts]
cycleStops = series.iloc[idxstops]
# Handle the case where we start within a cycle
try:
cycleStops = cycleStops[cycleStops.index > cycleStarts.index[0]]
except IndexError as e:
raise TypeError(f'No cycle detected: {e}')
return (cycleStarts.index, cycleStops.index)
def findPressureCycles(curve):
series = curve.series
cycles = []
starts, durations = curve.getCycleIndices()
for start, duration in zip(starts, durations):
stop = start + duration
diastop = start - duration
dia = findPOI(series, [start, diastop], 'min', windowsize=0.05, forcesign=False)
sbp = findPOI(series, [start, stop], 'max', windowsize=0.05)
cycle = (dia, sbp)
cycles.append(cycle)
return [pd.Index(idx, dtype=np.int64) for idx in zip(*cycles)]
def findPressureFull(curve):
dia, sbp = findPressureCycles(curve)
upstroke_duration = np.abs(sbp - dia)
dia1, sbp1 = truncatevecs([dia[1:], sbp])
dic = findDicProj(curve.series, dia1, sbp1, upstroke_duration)
return [dia, sbp, dic]
# Utility function for point placing
def isbetter(new, ref, kind, forcesign):
if kind == 'max':
condition = new > ref
if forcesign:
condition = condition or (new < 0)
elif kind == 'min':
condition = new < ref
if forcesign:
condition = condition or (new > 0)
else:
raise ValueError(kind)
return condition
def genWindows(soi, interval, windowspan):
    begin, end = interval
    if begin is None or end is None:
        return
    ltr = end > begin
    windowspan *= 1e9  # s to ns
    direction = 1 if ltr else -1
for n in itertools.count():
start = begin + direction * n * windowspan
stop = start + direction * windowspan
# Stop condition if we exceed end
if ltr:
if stop >= end:
stop = end
else:
if stop <= end:
stop = end
start, stop = (stop, start)
window = soi.loc[start:stop]
if len(window) < 1:
return
yield window.index.values
def findPOI(soi, interval, kind, windowsize, forcesign=True):
if kind not in ['min', 'max']:
raise ValueError(kind)
argkind = 'idx' + kind
goodwindow = []
previous = -np.inf if kind == 'max' else np.inf
for window in genWindows(soi, interval, windowsize):
zoi = soi.loc[window]
new = getattr(zoi, kind)()
if isbetter(new, previous, kind, forcesign):
goodwindow = window
else:
break
previous = new
finalzoi = soi.loc[goodwindow]
try:
retidx = getattr(finalzoi, argkind)()
except ValueError:
# No POI found
retidx = None
return retidx
def findPOIGreedy(soi, start, kind):
if kind not in ['min', 'max']:
raise ValueError(kind)
loc = soi.index.get_loc(start, method='nearest')
# Find direction
try:
finddir = soi.iloc[[loc - 1, loc, loc + 1]]
except IndexError:
# We're at the edge of the curve
return start
npminmax = np.argmin if kind == 'min' else np.argmax
direction = npminmax(finddir.values) - 1
if direction == 0:
        # We're already at the local extremum
return start
curloc = loc
while True:
nextloc = curloc + direction
try:
samplesoi = soi.iloc[[curloc, nextloc]]
except IndexError:
# We're at the edge of the curve
break
nextisbetter = npminmax(samplesoi.values)
if not nextisbetter:
            # Extremum found
break
curloc = nextloc
return soi.index[curloc]
def distance(l1, l2, p):
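    # Signed perpendicular distance from point(s) p to the line through l1 and l2:
    # the cross product gives the area of the parallelogram spanned by (l2 - l1)
    # and (p - l1); dividing by |l2 - l1| turns that area into a height.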
return np.cross(l2 - l1, p - l1) / np.linalg.norm(l2 - l1)
def findDicProj(series, dia, sbp, upstroke_duration):
dics = []
for si, di, up in zip(sbp, dia, upstroke_duration):
zoi = series.loc[si:di]
if len(zoi) < 1:
continue
p1 = np.array([si, zoi.iloc[0]])
p2 = np.array([di, zoi.iloc[-1]])
p3 = np.vstack([zoi.index, zoi.values]).transpose()
d = distance(p1, p2, p3)
# Limit search zone to the beginning of the segment
search_len = len(series.loc[si : si + 2 * up])
search_zone = d[0:search_len]
argmin = np.argmin(search_zone)
dics.append(zoi.index[argmin])
return pd.Index(dics)
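# A minimal usage sketch (illustration only, not part of graphysio): the functions
# above only rely on a curve object exposing a pandas Series (indexed in
# nanoseconds) and a samplerate in Hz, so a namedtuple stands in for a real curve.
if __name__ == '__main__':
    from collections import namedtuple
    MockCurve = namedtuple('MockCurve', ['series', 'samplerate'])
    samplerate = 100  # Hz
    t = np.arange(0, 10, 1.0 / samplerate)
    # Crude arterial-pressure-like waveform: a rectified sine riding on a baseline.
    pressure = 80 + 40 * np.clip(np.sin(2 * np.pi * 1.2 * t), 0, None)
    index = (t * 1e9).astype(np.int64)  # nanosecond index, as genWindows assumes
    curve = MockCurve(series=pd.Series(pressure, index=index), samplerate=samplerate)
    feet = findPressureFeet(curve)
    print('{} pressure feet detected'.format(len(feet)))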
| |
# -*- coding: UTF-8 -*-
import sys
from vulkan import *
from PySide2 import (QtGui, QtCore)
validationLayers = [
'VK_LAYER_LUNARG_standard_validation'
]
enableValidationLayers = True
class InstanceProcAddr(object):
T = None
def __init__(self, func):
self.__func = func
def __call__(self, *args, **kwargs):
funcName = self.__func.__name__
func = InstanceProcAddr.procfunc(funcName)
if func:
return func(*args, **kwargs)
else:
return VK_ERROR_EXTENSION_NOT_PRESENT
@staticmethod
def procfunc(funcName):
return vkGetInstanceProcAddr(InstanceProcAddr.T, funcName)
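# The InstanceProcAddr class above is a thin dispatch layer: extension entry
# points such as vkCreateDebugReportCallbackEXT are not exported by the loader
# directly, so each decorated stub below is resolved at call time through
# vkGetInstanceProcAddr against the instance stored in InstanceProcAddr.T.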
@InstanceProcAddr
def vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroyDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroySurfaceKHR(instance, surface, pAllocator):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface):
pass
# device ext functions
def debugCallback(*args):
print('DEBUG: {} {}'.format(args[5], args[6]))
return 0
class Win32misc(object):
@staticmethod
def getInstance(hWnd):
from cffi import FFI as _FFI
_ffi = _FFI()
_ffi.cdef('long __stdcall GetWindowLongA(void* hWnd, int nIndex);')
_lib = _ffi.dlopen('User32.dll')
return _lib.GetWindowLongA(_ffi.cast('void*', hWnd), -6) # GWL_HINSTANCE
class QueueFamilyIndices(object):
def __init__(self):
self.graphicsFamily = -1
self.presentFamily = -1
@property
def isComplete(self):
return self.graphicsFamily >= 0 and self.presentFamily >= 0
class HelloTriangleApplication(QtGui.QWindow):
def __init__(self):
super(HelloTriangleApplication, self).__init__()
self.setWidth(1280)
self.setHeight(720)
self.setTitle("Vulkan Python - PySide2")
#self.setSurfaceType(self.OpenGLSurface)
self.__instance = None
        self.__callback = None
self.__surface = None
self.__physicalDevice = None
self.__device = None
self.__graphicQueue = None
self.__presentQueue = None
self.__indices = QueueFamilyIndices()
self.initVulkan()
def __del__(self):
if self.__surface:
vkDestroySurfaceKHR(self.__instance, self.__surface, None)
if self.__device:
vkDestroyDevice(self.__device, None)
        if self.__callback:
            vkDestroyDebugReportCallbackEXT(self.__instance, self.__callback, None)
if self.__instance:
vkDestroyInstance(self.__instance, None)
print('instance destroyed')
def initVulkan(self):
        self.__createInstance()
self.__setupDebugCallback()
self.__createSurface()
self.__pickPhysicalDevice()
self.__createLogicalDevice()
    def __createInstance(self):
if enableValidationLayers and not self.__checkValidationLayerSupport():
raise Exception("validation layers requested, but not available!")
appInfo = VkApplicationInfo(
# sType=VK_STRUCTURE_TYPE_APPLICATION_INFO,
pApplicationName='Python VK',
applicationVersion=VK_MAKE_VERSION(1, 0, 0),
pEngineName='pyvulkan',
engineVersion=VK_MAKE_VERSION(1, 0, 0),
apiVersion=VK_API_VERSION
)
        extensions = self.__getRequiredExtensions()
if enableValidationLayers:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
                enabledExtensionCount=len(extensions),
                ppEnabledExtensionNames=extensions
)
else:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
enabledLayerCount=0,
                enabledExtensionCount=len(extensions),
                ppEnabledExtensionNames=extensions
)
self.__instance = vkCreateInstance(instanceInfo, None)
InstanceProcAddr.T = self.__instance
def __setupDebugCallback(self):
if not enableValidationLayers:
return
createInfo = VkDebugReportCallbackCreateInfoEXT(
flags=VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT,
pfnCallback=debugCallback
)
        self.__callback = vkCreateDebugReportCallbackEXT(self.__instance, createInfo, None)
def __createSurface(self):
if sys.platform == 'win32':
hwnd = self.winId()
hinstance = Win32misc.getInstance(hwnd)
createInfo = VkWin32SurfaceCreateInfoKHR(
hinstance=hinstance,
hwnd=hwnd
)
self.__surface = vkCreateWin32SurfaceKHR(self.__instance, createInfo, None)
# elif sys.platform == 'linux':
# pass
def __pickPhysicalDevice(self):
physicalDevices = vkEnumeratePhysicalDevices(self.__instance)
for device in physicalDevices:
if self.__isDeviceSuitable(device):
self.__physicalDevice = device
break
        assert self.__physicalDevice is not None
def __createLogicalDevice(self):
self.__indices = self.__findQueueFamilies(self.__physicalDevice)
uniqueQueueFamilies = {}.fromkeys([self.__indices.graphicsFamily, self.__indices.presentFamily])
queueCreateInfos = []
for i in uniqueQueueFamilies:
queueCreateInfo = VkDeviceQueueCreateInfo(
queueFamilyIndex=i,
queueCount=1,
pQueuePriorities=[1.0]
)
queueCreateInfos.append(queueCreateInfo)
deviceFeatures = VkPhysicalDeviceFeatures()
if enableValidationLayers:
createInfo = VkDeviceCreateInfo(
queueCreateInfoCount=len(queueCreateInfos),
pQueueCreateInfos=queueCreateInfos,
enabledExtensionCount=0,
enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
pEnabledFeatures=deviceFeatures
)
else:
createInfo = VkDeviceCreateInfo(
                queueCreateInfoCount=len(queueCreateInfos),
                pQueueCreateInfos=queueCreateInfos,
enabledExtensionCount=0,
enabledLayerCount=0,
pEnabledFeatures=deviceFeatures
)
self.__device = vkCreateDevice(self.__physicalDevice, createInfo, None)
self.__graphicQueue = vkGetDeviceQueue(self.__device, self.__indices.graphicsFamily, 0)
self.__presentQueue = vkGetDeviceQueue(self.__device, self.__indices.presentFamily, 0)
def __isDeviceSuitable(self, device):
indices = self.__findQueueFamilies(device)
return indices.isComplete
def __findQueueFamilies(self, device):
indices = QueueFamilyIndices()
familyProperties = vkGetPhysicalDeviceQueueFamilyProperties(device)
for i, prop in enumerate(familyProperties):
if prop.queueCount > 0 and prop.queueFlags & VK_QUEUE_GRAPHICS_BIT:
indices.graphicsFamily = i
presentSupport = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, self.__surface)
if prop.queueCount > 0 and presentSupport:
indices.presentFamily = i
if indices.isComplete:
break
return indices
def __getRequiredExtensions(self):
        extensions = [e.extensionName for e in vkEnumerateInstanceExtensionProperties(None)]
        if enableValidationLayers:
            extensions.append(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)
        return extensions
    def __checkValidationLayerSupport(self):
        availableLayers = vkEnumerateInstanceLayerProperties()
        for layer in validationLayers:
            layerfound = False
            for layerProp in availableLayers:
                if layer == layerProp.layerName:
                    layerfound = True
                    break
            if not layerfound:
                return False
        return True
if __name__ == '__main__':
import sys
app = QtGui.QGuiApplication(sys.argv)
win = HelloTriangleApplication()
win.show()
    def cleanup():
global win
del win
    app.aboutToQuit.connect(cleanup)
sys.exit(app.exec_())
| |
#!/usr/bin/env python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala's shell
import cmd
import errno
import getpass
import os
import prettytable
import re
import shlex
import signal
import socket
import sqlparse
import sys
import time
from impala_client import (ImpalaClient, DisconnectedException, QueryStateException,
RPCException, TApplicationException)
from impala_shell_config_defaults import impala_shell_defaults
from option_parser import get_option_parser, get_config_from_file
from shell_output import DelimitedOutputFormatter, OutputStream, PrettyOutputFormatter
from subprocess import call
VERSION_FORMAT = "Impala Shell v%(version)s (%(git_hash)s) built on %(build_date)s"
VERSION_STRING = "build version not available"
HISTORY_LENGTH = 100
# Tarball / packaging build makes impala_build_version available
try:
from impala_build_version import get_git_hash, get_build_date, get_version
VERSION_STRING = VERSION_FORMAT % {'version': get_version(),
'git_hash': get_git_hash()[:7],
'build_date': get_build_date()}
except Exception:
pass
class CmdStatus:
"""Values indicate the execution status of a command to the cmd shell driver module
SUCCESS and ERROR continue running the shell and ABORT exits the shell
Since SUCCESS == None, successful commands do not need to explicitly return
anything on completion
"""
SUCCESS = None
ABORT = True
ERROR = False
class ImpalaPrettyTable(prettytable.PrettyTable):
"""Patched version of PrettyTable that handles utf-8 characters by replacing them with a
placeholder, rather than ignoring them entirely"""
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
      # If a value cannot be decoded, replace it with a placeholder.
value = unicode(value, self.encoding, "replace")
return value
class ImpalaShell(cmd.Cmd):
""" Simple Impala Shell.
Basic usage: type connect <host:port> to connect to an impalad
Then issue queries or other commands. Tab-completion should show the set of
available commands.
  Methods that implement shell commands return a boolean tuple (stop, status).
  stop is a flag the command loop uses to continue/discontinue the prompt.
  status tells the caller whether the command completed successfully.
"""
# If not connected to an impalad, the server version is unknown.
UNKNOWN_SERVER_VERSION = "Not Connected"
DISCONNECTED_PROMPT = "[Not connected] > "
# Error and warning that is printed by cancel_query
CANCELLATION_ERROR = 'Cancelled'
# Message to display in shell when cancelling a query
CANCELLATION_MESSAGE = ' Cancelling Query'
# Commands are terminated with the following delimiter.
CMD_DELIM = ';'
DEFAULT_DB = 'default'
# Regex applied to all tokens of a query to detect the query type.
INSERT_REGEX = re.compile("^insert$", re.I)
  # Separator for queries in the history file.
HISTORY_FILE_QUERY_DELIM = '_IMP_DELIM_'
def __init__(self, options):
cmd.Cmd.__init__(self)
self.is_alive = True
self.impalad = None
self.use_kerberos = options.use_kerberos
self.kerberos_service_name = options.kerberos_service_name
self.use_ssl = options.ssl
self.ca_cert = options.ca_cert
self.user = options.user
self.ldap_password = None;
self.use_ldap = options.use_ldap
self.verbose = options.verbose
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
self.server_version = ImpalaShell.UNKNOWN_SERVER_VERSION
self.refresh_after_connect = options.refresh_after_connect
self.current_db = options.default_db
self.history_file = os.path.expanduser("~/.impalahistory")
# Stores the state of user input until a delimiter is seen.
self.partial_cmd = str()
# Stores the old prompt while the user input is incomplete.
self.cached_prompt = str()
self.show_profiles = options.show_profiles
# Output formatting flags/options
self.output_file = options.output_file
self.output_delimiter = options.output_delimiter
self.write_delimited = options.write_delimited
self.print_header = options.print_header
self.set_query_options = {}
self._populate_command_list()
self.imp_client = None;
# Tracks query handle of the last query executed. Used by the 'profile' command.
self.last_query_handle = None;
self.query_handle_closed = None
try:
self.readline = __import__('readline')
self.readline.set_history_length(HISTORY_LENGTH)
except ImportError:
self._disable_readline()
if options.use_ldap:
self.ldap_password = getpass.getpass("LDAP password for %s:" % self.user)
if options.impalad != None:
self.do_connect(options.impalad)
# We handle Ctrl-C ourselves, using an Event object to signal cancellation
# requests between the handler and the main shell thread.
signal.signal(signal.SIGINT, self._signal_handler)
def _populate_command_list(self):
"""Populate a list of commands in the shell.
Each command has its own method of the form do_<command>, and can be extracted by
    introspecting the class with dir().
"""
# Slice the command method name to get the name of the command.
self.commands = [cmd[3:] for cmd in dir(self.__class__) if cmd.startswith('do_')]
def _disable_readline(self):
"""Disables the readline module.
The readline module is responsible for keeping track of command history.
"""
self.readline = None
def _print_options(self, default_options, set_options):
# Prints the current query options
# with default values distinguished from set values by brackets []
if not default_options and not set_options:
print '\tNo options available.'
else:
for k in sorted(default_options.keys()):
if k in set_options.keys() and set_options[k] != default_options[k]:
print '\n'.join(["\t%s: %s" % (k, set_options[k])])
else:
print '\n'.join(["\t%s: [%s]" % (k, default_options[k])])
def do_shell(self, args):
"""Run a command on the shell
Usage: shell <cmd>
! <cmd>
"""
try:
start_time = time.time()
os.system(args)
self._print_if_verbose("--------\nExecuted in %2.2fs" % (time.time() - start_time))
except Exception, e:
print_to_stderr('Error running command : %s' % e)
return CmdStatus.ERROR
def sanitise_input(self, args, interactive=True):
"""Convert the command to lower case, so it's recognized"""
# A command terminated by a semi-colon is legal. Check for the trailing
# semi-colons and strip them from the end of the command.
args = args.strip()
tokens = args.split(' ')
if not interactive:
tokens[0] = tokens[0].lower()
# Strip all the non-interactive commands of the delimiter.
return ' '.join(tokens).rstrip(ImpalaShell.CMD_DELIM)
# The first token is converted into lower case to route it to the
# appropriate command handler. This only applies to the first line of user input.
# Modifying tokens in subsequent lines may change the semantics of the command,
# so do not modify the text.
if not self.partial_cmd:
# The first token is the command.
# If it's EOF, call do_quit()
if tokens[0] == 'EOF':
return 'quit'
else:
tokens[0] = tokens[0].lower()
elif tokens[0] == "EOF":
# If a command is in progress and the user hits a Ctrl-D, clear its state
# and reset the prompt.
self.prompt = self.cached_prompt
self.partial_cmd = str()
# The print statement makes the new prompt appear in a new line.
# Also print an extra newline to indicate that the current command has
# been cancelled.
print '\n'
return str()
args = self._check_for_command_completion(' '.join(tokens).strip())
return args.rstrip(ImpalaShell.CMD_DELIM)
def _shlex_split(self, line):
"""Reimplement shlex.split() so that escaped single quotes
are actually escaped. shlex.split() only escapes double quotes
by default. This method will throw a ValueError if an open
quotation (either single or double) is found.
"""
my_split = shlex.shlex(line, posix=True)
my_split.escapedquotes = '"\''
my_split.whitespace_split = True
my_split.commenters = ''
return list(my_split)
def _cmd_ends_with_delim(self, line):
"""Check if the input command ends with a command delimiter.
A command ending with the delimiter and containing an open quotation character is
not considered terminated. If no open quotation is found, it's considered
terminated.
"""
if line.endswith(ImpalaShell.CMD_DELIM):
try:
# Look for an open quotation in the entire command, and not just the
# current line.
if self.partial_cmd: line = '%s %s' % (self.partial_cmd, line)
self._shlex_split(line)
return True
# If the command ends with a delimiter, check if it has an open quotation.
        # shlex in self._shlex_split() throws a ValueError iff an open quotation is found.
# A quotation can either be a single quote or a double quote.
except ValueError:
pass
# This checks to see if there are any backslashed quotes
# outside of quotes, since backslashed quotes
# outside of single or double quotes should not be escaped.
# Ex. 'abc\'xyz' -> closed because \' is escaped
# \'abcxyz -> open because \' is not escaped
# \'abcxyz' -> closed
# Iterate through the line and switch the state if a single or double quote is found
# and ignore escaped single and double quotes if the line is considered open (meaning
# a previous single or double quote has not been closed yet)
state_closed = True;
opener = None;
for i, char in enumerate(line):
if state_closed and (char in ['\'', '\"']):
state_closed = False
opener = char
elif not state_closed and opener == char:
if line[i - 1] != '\\':
state_closed = True
opener = None;
return state_closed
return False
def _check_for_command_completion(self, cmd):
"""Check for a delimiter at the end of user input.
The end of the user input is scanned for a legal delimiter.
If a delimiter is not found:
      - Input is not sent to onecmd()
- onecmd() is a method in Cmd which routes the user input to the
appropriate method. An empty string results in a no-op.
- Input is removed from history.
- Input is appended to partial_cmd
If a delimiter is found:
- The contents of partial_cmd are put in history, as they represent
a completed command.
- The contents are passed to the appropriate method for execution.
- partial_cmd is reset to an empty string.
"""
if self.readline: current_history_len = self.readline.get_current_history_length()
# Input is incomplete, store the contents and do nothing.
if not self._cmd_ends_with_delim(cmd):
# The user input is incomplete, change the prompt to reflect this.
if not self.partial_cmd and cmd:
self.cached_prompt = self.prompt
self.prompt = '> '.rjust(len(self.cached_prompt))
# partial_cmd is already populated, add the current input after a newline.
if self.partial_cmd and cmd:
self.partial_cmd = "%s\n%s" % (self.partial_cmd, cmd)
else:
# If the input string is empty or partial_cmd is empty.
self.partial_cmd = "%s%s" % (self.partial_cmd, cmd)
# Remove the most recent item from history if:
      # -- The current state of user input is incomplete.
# -- The most recent user input is not an empty string
if self.readline and current_history_len > 0 and cmd:
self.readline.remove_history_item(current_history_len - 1)
# An empty string results in a no-op. Look at emptyline()
return str()
elif self.partial_cmd: # input ends with a delimiter and partial_cmd is not empty
if cmd != ImpalaShell.CMD_DELIM:
completed_cmd = "%s\n%s" % (self.partial_cmd, cmd)
else:
completed_cmd = "%s%s" % (self.partial_cmd, cmd)
# Reset partial_cmd to an empty string
self.partial_cmd = str()
# Replace the most recent history item with the completed command.
completed_cmd = sqlparse.format(completed_cmd)
if self.readline and current_history_len > 0:
self.readline.replace_history_item(current_history_len - 1,
completed_cmd.encode('utf-8'))
# Revert the prompt to its earlier state
self.prompt = self.cached_prompt
else: # Input has a delimiter and partial_cmd is empty
completed_cmd = sqlparse.format(cmd)
return completed_cmd
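  # Hypothetical interactive flow for the method above: a first line "select 1,"
  # has no trailing ';', so it is stored in partial_cmd and the prompt becomes '> ';
  # a second line "2;" ends with the delimiter, so "select 1,\n2;" is assembled,
  # written back into readline history, and returned for execution.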
def _signal_handler(self, signal, frame):
"""Handles query cancellation on a Ctrl+C event"""
if self.last_query_handle is None or self.query_handle_closed:
return
# Create a new connection to the impalad and cancel the query.
try:
self.query_handle_closed = True
print_to_stderr(ImpalaShell.CANCELLATION_MESSAGE)
new_imp_client = ImpalaClient(self.impalad)
new_imp_client.connect()
new_imp_client.cancel_query(self.last_query_handle, False)
self._validate_database()
except Exception, e:
print_to_stderr("Failed to reconnect and close: %s" % str(e))
# TODO: Add a retry here
def precmd(self, args):
args = self.sanitise_input(args)
if not args: return args
# Split args using sqlparse. If there are multiple queries present in user input,
# the length of the returned query list will be greater than one.
parsed_cmds = sqlparse.split(args)
if len(parsed_cmds) > 1:
# The last command needs a delimiter to be successfully executed.
parsed_cmds[-1] += ImpalaShell.CMD_DELIM
self.cmdqueue.extend(parsed_cmds)
# If cmdqueue is populated, then commands are executed from the cmdqueue, and user
# input is ignored. Send an empty string as the user input just to be safe.
return str()
return args.encode('utf-8')
def postcmd(self, status, args):
# status conveys to shell how the shell should continue execution
# should always be a CmdStatus
return status
def do_summary(self, args):
summary = None
try:
summary = self.imp_client.get_summary(self.last_query_handle)
except RPCException:
pass
if summary is None:
print_to_stderr("Could not retrieve summary for query.")
return CmdStatus.ERROR
if summary.nodes is None:
print_to_stderr("Summary not available")
return CmdStatus.SUCCESS
output = []
table = self.construct_table_header(["Operator", "#Hosts", "Avg Time", "Max Time",
"#Rows", "Est. #Rows", "Peak Mem",
"Est. Peak Mem", "Detail"])
self.imp_client.build_summary_table(summary, 0, False, 0, False, output)
formatter = PrettyOutputFormatter(table)
self.output_stream = OutputStream(formatter, filename=self.output_file)
self.output_stream.write(output)
def do_set(self, args):
"""Set or display query options.
Display query options:
Usage: SET
Set query options:
Usage: SET <option>=<value>
"""
# TODO: Expand set to allow for setting more than just query options.
if len(args) == 0:
print "Query options (defaults shown in []):"
self._print_options(self.imp_client.default_query_options, self.set_query_options);
return CmdStatus.SUCCESS
# Remove any extra spaces surrounding the tokens.
# Allows queries that have spaces around the = sign.
tokens = [arg.strip() for arg in args.split("=")]
if len(tokens) != 2:
print_to_stderr("Error: SET <option>=<value>")
return CmdStatus.ERROR
option_upper = tokens[0].upper()
if option_upper not in self.imp_client.default_query_options.keys():
print "Unknown query option: %s" % (tokens[0])
print "Available query options, with their values (defaults shown in []):"
self._print_options(self.imp_client.default_query_options, self.set_query_options)
return CmdStatus.ERROR
self.set_query_options[option_upper] = tokens[1]
self._print_if_verbose('%s set to %s' % (option_upper, tokens[1]))
def do_unset(self, args):
"""Unset a query option"""
if len(args.split()) != 1:
print 'Usage: unset <option>'
return CmdStatus.ERROR
option = args.upper()
if self.set_query_options.get(option):
print 'Unsetting %s' % option
del self.set_query_options[option]
else:
print "No option called %s is set" % args
def do_quit(self, args):
"""Quit the Impala shell"""
self._print_if_verbose("Goodbye " + self.user)
self.is_alive = False
return CmdStatus.ABORT
def do_exit(self, args):
"""Exit the impala shell"""
return self.do_quit(args)
def do_connect(self, args):
"""Connect to an Impalad instance:
Usage: connect, defaults to the fqdn of the localhost and port 21000
connect <hostname:port>
connect <hostname>, defaults to port 21000
"""
# Assume the user wants to connect to the local impalad if no connection string is
    # specified. Connecting to a kerberized impalad requires an fqdn as the host name.
if not args: args = socket.getfqdn()
tokens = args.split(" ")
# validate the connection string.
host_port = [val for val in tokens[0].split(':') if val.strip()]
if (':' in tokens[0] and len(host_port) != 2):
print_to_stderr("Connection string must either be empty, or of the form "
"<hostname[:port]>")
return CmdStatus.ERROR
elif len(host_port) == 1:
host_port.append('21000')
self.impalad = tuple(host_port)
if self.imp_client: self.imp_client.close_connection()
self.imp_client = ImpalaClient(self.impalad, self.use_kerberos,
self.kerberos_service_name, self.use_ssl,
self.ca_cert, self.user, self.ldap_password,
self.use_ldap)
self._connect()
# If the connection fails and the Kerberos has not been enabled,
# check for a valid kerberos ticket and retry the connection
# with kerberos enabled.
if not self.imp_client.connected and not self.use_kerberos:
try:
if call(["klist", "-s"]) == 0:
print_to_stderr(("Kerberos ticket found in the credentials cache, retrying "
"the connection with a secure transport."))
self.imp_client.use_kerberos = True
self._connect()
except OSError, e:
pass
if self.imp_client.connected:
self._print_if_verbose('Connected to %s:%s' % self.impalad)
self._print_if_verbose('Server version: %s' % self.server_version)
self.prompt = "[%s:%s] > " % self.impalad
if self.refresh_after_connect:
self.cmdqueue.append('invalidate metadata' + ImpalaShell.CMD_DELIM)
print_to_stderr("Invalidating Metadata")
self._validate_database()
try:
self.imp_client.build_default_query_options_dict()
except RPCException, e:
print_to_stderr(e)
# In the case that we lost connection while a command was being entered,
# we may have a dangling command, clear partial_cmd
self.partial_cmd = str()
# Check if any of query options set by the user are inconsistent
# with the impalad being connected to
for set_option in self.set_query_options.keys():
if set_option not in set(self.imp_client.default_query_options.keys()):
print ('%s is not supported for the impalad being '
'connected to, ignoring.' % set_option)
del self.set_query_options[set_option]
def _connect(self):
try:
server_version = self.imp_client.connect()
if server_version:
self.server_version = server_version
except TApplicationException:
# We get a TApplicationException if the transport is valid,
# but the RPC does not exist.
print_to_stderr("Error: Unable to communicate with impalad service. This "
"service may not be an impalad instance. Check host:port and try again.")
self.imp_client.close_connection()
raise
except ImportError:
print_to_stderr(("Unable to import the python 'ssl' module. It is"
" required for an SSL-secured connection."))
sys.exit(1)
except socket.error, (code, e):
# if the socket was interrupted, reconnect the connection with the client
if code == errno.EINTR:
        self._reconnect_cancellation()
else:
print_to_stderr("Socket error %s: %s" % (code, e))
self.prompt = self.DISCONNECTED_PROMPT
except Exception, e:
print_to_stderr("Error connecting: %s, %s" % (type(e).__name__, e))
# If a connection to another impalad failed while already connected
# reset the prompt to disconnected.
self.server_version = self.UNKNOWN_SERVER_VERSION
self.prompt = self.DISCONNECTED_PROMPT
def _reconnect_cancellation(self):
self._connect()
self._validate_database()
def _validate_database(self):
if self.current_db:
self.current_db = self.current_db.strip('`')
self.cmdqueue.append(('use `%s`' % self.current_db) + ImpalaShell.CMD_DELIM)
def _print_if_verbose(self, message):
if self.verbose:
print_to_stderr(message)
def print_runtime_profile(self, profile, status=False):
if self.show_profiles or status:
if profile is not None:
print "Query Runtime Profile:\n" + profile
def _parse_table_name_arg(self, arg):
""" Parses an argument string and returns the result as a db name, table name combo.
If the table name was not fully qualified, the current database is returned as the db.
Otherwise, the table is split into db/table name parts and returned.
If an invalid format is provided, None is returned.
"""
if not arg: return
# If a multi-line argument, the name might be split across lines
arg = arg.replace('\n', '')
# Get the database and table name, using the current database if the table name
# wasn't fully qualified.
db_name, tbl_name = self.current_db, arg
if db_name is None:
db_name = ImpalaShell.DEFAULT_DB
db_table_name = arg.split('.')
if len(db_table_name) == 1:
return db_name, db_table_name[0]
if len(db_table_name) == 2:
return db_table_name
def do_alter(self, args):
query = self.imp_client.create_beeswax_query("alter %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_create(self, args):
query = self.imp_client.create_beeswax_query("create %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_drop(self, args):
query = self.imp_client.create_beeswax_query("drop %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_load(self, args):
query = self.imp_client.create_beeswax_query("load %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_profile(self, args):
"""Prints the runtime profile of the last INSERT or SELECT query executed."""
if len(args) > 0:
print_to_stderr("'profile' does not accept any arguments")
return CmdStatus.ERROR
elif self.last_query_handle is None:
print_to_stderr('No previous query available to profile')
return CmdStatus.ERROR
profile = self.imp_client.get_runtime_profile(self.last_query_handle)
return self.print_runtime_profile(profile, True)
def do_select(self, args):
"""Executes a SELECT... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("select %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def _format_outputstream(self):
column_names = self.imp_client.get_column_names(self.last_query_handle)
if self.write_delimited:
formatter = DelimitedOutputFormatter(field_delim=self.output_delimiter)
self.output_stream = OutputStream(formatter, filename=self.output_file)
# print the column names
if self.print_header:
self.output_stream.write([column_names])
else:
prettytable = self.construct_table_header(column_names)
formatter = PrettyOutputFormatter(prettytable)
self.output_stream = OutputStream(formatter, filename=self.output_file)
def _execute_stmt(self, query, is_insert=False):
""" The logic of executing any query statement
The client executes the query and the query_handle is returned immediately,
even as the client waits for the query to finish executing.
If the query was not an insert, the results are fetched from the client
as they are streamed in, through the use of a generator.
The execution time is printed and the query is closed if it hasn't been already
"""
try:
self._print_if_verbose("Query: %s" % (query.query,))
start_time = time.time()
self.last_query_handle = self.imp_client.execute_query(query)
self.query_handle_closed = False
wait_to_finish = self.imp_client.wait_to_finish(self.last_query_handle)
# retrieve the error log
warning_log = self.imp_client.get_warning_log(self.last_query_handle)
if is_insert:
num_rows = self.imp_client.close_insert(self.last_query_handle)
else:
# impalad does not support the fetching of metadata for certain types of queries.
if not self.imp_client.expect_result_metadata(query.query):
# Close the query
self.imp_client.close_query(self.last_query_handle)
self.query_handle_closed = True
return CmdStatus.SUCCESS
self._format_outputstream()
# fetch returns a generator
rows_fetched = self.imp_client.fetch(self.last_query_handle)
num_rows = 0
for rows in rows_fetched:
self.output_stream.write(rows)
num_rows += len(rows)
end_time = time.time()
if warning_log:
self._print_if_verbose(warning_log)
# print insert when is_insert is true (which is 1)
# print fetch when is_insert is false (which is 0)
verb = ["Fetch", "Insert"][is_insert]
self._print_if_verbose("%sed %d row(s) in %2.2fs" % (verb, num_rows,
end_time - start_time))
if not is_insert:
self.imp_client.close_query(self.last_query_handle, self.query_handle_closed)
self.query_handle_closed = True
profile = self.imp_client.get_runtime_profile(self.last_query_handle)
self.print_runtime_profile(profile)
return CmdStatus.SUCCESS
except RPCException, e:
# could not complete the rpc successfully
# suppress error if reason is cancellation
if self._no_cancellation_error(e):
print_to_stderr(e)
except QueryStateException, e:
# an exception occurred while executing the query
if self._no_cancellation_error(e):
self.imp_client.close_query(self.last_query_handle, self.query_handle_closed)
print_to_stderr(e)
except DisconnectedException, e:
# the client has lost the connection
print_to_stderr(e)
self.imp_client.connected = False
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
except socket.error, (code, e):
# if the socket was interrupted, reconnect the connection with the client
if code == errno.EINTR:
print ImpalaShell.CANCELLATION_MESSAGE
self._reconnect_cancellation()
else:
print_to_stderr("Socket error %s: %s" % (code, e))
self.prompt = self.DISCONNECTED_PROMPT
self.imp_client.connected = False
except Exception, u:
# if the exception is unknown, there was possibly an issue with the connection
# set the shell as disconnected
print_to_stderr('Unknown Exception : %s' % (u,))
self.imp_client.connected = False
self.prompt = ImpalaShell.DISCONNECTED_PROMPT
return CmdStatus.ERROR
def _no_cancellation_error(self, error):
if ImpalaShell.CANCELLATION_ERROR not in str(error):
return True
def construct_table_header(self, column_names):
""" Constructs the table header for a given query handle.
Should be called after the query has finished and before data is fetched.
All data is left aligned.
"""
table = ImpalaPrettyTable()
for column in column_names:
# Column names may be encoded as utf-8
table.add_column(column.decode('utf-8', 'ignore'), [])
table.align = "l"
return table
def do_values(self, args):
"""Executes a VALUES(...) query, fetching all rows"""
query = self.imp_client.create_beeswax_query("values %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_with(self, args):
"""Executes a query with a WITH clause, fetching all rows"""
query = self.imp_client.create_beeswax_query("with %s" % args,
self.set_query_options)
# Set posix=True and add "'" to escaped quotes
# to deal with escaped quotes in string literals
lexer = shlex.shlex(query.query.lstrip(), posix=True)
lexer.escapedquotes += "'"
# Because the WITH clause may precede INSERT or SELECT queries,
# just checking the first token is insufficient.
is_insert = False
tokens = list(lexer)
if filter(self.INSERT_REGEX.match, tokens): is_insert = True
return self._execute_stmt(query, is_insert=is_insert)
def do_use(self, args):
"""Executes a USE... query"""
query = self.imp_client.create_beeswax_query("use %s" % args,
self.set_query_options)
if self._execute_stmt(query) is CmdStatus.SUCCESS:
self.current_db = args
else:
return CmdStatus.ERROR
def do_show(self, args):
"""Executes a SHOW... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("show %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_describe(self, args):
"""Executes a DESCRIBE... query, fetching all rows"""
query = self.imp_client.create_beeswax_query("describe %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_desc(self, args):
return self.do_describe(args)
def do_insert(self, args):
"""Executes an INSERT query"""
query = self.imp_client.create_beeswax_query("insert %s" % args,
self.set_query_options)
return self._execute_stmt(query, is_insert=True)
def do_explain(self, args):
"""Explain the query execution plan"""
query = self.imp_client.create_beeswax_query("explain %s" % args,
self.set_query_options)
return self._execute_stmt(query)
def do_history(self, args):
"""Display command history"""
    # Deal with readline peculiarity. When history does not exist,
# readline returns 1 as the history length and stores 'None' at index 0.
if self.readline and self.readline.get_current_history_length() > 0:
for index in xrange(1, self.readline.get_current_history_length() + 1):
cmd = self.readline.get_history_item(index)
print_to_stderr('[%d]: %s' % (index, cmd))
else:
print_to_stderr("The readline module was either not found or disabled. Command "
"history will not be collected.")
def preloop(self):
"""Load the history file if it exists"""
if self.readline:
# The history file is created when the Impala shell is invoked and commands are
# issued. In the first invocation of the shell, the history file will not exist.
# Clearly, this is not an error, return.
if not os.path.exists(self.history_file): return
try:
self.readline.read_history_file(self.history_file)
self._replace_history_delimiters(ImpalaShell.HISTORY_FILE_QUERY_DELIM, '\n')
except IOError, i:
msg = "Unable to load command history (disabling history collection): %s" % i
print_to_stderr(msg)
# This history file exists but is not readable, disable readline.
self._disable_readline()
def postloop(self):
"""Save session commands in history."""
if self.readline:
try:
self._replace_history_delimiters('\n', ImpalaShell.HISTORY_FILE_QUERY_DELIM)
self.readline.write_history_file(self.history_file)
except IOError, i:
msg = "Unable to save command history (disabling history collection): %s" % i
print_to_stderr(msg)
# The history file is not writable, disable readline.
self._disable_readline()
def _replace_history_delimiters(self, src_delim, tgt_delim):
"""Replaces source_delim with target_delim for all items in history.
Read all the items from history into a local list. Clear the history and copy them
back after doing the transformation.
"""
history_len = self.readline.get_current_history_length()
# load the history and replace the shell's delimiter with EOL
history_items = map(self.readline.get_history_item, xrange(1, history_len + 1))
history_items = [item.replace(src_delim, tgt_delim) for item in history_items]
# Clear the original history and replace it with the mutated history.
self.readline.clear_history()
for history_item in history_items:
self.readline.add_history(history_item)
def default(self, args):
query = self.imp_client.create_beeswax_query(args, self.set_query_options)
return self._execute_stmt(query)
def emptyline(self):
"""If an empty line is entered, do nothing"""
def do_version(self, args):
"""Prints the Impala build version"""
print_to_stderr("Shell version: %s" % VERSION_STRING)
print_to_stderr("Server version: %s" % self.server_version)
def completenames(self, text, *ignored):
"""Make tab completion of commands case agnostic
Override the superclass's completenames() method to support tab completion for
upper case and mixed case commands.
"""
cmd_names = [cmd for cmd in self.commands if cmd.startswith(text.lower())]
# If the user input is upper case, return commands in upper case.
    if text.isupper(): return [name.upper() for name in cmd_names]
# If the user input is lower case or mixed case, return lower case commands.
return cmd_names
WELCOME_STRING = """Welcome to the Impala shell. Press TAB twice to see a list of \
available commands.
Copyright (c) 2012 Cloudera, Inc. All rights reserved.
(Shell build version: %s)""" % VERSION_STRING
def print_to_stderr(message):
print >> sys.stderr, message
def parse_query_text(query_text, utf8_encode_policy='strict'):
"""Parse query file text to extract queries and encode into utf-8"""
return [q.encode('utf-8', utf8_encode_policy) for q in sqlparse.split(query_text)]
def execute_queries_non_interactive_mode(options):
"""Run queries in non-interactive mode."""
queries = []
if options.query_file:
try:
query_file_handle = open(options.query_file, 'r')
queries = parse_query_text(query_file_handle.read())
query_file_handle.close()
except Exception, e:
print_to_stderr('Error: %s' % e)
sys.exit(1)
elif options.query:
queries = parse_query_text(options.query)
shell = ImpalaShell(options)
# The impalad was specified on the command line and the connection failed.
# Return with an error, no need to process the query.
  if options.impalad and not shell.imp_client.connected:
sys.exit(1)
queries = shell.cmdqueue + queries
# Deal with case.
sanitized_queries = []
for query in queries:
sanitized_queries.append(shell.sanitise_input(query, interactive=False))
for query in sanitized_queries:
# check if an error was encountered
if shell.onecmd(query) is CmdStatus.ERROR:
print_to_stderr('Could not execute command: %s' % query)
if not options.ignore_query_failure:
sys.exit(1)
if __name__ == "__main__":
# pass defaults into option parser
parser = get_option_parser(impala_shell_defaults)
options, args = parser.parse_args()
# use path to file specified by user in config_file option
  user_config = os.path.expanduser(options.config_file)
# by default, use the .impalarc in the home directory
config_to_load = impala_shell_defaults.get("config_file")
# verify user_config, if found
if os.path.isfile(user_config) and user_config != config_to_load:
if options.verbose:
print_to_stderr("Loading in options from config file: %s \n" % user_config)
# Command line overrides loading ~/.impalarc
config_to_load = user_config
elif user_config != config_to_load:
print_to_stderr('%s not found.\n' % user_config)
sys.exit(1)
# default options loaded in from impala_shell_config_defaults.py
# options defaults overwritten by those in config file
try:
impala_shell_defaults.update(get_config_from_file(config_to_load))
except Exception, e:
msg = "Unable to read configuration file correctly. Check formatting: %s\n" % e
print_to_stderr(msg)
sys.exit(1)
parser = get_option_parser(impala_shell_defaults)
options, args = parser.parse_args()
# Arguments that could not be parsed are stored in args. Print an error and exit.
if len(args) > 0:
    print_to_stderr('Error, could not parse arguments "%s"' % ' '.join(args))
parser.print_help()
sys.exit(1)
if options.version:
print VERSION_STRING
sys.exit(0)
if options.use_kerberos and options.use_ldap:
print_to_stderr("Please specify at most one authentication mechanism (-k or -l)")
sys.exit(1)
if options.use_kerberos:
print_to_stderr("Starting Impala Shell using Kerberos authentication")
print_to_stderr("Using service name '%s'" % options.kerberos_service_name)
# Check if the user has a ticket in the credentials cache
try:
if call(['klist', '-s']) != 0:
print_to_stderr(("-k requires a valid kerberos ticket but no valid kerberos "
"ticket found."))
sys.exit(1)
except OSError, e:
print_to_stderr('klist not found on the system, install kerberos clients')
sys.exit(1)
elif options.use_ldap:
print_to_stderr("Starting Impala Shell using LDAP-based authentication")
else:
print_to_stderr("Starting Impala Shell without Kerberos authentication")
if options.ssl:
if options.ca_cert is None:
print_to_stderr("SSL is enabled. Impala server certificates will NOT be verified"\
" (set --ca_cert to change)")
else:
print_to_stderr("SSL is enabled")
if options.output_file:
try:
# Make sure the given file can be opened for writing. This will also clear the file
# if successful.
open(options.output_file, 'wb')
except IOError, e:
print_to_stderr('Error opening output file for writing: %s' % e)
sys.exit(1)
if options.query or options.query_file:
execute_queries_non_interactive_mode(options)
sys.exit(0)
intro = WELCOME_STRING
shell = ImpalaShell(options)
while shell.is_alive:
try:
try:
shell.cmdloop(intro)
except KeyboardInterrupt:
intro = '\n'
# A last measure against any exceptions thrown by an rpc
# not caught in the shell
except socket.error, (code, e):
# if the socket was interrupted, reconnect the connection with the client
if code == errno.EINTR:
print shell.CANCELLATION_MESSAGE
shell._reconnect_cancellation()
else:
print_to_stderr("Socket error %s: %s" % (code, e))
shell.imp_client.connected = False
shell.prompt = shell.DISCONNECTED_PROMPT
except DisconnectedException, e:
# the client has lost the connection
print_to_stderr(e)
shell.imp_client.connected = False
shell.prompt = shell.DISCONNECTED_PROMPT
except QueryStateException, e:
# an exception occurred while executing the query
if shell._no_cancellation_error(e):
shell.imp_client.close_query(shell.last_query_handle,
shell.query_handle_closed)
print_to_stderr(e)
except RPCException, e:
# could not complete the rpc successfully
# suppress error if reason is cancellation
if shell._no_cancellation_error(e):
print_to_stderr(e)
finally:
intro = ''
| |
import abc
import sqlite3
import os
import os.path
import sys
from pogo.dao.local_db_access import LocalDBAccessor
from pogo.util.config import StretchConfig
class RecordDaoLocal(object):
__metaclass__ = abc.ABCMeta
def __init__(self, localdbaccessor):
if not localdbaccessor:
raise ValueError("RecordDaoLocal object needs a LocalDBAccessor.")
else:
self._dba = localdbaccessor
"""
assemble a SQL insert string appropriate for
this object's class.
The result will be something like
"INSERT INTO attempts ( timestamp, bifrozt_host, source_ip ) VALUES ( ?,?,? )"
"""
def build_insert_query(self):
table_name = self.get_table_name()
insert_fields = self.get_insert_fields()
sql = "INSERT INTO " + table_name + " ( "
sql += ','.join(insert_fields) + " ) VALUES ( "
sql += len(insert_fields) * '?,'
if sql.endswith(','):
sql = sql.rstrip(',')
sql += " )"
return sql
"""
Make a list of values for a record, with the fields
in the same order as in this class's INSERT_FIELDS tuple.
"""
def build_values_list(self, record):
d = record.as_dict()
ret_list = []
for f in self.get_insert_fields():
ret_list.append(d[f])
return ret_list
"""
Insert a single record.
"""
def insert_single(self, record ):
sql = self.build_insert_query()
values_list = self.build_values_list(record)
try:
# self.db_open()
cursor = self._dba.db.cursor()
cursor.execute('BEGIN TRANSACTION')
cursor.execute(sql, values_list )
cursor.execute('COMMIT')
except sqlite3.Error as e: # @UndefinedVariable
cursor.execute('ROLLBACK')
raise e
"""
records should be a tuple of dto records, all of
the type appropriate for this record_dao_local object.
That is, if this is an AttemptRecordDaoLocal, all
the records should be instances of AttemptRecord
"""
def insert_bulk(self, records):
sql = self.build_insert_query()
count_of_written = 0
try:
cursor = self._dba.db.cursor()
cursor.execute('BEGIN TRANSACTION')
for r in records:
values_list = self.build_values_list(r)
cursor.execute(sql, values_list)
                count_of_written += 1
cursor.execute('COMMIT')
return count_of_written
except sqlite3.Error as e: # @UndefinedVariable
cursor.execute('ROLLBACK')
raise e
def list_all(self):
return self.list_where(None)
def list_where(self, where_clause=None):
sql = "SELECT " + self.get_all_fields() + " FROM " + self.get_table_name()
if where_clause:
sql += " WHERE " + where_clause
cursor = self._dba.db.cursor()
cursor.execute(sql)
return cursor.fetchall()
"""
Meant to be used as follows:
new_values = ('a1', 'a2')
where_clause = "db_id = 2444"
fields = ('columnA', 'columnII')
dao_object.update_where(fields, new_values, where_clause)
Of course, fields and corresponding values must be in
the same order.
"""
def update_where(self, fields, new_values, where_clause=None):
table_name = self.get_table_name()
sql = "UPDATE " + table_name + " SET "
if fields and len(fields) > 0:
for f in fields:
sql += "'" + f + "' = ?,"
# remove last comma, if present
if sql.endswith(','):
sql = sql.rstrip(',')
if where_clause is not None:
sql += " WHERE " + where_clause
try:
cursor = self._dba.db.cursor()
cursor.execute('BEGIN TRANSACTION')
cursor.execute(sql, new_values)
cursor.execute('COMMIT')
except sqlite3.Error as e: # @UndefinedVariable
cursor.execute('ROLLBACK')
raise e
def delete_where(self, where_clause):
if not where_clause:
raise ValueError("RecordDaoLocal.delete_where() called without where clause.")
table_name = self.get_table_name()
sql = "DELETE FROM " + table_name + " WHERE " + where_clause
count_deleted = 0
try:
cursor = self._dba.db.cursor()
cursor.execute('BEGIN TRANSACTION')
cursor.execute(sql)
count_deleted = cursor.rowcount
cursor.execute('COMMIT')
return count_deleted # should be the number of rows changed
except sqlite3.Error as e: # @UndefinedVariable
cursor.execute('ROLLBACK')
raise e
@abc.abstractmethod
def get_table_name(self):
return ''
@abc.abstractmethod
def get_all_fields(self):
return ''
@abc.abstractmethod
def get_insert_fields(self):
return ''
class AttemptRecordDaoLocal(RecordDaoLocal):
TABLE_NAME = 'attempts'
ALL_FIELDS = "db_id, es_id, timestamp, bifrozt_host, source_ip, user, password, success, country_code, country_name"
INSERT_FIELDS = ( 'timestamp', 'bifrozt_host',
'source_ip', 'user', 'password', 'success',
'country_code', 'country_name' )
def __init__(self, localdbaccessor):
super(AttemptRecordDaoLocal,self).__init__(localdbaccessor)
def get_table_name(self):
return AttemptRecordDaoLocal.TABLE_NAME
def get_all_fields(self):
return AttemptRecordDaoLocal.ALL_FIELDS
def get_insert_fields(self):
return AttemptRecordDaoLocal.INSERT_FIELDS
class LogRecordDaoLocal(RecordDaoLocal):
TABLE_NAME = 'log_msg'
ALL_FIELDS = "db_id, es_id, timestamp, bifrozt_host, server_info, message"
INSERT_FIELDS = ( 'timestamp', 'bifrozt_host', 'server_info', 'message' )
def __init__(self, localdbaccessor):
super(LogRecordDaoLocal,self).__init__(localdbaccessor)
def get_table_name(self):
return LogRecordDaoLocal.TABLE_NAME
def get_all_fields(self):
return LogRecordDaoLocal.ALL_FIELDS
def get_insert_fields(self):
return LogRecordDaoLocal.INSERT_FIELDS
class SessionLogDaoLocal(RecordDaoLocal):
TABLE_NAME = 'session_log_records'
    ALL_FIELDS = "db_id, es_id, timestamp, bifrozt_host, source_ip, country_code, country_name, channel, message"
INSERT_FIELDS = ( 'timestamp', 'bifrozt_host',
'source_ip', 'country_code', 'country_name', 'channel', 'message' )
def __init__(self, localdbaccessor):
super(SessionLogDaoLocal,self).__init__(localdbaccessor)
def get_table_name(self):
return SessionLogDaoLocal.TABLE_NAME
def get_all_fields(self):
return SessionLogDaoLocal.ALL_FIELDS
def get_insert_fields(self):
return SessionLogDaoLocal.INSERT_FIELDS
class SessionRecordingDaoLocal(RecordDaoLocal):
TABLE_NAME = 'session_recordings'
    ALL_FIELDS = "db_id, es_id, timestamp, bifrozt_host, source_ip, country_code, country_name, filename, contents"
INSERT_FIELDS = ( 'timestamp', 'bifrozt_host',
'source_ip', 'country_code', 'country_name', 'filename', 'contents' )
def __init__(self, localdbaccessor):
super(SessionRecordingDaoLocal,self).__init__(localdbaccessor)
def get_table_name(self):
return SessionRecordingDaoLocal.TABLE_NAME
def get_all_fields(self):
return SessionRecordingDaoLocal.ALL_FIELDS
def get_insert_fields(self):
return SessionRecordingDaoLocal.INSERT_FIELDS
class SessionDownloadDaoLocal(RecordDaoLocal):
TABLE_NAME = 'session_downloads'
    ALL_FIELDS = "db_id, es_id, timestamp, bifrozt_host, source_ip, country_code, country_name, filename, contents"
INSERT_FIELDS = ( 'timestamp', 'bifrozt_host',
'source_ip', 'country_code', 'country_name', 'filename', 'contents' )
def __init__(self, localdbaccessor):
super(SessionDownloadDaoLocal,self).__init__(localdbaccessor)
def get_table_name(self):
return SessionDownloadDaoLocal.TABLE_NAME
def get_all_fields(self):
return SessionDownloadDaoLocal.ALL_FIELDS
def get_insert_fields(self):
return SessionDownloadDaoLocal.INSERT_FIELDS
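# A minimal usage sketch (hedged: the LocalDBAccessor constructor and the
# AttemptRecord DTO are assumed from the surrounding project, not shown here):
#
#   dba = LocalDBAccessor(StretchConfig())
#   dao = AttemptRecordDaoLocal(dba)
#   dao.insert_single(attempt_record)       # one row, its own transaction
#   written = dao.insert_bulk(records)      # many rows, one transaction
#   rows = dao.list_where("source_ip = '203.0.113.7'")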
| |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2006 Jorgen Stenarson. <jorgen.stenarson@bostream.nu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re,operator,string,sys,os
#import wordmatcher
#import pyreadline.clipboard as clipboard
if "pyreadline" in sys.modules:
pyreadline= sys.modules["pyreadline"]
else:
import pyreadline
import lineobj
import exceptions
class EscapeHistory(exceptions.Exception):
pass
from pyreadline.logger import log_sock
class LineHistory(object):
def __init__(self):
self.history=[]
self._history_length=100
self._history_cursor=0
self.history_filename=os.path.expanduser('~/.history')
self.lastcommand=None
self.query=""
def get_history_length(self):
value=self._history_length
log_sock("get_history_length:%d"%value,"history")
return value
def set_history_length(self,value):
log_sock("set_history_length: old:%d new:%d"%(self._history_length,value),"history")
self._history_length=value
def get_history_cursor(self):
value=self._history_cursor
log_sock("get_history_cursor:%d"%value,"history")
return value
def set_history_cursor(self,value):
log_sock("set_history_cursor: old:%d new:%d"%(self._history_cursor,value),"history")
self._history_cursor=value
history_length=property(get_history_length,set_history_length)
history_cursor=property(get_history_cursor,set_history_cursor)
def clear_history(self):
'''Clear readline history.'''
self.history[:] = []
self.history_cursor = 0
def read_history_file(self, filename=None):
'''Load a readline history file.'''
if filename is None:
filename=self.history_filename
try:
for line in open(filename, 'r'):
self.add_history(lineobj.ReadLineTextBuffer(line.rstrip()))
except IOError:
self.history = []
self.history_cursor = 0
def write_history_file(self, filename=None):
'''Save a readline history file.'''
if filename is None:
filename=self.history_filename
fp = open(filename, 'wb')
for line in self.history[-self.history_length:]:
fp.write(line.get_line_text())
fp.write('\n')
fp.close()
def add_history(self, line):
'''Append a line to the history buffer, as if it was the last line typed.'''
if not line.get_line_text():
pass
elif len(self.history) > 0 and self.history[-1].get_line_text() == line.get_line_text():
pass
else:
self.history.append(line)
self.history_cursor = len(self.history)
def previous_history(self,current): # (C-p)
'''Move back through the history list, fetching the previous command. '''
if self.history_cursor==len(self.history):
self.history.append(current.copy()) #do not use add_history since we do not want to increment cursor
if self.history_cursor > 0:
self.history_cursor -= 1
current.set_line(self.history[self.history_cursor].get_line_text())
current.point=lineobj.EndOfLine
def next_history(self,current): # (C-n)
'''Move forward through the history list, fetching the next command. '''
if self.history_cursor < len(self.history)-1:
self.history_cursor += 1
current.set_line(self.history[self.history_cursor].get_line_text())
def beginning_of_history(self): # (M-<)
'''Move to the first line in the history.'''
self.history_cursor = 0
if len(self.history) > 0:
self.l_buffer = self.history[0]
def end_of_history(self,current): # (M->)
'''Move to the end of the input history, i.e., the line currently
being entered.'''
self.history_cursor=len(self.history)
current.set_line(self.history[-1].get_line_text())
    def reverse_search_history(self,searchfor,startpos=None):
        if startpos is None:
            startpos=self.history_cursor
        # Note: the slice below stops before index 0, so the oldest history
        # entry is never examined by a reverse search.
        res=[(idx,line) for idx,line in enumerate(self.history[startpos:0:-1]) if line.startswith(searchfor)]
if res:
self.history_cursor-=res[0][0]
return res[0][1].get_line_text()
return ""
def forward_search_history(self,searchfor,startpos=None):
if startpos is None:
startpos=self.history_cursor
res=[(idx,line) for idx,line in enumerate(self.history[startpos:]) if line.startswith(searchfor)]
if res:
self.history_cursor+=res[0][0]
return res[0][1].get_line_text()
return ""
def _non_i_search(self, direction, current):
c = pyreadline.rl.console
line = current.get_line_text()
query = ''
while 1:
c.pos(*pyreadline.rl.prompt_end_pos)
scroll = c.write_scrolling(":%s" % query)
pyreadline.rl._update_prompt_pos(scroll)
pyreadline.rl._clear_after()
event = c.getkeypress()
log_sock(str(event),"history")
if event.keyinfo.keyname == 'backspace':
if len(query) > 0:
query = query[:-1]
else:
break
elif event.char in string.letters + string.digits + string.punctuation + ' ':
query += event.char
elif event.keyinfo.keyname == 'return':
break
else:
pyreadline.rl._bell()
log_sock(query,"history")
res=""
if query:
if direction==-1:
res=self.reverse_search_history(query)
else:
res=self.forward_search_history(query)
log_sock(res,"history")
return lineobj.ReadLineTextBuffer(res,point=0)
def non_incremental_reverse_search_history(self,current): # (M-p)
'''Search backward starting at the current line and moving up
through the history as necessary using a non-incremental search for
a string supplied by the user.'''
return self._non_i_search(-1,current)
def non_incremental_forward_search_history(self,current): # (M-n)
'''Search forward starting at the current line and moving down
through the the history as necessary using a non-incremental search
for a string supplied by the user.'''
return self._non_i_search(1,current)
def _search(self, direction, partial):
try:
if (self.lastcommand != self.history_search_forward and
self.lastcommand != self.history_search_backward):
self.query = ''.join(partial[0:partial.point].get_line_text())
hcstart=max(self.history_cursor,0)
log_sock("hcstart %s"%hcstart,"history")
hc = self.history_cursor + direction
while (direction < 0 and hc >= 0) or (direction > 0 and hc < len(self.history)):
h = self.history[hc]
if not self.query:
self.history_cursor = hc
result=lineobj.ReadLineTextBuffer(h,point=len(h.get_line_text()))
return result
elif h.get_line_text().startswith(self.query) and h != partial.get_line_text():
self.history_cursor = hc
result=lineobj.ReadLineTextBuffer(h,point=partial.point)
return result
hc += direction
else:
if len(self.history)==0:
pass
elif hc>=len(self.history) and not self.query:
self.history_cursor=len(self.history)
return lineobj.ReadLineTextBuffer("",point=0)
elif self.history[max(min(hcstart,len(self.history)-1),0)].get_line_text().startswith(self.query) and self.query:
return lineobj.ReadLineTextBuffer(self.history[max(min(hcstart,len(self.history)-1),0)],point=partial.point)
else:
return lineobj.ReadLineTextBuffer(partial,point=partial.point)
return lineobj.ReadLineTextBuffer(self.query,point=min(len(self.query),partial.point))
except IndexError:
log_sock("hcstart:%s %s"%(hcstart,len(self.history)),"history")
raise
def history_search_forward(self,partial): # ()
'''Search forward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
q= self._search(1,partial)
return q
def history_search_backward(self,partial): # ()
'''Search backward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
q= self._search(-1,partial)
return q
if __name__=="__main__":
q=LineHistory()
RL=lineobj.ReadLineTextBuffer
q.add_history(RL("aaaa"))
q.add_history(RL("aaba"))
q.add_history(RL("aaca"))
q.add_history(RL("akca"))
q.add_history(RL("bbb"))
q.add_history(RL("ako"))
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a LongCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a LongCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows
executable = os.path.normcase(executable)
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote and ' ' in executable:
executable = '"%s"' % executable
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp):
post_interp += b' -X:Frames'
shebang = b'#!' + executable + post_interp + b'\n'
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
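    # Illustrative result (hedged; the actual path depends on the running
    # interpreter, the build layout and any interpreter options):
    #   self._get_shebang('utf-8') -> b'#!/usr/bin/python\n'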
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and os.name == 'nt'
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else:
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher:
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if os.name == 'nt' and not outname.endswith('.' + ext):
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError:
if not self.dry_run:
raise
f = None
else:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
first_line = f.readline()
if not first_line:
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line:
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt':
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
    def make_multiple(self, specifications, options=None):
        """
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
        """
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
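# A minimal usage sketch (hedged; the target path and export entry are made
# up for illustration):
#
#   maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')
#   written = maker.make('mytool = mypkg.cli:main')
#   # 'written' lists one generated script per entry in maker.variants
#   # ('' and 'X.Y' by default).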
| |
import pytest
import os
import json
import pyecore.ecore as Ecore
from pyecore.resources import *
from pyecore.resources.json import JsonResource, JsonOptions, DefaultObjectMapper, NO_OBJECT
@pytest.fixture(scope='module')
def lib():
package = Ecore.EPackage('mypackage')
package.nsURI = 'http://simplemetamodel/1.0'
package.nsPrefix = 'simplemm'
AbsA = Ecore.EClass('AbsA', abstract=True)
A = Ecore.EClass('A', superclass=(AbsA,))
SubA = Ecore.EClass('SubA', superclass=(A,))
MyRoot = Ecore.EClass('MyRoot')
MyRoot.a_container = Ecore.EReference('a_container', eType=AbsA, upper=-1,
containment=True)
MyRoot.eStructuralFeatures.append(MyRoot.a_container)
MyRoot.eStructuralFeatures.append(Ecore.EAttribute('trans',
eType=Ecore.EString,
transient=True))
package.eClassifiers.extend([MyRoot, A, SubA, AbsA])
package.MyRoot = MyRoot
package.SubA = SubA
package.A = A
# we register the metamodel first
global_registry[package.nsURI] = package
return package
def test_json_resource_save_metamodel(tmpdir, lib):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)))
resource.append(lib)
resource.save()
# we read the model
resource = JsonResource(URI(str(f)))
resource.load()
root = resource.contents[0]
assert len(root.eContents) == len(lib.eContents)
assert isinstance(root, lib.eClass.python_class)
def test_json_resource_save_metamodel_uri(tmpdir, lib):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)), use_uuid=True)
resource.append(lib)
resource.save()
# we read the model
resource = JsonResource(URI(str(f)))
resource.load()
root = resource.contents[0]
assert len(root.eContents) == len(lib.eContents)
assert isinstance(root, lib.eClass.python_class)
def test_json_resource_createset(tmpdir, lib):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)))
# we create some instances
root = lib.MyRoot()
a1 = lib.A()
suba1 = lib.SubA()
root.a_container.extend([a1, suba1])
root.trans = 'transient_value'
# we add the elements to the resource
resource.append(root)
resource.save()
# we read the model
resource = JsonResource(URI(str(f)))
resource.load()
assert resource.contents != []
assert len(resource.contents[0].eContents) == 2
def test_json_resource_createSaveModifyRead(tmpdir, lib):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)))
# we create some instances
root = lib.MyRoot()
a1 = lib.A()
suba1 = lib.SubA()
root.a_container.extend([a1, suba1])
# we add the elements to the resource
resource.append(root)
resource.save()
# we add more instances
a2 = lib.A()
root.a_container.append(a2)
# we save again
resource.save()
# we read the model
resource = JsonResource(URI(str(f)))
resource.load()
assert resource.contents != []
assert len(resource.contents[0].eContents) == 3
# Defines a small metamodel
eClass = Ecore.EPackage('pack', nsURI='http://test_pack/1.0', nsPrefix='pack')
nsURI = 'http://tst/1.0'
@Ecore.EMetaclass
class A(object):
child = Ecore.EReference(containment=True, upper=-1)
imply = Ecore.EReference()
ref_by = Ecore.EReference()
distant = Ecore.EReference(eType=Ecore.EObject)
A.child.eType = A
A.imply.eType = A
A.ref_by.eType = A
@Ecore.EMetaclass
class Point(object):
x = Ecore.EAttribute(eType=Ecore.EDouble)
y = Ecore.EAttribute(eType=Ecore.EDouble)
z = Ecore.EAttribute(eType=Ecore.EDouble)
def test_json_resource_save_static_metamodel(tmpdir):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)))
# we add the elements to the resource
resource.append(eClass)
resource.save()
# we read the model
resource = JsonResource(URI(str(f)))
resource.load()
assert resource.contents != []
assert len(resource.contents[0].eContents) == 2
root = resource.contents[0]
assert root.eContents[0].name == 'A'
def test_json_option_serialize_default_values(tmpdir):
f = tmpdir.mkdir('pyecore-tmp').join('test.json')
resource = JsonResource(URI(str(f)))
p = Point()
p.x = 0.0
p.z = 0.0
resource.append(p)
resource.save(options={JsonOptions.SERIALIZE_DEFAULT_VALUES: True})
dct = json.load(open(str(f)))
assert dct['x'] == 0.0
assert dct['z'] == 0.0
assert 'y' not in dct
def test_json_save_multiple_roots(tmpdir):
A = Ecore.EClass('A')
A.eStructuralFeatures.append(Ecore.EAttribute('name', Ecore.EString))
pack = Ecore.EPackage('pack', 'packuri', 'pack')
pack.eClassifiers.append(A)
f = tmpdir.mkdir('pyecore-tmp').join('multiple.json')
resource = JsonResource(URI(str(f)))
resource.append(A(name='root1'))
resource.append(A(name='root2'))
resource.save()
dct = json.load(open(str(f)))
assert type(dct) is list
assert dct[0]['name'] == 'root1'
assert dct[1]['name'] == 'root2'
def test_json_save_multiple_roots_roundtrip(tmpdir):
A = Ecore.EClass('A')
A.eStructuralFeatures.append(Ecore.EAttribute('name', Ecore.EString))
pack = Ecore.EPackage('pack', 'packuri', 'pack')
pack.eClassifiers.append(A)
f = tmpdir.mkdir('pyecore-tmp').join('multiple.json')
resource = JsonResource(URI(str(f)))
resource.append(A(name='root1'))
resource.append(A(name='root2'))
resource.save()
global_registry[pack.nsURI] = pack
resource = JsonResource(URI(str(f)))
resource.load()
assert len(resource.contents) == 2
assert resource.contents[0].name == 'root1'
assert resource.contents[1].name == 'root2'
del global_registry[pack.nsURI]
def test_json_custom_mapper(tmpdir):
class MyMapper(object):
def to_dict_from_obj(self, obj, options, use_uuid, resource):
d = {
'name_custom': str(obj.name) + '_custom'
}
return d
class MyRootMapper(DefaultObjectMapper):
def to_dict_from_obj(self, obj, options, use_uuid, resource):
d = super().to_dict_from_obj(obj, options, use_uuid, resource)
d['name_custom'] = str(obj.name) + '_custom'
return d
@Ecore.EMetaclass
class A(object):
name = Ecore.EAttribute(eType=Ecore.EString)
def __init__(self, name):
self.name = name
@Ecore.EMetaclass
class B(A):
pass
@Ecore.EMetaclass
class Root(object):
name = Ecore.EAttribute(eType=Ecore.EString)
many_a = Ecore.EReference(eType=A, upper=-1, containment=True)
eclasses = Ecore.EReference(eType=Ecore.EClass, upper=-1, containment=True)
root = Root()
root.many_a.append(A('test1'))
root.many_a.append(B('test2'))
root.eclasses.append(Ecore.EClass('test3'))
f = tmpdir.mkdir('pyecore-tmp').join('custom.json')
resource = JsonResource(URI(str(f)))
resource.register_mapper(A, MyMapper())
resource.register_mapper(Ecore.EClass.eClass, MyMapper())
resource.register_mapper(Root.eClass, MyRootMapper())
resource.append(root)
resource.save()
dct = json.load(open(str(f)))
assert dct['many_a'][0]['name_custom'] == 'test1_custom'
assert dct['many_a'][1]['name_custom'] == 'test2_custom'
assert dct['eclasses'][0]['name_custom'] == 'test3_custom'
def test_json_custom_no_mapping(tmpdir):
class MyMapper(object):
def to_dict_from_obj(self, obj, options, use_uuid, resource):
return NO_OBJECT
@Ecore.EMetaclass
class A(object):
pass
@Ecore.EMetaclass
class B(A):
pass
@Ecore.EMetaclass
class Root(object):
many_a = Ecore.EReference(eType=A, upper=-1, containment=True)
root = Root()
root.many_a.append(A())
root.many_a.append(B())
f = tmpdir.mkdir('pyecore-tmp').join('nomapping.json')
resource = JsonResource(URI(str(f)))
resource.register_mapper(A, MyMapper())
resource.append(root)
resource.save()
dct = json.load(open(str(f)))
print(dct)
assert dct['many_a'] == []
| |
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import re
import string
import sys
template_h = string.Template("""// Code generated from InspectorInstrumentation.idl
#ifndef ${file_name}_h
#define ${file_name}_h
${includes}
namespace blink {
${forward_declarations}
namespace InspectorInstrumentation {
$methods
} // namespace InspectorInstrumentation
} // namespace blink
#endif // !defined(${file_name}_h)
""")
template_inline = string.Template("""
inline void ${name}(${params_public})
{ ${fast_return}
if (${condition})
${name}Impl(${params_impl});
}
""")
template_inline_forward = string.Template("""
inline void ${name}(${params_public})
{ ${fast_return}
${name}Impl(${params_impl});
}
""")
template_inline_returns_value = string.Template("""
inline ${return_type} ${name}(${params_public})
{ ${fast_return}
if (${condition})
return ${name}Impl(${params_impl});
return ${default_return_value};
}
""")
template_cpp = string.Template("""// Code generated from InspectorInstrumentation.idl
#include "config.h"
${includes}
namespace blink {
${extra_definitions}
namespace InspectorInstrumentation {
$methods
} // namespace InspectorInstrumentation
} // namespace blink
""")
template_outofline = string.Template("""
${return_type} ${name}Impl(${params_impl})
{${impl_lines}
}""")
template_agent_call = string.Template("""
if (${agent_class}* agent = ${agent_fetch})
${maybe_return}agent->${name}(${params_agent});""")
template_agent_call_timeline_returns_cookie = string.Template("""
int timelineAgentId = 0;
if (InspectorTimelineAgent* agent = agents->inspectorTimelineAgent()) {
if (agent->${name}(${params_agent}))
timelineAgentId = agent->id();
}""")
template_instrumenting_agents_h = string.Template("""// Code generated from InspectorInstrumentation.idl
#ifndef InstrumentingAgentsInl_h
#define InstrumentingAgentsInl_h
#include "platform/heap/Handle.h"
#include "wtf/FastAllocBase.h"
#include "wtf/Noncopyable.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefCounted.h"
namespace blink {
${forward_list}
class InstrumentingAgents : public RefCountedWillBeGarbageCollectedFinalized<InstrumentingAgents> {
WTF_MAKE_NONCOPYABLE(InstrumentingAgents);
WTF_MAKE_FAST_ALLOCATED_WILL_BE_REMOVED;
public:
static PassRefPtrWillBeRawPtr<InstrumentingAgents> create()
{
return adoptRefWillBeNoop(new InstrumentingAgents());
}
~InstrumentingAgents() { }
void trace(Visitor*);
void reset();
${accessor_list}
private:
InstrumentingAgents();
${member_list}
};
}
#endif // !defined(InstrumentingAgentsInl_h)
""")
template_instrumenting_agent_accessor = string.Template("""
${class_name}* ${getter_name}() const { return ${member_name}; }
void set${class_name}(${class_name}* agent) { ${member_name} = agent; }""")
template_instrumenting_agents_cpp = string.Template("""
InstrumentingAgents::InstrumentingAgents()
: $init_list
{
}
void InstrumentingAgents::trace(Visitor* visitor)
{
$trace_list
}
void InstrumentingAgents::reset()
{
$reset_list
}""")
def match_and_consume(pattern, source):
match = re.match(pattern, source)
if match:
return match, source[len(match.group(0)):].strip()
return None, source
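# For example (hedged, illustrative values only):
#   match_and_consume(r"\[(\w*)\]", "[Keep] LocalFrame* frame")
#   -> (<match over "[Keep]">, "LocalFrame* frame")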
def load_model_from_idl(source):
source = re.sub("//.*", "", source) # Remove line comments
source = re.sub("/\*(.|\n)*?\*/", "", source, re.MULTILINE) # Remove block comments
source = re.sub("\]\s*?\n\s*", "] ", source) # Merge the method annotation with the next line
source = source.strip()
model = []
while len(source):
match, source = match_and_consume("interface\s(\w*)\s?\{([^\{]*)\}", source)
if not match:
sys.stderr.write("Cannot parse %s\n" % source[:100])
sys.exit(1)
model.append(File(match.group(1), match.group(2)))
return model
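# Illustrative input shape (hedged; a made-up IDL fragment, the real names
# come from InspectorInstrumentation.idl):
#
#   interface InspectorInstrumentation {
#   #include "core/frame/LocalFrame.h"
#       class LocalFrame;
#       [Page, Inline=FastReturn]
#       void didSomething([Keep] LocalFrame* frame);
#   }
#
# Each interface block becomes a File whose declarations are Method objects.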
class File:
def __init__(self, name, source):
self.name = name
self.header_name = self.name + "Inl"
self.includes = [include_inspector_header("InspectorInstrumentation")]
self.forward_declarations = []
self.declarations = []
for line in map(str.strip, source.split("\n")):
line = re.sub("\s{2,}", " ", line).strip() # Collapse whitespace
if len(line) == 0:
continue
if line[0] == "#":
self.includes.append(line)
elif line.startswith("class "):
self.forward_declarations.append(line)
else:
self.declarations.append(Method(line))
self.includes.sort()
self.forward_declarations.sort()
def generate(self, cpp_lines, used_agents):
header_lines = []
for declaration in self.declarations:
for agent in set(declaration.agents):
used_agents.add(agent)
declaration.generate_header(header_lines)
declaration.generate_cpp(cpp_lines)
return template_h.substitute(None,
file_name=self.header_name,
includes="\n".join(self.includes),
forward_declarations="\n".join(self.forward_declarations),
methods="\n".join(header_lines))
class Method:
def __init__(self, source):
match = re.match("(\[[\w|,|=|\s]*\])?\s?(\w*\*?) (\w*)\((.*)\)\s?;", source)
if not match:
sys.stderr.write("Cannot parse %s\n" % source)
sys.exit(1)
self.options = []
if match.group(1):
options_str = re.sub("\s", "", match.group(1)[1:-1])
if len(options_str) != 0:
self.options = options_str.split(",")
self.return_type = match.group(2)
self.name = match.group(3)
# Splitting parameters by a comma, assuming that attribute lists contain no more than one attribute.
self.params = map(Parameter, map(str.strip, match.group(4).split(",")))
self.accepts_cookie = len(self.params) and self.params[0].type == "const InspectorInstrumentationCookie&"
self.returns_cookie = self.return_type == "InspectorInstrumentationCookie"
self.returns_value = self.return_type != "void"
if self.return_type == "bool":
self.default_return_value = "false"
elif self.return_type == "int":
self.default_return_value = "0"
elif self.return_type == "String":
self.default_return_value = "\"\""
else:
self.default_return_value = self.return_type + "()"
for param in self.params:
if "DefaultReturn" in param.options:
self.default_return_value = param.name
self.params_impl = self.params
if not self.accepts_cookie and not "Inline=Forward" in self.options:
if not "Keep" in self.params_impl[0].options:
self.params_impl = self.params_impl[1:]
self.params_impl = [Parameter("InstrumentingAgents* agents")] + self.params_impl
        self.agents = filter(lambda option: "=" not in option, self.options)
def generate_header(self, header_lines):
if "Inline=Custom" in self.options:
return
header_lines.append("%s %sImpl(%s);" % (
self.return_type, self.name, ", ".join(map(Parameter.to_str_class, self.params_impl))))
if "Inline=FastReturn" in self.options or "Inline=Forward" in self.options:
fast_return = "\n FAST_RETURN_IF_NO_FRONTENDS(%s);" % self.default_return_value
else:
fast_return = ""
for param in self.params:
if "FastReturn" in param.options:
fast_return += "\n if (!%s)\n return %s;" % (param.name, self.default_return_value)
if self.accepts_cookie:
condition = "%s.isValid()" % self.params_impl[0].name
template = template_inline
elif "Inline=Forward" in self.options:
condition = ""
template = template_inline_forward
else:
condition = "InstrumentingAgents* agents = instrumentingAgentsFor(%s)" % self.params[0].name
if self.returns_value:
template = template_inline_returns_value
else:
template = template_inline
header_lines.append(template.substitute(
None,
name=self.name,
fast_return=fast_return,
return_type=self.return_type,
default_return_value=self.default_return_value,
params_public=", ".join(map(Parameter.to_str_full, self.params)),
params_impl=", ".join(map(Parameter.to_str_name, self.params_impl)),
condition=condition))
def generate_cpp(self, cpp_lines):
if len(self.agents) == 0:
return
body_lines = map(self.generate_ref_ptr, self.params)
body_lines += map(self.generate_agent_call, self.agents)
if self.returns_cookie:
if "Timeline" in self.agents:
timeline_agent_id = "timelineAgentId"
else:
timeline_agent_id = "0"
body_lines.append("\n return InspectorInstrumentationCookie(agents, %s);" % timeline_agent_id)
elif self.returns_value:
body_lines.append("\n return %s;" % self.default_return_value)
cpp_lines.append(template_outofline.substitute(
None,
return_type=self.return_type,
name=self.name,
params_impl=", ".join(map(Parameter.to_str_class_and_name, self.params_impl)),
impl_lines="".join(body_lines)))
def generate_agent_call(self, agent):
agent_class, agent_getter = agent_getter_signature(agent)
leading_param_name = self.params_impl[0].name
if not self.accepts_cookie:
agent_fetch = "%s->%s()" % (leading_param_name, agent_getter)
elif agent == "Timeline":
agent_fetch = "retrieveTimelineAgent(%s)" % leading_param_name
else:
agent_fetch = "%s.instrumentingAgents()->%s()" % (leading_param_name, agent_getter)
if agent == "Timeline" and self.returns_cookie:
template = template_agent_call_timeline_returns_cookie
else:
template = template_agent_call
if not self.returns_value or self.returns_cookie:
maybe_return = ""
else:
maybe_return = "return "
return template.substitute(
None,
name=self.name,
agent_class=agent_class,
agent_fetch=agent_fetch,
maybe_return=maybe_return,
params_agent=", ".join(map(Parameter.to_str_value, self.params_impl)[1:]))
def generate_ref_ptr(self, param):
if param.is_prp:
return "\n RefPtr<%s> %s = %s;" % (param.inner_type, param.value, param.name)
else:
return ""
class Parameter:
def __init__(self, source):
self.options = []
match, source = match_and_consume("\[(\w*)\]", source)
if match:
self.options.append(match.group(1))
parts = map(str.strip, source.split("="))
if len(parts) == 1:
self.default_value = None
else:
self.default_value = parts[1]
param_decl = parts[0]
if re.match("(const|unsigned long) ", param_decl):
min_type_tokens = 2
else:
min_type_tokens = 1
if len(param_decl.split(" ")) > min_type_tokens:
parts = param_decl.split(" ")
self.type = " ".join(parts[:-1])
self.name = parts[-1]
else:
self.type = param_decl
self.name = generate_param_name(self.type)
if re.match("PassRefPtr<", param_decl):
self.is_prp = True
self.value = self.name
self.name = "prpP" + self.name[1:]
self.inner_type = re.match("PassRefPtr<(.+)>", param_decl).group(1)
else:
self.is_prp = False
self.value = self.name
def to_str_full(self):
if self.default_value is None:
return self.to_str_class_and_name()
return "%s %s = %s" % (self.type, self.name, self.default_value)
def to_str_class_and_name(self):
return "%s %s" % (self.type, self.name)
def to_str_class(self):
return self.type
def to_str_name(self):
return self.name
def to_str_value(self):
return self.value
def generate_param_name(param_type):
base_name = re.match("(const |PassRefPtr<)?(\w*)", param_type).group(2)
return "param" + base_name
def agent_class_name(agent):
custom_agent_names = ["PageDebugger", "PageRuntime", "WorkerRuntime"]
if agent in custom_agent_names:
return "%sAgent" % agent
return "Inspector%sAgent" % agent
def agent_getter_signature(agent):
agent_class = agent_class_name(agent)
return agent_class, agent_class[0].lower() + agent_class[1:]
def include_header(name):
return "#include \"%s.h\"" % name
def include_inspector_header(name):
return include_header("core/inspector/" + name)
def generate_instrumenting_agents(used_agents):
agents = list(used_agents)
forward_list = []
accessor_list = []
member_list = []
init_list = []
trace_list = []
reset_list = []
for agent in agents:
class_name, getter_name = agent_getter_signature(agent)
member_name = "m_" + getter_name
forward_list.append("class %s;" % class_name)
accessor_list.append(template_instrumenting_agent_accessor.substitute(
None,
class_name=class_name,
getter_name=getter_name,
member_name=member_name))
member_list.append(" RawPtrWillBeMember<%s> %s;" % (class_name, member_name))
init_list.append("%s(nullptr)" % member_name)
trace_list.append("visitor->trace(%s);" % member_name)
reset_list.append("%s = nullptr;" % member_name)
forward_list.sort()
accessor_list.sort()
member_list.sort()
init_list.sort()
trace_list.sort()
reset_list.sort()
header_lines = template_instrumenting_agents_h.substitute(
None,
forward_list="\n".join(forward_list),
accessor_list="\n".join(accessor_list),
member_list="\n".join(member_list))
cpp_lines = template_instrumenting_agents_cpp.substitute(
None,
init_list="\n , ".join(init_list),
trace_list="\n ".join(trace_list),
reset_list="\n ".join(reset_list))
return header_lines, cpp_lines
def generate(input_path, output_dir):
fin = open(input_path, "r")
files = load_model_from_idl(fin.read())
fin.close()
cpp_includes = []
cpp_lines = []
used_agents = set()
for f in files:
cpp_includes.append(include_header(f.header_name))
fout = open(output_dir + "/" + f.header_name + ".h", "w")
fout.write(f.generate(cpp_lines, used_agents))
fout.close()
for agent in used_agents:
cpp_includes.append(include_inspector_header(agent_class_name(agent)))
cpp_includes.append(include_header("InstrumentingAgentsInl"))
cpp_includes.sort()
instrumenting_agents_header, instrumenting_agents_cpp = generate_instrumenting_agents(used_agents)
fout = open(output_dir + "/" + "InstrumentingAgentsInl.h", "w")
fout.write(instrumenting_agents_header)
fout.close()
fout = open(output_dir + "/InspectorInstrumentationImpl.cpp", "w")
fout.write(template_cpp.substitute(None,
includes="\n".join(cpp_includes),
extra_definitions=instrumenting_agents_cpp,
methods="\n".join(cpp_lines)))
fout.close()
cmdline_parser = optparse.OptionParser()
cmdline_parser.add_option("--output_dir")
try:
arg_options, arg_values = cmdline_parser.parse_args()
if (len(arg_values) != 1):
raise Exception("Exactly one plain argument expected (found %s)" % len(arg_values))
input_path = arg_values[0]
output_dirpath = arg_options.output_dir
if not output_dirpath:
raise Exception("Output directory must be specified")
except Exception:
# Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
exc = sys.exc_info()[1]
sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
sys.stderr.write("Usage: <script> --output_dir <output_dir> InspectorInstrumentation.idl\n")
    sys.exit(1)
generate(input_path, output_dirpath)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Network
------------
This module tests the basic functionality of a VLAN network. Each test "runs"
on a VLAN with one or more nodes, and each node is bound to a client state
machine.
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.pdu import Address, LocalBroadcast, PDU
from bacpypes.comm import bind
from bacpypes.vlan import Network, Node
from ..state_machine import ClientStateMachine, StateMachineGroup
from ..time_machine import reset_time_machine, run_time_machine
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class TNetwork(StateMachineGroup):
def __init__(self, node_count):
if _debug: TNetwork._debug("__init__ %r", node_count)
StateMachineGroup.__init__(self)
self.vlan = Network(broadcast_address=0)
for i in range(node_count):
node = Node(i + 1, self.vlan)
# bind a client state machine to the node
csm = ClientStateMachine()
bind(csm, node)
# add it to this group
self.append(csm)
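        # note: node addresses are assigned 1..node_count, and address 0 is
        # the VLAN's local broadcast address (see the Network constructor)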
def run(self, time_limit=60.0):
if _debug: TNetwork._debug("run %r", time_limit)
# reset the time machine
reset_time_machine()
if _debug: TNetwork._debug(" - time machine reset")
# run the group
super(TNetwork, self).run()
# run it for some time
run_time_machine(time_limit)
if _debug: TNetwork._debug(" - time machine finished")
# check for success
all_success, some_failed = super(TNetwork, self).check_for_success()
assert all_success
@bacpypes_debugging
class TestVLAN(unittest.TestCase):
def __init__(self, *args, **kwargs):
if _debug: TestVLAN._debug("__init__ %r %r", args, kwargs)
super(TestVLAN, self).__init__(*args, **kwargs)
def test_idle(self):
"""Test that a very quiet network can exist. This is not a network
test so much as a state machine group test.
"""
if _debug: TestVLAN._debug("test_idle")
# two element network
tnet = TNetwork(2)
tnode1, tnode2 = tnet.state_machines
# set the start states of both machines to success
tnode1.start_state.success()
tnode2.start_state.success()
# run the group
tnet.run()
def test_send_receive(self):
"""Test that a node can send a message to another node.
"""
if _debug: TestVLAN._debug("test_send_receive")
# two element network
tnet = TNetwork(2)
tnode1, tnode2 = tnet.state_machines
# make a PDU from node 1 to node 2
pdu = PDU(b'data', source=1, destination=2)
if _debug: TestVLAN._debug(" - pdu: %r", pdu)
        # node 1 sends the pdu, node 2 gets it
tnode1.start_state.send(pdu).success()
tnode2.start_state.receive(PDU, pduSource=1).success()
# run the group
tnet.run()
def test_broadcast(self):
"""Test that a node can send out a 'local broadcast' message which will
be received by every other node.
"""
if _debug: TestVLAN._debug("test_broadcast")
# three element network
tnet = TNetwork(3)
tnode1, tnode2, tnode3 = tnet.state_machines
# make a broadcast PDU
pdu = PDU(b'data', source=1, destination=0)
if _debug: TestVLAN._debug(" - pdu: %r", pdu)
# node 1 sends the pdu, node 2 and 3 each get it
tnode1.start_state.send(pdu).success()
tnode2.start_state.receive(PDU, pduSource=1).success()
tnode3.start_state.receive(PDU, pduSource=1).success()
# run the group
tnet.run()
def test_spoof_fail(self):
"""Test verifying that a node cannot send out packets with a source
address other than its own, see also test_spoof_pass().
"""
if _debug: TestVLAN._debug("test_spoof_fail")
        # one node network
tnet = TNetwork(1)
tnode1, = tnet.state_machines
# make a unicast PDU with the wrong source
pdu = PDU(b'data', source=2, destination=3)
        # the node sends the pdu and would succeed, but...
        tnode1.start_state.send(pdu).success()
        # ...sending with a spoofed source address raises an error
with self.assertRaises(RuntimeError):
tnet.run()
def test_spoof_pass(self):
"""Test allowing a node to send out packets with a source address
other than its own, see also test_spoof_fail().
"""
if _debug: TestVLAN._debug("test_spoof_pass")
# one node network
tnet = TNetwork(1)
tnode1, = tnet.state_machines
# reach into the network and enable spoofing for the node
tnet.vlan.nodes[0].spoofing = True
# make a unicast PDU from a fictitious node
pdu = PDU(b'data', source=3, destination=1)
# node 1 sends the pdu, but gets it back as if it was from node 3
tnode1.start_state.send(pdu).receive(PDU, pduSource=3).success()
# run the group
tnet.run()
def test_promiscuous_pass(self):
"""Test 'promiscuous mode' of a node which allows it to receive every
packet sent on the network. This is like the network is a hub, or
the node is connected to a 'monitor' port on a managed switch.
"""
if _debug: TestVLAN._debug("test_promiscuous_pass")
# three element network
tnet = TNetwork(3)
tnode1, tnode2, tnode3 = tnet.state_machines
# reach into the network and enable promiscuous mode
tnet.vlan.nodes[2].promiscuous = True
# make a PDU from node 1 to node 2
pdu = PDU(b'data', source=1, destination=2)
# node 1 sends the pdu to node 2, node 3 also gets a copy
tnode1.start_state.send(pdu).success()
tnode2.start_state.receive(PDU, pduSource=1).success()
tnode3.start_state.receive(PDU, pduDestination=2).success()
# run the group
tnet.run()
def test_promiscuous_fail(self):
if _debug: TestVLAN._debug("test_promiscuous_fail")
# three element network
tnet = TNetwork(3)
tnode1, tnode2, tnode3 = tnet.state_machines
# make a PDU from node 1 to node 2
pdu = PDU(b'data', source=1, destination=2)
# node 1 sends the pdu to node 2, node 3 waits and gets nothing
tnode1.start_state.send(pdu).success()
tnode2.start_state.receive(PDU, pduSource=1).success()
# if node 3 receives anything it will trigger unexpected receive and fail
tnode3.start_state.timeout(0.5).success()
# run the group
tnet.run()
@bacpypes_debugging
class TestVLANEvents(unittest.TestCase):
def __init__(self, *args, **kwargs):
if _debug: TestVLANEvents._debug("__init__ %r %r", args, kwargs)
super(TestVLANEvents, self).__init__(*args, **kwargs)
def test_send_receive(self):
"""Test that a node can send a message to another node and use
events to continue with the messages.
"""
        if _debug: TestVLANEvents._debug("test_send_receive")
# two element network
tnet = TNetwork(2)
tnode1, tnode2 = tnet.state_machines
# make a PDU from node 1 to node 2
dead_pdu = PDU(b'dead', source=1, destination=2)
        if _debug: TestVLANEvents._debug("    - dead_pdu: %r", dead_pdu)
# make a PDU from node 1 to node 2
beef_pdu = PDU(b'beef', source=1, destination=2)
        if _debug: TestVLANEvents._debug("    - beef_pdu: %r", beef_pdu)
# node 1 sends dead_pdu, waits for event, sends beef_pdu
tnode1.start_state \
.send(dead_pdu).wait_event('e') \
.send(beef_pdu).success()
# node 2 receives dead_pdu, sets event, waits for beef_pdu
tnode2.start_state \
.receive(PDU, pduData=b'dead').set_event('e') \
.receive(PDU, pduData=b'beef').success()
# run the group
tnet.run()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_hmmer.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_hmmer'):
retconfig[nameval[0]] = nameval[1]
return retconfig
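# Illustrative example: with KB_DEPLOYMENT_CONFIG pointing at a deploy.cfg
# containing
#     [kb_hmmer]
#     auth-service-url = <auth service URL>
# get_config() returns {'auth-service-url': '<auth service URL>'}.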
config = get_config()
from kb_hmmer.kb_hmmerImpl import kb_hmmer # noqa @IgnorePep8
impl_kb_hmmer = kb_hmmer(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
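# Illustrative usage: json.dumps({'ids': set([2, 1])}, cls=JSONObjectEncoder)
# renders the set as a JSON list (element order follows set iteration order).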
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
                if (not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
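# For example, with HTTP_X_FORWARDED_FOR set to '10.0.0.1, 10.0.0.2' and the
# x-ip headers trusted, getIPAddress() returns '10.0.0.1'; with no forwarding
# headers it falls back to REMOTE_ADDR.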
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_hmmer'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_hmmer.HMMER_MSA_Search,
name='kb_hmmer.HMMER_MSA_Search',
types=[dict])
self.method_authentication['kb_hmmer.HMMER_MSA_Search'] = 'required' # noqa
self.rpc_service.add(impl_kb_hmmer.HMMER_Local_MSA_Group_Search,
name='kb_hmmer.HMMER_Local_MSA_Group_Search',
types=[dict])
self.method_authentication['kb_hmmer.HMMER_Local_MSA_Group_Search'] = 'required' # noqa
self.rpc_service.add(impl_kb_hmmer.HMMER_dbCAN_Search,
name='kb_hmmer.HMMER_dbCAN_Search',
types=[dict])
self.method_authentication['kb_hmmer.HMMER_dbCAN_Search'] = 'required' # noqa
self.rpc_service.add(impl_kb_hmmer.HMMER_EnvBioelement_Search,
name='kb_hmmer.HMMER_EnvBioelement_Search',
types=[dict])
self.method_authentication['kb_hmmer.HMMER_EnvBioelement_Search'] = 'required' # noqa
self.rpc_service.add(impl_kb_hmmer.HMMER_PhyloMarkers_Search,
name='kb_hmmer.HMMER_PhyloMarkers_Search',
types=[dict])
self.method_authentication['kb_hmmer.HMMER_PhyloMarkers_Search'] = 'required' # noqa
self.rpc_service.add(impl_kb_hmmer.status,
name='kb_hmmer.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_hmmer ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server,
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
"""Handle Manager."""
import json
import logging
import math
import numbers
import os
from queue import Queue
from threading import Event
import time
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TYPE_CHECKING,
)
from wandb.proto.wandb_internal_pb2 import (
HistoryRecord,
MetricRecord,
Record,
Result,
SampledHistoryItem,
SummaryItem,
SummaryRecord,
)
from . import meta, sample, stats, tb_watcher
from .settings_static import SettingsStatic
from ..interface.interface_queue import InterfaceQueue
from ..lib import handler_util, proto_util, tracelog
if TYPE_CHECKING:
from wandb.proto.wandb_internal_pb2 import (
ArtifactDoneRequest,
MetricSummary,
)
SummaryDict = Dict[str, Any]
logger = logging.getLogger(__name__)
def _dict_nested_set(target: Dict[str, Any], key_list: Sequence[str], v: Any) -> None:
# recurse down the dictionary structure:
for k in key_list[:-1]:
target.setdefault(k, {})
new_target = target.get(k)
if TYPE_CHECKING:
new_target = cast(Dict[str, Any], new_target)
target = new_target
# use the last element of the key to write the leaf:
target[key_list[-1]] = v
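# For example, given target = {}, calling
# _dict_nested_set(target, ["a", "b"], 1) leaves target == {"a": {"b": 1}}.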
class HandleManager(object):
_consolidated_summary: SummaryDict
_sampled_history: Dict[str, sample.UniformSampleAccumulator]
_partial_history: Dict[str, Any]
_settings: SettingsStatic
_record_q: "Queue[Record]"
_result_q: "Queue[Result]"
_stopped: Event
_sender_q: "Queue[Record]"
_writer_q: "Queue[Record]"
_interface: InterfaceQueue
_system_stats: Optional[stats.SystemStats]
_tb_watcher: Optional[tb_watcher.TBWatcher]
_metric_defines: Dict[str, MetricRecord]
_metric_globs: Dict[str, MetricRecord]
_metric_track: Dict[Tuple[str, ...], float]
_metric_copy: Dict[Tuple[str, ...], Any]
_track_time: Optional[float]
_accumulate_time: float
_artifact_xid_done: Dict[str, "ArtifactDoneRequest"]
def __init__(
self,
settings: SettingsStatic,
record_q: "Queue[Record]",
result_q: "Queue[Result]",
stopped: Event,
sender_q: "Queue[Record]",
writer_q: "Queue[Record]",
interface: InterfaceQueue,
) -> None:
self._settings = settings
self._record_q = record_q
self._result_q = result_q
self._stopped = stopped
self._sender_q = sender_q
self._writer_q = writer_q
self._interface = interface
self._tb_watcher = None
self._system_stats = None
self._step = 0
self._track_time = None
self._accumulate_time = 0
self._run_start_time = 0
# keep track of summary from key/val updates
self._consolidated_summary = dict()
self._sampled_history = dict()
self._partial_history = dict()
self._metric_defines = dict()
self._metric_globs = dict()
self._metric_track = dict()
self._metric_copy = dict()
# TODO: implement release protocol to clean this up
self._artifact_xid_done = dict()
def __len__(self) -> int:
return self._record_q.qsize()
def handle(self, record: Record) -> None:
record_type = record.WhichOneof("record_type")
assert record_type
handler_str = "handle_" + record_type
handler: Callable[[Record], None] = getattr(self, handler_str, None)
assert handler, "unknown handle: {}".format(handler_str)
handler(record)
def handle_request(self, record: Record) -> None:
request_type = record.request.WhichOneof("request_type")
assert request_type
handler_str = "handle_request_" + request_type
handler: Callable[[Record], None] = getattr(self, handler_str, None)
if request_type != "network_status":
logger.debug("handle_request: {}".format(request_type))
assert handler, "unknown handle: {}".format(handler_str)
handler(record)
def _dispatch_record(self, record: Record, always_send: bool = False) -> None:
if not self._settings._offline or always_send:
tracelog.log_message_queue(record, self._sender_q)
self._sender_q.put(record)
if not record.control.local and self._writer_q:
tracelog.log_message_queue(record, self._writer_q)
self._writer_q.put(record)
def _respond_result(self, result: Result) -> None:
tracelog.log_message_queue(result, self._result_q)
self._result_q.put(result)
def debounce(self) -> None:
pass
def handle_request_defer(self, record: Record) -> None:
defer = record.request.defer
state = defer.state
logger.info("handle defer: {}".format(state))
# only handle flush tb (sender handles the rest)
if state == defer.FLUSH_STATS:
if self._system_stats:
                # TODO(jhr): this could block, so we don't really want to call
                # shutdown from the handler thread
self._system_stats.shutdown()
elif state == defer.FLUSH_TB:
if self._tb_watcher:
# shutdown tensorboard workers so we get all metrics flushed
self._tb_watcher.finish()
self._tb_watcher = None
elif state == defer.FLUSH_PARTIAL_HISTORY:
self._flush_partial_history()
elif state == defer.FLUSH_SUM:
self._save_summary(self._consolidated_summary, flush=True)
# defer is used to drive the sender finish state machine
self._dispatch_record(record, always_send=True)
def handle_request_login(self, record: Record) -> None:
self._dispatch_record(record)
def handle_run(self, record: Record) -> None:
self._dispatch_record(record)
def handle_stats(self, record: Record) -> None:
self._dispatch_record(record)
def handle_config(self, record: Record) -> None:
self._dispatch_record(record)
def handle_output(self, record: Record) -> None:
self._dispatch_record(record)
def handle_files(self, record: Record) -> None:
self._dispatch_record(record)
def handle_artifact(self, record: Record) -> None:
self._dispatch_record(record)
def handle_alert(self, record: Record) -> None:
self._dispatch_record(record)
def _save_summary(self, summary_dict: SummaryDict, flush: bool = False) -> None:
summary = SummaryRecord()
for k, v in summary_dict.items():
update = summary.update.add()
update.key = k
update.value_json = json.dumps(v)
record = Record(summary=summary)
if flush:
self._dispatch_record(record)
elif not self._settings._offline:
tracelog.log_message_queue(record, self._sender_q)
self._sender_q.put(record)
def _save_history(self, history: HistoryRecord,) -> None:
for item in history.item:
# TODO(jhr) save nested keys?
k = item.key
v = json.loads(item.value_json)
if isinstance(v, numbers.Real):
self._sampled_history.setdefault(k, sample.UniformSampleAccumulator())
self._sampled_history[k].add(v)
def _update_summary_metrics(
self,
s: "MetricSummary",
kl: List[str],
v: "numbers.Real",
float_v: float,
goal_max: Optional[bool],
) -> bool:
updated = False
best_key: Optional[Tuple[str, ...]] = None
if s.none:
return False
if s.copy:
# non key list copy already done in _update_summary
if len(kl) > 1:
_dict_nested_set(self._consolidated_summary, kl, v)
return True
if s.last:
last_key = tuple(kl + ["last"])
old_last = self._metric_track.get(last_key)
if old_last is None or float_v != old_last:
self._metric_track[last_key] = float_v
_dict_nested_set(self._consolidated_summary, last_key, v)
updated = True
if s.best:
best_key = tuple(kl + ["best"])
        if s.max or (best_key and goal_max):
max_key = tuple(kl + ["max"])
old_max = self._metric_track.get(max_key)
if old_max is None or float_v > old_max:
self._metric_track[max_key] = float_v
if s.max:
_dict_nested_set(self._consolidated_summary, max_key, v)
updated = True
if best_key:
_dict_nested_set(self._consolidated_summary, best_key, v)
updated = True
        # default to minimize if the goal is not specified
        if s.min or (best_key and not goal_max):
min_key = tuple(kl + ["min"])
old_min = self._metric_track.get(min_key)
if old_min is None or float_v < old_min:
self._metric_track[min_key] = float_v
if s.min:
_dict_nested_set(self._consolidated_summary, min_key, v)
updated = True
if best_key:
_dict_nested_set(self._consolidated_summary, best_key, v)
updated = True
if s.mean:
tot_key = tuple(kl + ["tot"])
num_key = tuple(kl + ["num"])
avg_key = tuple(kl + ["mean"])
tot = self._metric_track.get(tot_key, 0.0)
num = self._metric_track.get(num_key, 0)
tot += float_v
num += 1
self._metric_track[tot_key] = tot
self._metric_track[num_key] = num
_dict_nested_set(self._consolidated_summary, avg_key, tot / num)
updated = True
return updated
def _update_summary_leaf(
self, kl: List[str], v: Any, d: Optional[MetricRecord] = None,
) -> bool:
has_summary = d and d.HasField("summary")
if len(kl) == 1:
copy_key = tuple(kl)
old_copy = self._metric_copy.get(copy_key)
if old_copy is None or v != old_copy:
self._metric_copy[copy_key] = v
                # store the copied value if no summary is specified, or if
                # copy behavior was requested
if not has_summary or (d and d.summary.copy):
self._consolidated_summary[kl[0]] = v
return True
if not d:
return False
if not has_summary:
return False
if not isinstance(v, numbers.Real):
return False
if math.isnan(v):
return False
float_v = float(v)
goal_max = None
if d.goal:
goal_max = d.goal == d.GOAL_MAXIMIZE
if self._update_summary_metrics(
d.summary, kl=kl, v=v, float_v=float_v, goal_max=goal_max
):
return True
return False
def _update_summary_list(
self, kl: List[str], v: Any, d: Optional[MetricRecord] = None,
) -> bool:
metric_key = ".".join([k.replace(".", "\\.") for k in kl])
d = self._metric_defines.get(metric_key, d)
        # a plain dict (one without a wandb "_type" key) holds nested
        # metrics; recurse into it
if isinstance(v, dict) and not handler_util.metric_is_wandb_dict(v):
updated = False
for nk, nv in v.items():
if self._update_summary_list(kl=kl[:] + [nk], v=nv, d=d):
updated = True
return updated
# If the dict is a media object, update the pointer to the latest alias
elif isinstance(v, dict) and handler_util.metric_is_wandb_dict(v):
if "_latest_artifact_path" in v and "artifact_path" in v:
# TODO: Make non-destructive?
v["artifact_path"] = v["_latest_artifact_path"]
updated = self._update_summary_leaf(kl=kl, v=v, d=d)
return updated
def _update_summary_media_objects(self, v: Dict[str, Any]) -> Dict[str, Any]:
        # For now, non-recursive - just the top level
for nk, nv in v.items():
if (
isinstance(nv, dict)
and handler_util.metric_is_wandb_dict(nv)
and "_latest_artifact_path" in nv
and "artifact_path" in nv
):
# TODO: Make non-destructive?
nv["artifact_path"] = nv["_latest_artifact_path"]
v[nk] = nv
return v
def _update_summary(self, history_dict: Dict[str, Any]) -> bool:
        # keep the old fast-path behavior if no metrics have been defined
if not self._metric_defines:
history_dict = self._update_summary_media_objects(history_dict)
self._consolidated_summary.update(history_dict)
return True
updated = False
for k, v in history_dict.items():
if self._update_summary_list(kl=[k], v=v):
updated = True
return updated
def _history_assign_step(
self, history: HistoryRecord, history_dict: Dict[str, Any],
) -> None:
has_step = history.HasField("step")
item = history.item.add()
item.key = "_step"
if has_step:
step = history.step.num
history_dict["_step"] = step
item.value_json = json.dumps(step)
self._step = step + 1
else:
history_dict["_step"] = self._step
item.value_json = json.dumps(self._step)
self._step += 1
def _history_define_metric(self, hkey: str) -> Optional[MetricRecord]:
"""check for hkey match in glob metrics, return defined metric."""
        # don't define a metric for internal (underscore-prefixed) keys
if hkey.startswith("_"):
return None
for k, mglob in self._metric_globs.items():
if k.endswith("*"):
if hkey.startswith(k[:-1]):
m = MetricRecord()
m.CopyFrom(mglob)
m.ClearField("glob_name")
m.options.defined = False
m.name = hkey
return m
return None
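    # Illustrative example: if a glob metric was defined for "train/*",
    # logging "train/loss" returns a copy of that MetricRecord with
    # name "train/loss", glob_name cleared and options.defined set to False.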
def _history_update_leaf(
self,
kl: List[str],
v: Any,
history_dict: Dict[str, Any],
update_history: Dict[str, Any],
) -> None:
hkey = ".".join([k.replace(".", "\\.") for k in kl])
m = self._metric_defines.get(hkey)
if not m:
m = self._history_define_metric(hkey)
if not m:
return
mr = Record()
mr.metric.CopyFrom(m)
            mr.control.local = True  # don't store this, just send it
self._handle_defined_metric(mr)
if m.options.step_sync and m.step_metric:
if m.step_metric not in history_dict:
copy_key = tuple([m.step_metric])
step = self._metric_copy.get(copy_key)
if step is not None:
update_history[m.step_metric] = step
def _history_update_list(
self,
kl: List[str],
v: Any,
history_dict: Dict[str, Any],
update_history: Dict[str, Any],
) -> None:
if isinstance(v, dict):
for nk, nv in v.items():
self._history_update_list(
kl=kl[:] + [nk],
v=nv,
history_dict=history_dict,
update_history=update_history,
)
return
self._history_update_leaf(
kl=kl, v=v, history_dict=history_dict, update_history=update_history
)
def _history_update(
self, history: HistoryRecord, history_dict: Dict[str, Any],
) -> None:
# if syncing an old run, we can skip this logic
if history_dict.get("_step") is None:
self._history_assign_step(history, history_dict)
update_history: Dict[str, Any] = {}
# Look for metric matches
if self._metric_defines or self._metric_globs:
for hkey, hval in history_dict.items():
self._history_update_list([hkey], hval, history_dict, update_history)
if update_history:
history_dict.update(update_history)
for k, v in update_history.items():
item = history.item.add()
item.key = k
item.value_json = json.dumps(v)
def handle_history(self, record: Record) -> None:
history_dict = proto_util.dict_from_proto_list(record.history.item)
# Inject _runtime if it is not present
if history_dict is not None:
if "_runtime" not in history_dict:
self._history_assign_runtime(record.history, history_dict)
self._history_update(record.history, history_dict)
self._dispatch_record(record)
self._save_history(record.history)
updated = self._update_summary(history_dict)
if updated:
self._save_summary(self._consolidated_summary)
def _flush_partial_history(self, step: Optional[int] = None,) -> None:
if self._partial_history:
history = HistoryRecord()
for k, v in self._partial_history.items():
item = history.item.add()
item.key = k
item.value_json = json.dumps(v)
if step is not None:
history.step.num = step
self.handle_history(Record(history=history))
self._partial_history = {}
def handle_request_partial_history(self, record: Record) -> None:
partial_history = record.request.partial_history
flush = None
if partial_history.HasField("action"):
flush = partial_history.action.flush
step = None
if partial_history.HasField("step"):
step = partial_history.step.num
history_dict = proto_util.dict_from_proto_list(partial_history.item)
if step is not None:
if step < self._step:
logger.warning(
f"Step {step} < {self._step}. Dropping entry: {history_dict}."
)
return
elif step > self._step:
self._flush_partial_history()
self._step = step
elif flush is None:
flush = True
self._partial_history.update(history_dict)
if flush:
self._flush_partial_history(self._step)
def handle_summary(self, record: Record) -> None:
summary = record.summary
for item in summary.update:
if len(item.nested_key) > 0:
# we use either key or nested_key -- not both
assert item.key == ""
key = tuple(item.nested_key)
else:
# no counter-assertion here, because technically
# summary[""] is valid
key = (item.key,)
target = self._consolidated_summary
# recurse down the dictionary structure:
for prop in key[:-1]:
target = target[prop]
# use the last element of the key to write the leaf:
target[key[-1]] = json.loads(item.value_json)
for item in summary.remove:
if len(item.nested_key) > 0:
# we use either key or nested_key -- not both
assert item.key == ""
key = tuple(item.nested_key)
else:
# no counter-assertion here, because technically
# summary[""] is valid
key = (item.key,)
target = self._consolidated_summary
# recurse down the dictionary structure:
for prop in key[:-1]:
target = target[prop]
# use the last element of the key to erase the leaf:
del target[key[-1]]
self._save_summary(self._consolidated_summary)
def handle_exit(self, record: Record) -> None:
if self._track_time is not None:
self._accumulate_time += time.time() - self._track_time
record.exit.runtime = int(self._accumulate_time)
self._dispatch_record(record, always_send=True)
def handle_final(self, record: Record) -> None:
self._dispatch_record(record, always_send=True)
def handle_preempting(self, record: Record) -> None:
self._dispatch_record(record)
def handle_header(self, record: Record) -> None:
self._dispatch_record(record)
def handle_footer(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_check_version(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_attach(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_log_artifact(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_artifact_send(self, record: Record) -> None:
assert record.control.req_resp
result = proto_util._result_from_record(record)
self._dispatch_record(record)
# send response immediately, the request will be polled for result
xid = record.uuid
result.response.artifact_send_response.xid = xid
self._respond_result(result)
def handle_request_artifact_poll(self, record: Record) -> None:
assert record.control.req_resp
xid = record.request.artifact_poll.xid
assert xid
result = proto_util._result_from_record(record)
done_req = self._artifact_xid_done.get(xid)
if done_req:
result.response.artifact_poll_response.artifact_id = done_req.artifact_id
result.response.artifact_poll_response.error_message = (
done_req.error_message
)
result.response.artifact_poll_response.ready = True
self._respond_result(result)
def handle_request_artifact_done(self, record: Record) -> None:
assert not record.control.req_resp
done_req = record.request.artifact_done
xid = done_req.xid
assert xid
self._artifact_xid_done[xid] = done_req
# def handle_request_artifact_release(self, record: Record) -> None:
# assert record.control.req_resp
# # TODO: implement release protocol to clean up _artifact_xid_done dict
def handle_telemetry(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_run_start(self, record: Record) -> None:
run_start = record.request.run_start
assert run_start
assert run_start.run
self._run_start_time = run_start.run.start_time.ToSeconds()
self._track_time = time.time()
if run_start.run.resumed and run_start.run.runtime:
self._accumulate_time = run_start.run.runtime
else:
self._accumulate_time = 0
if not self._settings._disable_stats:
pid = os.getpid()
self._system_stats = stats.SystemStats(pid=pid, interface=self._interface)
self._system_stats.start()
if not self._settings._disable_meta and not run_start.run.resumed:
run_meta = meta.Meta(settings=self._settings, interface=self._interface)
run_meta.probe()
run_meta.write()
self._tb_watcher = tb_watcher.TBWatcher(
self._settings, interface=self._interface, run_proto=run_start.run
)
if run_start.run.resumed:
self._step = run_start.run.starting_step
result = proto_util._result_from_record(record)
self._respond_result(result)
def handle_request_resume(self, record: Record) -> None:
if self._system_stats is not None:
logger.info("starting system metrics thread")
self._system_stats.start()
if self._track_time is not None:
self._accumulate_time += time.time() - self._track_time
self._track_time = time.time()
def handle_request_pause(self, record: Record) -> None:
if self._system_stats is not None:
logger.info("stopping system metrics thread")
self._system_stats.shutdown()
if self._track_time is not None:
self._accumulate_time += time.time() - self._track_time
self._track_time = None
def handle_request_poll_exit(self, record: Record) -> None:
self._dispatch_record(record, always_send=True)
def handle_request_stop_status(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_network_status(self, record: Record) -> None:
self._dispatch_record(record)
def handle_request_status(self, record: Record) -> None:
self._dispatch_record(record, always_send=True)
def handle_request_get_summary(self, record: Record) -> None:
result = proto_util._result_from_record(record)
for key, value in self._consolidated_summary.items():
item = SummaryItem()
item.key = key
item.value_json = json.dumps(value)
result.response.get_summary_response.item.append(item)
self._respond_result(result)
def handle_tbrecord(self, record: Record) -> None:
logger.info("handling tbrecord: %s", record)
if self._tb_watcher:
tbrecord = record.tbrecord
self._tb_watcher.add(tbrecord.log_dir, tbrecord.save, tbrecord.root_dir)
self._dispatch_record(record)
def _handle_defined_metric(self, record: Record) -> None:
metric = record.metric
if metric._control.overwrite:
self._metric_defines.setdefault(metric.name, MetricRecord()).CopyFrom(
metric
)
else:
self._metric_defines.setdefault(metric.name, MetricRecord()).MergeFrom(
metric
)
# before dispatching, make sure step_metric is defined, if not define it and
# dispatch it locally first
metric = self._metric_defines[metric.name]
if metric.step_metric and metric.step_metric not in self._metric_defines:
m = MetricRecord(name=metric.step_metric)
self._metric_defines[metric.step_metric] = m
mr = Record()
mr.metric.CopyFrom(m)
            mr.control.local = True  # don't store this, just send it
self._dispatch_record(mr)
self._dispatch_record(record)
def _handle_glob_metric(self, record: Record) -> None:
metric = record.metric
if metric._control.overwrite:
self._metric_globs.setdefault(metric.glob_name, MetricRecord()).CopyFrom(
metric
)
else:
self._metric_globs.setdefault(metric.glob_name, MetricRecord()).MergeFrom(
metric
)
self._dispatch_record(record)
def handle_metric(self, record: Record) -> None:
"""Handle MetricRecord.
Walkthrough of the life of a MetricRecord:
Metric defined:
- run.define_metric() parses arguments create wandb_metric.Metric
- build MetricRecord publish to interface
- handler (this function) keeps list of metrics published:
- self._metric_defines: Fully defined metrics
- self._metric_globs: metrics that have a wildcard
- dispatch writer and sender thread
- writer: records are saved to persistent store
- sender: fully defined metrics get mapped into metadata for UI
History logged:
- handle_history
- check if metric matches _metric_defines
- if not, check if metric matches _metric_globs
- if _metric globs match, generate defined metric and call _handle_metric
Args:
record (Record): Metric record to process
"""
if record.metric.name:
self._handle_defined_metric(record)
elif record.metric.glob_name:
self._handle_glob_metric(record)
def handle_request_sampled_history(self, record: Record) -> None:
result = proto_util._result_from_record(record)
for key, sampled in self._sampled_history.items():
item = SampledHistoryItem()
item.key = key
values: Iterable[Any] = sampled.get()
if all(isinstance(i, numbers.Integral) for i in values):
item.values_int.extend(values)
elif all(isinstance(i, numbers.Real) for i in values):
item.values_float.extend(values)
result.response.sampled_history_response.item.append(item)
self._respond_result(result)
def handle_request_shutdown(self, record: Record) -> None:
# TODO(jhr): should we drain things and stop new requests from coming in?
result = proto_util._result_from_record(record)
self._respond_result(result)
self._stopped.set()
def finish(self) -> None:
logger.info("shutting down handler")
if self._tb_watcher:
self._tb_watcher.finish()
def __next__(self) -> Record:
return self._record_q.get(block=True)
next = __next__
def _history_assign_runtime(
self, history: HistoryRecord, history_dict: Dict[str, Any],
) -> None:
# _runtime calculation is meaningless if there is no _timestamp
if "_timestamp" not in history_dict:
return
# if it is offline sync, self._run_start_time is 0
# in that case set it to the first tfevent timestamp
if self._run_start_time == 0:
self._run_start_time = history_dict["_timestamp"]
history_dict["_runtime"] = int(
history_dict["_timestamp"] - self._run_start_time
)
item = history.item.add()
item.key = "_runtime"
item.value_json = json.dumps(history_dict[item.key])
"""
Octave (and Matlab) code printer
The `OctaveCodePrinter` converts SymPy expressions into Octave expressions.
It uses a subset of the Octave language for Matlab compatibility.
A complete code generator, which uses `octave_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import print_function, division
from sympy.core import Mul, Pow, S, Rational
from sympy.core.compatibility import string_types, range
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Octave. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "asin", "acos", "atan", "atan2",
"sinh", "cosh", "tanh", "asinh", "acosh", "atanh",
"log", "exp", "erf", "gamma", "sign", "floor", "csc",
"sec", "cot", "coth", "acot", "acoth", "erfc",
"besselj", "bessely", "besseli", "besselk",
"erfinv", "erfcinv", "factorial" ]
# These functions have different names ("Sympy": "Octave"), more
# generally a mapping to (argument_conditions, octave_function).
known_fcns_src2 = {
"Abs": "abs",
"ceiling": "ceil",
"conjugate": "conj",
"DiracDelta": "dirac",
"Heaviside": "heaviside",
}
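# For example, sympy's ceiling(x) prints as "ceil(x)" and Abs(x) as "abs(x)",
# while the functions listed in known_fcns_src1 keep their names unchanged.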
class OctaveCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Octave/Matlab code.
"""
printmethod = "_octave"
language = "Octave"
_operators = {
'and': '&',
'or': '|',
'not': '~',
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 16,
'user_functions': {},
'human': True,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
    # assignment (if False).  FIXME: this should be looked at more carefully
# for Octave.
def __init__(self, settings={}):
super(OctaveCodePrinter, self).__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "% {0}".format(text)
def _declare_number_const(self, name, value):
return "{0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Octave uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Octave arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Octave
if (expr.is_number and expr.is_imaginary and
expr.as_coeff_Mul()[0].is_integer):
return "%si" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = r + mulsym + a_str[i]
return r
if len(b) == 0:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return sign + multjoin(a, a_str) + divsym + b_str[0]
else:
divsym = '/' if all([bi.is_number for bi in b]) else './'
return (sign + multjoin(a, a_str) +
divsym + "(%s)" % multjoin(b, b_str))
def _print_Pow(self, expr):
powsymbol = '^' if all([x.is_number for x in expr.args]) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "sqrt(%s)" % self._print(expr.base)
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
return 'pi'
def _print_ImaginaryUnit(self, expr):
return "1i"
def _print_Exp1(self, expr):
return "exp(1)"
def _print_GoldenRatio(self, expr):
# FIXME: how to do better, e.g., for octave_code(2*GoldenRatio)?
#return self._print((1+sqrt(S(5)))/2)
return "(1+sqrt(5))/2"
def _print_NumberSymbol(self, expr):
if self._settings["inline"]:
return self._print(expr.evalf(self._settings["precision"]))
else:
# assign to a variable, perhaps more readable for longer program
return super(OctaveCodePrinter, self)._print_NumberSymbol(expr)
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'inf'
def _print_NegativeInfinity(self, expr):
return '-inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return '{' + ', '.join(self._print(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if (A.rows, A.cols) == (0, 0):
return '[]'
elif A.rows == 0 or A.cols == 0:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
# Octave does not distinguish between scalars and 1x1 matrices
return self._print(A[0, 0])
elif A.rows == 1:
return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
elif A.cols == 1:
# note .table would unnecessarily equispace the rows
return "[%s]" % "; ".join([self._print(a) for a in A])
return "[%s]" % A.table(self, rowstart='', rowend='',
rowsep=';\n', colsep=' ')
def _print_SparseMatrix(self, A):
from sympy.matrices import Matrix
        L = A.col_list()
# make row vectors of the indices and entries
I = Matrix([[k[0] + 1 for k in L]])
J = Matrix([[k[1] + 1 for k in L]])
AIJ = Matrix([[k[2] for k in L]])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
# FIXME: Str/CodePrinter could define each of these to call the _print
# method from higher up the class hierarchy (see _print_NumberSymbol).
# Then subclasses like us would not need to repeat all this.
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_SparseMatrix
def _print_MatrixElement(self, expr):
        return self._print(expr.parent) + '(%s, %s)' % (expr.i + 1, expr.j + 1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '(' +
strslice(expr.rowslice, expr.parent.shape[0]) + ', ' +
strslice(expr.colslice, expr.parent.shape[1]) + ')')
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
def _print_hankel1(self, expr):
return "besselh(%s, 1, %s)" % (self._print(expr.order),
self._print(expr.argument))
def _print_hankel2(self, expr):
return "besselh(%s, 2, %s)" % (self._print(expr.order),
self._print(expr.argument))
# Note: as of 2015, Octave doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_airyai(self, expr):
return "airy(0, %s)" % self._print(expr.args[0])
def _print_airyaiprime(self, expr):
return "airy(1, %s)" % self._print(expr.args[0])
def _print_airybi(self, expr):
return "airy(2, %s)" % self._print(expr.args[0])
def _print_airybiprime(self, expr):
return "airy(3, %s)" % self._print(expr.args[0])
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
# Express each (cond, expr) pair in a nested Horner form:
# (condition) .* (expr) + (not cond) .* (<others>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({0}).*({1}) + (~({0})).*(".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = "%s" % self._print(expr.args[-1].expr)
pw = " ...\n".join(ecpairs) + elast + ")"*len(ecpairs)
            # Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any([search(re, line) for re in inc_regex]))
for line in code ]
decrease = [ int(any([search(re, line) for re in dec_regex]))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
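# --- Illustrative sketch (not part of the original module) ---
# A minimal demonstration of the printer logic above, assuming a standard
# sympy installation: numbers multiply with '*' and symbols element-wise
# with '.*' (see _print_Mul), symbolic powers use '.^' (see _print_Pow),
# dense matrices use inline notation (see _print_MatrixBase), and
# indent_code re-indents if/for blocks by two spaces. The expected strings
# are derived from the methods above.
def _demo_octave_printer():
    from sympy import symbols, Matrix, octave_code
    x, y = symbols('x y')
    assert octave_code(2*x*y) == '2*x.*y'   # number '*', symbols '.*'
    assert octave_code(x/y) == 'x./y'       # symbolic division -> './'
    assert octave_code(x**2) == 'x.^2'      # symbolic power -> '.^'
    assert octave_code(Matrix([[1, 2], [3, 4]])) == '[1 2;\n3 4]'
    printer = OctaveCodePrinter()
    assert printer.indent_code("if (x > 0)\ny = 1;\nend") == (
        "if (x > 0)\n  y = 1;\nend")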
def octave_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Octave (or Matlab) code.
The string uses a subset of the Octave language for Matlab compatibility.
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=16].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import octave_code, symbols, sin, pi
>>> x = symbols('x')
>>> octave_code(sin(x).series(x).removeO())
'x.^5/120 - x.^3/6 + x'
>>> from sympy import Rational, ceiling, Abs
>>> x, y, tau = symbols("x, y, tau")
>>> octave_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau.^(7/2)'
    Note that element-wise (Hadamard) operations are used by default between
    symbols. This is because it is very common in Octave to write "vectorized"
    code. It is harmless if the values are scalars.
>>> octave_code(sin(pi*x*y), assign_to="s")
's = sin(pi*x.*y);'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> octave_code(3*pi*A**3)
'(3*pi)*A^3'
    This class uses several rules to decide which symbol to use for a product.
    Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
    A HadamardProduct can be used to specify componentwise multiplication ".*"
    of two MatrixSymbols. There is currently no easy way to specify
    scalar symbols, so sometimes the code might have some minor cosmetic
    issues. For example, suppose x and y are scalars and A is a Matrix, then
    while a human programmer might write "(x^2*y)*A^3", we generate:
>>> octave_code(x**2*y*A**3)
'(x.^2.*y)*A^3'
    Matrices are supported using Octave inline notation. When using
    ``assign_to`` with matrices, the name can be specified either as a string
    or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 sin(x) ceil(x)];'
``Piecewise`` expressions are implemented with logical masking by default.
Alternatively, you can pass "inline=False" to use if-else conditionals.
Note that if the ``Piecewise`` lacks a default term, represented by
``(expr, True)`` then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> octave_code(pw, assign_to=tau)
'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 ((x > 0).*(x + 1) + (~(x > 0)).*(x)) sin(x)];'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Octave function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_octave_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> octave_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_octave_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
    >>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> octave_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy(i) = (y(i + 1) - y(i))./(t(i + 1) - t(i));'
"""
return OctaveCodePrinter(settings).doprint(expr, assign_to)
def print_octave_code(expr, **settings):
"""Prints the Octave (or Matlab) representation of the given expression.
See `octave_code` for the meaning of the optional arguments.
"""
print(octave_code(expr, **settings))
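# --- Illustrative sketch (not part of the original module) ---
# How _print_Piecewise renders in its two modes, assuming sympy is
# importable: inline=True (the default) emits nested logical masking,
# while inline=False emits an if/else/end block. The first expected
# string is taken from the docstring above; the second follows from the
# non-inline branch of _print_Piecewise plus indent_code.
def _demo_octave_piecewise():
    from sympy import symbols, Piecewise, octave_code
    x, tau = symbols('x tau')
    pw = Piecewise((x + 1, x > 0), (x, True))
    assert octave_code(pw, assign_to=tau) == (
        'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));')
    assert octave_code(pw, assign_to=tau, inline=False) == (
        'if (x > 0)\n  tau = x + 1;\nelse\n  tau = x;\nend')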
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides DockerImage for examining docker_build outputs."""
import abc
import cStringIO
import httplib
import json
import os
import tarfile
from containerregistry.client import docker_creds # pylint: disable=unused-import
from containerregistry.client import docker_name
from containerregistry.client.v2 import docker_http
import httplib2 # pylint: disable=unused-import
class DockerImage(object):
"""Interface for implementations that interact with Docker images."""
  __metaclass__ = abc.ABCMeta  # For enforcing that methods are overridden.
def fs_layers(self):
"""The ordered collection of filesystem layers that comprise this image."""
manifest = json.loads(self.manifest())
return [x['blobSum'] for x in manifest['fsLayers']]
def blob_set(self):
"""The unique set of blobs that compose to create the filesystem."""
return set(self.fs_layers())
@abc.abstractmethod
def manifest(self):
"""The JSON manifest referenced by the tag/digest.
Returns:
The raw json manifest
"""
def blob_size(self, digest):
"""The byte size of the raw blob."""
return len(self.blob(digest))
@abc.abstractmethod
def blob(self, digest):
"""The raw blob of the layer.
Args:
digest: the 'algo:digest' of the layer being addressed.
Returns:
The raw blob string of the layer.
"""
# __enter__ and __exit__ allow use as a context manager.
@abc.abstractmethod
def __enter__(self):
"""Open the image for reading."""
@abc.abstractmethod
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Close the image."""
class FromRegistry(DockerImage):
"""This accesses a docker image hosted on a registry (non-local)."""
def __init__(
self,
name,
basic_creds,
transport):
self._name = name
self._creds = basic_creds
self._original_transport = transport
self._response = {}
def _content(self, suffix, cache=True):
"""Fetches content of the resources from registry by http calls."""
if isinstance(self._name, docker_name.Repository):
suffix = '{repository}/{suffix}'.format(
repository=self._name.repository,
suffix=suffix)
if suffix in self._response:
return self._response[suffix]
_, content = self._transport.Request(
'{scheme}://{registry}/v2/{suffix}'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry,
suffix=suffix),
accepted_codes=[httplib.OK])
if cache:
self._response[suffix] = content
return content
def _tags(self):
# See //cloud/containers/registry/proto/v2/tags.proto
# for the full response structure.
return json.loads(self._content('tags/list'))
def tags(self):
return self._tags().get('tags', [])
def manifests(self):
payload = self._tags()
if 'manifest' not in payload:
# Only GCR supports this schema.
return {}
return payload['manifest']
def children(self):
payload = self._tags()
if 'child' not in payload:
# Only GCR supports this schema.
return []
return payload['child']
def exists(self):
try:
self.manifest()
return True
except docker_http.V2DiagnosticException:
# TODO(user): Check for 404
return False
def manifest(self):
"""Override."""
# GET server1/v2/<name>/manifests/<tag_or_digest>
if isinstance(self._name, docker_name.Tag):
return self._content('manifests/' + self._name.tag)
else:
assert isinstance(self._name, docker_name.Digest)
return self._content('manifests/' + self._name.digest)
def blob_size(self, digest):
"""The byte size of the raw blob."""
suffix = 'blobs/' + digest
if isinstance(self._name, docker_name.Repository):
suffix = '{repository}/{suffix}'.format(
repository=self._name.repository,
suffix=suffix)
resp, unused_content = self._transport.Request(
'{scheme}://{registry}/v2/{suffix}'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry,
suffix=suffix),
method='HEAD',
accepted_codes=[httplib.OK])
return int(resp['content-length'])
# Large, do not memoize.
def blob(self, digest):
"""Override."""
# GET server1/v2/<name>/blobs/<digest>
return self._content('blobs/' + digest, cache=False)
def catalog(self, page_size=100):
# TODO(user): Handle docker_name.Repository for /v2/<name>/_catalog
if isinstance(self._name, docker_name.Repository):
raise ValueError('Expected docker_name.Registry for "name"')
url = '{scheme}://{registry}/v2/_catalog?n={page_size}'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry,
page_size=page_size)
for _, content in self._transport.PaginatedRequest(
url, accepted_codes=[httplib.OK]):
wrapper_object = json.loads(content)
if 'repositories' not in wrapper_object:
raise docker_http.BadStateException(
'Malformed JSON response: %s' % content)
for repo in wrapper_object['repositories']:
# TODO(user): This should return docker_name.Repository instead.
yield repo
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
# Create a v2 transport to use for making authenticated requests.
self._transport = docker_http.Transport(
self._name, self._creds, self._original_transport, docker_http.PULL)
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
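# --- Illustrative sketch (not part of the original module) ---
# Typical use of FromRegistry as a context manager. The image name is an
# assumption for illustration, and docker_creds.Anonymous() stands in for
# whatever credentials the registry actually requires.
def _example_pull_manifest():
  name = docker_name.Tag('gcr.io/my-project/my-image:latest')
  creds = docker_creds.Anonymous()
  transport = httplib2.Http()
  with FromRegistry(name, creds, transport) as img:
    if img.exists():
      return img.manifest()
  return None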
def _in_whiteout_dir(
fs,
name
):
while name:
dirname = os.path.dirname(name)
if name == dirname:
break
if fs.get(dirname):
return True
name = dirname
return False
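# --- Illustrative sketch (not part of the original module) ---
# _in_whiteout_dir walks up the directory chain of a name and reports
# whether any ancestor directory was recorded as a tombstone in fs. A
# tiny synthetic check with made-up paths:
def _demo_in_whiteout_dir():
  fs = {'a': True}   # directory 'a' was whited out in a higher layer
  assert _in_whiteout_dir(fs, 'a/b/c')
  assert not _in_whiteout_dir(fs, 'x/y')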
_WHITEOUT_PREFIX = '.wh.'
def extract(image, tar):
"""Extract the final filesystem from the image into tar.
Args:
image: a docker image whose final filesystem to construct.
tar: the open tarfile into which we are writing the final filesystem.
"""
# Maps all of the files we have already added (and should never add again)
# to whether they are a tombstone or not.
fs = {}
# Walk the layers, topmost first and add files. If we've seen them in a
# higher layer then we skip them.
for layer in image.fs_layers():
buf = cStringIO.StringIO(image.blob(layer))
with tarfile.open(mode='r:gz', fileobj=buf) as layer_tar:
for member in layer_tar.getmembers():
# If we see a whiteout file, then don't add anything to the tarball
# but ensure that any lower layers don't add a file with the whited
# out name.
basename = os.path.basename(member.name)
dirname = os.path.dirname(member.name)
tombstone = basename.startswith(_WHITEOUT_PREFIX)
if tombstone:
basename = basename[len(_WHITEOUT_PREFIX):]
# Before adding a file, check to see whether it (or its whiteout) have
# been seen before.
name = os.path.normpath(os.path.join('.', dirname, basename))
if name in fs:
continue
# Check for a whited out parent directory
if _in_whiteout_dir(fs, name):
continue
# Mark this file as handled by adding its name.
# A non-directory implicitly tombstones any entries with
# a matching (or child) name.
fs[name] = tombstone or not member.isdir()
if not tombstone:
if member.isfile():
tar.addfile(member, fileobj=layer_tar.extractfile(member.name))
else:
tar.addfile(member, fileobj=None)
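# --- Illustrative sketch (not part of the original module) ---
# Putting the pieces together: flatten an already-opened DockerImage into
# a single tarball on disk. The output path is an assumption.
def _example_flatten(image):
  with tarfile.open('flattened.tar', mode='w') as tar:
    extract(image, tar)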
| |
import onmt
import onmt.Models
import onmt.modules
import torch.nn as nn
import torch
from torch.autograd import Variable
def loadImageLibs():
"Conditional import of torch image libs."
global Image, transforms
from PIL import Image
from torchvision import transforms
class Translator(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.beam_accum = None
checkpoint = torch.load(opt.model,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
self.src_dict = checkpoint['dicts']['src']
self.tgt_dict = checkpoint['dicts']['tgt']
self._type = model_opt.encoder_type \
if "encoder_type" in model_opt else "text"
if self._type == "text":
encoder = onmt.Models.Encoder(model_opt, self.src_dict)
elif self._type == "img":
loadImageLibs()
encoder = onmt.modules.ImageEncoder(model_opt)
decoder = onmt.Models.Decoder(model_opt, self.tgt_dict)
model = onmt.Models.NMTModel(encoder, decoder)
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, self.tgt_dict.size()),
nn.LogSoftmax())
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
if opt.cuda:
model.cuda()
generator.cuda()
else:
model.cpu()
generator.cpu()
model.generator = generator
self.model = model
self.model.eval()
def initBeamAccum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def _getBatchSize(self, batch):
if self._type == "text":
return batch.size(1)
else:
return batch.size(0)
def buildData(self, srcBatch, goldBatch):
# This needs to be the same as preprocess.py.
if self._type == "text":
srcData = [self.src_dict.convertToIdxTensor(b,
onmt.Constants.UNK_WORD)
for b in srcBatch]
elif self._type == "img":
srcData = [transforms.ToTensor()(
Image.open(self.opt.src_img_dir + "/" + b[0]))
for b in srcBatch]
tgtData = None
if goldBatch:
tgtData = [self.tgt_dict.convertToIdxTensor(b,
onmt.Constants.UNK_WORD,
onmt.Constants.BOS_WORD,
onmt.Constants.EOS_WORD) for b in goldBatch]
return onmt.Dataset(srcData, tgtData, self.opt.batch_size,
self.opt.cuda, volatile=True,
data_type=self._type)
def buildTargetTokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.Constants.EOS)
tokens = tokens[:-1] # EOS
if self.opt.replace_unk:
for i in range(len(tokens)):
if tokens[i] == onmt.Constants.UNK_WORD:
_, maxIndex = attn[i].max(0)
tokens[i] = src[maxIndex[0]]
return tokens
def buildAlignment(self, src, pred, attn):
src_length = len(src)
alignment = [] # contains a list of pairs (src_pos, trg_pos)
        for i in range(len(pred)):
_, max_index = attn[i].max(0)
j = max_index[0]
if 0 <= j < src_length:
alignment.append((j, i))
return alignment
def translateBatch(self, srcBatch, tgtBatch):
# Batch size is in different location depending on data.
beamSize = self.opt.beam_size
# (1) run the encoder on the src
encStates, context = self.model.encoder(srcBatch)
# Drop the lengths needed for encoder.
srcBatch = srcBatch[0]
batchSize = self._getBatchSize(srcBatch)
rnnSize = context.size(2)
encStates = (self.model._fix_enc_hidden(encStates[0]),
self.model._fix_enc_hidden(encStates[1]))
decoder = self.model.decoder
attentionLayer = decoder.attn
useMasking = self._type == "text"
# This mask is applied to the attention model inside the decoder
# so that the attention ignores source padding
padMask = None
if useMasking:
padMask = srcBatch.data.eq(onmt.Constants.PAD).t()
def mask(padMask):
if useMasking:
attentionLayer.applyMask(padMask)
# (2) if a target is specified, compute the 'goldScore'
# (i.e. log likelihood) of the target under the model
goldScores = context.data.new(batchSize).zero_()
if tgtBatch is not None:
decStates = encStates
decOut = self.model.make_init_decoder_output(context)
mask(padMask)
initOutput = self.model.make_init_decoder_output(context)
decOut, decStates, attn = self.model.decoder(
tgtBatch[:-1], decStates, context, initOutput)
for dec_t, tgt_t in zip(decOut, tgtBatch[1:].data):
gen_t = self.model.generator.forward(dec_t)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.data.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.Constants.PAD), 0)
goldScores += scores
# (3) run the decoder to generate sentences, using beam search
# Expand tensors for each beam.
context = Variable(context.data.repeat(1, beamSize, 1))
decStates = (Variable(encStates[0].data.repeat(1, beamSize, 1)),
Variable(encStates[1].data.repeat(1, beamSize, 1)))
beam = [onmt.Beam(beamSize, self.opt.cuda) for k in range(batchSize)]
decOut = self.model.make_init_decoder_output(context)
if useMasking:
padMask = srcBatch.data.eq(
onmt.Constants.PAD).t() \
.unsqueeze(0) \
.repeat(beamSize, 1, 1)
batchIdx = list(range(batchSize))
remainingSents = batchSize
for i in range(self.opt.max_sent_length):
mask(padMask)
# Prepare decoder input.
input = torch.stack([b.getCurrentState() for b in beam
if not b.done]).t().contiguous().view(1, -1)
decOut, decStates, attn = self.model.decoder(
Variable(input, volatile=True), decStates, context, decOut)
# decOut: 1 x (beam*batch) x numWords
decOut = decOut.squeeze(0)
out = self.model.generator.forward(decOut)
# batch x beam x numWords
wordLk = out.view(beamSize, remainingSents, -1) \
.transpose(0, 1).contiguous()
attn = attn.view(beamSize, remainingSents, -1) \
.transpose(0, 1).contiguous()
active = []
for b in range(batchSize):
if beam[b].done:
continue
idx = batchIdx[b]
if not beam[b].advance(wordLk.data[idx], attn.data[idx]):
active += [b]
for decState in decStates: # iterate over h, c
# layers x beam*sent x dim
sentStates = decState.view(-1, beamSize,
remainingSents,
decState.size(2))[:, :, idx]
sentStates.data.copy_(
sentStates.data.index_select(
1, beam[b].getCurrentOrigin()))
if not active:
break
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
activeIdx = self.tt.LongTensor([batchIdx[k] for k in active])
batchIdx = {beam: idx for idx, beam in enumerate(active)}
def updateActive(t):
# select only the remaining active sentences
view = t.data.view(-1, remainingSents, rnnSize)
newSize = list(t.size())
newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents
return Variable(view.index_select(1, activeIdx)
.view(*newSize), volatile=True)
decStates = (updateActive(decStates[0]),
updateActive(decStates[1]))
decOut = updateActive(decOut)
context = updateActive(context)
if useMasking:
padMask = padMask.index_select(1, activeIdx)
remainingSents = len(active)
# (4) package everything up
allHyp, allScores, allAttn = [], [], []
n_best = self.opt.n_best
for b in range(batchSize):
scores, ks = beam[b].sortBest()
allScores += [scores[:n_best]]
hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
allHyp += [hyps]
if useMasking:
valid_attn = srcBatch.data[:, b].ne(onmt.Constants.PAD) \
.nonzero().squeeze(1)
attn = [a.index_select(1, valid_attn) for a in attn]
allAttn += [attn]
if self.beam_accum:
self.beam_accum["beam_parent_ids"].append(
[t.tolist()
for t in beam[b].prevKs])
self.beam_accum["scores"].append([
["%4f" % s for s in t.tolist()]
for t in beam[b].allScores][1:])
self.beam_accum["predicted_ids"].append(
[[self.tgt_dict.getLabel(id)
for id in t.tolist()]
for t in beam[b].nextYs][1:])
return allHyp, allScores, allAttn, goldScores
def translate(self, srcBatch, goldBatch):
# (1) convert words to indexes
dataset = self.buildData(srcBatch, goldBatch)
src, tgt, indices = dataset[0]
batchSize = self._getBatchSize(src[0])
# (2) translate
pred, predScore, attn, goldScore = self.translateBatch(src, tgt)
pred, predScore, attn, goldScore = list(zip(
*sorted(zip(pred, predScore, attn, goldScore, indices),
key=lambda x: x[-1])))[:-1]
# (3) convert indexes to words
predBatch = []
for b in range(batchSize):
predBatch.append(
[self.buildTargetTokens(pred[b][n], srcBatch[b], attn[b][n])
for n in range(self.opt.n_best)]
)
# (4) get alignment
alignmentBatch = []
if self.opt.alignment:
for b in range(batchSize):
alignmentBatch.append(
[self.buildAlignment(srcBatch[b], predBatch[b][n], attn[b][n])
for n in range(self.opt.n_best)]
)
return predBatch, predScore, goldScore, alignmentBatch
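# --- Illustrative sketch (not part of the original file) ---
# Minimal driving code for Translator. The option names mirror the
# attributes the class reads above (opt.model, opt.cuda, opt.beam_size,
# ...); the checkpoint path and the input tokens are assumptions.
def _example_translate():
    import argparse
    opt = argparse.Namespace(
        model='model.pt', cuda=False, beam_size=5, n_best=1,
        max_sent_length=100, batch_size=30, replace_unk=True,
        alignment=False, src_img_dir='')
    translator = Translator(opt)
    predBatch, predScore, goldScore, alignment = translator.translate(
        [['hello', 'world']], [])
    return ' '.join(predBatch[0][0])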
| |
import traceback
from typing import Any, Dict, List, Optional, Union
import dateparser
import demistomock as demisto # noqa: F401
import requests
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
MAX_INCIDENTS_TO_FETCH = 50
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
''' CLIENT CLASS '''
class Client(BaseClient):
def test_connect(self, api_key):
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': 'pulsedive.com',
'key': api_key
}
)
def get_ip_reputation(self, ip: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': ip,
'pretty': '1',
'key': api_key
}
)
def get_domain_reputation(self, domain: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': domain,
'pretty': '1',
'key': api_key
}
)
def get_url_reputation(self, url: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': url,
'pretty': '1',
'key': api_key
}
)
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
"""Converts whois date format to an ISO8601 string
    Converts the domain WHOIS date (YYYY-mm-dd HH:MM:SS) format
    into a datetime. If a list is given with multiple elements, takes only
    the first one.
    :type domain_date: ``Union[List[str], str]``
    :param domain_date:
        a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'
:return: Parsed time in ISO8601 format
:rtype: ``Optional[str]``
"""
if isinstance(domain_date, str):
# if str parse the value
return dateparser.parse(domain_date).strftime(date_format)
elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
# if list with at least one element, parse the first element
return dateparser.parse(domain_date[0]).strftime(date_format)
# in any other case return nothing
return None
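# Illustrative sketch (not part of the original integration): behavior of
# parse_domain_date on its accepted input shapes. The sample dates are
# made up; the expected output follows the default date_format above.
def _demo_parse_domain_date():
    assert parse_domain_date('2021-03-04 10:20:30') == '2021-03-04T10:20:30.000Z'
    assert parse_domain_date(['2021-03-04 10:20:30']) == '2021-03-04T10:20:30.000Z'
    assert parse_domain_date([]) is None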
def convert_to_xsoar_severity(pulsedive_severity) -> int:
if (pulsedive_severity == 'unknown' or pulsedive_severity == 'none'):
xsoar_severity = Common.DBotScore.NONE # unknown
elif pulsedive_severity == 'high':
xsoar_severity = Common.DBotScore.SUSPICIOUS # suspicious
elif pulsedive_severity == 'critical':
xsoar_severity = Common.DBotScore.BAD # bad
else:
xsoar_severity = Common.DBotScore.GOOD # good
return xsoar_severity
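# Illustrative sketch (not part of the original integration): the full
# risk -> DBotScore mapping implemented above. Note that 'high' maps to
# SUSPICIOUS, only 'critical' maps to BAD, and anything unrecognized
# (e.g. 'low', 'medium') falls through to GOOD.
def _demo_severity_mapping():
    assert convert_to_xsoar_severity('none') == Common.DBotScore.NONE
    assert convert_to_xsoar_severity('high') == Common.DBotScore.SUSPICIOUS
    assert convert_to_xsoar_severity('critical') == Common.DBotScore.BAD
    assert convert_to_xsoar_severity('low') == Common.DBotScore.GOOD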
''' COMMAND FUNCTIONS '''
def test_module(client: Client, api_key) -> str:
"""Tests API connectivity and authentication"""
try:
client.test_connect(api_key)
except DemistoException:
return 'Could not connect to Pulsedive'
return 'ok'
def ip_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
command_results: List[CommandResults] = []
for ip in ips:
try:
ip_data = client.get_ip_reputation(ip, api_key)
indicator_ip = ip_data['indicator']
reputation = ip_data['risk']
score = convert_to_xsoar_severity(reputation)
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=indicator_ip,
indicator_type=DBotScoreType.IP,
integration_name='Pulsedive',
score=score,
malicious_description=f'Pulsedive returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=indicator_ip,
dbot_score=dbot_score
)
ip_data.pop('objects', None)
ip_data.pop('nir', None)
command_results.append(CommandResults(
readable_output=tableToMarkdown('IP Details:', ip_data),
outputs_prefix='Pulsedive.IP',
outputs_key_field='indicator',
outputs=ip_data,
indicator=ip_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(ip) + ' not found in indicator data',
outputs_prefix='Pulsedive.IP',
outputs_key_field='indicator',
indicator=ip_standard_context
))
return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
command_results: List[CommandResults] = []
for domain in domains:
try:
domain_data = client.get_domain_reputation(domain, api_key)
indicator_domain = domain_data['indicator']
reputation = domain_data['risk']
score = convert_to_xsoar_severity(reputation)
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
dbot_score = Common.DBotScore(
indicator=indicator_domain,
integration_name='Pulsedive',
indicator_type=DBotScoreType.DOMAIN,
score=score,
                malicious_description=f'Pulsedive returned reputation {reputation}'
)
domain_standard_context = Common.Domain(
domain=indicator_domain,
# creation_date=domain_data.get('creation_date', None),
# expiration_date=domain_data.get('expiration_date', None),
# updated_date=domain_data.get('updated_date', None),
# organization=domain_data.get('org', None),
# name_servers=domain_data.get('name_servers', None),
# registrant_name=domain_data.get('name', None),
# registrant_country=domain_data.get('country', None),
# registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=tableToMarkdown('Domain Details:', domain_data),
outputs_prefix='Pulsedive.Domain',
outputs_key_field='indicator',
outputs=domain_data,
indicator=domain_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
domain_standard_context = Common.Domain(
domain=domain,
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(domain) + ' not found in indicator data',
outputs_prefix='Pulsedive.Domain',
outputs_key_field='indicator',
indicator=domain_standard_context
))
return command_results
def url_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
urls = argToList(args.get('url'))
if len(urls) == 0:
raise ValueError('URL(s) not specified')
command_results: List[CommandResults] = []
for url in urls:
try:
url_data = client.get_url_reputation(url, api_key)
indicator_url = url_data['indicator']
reputation = url_data['risk']
score = convert_to_xsoar_severity(reputation)
dbot_score = Common.DBotScore(
indicator=str(indicator_url),
indicator_type=DBotScoreType.URL,
integration_name='Pulsedive',
score=score,
malicious_description=f'Pulsedive returned reputation {reputation}'
)
url_standard_context = Common.URL(
url=indicator_url,
dbot_score=dbot_score
)
url_data.pop('objects', None)
url_data.pop('nir', None)
command_results.append(CommandResults(
readable_output=tableToMarkdown('URL Details:', url_data),
outputs_prefix='Pulsedive.URL',
outputs_key_field='indicator',
outputs=url_data,
indicator=url_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=str(url),
indicator_type=DBotScoreType.URL,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
url_standard_context = Common.URL(
url=str(url),
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(url) + ' not found in indicator data',
outputs_prefix='Pulsedive.URL',
outputs_key_field='indicator',
indicator=url_standard_context
))
return command_results
''' MAIN FUNCTION '''
def main() -> None:
api_key = demisto.params().get('apikey')
base_url = 'https://www.pulsedive.com/api'
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
headers = {'User-Agent': 'XSOAR - Integration'}
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client, api_key))
elif demisto.command() == 'ip':
return_results(ip_reputation_command(client, demisto.args(), api_key))
elif demisto.command() == 'domain':
return_results(domain_reputation_command(client, demisto.args(), api_key))
elif demisto.command() == 'url':
return_results(url_reputation_command(client, demisto.args(), api_key))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| |
"""
@package mi.instrument.star_asimet.bulkmet.metbk_a.driver
@file marine-integrations/mi/instrument/star_asimet/bulkmet/metbk_a/driver.py
@author Bill Bollenbacher
@brief Driver for the metbk_a
Release notes:
initial version
"""
import re
import time
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, \
InstrumentProtocolException
from mi.core.time_tools import get_timestamp_delayed
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
log = get_logger()
# newline.
NEWLINE = '\r\n'
# default timeout.
TIMEOUT = 10
SYNC_TIMEOUT = 30
AUTO_SAMPLE_SCHEDULED_JOB = 'auto_sample'
LOGGING_STATUS_REGEX = r'.*Sampling (GO|STOPPED)'
LOGGING_STATUS_COMPILED = re.compile(LOGGING_STATUS_REGEX, re.DOTALL)
LOGGING_SYNC_REGEX = r'.*Sampling GO - synchronizing\.\.\.'
LOGGING_SYNC_COMPILED = re.compile(LOGGING_SYNC_REGEX, re.DOTALL)
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
ACQUIRE_STATUS = 'acquire_status'
CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
SYNC_CLOCK = 'PROTOCOL_STATE_SYNC_CLOCK'
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
GET = DriverEvent.GET
SET = DriverEvent.SET
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
FLASH_STATUS = 'DRIVER_EVENT_FLASH_STATUS'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
FLASH_STATUS = ProtocolEvent.FLASH_STATUS
START_DIRECT = ProtocolEvent.START_DIRECT
STOP_DIRECT = ProtocolEvent.STOP_DIRECT
DISCOVER = ProtocolEvent.DISCOVER
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
CLOCK = 'clock'
SAMPLE_INTERVAL = 'sample_interval'
class Prompt(BaseEnum):
"""
Device i/o prompts.
"""
CR_NL = NEWLINE
STOPPED = "Sampling STOPPED"
SYNC = "Sampling GO - synchronizing..."
GO = "Sampling GO"
FS = "bytes free\r" + NEWLINE
class Command(BaseEnum):
"""
Instrument command strings
"""
GET_CLOCK = "#CLOCK"
SET_CLOCK = "#CLOCK="
D = "#D"
FS = "#FS"
STAT = "#STAT"
GO = "#GO"
STOP = "#STOP"
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
RAW = CommonDataParticleType.RAW
METBK_PARSED = 'metbk_parsed'
METBK_STATUS = 'metbk_status'
###############################################################################
# Data Particles
###############################################################################
class METBK_SampleDataParticleKey(BaseEnum):
BAROMETRIC_PRESSURE = 'barometric_pressure'
RELATIVE_HUMIDITY = 'relative_humidity'
AIR_TEMPERATURE = 'air_temperature'
LONGWAVE_IRRADIANCE = 'longwave_irradiance'
PRECIPITATION = 'precipitation'
SEA_SURFACE_TEMPERATURE = 'sea_surface_temperature'
SEA_SURFACE_CONDUCTIVITY = 'sea_surface_conductivity'
SHORTWAVE_IRRADIANCE = 'shortwave_irradiance'
EASTWARD_WIND_VELOCITY = 'eastward_wind_velocity'
NORTHWARD_WIND_VELOCITY = 'northward_wind_velocity'
class METBK_SampleDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_PARSED
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
        SAMPLE_DATA_PATTERN = (r'(-*\d+\.\d+)' +     # BPR
                               r'\s*(-*\d+\.\d+)' +  # RH %
                               r'\s*(-*\d+\.\d+)' +  # RH temp
                               r'\s*(-*\d+\.\d+)' +  # LWR
                               r'\s*(-*\d+\.\d+)' +  # PRC
                               r'\s*(-*\d+\.\d+)' +  # ST
                               r'\s*(-*\d+\.\d+)' +  # SC
                               r'\s*(-*\d+\.\d+)' +  # SWR
                               r'\s*(-*\d+\.\d+)' +  # We
                               r'\s*(-*\d+\.\d+)' +  # Wn
                               r'.*?' + NEWLINE)     # throw away batteries
return re.compile(SAMPLE_DATA_PATTERN, re.DOTALL)
def _build_parsed_values(self):
match = METBK_SampleDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_SampleDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.BAROMETRIC_PRESSURE,
DataParticleKey.VALUE: float(match.group(1))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.RELATIVE_HUMIDITY,
DataParticleKey.VALUE: float(match.group(2))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.AIR_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(3))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.LONGWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(4))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.PRECIPITATION,
DataParticleKey.VALUE: float(match.group(5))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_CONDUCTIVITY,
DataParticleKey.VALUE: float(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SHORTWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.EASTWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(9))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.NORTHWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(10))}]
log.debug("METBK_SampleDataParticle._build_parsed_values: result=%s" % result)
return result
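# Illustrative sketch (not part of the original driver): a synthetic
# sample line (ten floats followed by battery fields, CRLF-terminated)
# run against the compiled pattern above. All numbers are made up.
def _demo_sample_regex():
    line = ('1013.25 78.10 12.30 350.00 0.00 '
            '15.20 4.10 200.00 1.50 -0.50 12.1 0.0' + NEWLINE)
    match = METBK_SampleDataParticle.regex_compiled().match(line)
    assert match is not None
    assert float(match.group(1)) == 1013.25   # barometric pressure
    assert float(match.group(10)) == -0.50    # northward wind velocity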
class METBK_StatusDataParticleKey(BaseEnum):
INSTRUMENT_MODEL = 'instrument_model'
SERIAL_NUMBER = 'serial_number'
CALIBRATION_DATE = 'calibration_date'
FIRMWARE_VERSION = 'firmware_version'
DATE_TIME_STRING = 'date_time_string'
LOGGING_INTERVAL = 'logging_interval'
CURRENT_TICK = 'current_tick'
RECENT_RECORD_INTERVAL = 'recent_record_interval'
FLASH_CARD_PRESENCE = 'flash_card_presence'
BATTERY_VOLTAGE_MAIN = 'battery_voltage_main'
FAILURE_MESSAGES = 'failure_messages'
PTT_ID1 = 'ptt_id1'
PTT_ID2 = 'ptt_id2'
PTT_ID3 = 'ptt_id3'
SAMPLING_STATE = 'sampling_state'
class METBK_StatusDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_STATUS
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
        STATUS_DATA_PATTERN = (r'Model:\s+(.+?)\r\n' +
                               r'SerNum:\s+(.+?)\r\n' +
                               r'CfgDat:\s+(.+?)\r\n' +
                               r'Firmware:\s+(.+?)\r\n' +
                               r'RTClock:\s+(.+?)\r\n' +
                               r'Logging Interval:\s+(\d+);\s+' +
                               r'Current Tick:\s+(\d+)\r\n' +
                               r'R-interval:\s+(.+?)\r\n' +
                               r'(.+?)\r\n' +  # compact flash info
                               r'Main Battery Voltage:\s+(.+?)\r\n' +
                               r'(.+?)' +      # module failures & PTT messages
                               r'\r\nSampling\s+(\w+)\r\n')
return re.compile(STATUS_DATA_PATTERN, re.DOTALL)
def _build_parsed_values(self):
log.debug("METBK_StatusDataParticle: input = %s" % self.raw_data)
match = METBK_StatusDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_StatusDataParticle: No regex match of parsed status data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.INSTRUMENT_MODEL,
DataParticleKey.VALUE: match.group(1)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: match.group(2)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CALIBRATION_DATE,
DataParticleKey.VALUE: match.group(3)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FIRMWARE_VERSION,
DataParticleKey.VALUE: match.group(4)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.DATE_TIME_STRING,
DataParticleKey.VALUE: match.group(5)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.LOGGING_INTERVAL,
DataParticleKey.VALUE: int(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CURRENT_TICK,
DataParticleKey.VALUE: int(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.RECENT_RECORD_INTERVAL,
DataParticleKey.VALUE: int(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FLASH_CARD_PRESENCE,
DataParticleKey.VALUE: match.group(9)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.BATTERY_VOLTAGE_MAIN,
DataParticleKey.VALUE: float(match.group(10))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SAMPLING_STATE,
DataParticleKey.VALUE: match.group(12)}]
lines = match.group(11).split(NEWLINE)
length = len(lines)
print ("length=%d; lines=%s" % (length, lines))
if length < 3:
raise SampleException("METBK_StatusDataParticle: Not enough PTT lines in status data: [%s]", self.raw_data)
# grab PTT lines
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID1,
DataParticleKey.VALUE: lines[length - 3]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID2,
DataParticleKey.VALUE: lines[length - 2]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID3,
DataParticleKey.VALUE: lines[length - 1]})
# grab any module failure lines
if length > 3:
length -= 3
failures = []
for index in range(0, length):
failures.append(lines[index])
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FAILURE_MESSAGES,
DataParticleKey.VALUE: failures})
log.debug("METBK_StatusDataParticle: result = %s" % result)
return result
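# Illustrative sketch (not part of the original driver): a synthetic
# status block matching the compiled pattern above. All field values are
# made up; the three lines before "Sampling" are parsed as PTT messages.
def _demo_status_regex():
    raw = ('Model: NEWMET\r\n'
           'SerNum: 17DEC16\r\n'
           'CfgDat: 17DEC16\r\n'
           'Firmware: LOADER\r\n'
           'RTClock: 2015/05/27 09:17:01\r\n'
           'Logging Interval: 60; Current Tick: 6\r\n'
           'R-interval: 1\r\n'
           'Compact Flash Card present\r\n'
           'Main Battery Voltage: 12.50\r\n'
           'ptt id1\r\nptt id2\r\nptt id3\r\n'
           'Sampling GO\r\n')
    match = METBK_StatusDataParticle.regex_compiled().match(raw)
    assert match is not None
    assert int(match.group(6)) == 60    # logging interval
    assert match.group(12) == 'GO'      # sampling state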
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
########################################################################
# Superclass overrides for resource query.
########################################################################
def get_resource_params(self):
"""
Return list of device parameters available.
"""
return Parameter.list()
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
last_sample = ''
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE,
self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC,
self._handler_command_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS,
self._handler_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_SAMPLE,
self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.CLOCK_SYNC,
self._handler_autosample_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_STATUS,
self._handler_acquire_status)
# We setup a new state for clock sync because then we could use the state machine so the autosample scheduler
# is disabled before we try to sync the clock. Otherwise there could be a race condition introduced when we
# are syncing the clock and the scheduler requests a sample.
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.ENTER, self._handler_sync_clock_enter)
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.CLOCK_SYNC,
self._handler_sync_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
self._handler_direct_access_execute_direct)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
self._handler_direct_access_stop_direct)
# Add build handlers for device commands.
self._add_build_handler(Command.GET_CLOCK, self._build_simple_command)
self._add_build_handler(Command.SET_CLOCK, self._build_set_clock_command)
self._add_build_handler(Command.D, self._build_simple_command)
self._add_build_handler(Command.GO, self._build_simple_command)
self._add_build_handler(Command.STOP, self._build_simple_command)
self._add_build_handler(Command.FS, self._build_simple_command)
self._add_build_handler(Command.STAT, self._build_simple_command)
# Add response handlers for device commands.
self._add_response_handler(Command.GET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.SET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.FS, self._parse_fs_response)
self._add_response_handler(Command.STAT, self._parse_common_response)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
self._chunker = StringChunker(Protocol.sieve_function)
self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples and status
"""
matchers = []
return_list = []
matchers.append(METBK_SampleDataParticle.regex_compiled())
matchers.append(METBK_StatusDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
log.debug("_got_chunk: chunk=%s" % chunk)
self._extract_sample(METBK_SampleDataParticle, METBK_SampleDataParticle.regex_compiled(), chunk, timestamp)
self._extract_sample(METBK_StatusDataParticle, METBK_StatusDataParticle.regex_compiled(), chunk, timestamp)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# override methods from base class.
########################################################################
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
"""
Overridden to add duplicate sample checking. This duplicate checking should only be performed
on sample chunks and not other chunk types, therefore the regex is performed before the string checking.
Extract sample from a response line if present and publish parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample
@param line string to match for sample.
@param timestamp port agent timestamp to include with the particle
@param publish boolean to publish samples (default True). If True,
two different events are published: one to notify raw data and
the other to notify parsed data.
@retval dict of dicts {'parsed': parsed_sample, 'raw': raw_sample} if
the line can be parsed for a sample. Otherwise, None.
@todo Figure out how the agent wants the results for a single poll
and return them that way from here
"""
match = regex.match(line)
if match:
if particle_class == METBK_SampleDataParticle:
# check to see if there is a delta from last sample, and don't parse this sample if there isn't
if match.group(0) == self.last_sample:
return
# save this sample as last_sample for next check
self.last_sample = match.group(0)
particle = particle_class(line, port_timestamp=timestamp)
parsed_sample = particle.generate()
if publish and self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
return parsed_sample
########################################################################
# implement virtual methods from base class.
########################################################################
def apply_startup_params(self):
"""
Apply sample_interval startup parameter.
"""
config = self.get_startup_config()
log.debug("apply_startup_params: startup config = %s" % config)
        if Parameter.SAMPLE_INTERVAL in config:
log.debug("apply_startup_params: setting sample_interval to %d" % config[Parameter.SAMPLE_INTERVAL])
self._param_dict.set_value(Parameter.SAMPLE_INTERVAL, config[Parameter.SAMPLE_INTERVAL])
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
        Discover current state; depending on the instrument's logging status this can be UNKNOWN, COMMAND or AUTOSAMPLE.
"""
next_state = self._discover()
result = []
return next_state, (next_state, result)
########################################################################
# Clock Sync handlers.
# Not much to do in this state except sync the clock then transition
# back to autosample. When in command mode we don't have to worry about
# stopping the scheduler so we just sync the clock without state
# transitions
########################################################################
def _handler_sync_clock_enter(self, *args, **kwargs):
"""
Enter sync clock state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._protocol_fsm.on_event(ProtocolEvent.CLOCK_SYNC)
def _handler_sync_clock_sync(self, *args, **kwargs):
"""
Sync the clock
"""
next_state = ProtocolState.AUTOSAMPLE
result = []
self._sync_clock()
self._async_agent_state_change(ResourceAgentState.STREAMING)
return next_state, (next_state, result)
########################################################################
# Command handlers.
    # implemented only to make direct access (DA) possible; the instrument has no actual command mode
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
self._init_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_set(self, *args, **kwargs):
"""
        no writable parameters, so this does nothing; implemented only to keep the framework happy
"""
next_state = None
result = None
return next_state, result
def _handler_command_start_direct(self, *args, **kwargs):
"""
"""
next_state = ProtocolState.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
"""
next_state = ProtocolState.AUTOSAMPLE
result = []
self._start_logging()
return next_state, (next_state, result)
def _handler_command_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = []
self._sync_clock()
return next_state, (next_state, result)
########################################################################
# autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
        Enter autosample state. Because this is an instrument that must be
polled we need to ensure the scheduler is added when we are in an
autosample state. This scheduler raises events to poll the
instrument for data.
"""
self._init_params()
self._ensure_autosample_config()
self._add_scheduler_event(AUTO_SAMPLE_SCHEDULED_JOB, ProtocolEvent.ACQUIRE_SAMPLE)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
exit autosample state.
"""
self._remove_scheduler(AUTO_SAMPLE_SCHEDULED_JOB)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
next_state = ProtocolState.COMMAND
result = []
self._stop_logging()
return next_state, (next_state, result)
def _handler_autosample_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = ProtocolState.SYNC_CLOCK
result = []
return next_state, (next_state, result)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state.
"""
pass
def _handler_direct_access_execute_direct(self, data):
next_state = None
result = []
self._do_cmd_direct(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
next_state = self._discover()
result = []
return next_state, (next_state, result)
########################################################################
# general handlers.
########################################################################
def _handler_flash_status(self, *args, **kwargs):
"""
Acquire flash status from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = self._do_cmd_resp(Command.FS, expected_prompt=Prompt.FS)
log.debug("FLASH RESULT: %s", result)
return next_state, (next_state, [result])
def _handler_acquire_sample(self, *args, **kwargs):
"""
Acquire sample from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = self._do_cmd_resp(Command.D, *args, **kwargs)
return next_state, (next_state, [result])
def _handler_acquire_status(self, *args, **kwargs):
"""
Acquire status from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
log.debug("Logging status: %s", self._is_logging())
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
return next_state, (next_state, [result])
########################################################################
# Private helpers.
########################################################################
def _set_params(self, *args, **kwargs):
"""
Overloaded from the base class, used in apply DA params. Not needed
here so just noop it.
"""
pass
def _discover(self, *args, **kwargs):
"""
        Discover current state by checking the instrument's logging status.
        @retval next_state: ProtocolState.UNKNOWN, ProtocolState.AUTOSAMPLE or ProtocolState.COMMAND.
"""
logging = self._is_logging()
if logging is None:
return ProtocolState.UNKNOWN
elif logging:
return ProtocolState.AUTOSAMPLE
return ProtocolState.COMMAND
def _start_logging(self):
"""
        start the instrument logging if it isn't running already.
"""
if not self._is_logging():
log.debug("Sending start logging command: %s", Command.GO)
self._do_cmd_resp(Command.GO, expected_prompt=Prompt.GO)
def _stop_logging(self):
"""
        stop the instrument logging if it is running. When the instrument
        is in a syncing state we cannot stop logging; we must wait before
        we send the stop command.
"""
if self._is_logging():
log.debug("Attempting to stop the instrument logging.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
# If we are still logging then let's wait until we are not
# syncing before resending the command.
if self._is_logging():
self._wait_for_sync()
log.debug("Attempting to stop the instrument again.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
def _wait_for_sync(self):
"""
When the instrument is syncing internal parameters we can't stop
logging. So we will watch the logging status and when it is not
synchronizing we will return. Basically we will just block
until we are no longer syncing.
@raise InstrumentProtocolException when we timeout waiting for a
transition.
"""
timeout = time.time() + SYNC_TIMEOUT
while time.time() < timeout:
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
match = LOGGING_SYNC_COMPILED.match(result)
if match:
log.debug("We are still in sync mode. Wait a bit and retry")
time.sleep(2)
else:
log.debug("Transitioned out of sync.")
return True
# We timed out
raise InstrumentProtocolException("failed to transition out of sync mode")
def _is_logging(self):
"""
Run the status command to determine if we are in command or autosample
mode.
        @return: True if sampling, False if not, None if we can't determine
"""
log.debug("_is_logging: start")
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
log.debug("Checking logging status from %s", result)
match = LOGGING_STATUS_COMPILED.match(result)
if not match:
log.error("Unable to determine logging status from: %s", result)
return None
if match.group(1) == 'GO':
log.debug("Looks like we are logging: %s", match.group(1))
return True
else:
log.debug("Looks like we are NOT logging: %s", match.group(1))
return False
def _ensure_autosample_config(self):
scheduler_config = self._get_scheduler_config()
        if scheduler_config is None:
log.debug("_ensure_autosample_config: adding scheduler element to _startup_config")
self._startup_config[DriverConfigKey.SCHEDULER] = {}
self._get_scheduler_config()
log.debug("_ensure_autosample_config: adding autosample config to _startup_config")
config = {DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)}}
self._startup_config[DriverConfigKey.SCHEDULER][AUTO_SAMPLE_SCHEDULED_JOB] = config
        if not self._scheduler:
self.initialize_scheduler()
def _sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
time_format = "%Y/%m/%d %H:%M:%S"
str_val = get_timestamp_delayed(time_format)
log.debug("Setting instrument clock to '%s'", str_val)
self._do_cmd_resp(Command.SET_CLOCK, str_val, expected_prompt=Prompt.CR_NL)
def _wakeup(self, timeout):
"""There is no wakeup sequence for this instrument"""
pass
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
self._cmd_dict.add(Capability.FLASH_STATUS, display_name="Flash Status")
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
def _build_param_dict(self):
"""
Populate the parameter dictionary with XR-420 parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.CLOCK,
r'(.*)\r\n',
lambda match: match.group(1),
lambda string: str(string),
type=ParameterDictType.STRING,
display_name="clock",
expiration=0,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.SAMPLE_INTERVAL,
r'Not used. This parameter is not parsed from instrument response',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=30,
value=30,
startup_param=True,
display_name="sample_interval",
visibility=ParameterDictVisibility.IMMUTABLE)
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary.
"""
log.debug("_update_params:")
# Issue clock command and parse results.
# This is the only parameter and it is always changing so don't bother with the 'change' event
self._do_cmd_resp(Command.GET_CLOCK)
def _build_set_clock_command(self, cmd, val):
"""
Build handler for set clock command (cmd=val followed by newline).
@param cmd the string for setting the clock (this should equal #CLOCK=).
@param val the parameter value to set.
        @retval The set command to be sent to the device.
"""
cmd = '%s%s' % (cmd, val) + NEWLINE
return cmd
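    # Illustrative sketch (not part of the original driver; assumes Command.SET_CLOCK is
    # '#CLOCK=' per the docstring above and that NEWLINE is '\r\n'):
    #   _build_set_clock_command('#CLOCK=', '2014/03/17 11:26:30')
    #   would return '#CLOCK=2014/03/17 11:26:30\r\n'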
def _parse_clock_response(self, response, prompt):
"""
Parse handler for clock command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if clock command misunderstood.
"""
log.debug("_parse_clock_response: response=%s, prompt=%s" % (response, prompt))
if prompt not in [Prompt.CR_NL]:
raise InstrumentProtocolException('CLOCK command not recognized: %s.' % response)
if not self._param_dict.update(response):
raise InstrumentProtocolException('CLOCK command not parsed: %s.' % response)
return
def _parse_fs_response(self, response, prompt):
"""
Parse handler for FS command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if FS command misunderstood.
"""
log.debug("_parse_fs_response: response=%s, prompt=%s" % (response, prompt))
if prompt not in [Prompt.FS]:
raise InstrumentProtocolException('FS command not recognized: %s.' % response)
return response
def _parse_common_response(self, response, prompt):
"""
Parse handler for common commands.
@param response command response string.
@param prompt prompt following command response.
"""
return response
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from sys import hexversion
import random
from .context import sortedcontainers
from sortedcontainers import SortedSet
from nose.tools import raises
from functools import wraps
import operator
if hexversion < 0x03000000:
from itertools import izip as zip
range = xrange
random.seed(0)
actions = []
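# Each stress_* function below is decorated with @actor, which appends it to `actions`
# so that test_stress can pick and apply stress operations at random.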
def actor(func):
actions.append(func)
return func
def test_init():
sst = SortedSet()
sst._check()
sst = SortedSet(load=10000)
assert sst._list._load == 10000
assert sst._list._twice == 20000
assert sst._list._half == 5000
sst._check()
sst = SortedSet(range(10000))
assert all(tup[0] == tup[1] for tup in zip(sst, range(10000)))
sst.clear()
assert len(sst) == 0
assert list(iter(sst)) == []
sst._check()
@actor
def stress_contains(sst):
values = list(sst)
assert all((val in sst) for val in values)
@actor
def stress_delitem(sst):
for rpt in range(100):
pos = random.randrange(0, len(sst))
del sst[pos]
@actor
def stress_operator(sst):
other = SortedSet(sst)
stress_delitem(other)
assert other < sst
assert sst > other
@actor
def stress_getitem(sst):
other = list(sst)
assert all(sst[pos] == other[pos] for pos in range(len(sst)))
@actor
def stress_reversed(sst):
other = list(reversed(list(sst)))
assert all(tup[0] == tup[1] for tup in zip(reversed(sst), other))
@actor
def stress_add(sst):
for rpt in range(100):
val = random.randrange(0, 1000)
sst.add(val)
@actor
def stress_count(sst):
for val in sst:
assert sst.count(val) == 1
@actor
def stress_difference(sst):
copy_one = sst.copy()
stress_delitem(copy_one)
copy_two = sst.copy()
stress_delitem(copy_two)
sst.difference_update(copy_one, copy_two)
@actor
def stress_discard(sst):
for rpt in range(100):
pos = random.randrange(0, len(sst))
val = sst[pos]
sst.discard(val)
@actor
def stress_index(sst):
for rpt in range(100):
pos = random.randrange(0, len(sst))
val = sst[pos]
assert pos == sst.index(val)
@actor
def stress_intersection(sst):
copy_one = sst.copy()
stress_delitem(copy_one)
copy_two = sst.copy()
stress_delitem(copy_two)
sst.intersection_update(copy_one, copy_two)
@actor
def stress_symmetric_difference(sst):
copy_one = sst.copy()
stress_delitem(copy_one)
sst.symmetric_difference_update(copy_one)
@actor
def stress_pop(sst):
val = sst[-1]
assert val == sst.pop()
for rpt in range(100):
pos = random.randrange(0, len(sst))
val = sst[pos]
assert val == sst.pop(pos)
@actor
def stress_remove(sst):
for rpt in range(100):
pos = random.randrange(0, len(sst))
val = sst[pos]
sst.remove(val)
@actor
def stress_update(sst):
def iter_randomly(start, stop, count):
for rpt in range(count):
yield random.randrange(start, stop)
sst.update(iter_randomly(0, 500, 100),
iter_randomly(500, 1000, 100),
iter_randomly(1000, 1500, 100),
iter_randomly(1500, 2000, 100))
@actor
def stress_isdisjoint(sst):
values = [-1, -2, -3]
assert sst.isdisjoint(values)
@actor
def stress_issubset(sst):
that = SortedSet(sst)
that.update(range(1000))
assert sst.issubset(that)
@actor
def stress_issuperset(sst):
that = SortedSet(sst)
assert sst.issuperset(that)
def test_stress(repeat=1000):
sst = SortedSet(range(1000))
for rpt in range(repeat):
action = random.choice(actions)
action(sst)
try:
sst._check()
except AssertionError:
print(action)
raise
start_len = len(sst)
while len(sst) < 500:
sst.add(random.randrange(0, 2000))
while len(sst) > 2000:
del sst[random.randrange(0, len(sst))]
if start_len != len(sst):
sst._check()
if __name__ == '__main__':
import sys
from datetime import datetime
start = datetime.now()
print('Python', sys.version_info)
try:
num = int(sys.argv[1])
print('Setting iterations to', num)
except:
print('Setting iterations to 1000 (default)')
num = 1000
try:
pea = int(sys.argv[2])
random.seed(pea)
print('Setting seed to', pea)
except:
print('Setting seed to 0 (default)')
random.seed(0)
try:
test_stress(num)
except:
raise
finally:
print('Exiting after', (datetime.now() - start))
| |
#!/usr/bin/env python
'''
NAME
launcher.py
DESCRIPTION
'launcher.py' emulates the behaviour of a web-based launcher.
This script will ssh to a remote host and execute the bash-based
'headnode.bash' script on that host.
HISTORY
07 November 2014
o Initial design and coding.
'''
# System imports
import os
import sys
import argparse
import tempfile, shutil
import time
import random
from _common import systemMisc as misc
from _common import crun
class launcherRemote:
'''
    This class simply logs into a remote host and runs
a script.
'''
#
# Class member variables -- if declared here are shared
# across all instances of this class
#
_dictErr = {
'Load' : {
'action' : 'attempting to pickle load object, ',
'error' : 'a PickleError occured.',
'exitCode' : 14},
'outDirNotCreate': {
'action' : 'attempting to create the <outDir>, ',
'error' : 'a system error was encountered. Do you have create permission?',
'exitCode' : 13},
}
def __init__(self, **kwargs):
'''
Basic constructor. Checks on named input args, checks that files
exist and creates directories.
'''
self._lw = 120
self._rw = 20
self._topDir = ''
self._str_remotePath = '~/chris'
self._str_remoteScript = 'headnode.bash'
self._str_remoteHost = 'mit.eofe4.edu'
self._str_remoteUser = 'rudolph'
self._str_remoteCrun = 'crun_hpc_mosix'
for key, value in kwargs.iteritems():
if key == 'remotePath': self._str_remotePath = value
if key == 'remoteScript': self._str_remoteScript = value
if key == 'remoteHost': self._str_remoteHost = value
if key == 'remoteUser': self._str_remoteUser = value
if key == 'remoteCrun': self._str_remoteCrun = value
# A local "shell"
self.OSshell = crun.crun()
self.OSshell.echo(False)
self.OSshell.echoStdOut(False)
self.OSshell.detach(False)
self.OSshell.waitForChild(True)
# The remote/scheduler shell
self.sshCluster = crun.crun(remoteUser=self._str_remoteUser,
remoteHost=self._str_remoteHost)
print("remote call stdout = %s" % self.sshCluster.stdout())
print("remote call stderr = %s" % self.sshCluster.stderr())
self.initialize()
def initialize(self):
'''
This method provides some "post-constructor" initialization. It is
typically called after the constructor and after other class flags
have been set (or reset).
'''
pass
def run(self):
'''
The 'run' method actually does the work of this class.
'''
str_cmd = '%s/%s' % (self._str_remotePath, self._str_remoteScript)
self.sshCluster.echo(True)
self.sshCluster.echoStdOut(True)
# fire-and-forget the remote script
self.sshCluster(str_cmd)
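# Note (illustrative, inferred from the call pattern above): crun.crun instances are
# callable, so self.sshCluster(str_cmd) executes '<remotePath>/<remoteScript>' on the
# remote host over ssh as the configured remote user.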
def synopsis(ab_shortOnly = False):
scriptName = os.path.basename(sys.argv[0])
shortSynopsis = '''
SYNOPSIS
%s \\
[--remoteHost <remoteHost>] \\
[--remoteUser <remoteUser>] \\
[--remotePath <remotePath>] \\
[--remoteScript <remoteScript>] \\
[--remoteCrun <crun_hpc_type>]
''' % scriptName
description = '''
DESCRIPTION
`%s' emulates the ChRIS launcher.php in as much as it
ssh's to a remote host and executes a bash-based script
on that host.
ARGS
--remoteHost <remoteHost>
The host to connect to. It is assumed that this is an HPC
headnode.
--remoteUser <remoteUser>
The remote user.
--remotePath <remotePath>
The path to the remote script to run on the headnode.
--remoteScript <remoteScript>
The remote script to run on the headnode.
        --remoteCrun <crun_hpc_type>
The crun hpc class to use.
"<Command and args to execute>"
The command string to schedule.
EXAMPLES
''' % (scriptName)
if ab_shortOnly:
return shortSynopsis
else:
return shortSynopsis + description
#
# entry point
#
if __name__ == "__main__":
# always show the help if no arguments were specified
if len( sys.argv ) == 1:
        print(synopsis())
sys.exit( 1 )
verbosity = 0
parser = argparse.ArgumentParser(description = synopsis(True))
parser.add_argument('--verbosity', '-v',
dest='verbosity',
action='store',
default=0,
help='verbosity level')
parser.add_argument('--remoteHost', '-r',
dest='remoteHost',
action='store',
default='eofe4.mit.edu',
help='the remote headnode.')
parser.add_argument('--remoteUser', '-u',
dest='remoteUser',
action='store',
default='rudolph',
help='the headnode user name.')
parser.add_argument('--remotePath', '-p',
dest='remotePath',
action='store',
default='~/src/devel/distrib',
help='the path to the remote script to execute.')
parser.add_argument('--remoteScript', '-s',
dest='remoteScript',
action='store',
default='headnode.bash',
help='the remote script to execute.')
parser.add_argument('--remoteCrun', '-c',
dest='remoteCrun',
action='store',
default='crun_hpc_mghpcc',
help='the remote cluster crun object type.')
args = parser.parse_args()
launcher = launcherRemote(
remoteHost = args.remoteHost,
remoteUser = args.remoteUser,
remotePath = args.remotePath,
remoteScript = args.remoteScript,
remoteCrun = args.remoteCrun
)
launcher.run()
| |
# Copyright (c) 2016 Jonathan Lloyd - copyright@thisisjonathan.com
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Lists of words for use in idea templates"""
ADJECTIVES = [
'scary',
'challenging',
'slow-paced',
'relaxing',
'twitchy',
'mysterious',
'colourful',
'silly',
'comedic',
'retro',
'pixel',
'generative',
'stealthy',
'minimalistic',
'zen-like',
'quirky',
'rogue-like',
'turn based',
'side-scrolling',
'multiplayer',
'co-op',
'first-person',
'virtual reality',
]
GENRES = [
'platformer',
'shooter',
'stealth',
'fighting',
'adventure',
'text-adventure',
'role-playing',
'simulation',
'strategy',
'casual',
'puzzle',
'racing',
'board',
'rhythm',
'tower defense',
'beat-em-up',
]
CHARACTERS = [
'anti-hero',
'vigilante',
'soldier',
'Italian plumber',
'dragon',
'super hero',
'super soldier',
'hacker',
'child',
'explorer',
'introvert',
'poet',
'boy',
'girl',
'thief',
'spy',
'trained assassin',
'ninja',
'samurai',
'trickster',
'king',
'queen',
'princess',
'time traveller',
'mad scientist',
'alien',
'cyborg',
'astronaut',
'blue hedgehog',
'villain',
'wizard',
'sorceress',
'witch',
'vampire',
'icy blue dragon',
'zombie with no conscience',
'peasant girl',
'pizza delivery man',
]
CHARACTER_ADJECTIVES = [
'outlaw',
'renegade',
'vengeful',
'rollerskating',
'motorbike-riding',
'superintelligent',
'robotic',
'virtual',
'space-faring',
'sea-faring',
'destitute',
'reckless',
'incompetent',
'lovelorn',
'flamboyant',
'wise-cracking',
'mischievous',
'fourth wall breaking',
'reactionary',
'spell-casting',
'undead',
]
THINGS = [
'planet',
'castle',
'totalitarian regime',
'alien planet',
'map',
'government',
'society',
'player',
'army',
'underground pipe network',
'battlefield',
'herd of cats',
'Russian author',
'cell phone',
'magical ring',
'magical power',
'enchanted rune',
'plasma gun',
'improvised weapon',
'mountain',
'forest',
'ocean',
'lost city',
'floating raft city',
'floating sky city',
'flag',
]
PRESENT_VERBS = [
'escapes',
'fights',
'must destroy',
'avoids',
'controls',
'explores the deeper meaning of',
'builds',
'goes on an adventure with',
'has a deep and meaningful conversation with',
'gets drunk with',
'has an existential crisis because of',
'rules the world using',
'falls in love with',
'discovers their best friend is',
]
ACTIONS = [
'press the spacebar',
'twitch your mouse',
'fire your weapon',
'die',
'jump over something',
'walk backwards',
'cast a spell',
'jump on some platforms',
'use mind control',
'solve a platforming puzzle',
'quit the game',
'move to the beat',
'progress',
'live',
'move things with your mind',
'transform into any character',
'hack into a website',
'score points',
'collect items',
'use motion controls',
'lose health',
'touch the walls',
'turn back time',
'jump on enemies',
'craft items',
'build a shelter',
'solve mind-bending puzzles',
'rewind time',
'avoid enemies',
'protect your base',
'sweep enemy bases',
'capture enemies',
'control enemy characters',
'play an awesome solo on a rock band controller',
'unlock all the achievements',
'break into a secret area',
'fight off an alien invasion',
'bring about the end of the world',
]
ART_STYLES = [
'pixel-art',
'voxel',
'vector',
'stop motion',
'glitch',
'low-poly',
'3D',
'2D',
'film noir',
'b-movie',
'80s SciFi',
'low budget',
'cartoonish',
'hand illustrated',
'steampunk',
'dieselpunk',
'Victorian',
'cypherpunk',
'neon',
'water colour',
'fairytale',
'gothic',
'isometric',
'psychedelic',
'abstract',
'post-apocalyptic',
'dystopian',
'under water',
'medieval',
'cel shaded',
]
SETTINGS = [
'computer',
'castle',
'office',
'hotel',
'school',
'church',
'junkyard',
'science lab',
'casino',
'factory',
'nightclub',
'cave',
'spaceship',
'artificial space habitat',
'abandoned mental asylum',
'haunted house',
'school for witches and wizards',
'school for mutants',
'secret government science lab',
'theatre',
'dying world',
'racetrack',
'desolate wasteland',
]
HASHTAGS = [
'gamedev',
'indiedev',
'indiegame',
'indiegames',
'indie',
'gameart',
'indiegamedev',
'gameideas',
'gameidea',
]
RESOURCES = [
'lives',
'food',
'health',
'coins',
'gems',
'points',
'potions',
'souls',
'units',
'stamina',
'arrows',
'bullets',
'missiles',
'pizza',
'materials',
]
| |
#!/bin/python3
import requests
import time
import os
import sys
import re
from html.parser import HTMLParser
import config
import logger
_CONFIGFILE = 'config.yaml'
_WAIT = 0.25 # global rate limiting value, don't set to 0
_EXT = re.compile('^http.*/download/[0-9]+/[^\.]+(\.[a-zA-Z]{2,4})\?token=.*$')
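# Illustrative example (hypothetical URL): _EXT captures the file extension from a
# DeviantArt download link, e.g. for
#   'https://www.deviantart.com/download/123/foo.png?token=abc'
# _EXT.findall(...) returns ['.png'].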
log = logger.getLogger()
log.info('Initializing project')
class ImageParser(HTMLParser):
    def get_img(self):
        try:
            return self.img
        except AttributeError:
            # No download link was found while parsing the page.
            return None
class DAParser(ImageParser):
def handle_starttag(self, tag, attrs):
# Easiest way to grab an image from deviant art is to parse the page
# and pull out the (hopefully) only link ('a' tag) with a class of
# 'dev-page-download'
try:
if tag == 'a' and attrs:
# Attrs are a list of tuples, (name, value)
d = {}
for attr in attrs:
d[attr[0]] = attr[1]
if d and 'class' in d and 'dev-page-download' in d['class'].split():
log.debug(d)
self.img = d['href']
except:
log.error('Unhandled exception in DA Parser', exc_info=True)
raise
def download_image(path, url, cookies=None, referrer=None):
'''Wrapper around requests to attempt downloading a file at a specified URL'''
try:
log.debug("Beginning download.")
        downloaded_image = requests.get(url, cookies=cookies, headers={'Referer': referrer}, stream=True)
log.debug("Finished download.")
if downloaded_image.ok:
with open(path, 'wb') as f:
for block in downloaded_image.iter_content(1024):
if not block:
break
f.write(block)
return path
else:
return False
except:
log.error('Unhandled exception in downloader', exc_info=True)
return None
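# Illustrative usage (hypothetical values): download_image returns the written path on
# success, False on a non-OK HTTP response, and None on an unhandled exception:
#   download_image('out/foo.png', 'https://example.com/foo.png')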
def da_api(conf, method, endpoint, payload):
global _WAIT
remaining_retries = 10
reauthed = False
try:
while remaining_retries:
time.sleep(conf['wait'])
if method == 'get':
request = requests.get(endpoint, params=payload)
elif method == 'post':
request = requests.post(endpoint, data=payload)
if request.status_code == 200:
if conf['wait'] > 0.25:
                    conf['wait'] /= 2 # Ratchet down our wait time since we succeeded
log.debug("Reduced wait time to %s", conf['wait'])
return request
elif request.status_code == 401: # Invalid token, need to refresh
log.debug("API reports invalid or expired token (401). Current token: %s", payload['access_token'])
# reauth with the api
if not reauthed: # Only reauth once per loop to prevent infinite reauthing
log.debug("Not yet reauthed in this loop.")
remaining_retries += 1
conf['access_token'] = auth(conf['client_id'], conf['client_secret'])
payload['access_token'] = conf['access_token']
reauthed = True
elif request.status_code == 429: # API rate limiting
log.warning("API ratelimiting encountered.")
log.debug("Request status code: %s", request.status_code)
log.debug("Request contents: %s", request.text)
conf['wait'] *= 4
log.debug("increased wait time to %s", conf['wait'])
else:
# lolidunno
log.warning("Unknown response from server.")
log.debug("Request status code: %s", request.status_code)
log.debug("Request contents: %s", request.text)
conf['wait'] *= 10 # Don't know what went wrong, so lets wait a long time to see if it fixes itself
log.debug("increased wait time to %s", conf['wait'])
remaining_retries -= 1
log.debug("%s retries remaining", remaining_retries)
except:
log.error('Unhandled exception in api wrapper', exc_info=True)
return None
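# Illustrative usage sketch (mirrors the api_paged call in main() below; the conf values
# come from config.yaml and are assumptions here):
#   resp = da_api(conf, 'get',
#                 'https://www.deviantart.com/api/v1/oauth2/collections/folders',
#                 {'username': conf['user'], 'access_token': conf['access_token']})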
def auth(id, secret):
try:
oid_request = requests.post(
'https://www.deviantart.com/oauth2/token',
data={'grant_type': 'client_credentials', 'client_id': id, 'client_secret': secret}
)
'''
Successful requests look like
oid_request.json()
{'access_token': '1a2b3a21a3b21ab2132b15a46b8a7b9513a2b16a8b79a79842ab1654957a98',
'expires_in': 3600,
'status': 'success',
'token_type': 'Bearer'}
'''
if oid_request and oid_request.ok and 'status' in oid_request.json() and oid_request.json()['status'] == 'success':
access_token = oid_request.json()['access_token']
log.debug("Got access token: %s", access_token)
else:
log.critical("Unable to get oauth token from API")
log.debug("Request okay: %s", oid_request.ok)
log.debug("Request status code: %s", oid_request.status_code)
log.debug("Request encoding: %s", oid_request.encoding)
log.debug("Request contents: %s", oid_request.text)
log.debug("Request json: %s", oid_request.json())
            oid_request.raise_for_status()
return access_token
except:
log.error('Unhandled exception in api wrapper', exc_info=True)
raise
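# Illustrative usage (this mirrors the call made in main() below): exchange the client
# credentials from config.yaml for an OAuth access token:
#   conf['access_token'] = auth(conf['client_id'], conf['client_secret'])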
def api_paged(conf, method, endpoint, payload, limit=None):
'''Wrapper for da_api that pulls all paged results out'''
items = []
offset = 0
has_more = True
if limit:
payload['limit'] = limit
elif 'limit' not in payload:
payload['limit'] = 10
while has_more:
payload['offset'] = offset
page = da_api(conf, method, endpoint, payload)
if not page:
log.critical("Unable to get page from API at %s. Deviant art may be unavailable.", endpoint)
sys.exit(1)
items.extend(page.json()['results'])
has_more = 'has_more' in page.json() and page.json()['has_more']
if has_more:
log.debug('On page offset %s and api reports more. Next offset %s', offset, page.json()['next_offset'])
offset = page.json()['next_offset']
return items
def sanitize_path(path):
return ''.join([c for c in path if c.isalpha() or c.isdigit() or c in '-_.() ']).strip()
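# For illustration: sanitize_path keeps only letters, digits and the characters '-_.() ',
# so e.g. sanitize_path('cover: art*.png') returns 'cover art.png'.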
def download_folder(conf, folder_id, folder_path):
os.makedirs(folder_path, exist_ok=True) # Yay python 3
deviations = api_paged(
conf,
'get',
'https://www.deviantart.com/api/v1/oauth2/collections/%s' % folder_id,
{'username': conf['user'], 'mature_content': 'true', 'access_token': conf['access_token']},
24
)
deviation_count = len(deviations)
log.info('Got %i deviations to fetch for folder %s.', deviation_count, folder_id)
downloaded = os.listdir(folder_path)
i = -1 # Setup for completion percentage
for deviation in deviations:
try:
i += 1
log.info('Working on image %i of %i - %s%s complete.' % (i, deviation_count, str((i/deviation_count)*100)[:4], r'%'))
url = deviation['url']
log.debug("")
log.debug("Working with deviation %s", url)
if deviation['is_deleted']:
log.info("Deviation is deleted, skipping.")
continue
if deviation['is_mature']:
log.debug("Deviation is marked as mature.")
ext = os.path.splitext(deviation['content']['src'])[-1]
# Black magic to pull trailing unique identifier, or just filename
# sans extension or path
unq = os.path.splitext(os.path.split(deviation['content']['src'])[-1])[0].split('-')[-1]
# Generate file name
filename = '%s_%s-%s%s' % (deviation['author']['username'], deviation['title'], unq, ext)
path = sanitize_path(filename)
log.debug("Looking for file %s", path)
if path in downloaded:
log.info('Deviation exists in output path, skipping.')
continue
# Generate full path
path = os.path.join(folder_path, path)
log.debug("Output path will be: %s", path)
unsuccessful = True
if deviation['is_downloadable']:
log.debug("Deviation is flagged as downloadable")
log.debug("Scraping %s for full image.", url)
parser = DAParser(convert_charrefs=True)
d_page = requests.get(url)
parser.feed(d_page.text)
img = parser.get_img()
del parser
if img:
log.debug("Found image url: %s", img)
# get real extension here and rebuild file name
ext = _EXT.findall(img)[0]
filename = '%s_%s-%s%s' % (deviation['author']['username'], deviation['title'], unq, ext)
                    path = sanitize_path(filename)
log.debug("Looking for full file %s", path)
if path in downloaded:
log.info('Deviation exists in output path, skipping.')
continue
path = os.path.join(folder_path, path)
                    if download_image(path=path, url=img, cookies=d_page.cookies, referrer=url):
                        unsuccessful = False
else:
log.warning("No image found when scraping page %s", url)
if unsuccessful:
# Get provided image
# TODO Handle images that you must be logged in to see.
log.debug('Falling back to content from API')
url = deviation['content']['src']
log.debug("Using image url: %s", url)
download_image(path=path, url=url)
time.sleep(1) # Be nice to Deviant Art :)
except:
log.error("Unhandled exception when downloading deviations.", exc_info=True)
def main(args):
conf = config.getConfig(_CONFIGFILE)
if not conf:
log.critical('Invalid config.')
sys.exit(1)
if 'log level' in conf and conf['log level'].upper() in [
'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']:
log.setLevel(conf['log level'].upper())
log.info("Set logging level to %s", conf['log level'].upper())
elif 'log level' in conf:
log.warning('Invalid log level specified in config. Level must be one of [critical, error, warning, info, debug].')
# Set some internal values
conf['wait'] = _WAIT
conf['access_token'] = auth(conf['client_id'], conf['client_secret'])
collections = api_paged(
conf,
'get',
'https://www.deviantart.com/api/v1/oauth2/collections/folders',
{'username': conf['user'], 'access_token': conf['access_token']},
50
)
log.debug('Found %i collections for user %s.', len(collections), conf['user'])
folders = []
folder = None
# If collection is specified, use it. Else use all.
if 'collection' in conf and conf['collection']:
try:
folder = [(f['folderid'], f['name']) for f in collections if f['name'].lower() == conf['collection'].lower()][0]
except IndexError:
log.critical("Collection %s Not found for user %s", conf['collection'], conf['user'])
sys.exit(1)
log.debug('Found folder id %s for folder %s for user %s', folder, conf['collection'], conf['user'])
folders.append(folder)
else:
folders.extend([(f['folderid'], f['name']) for f in collections])
if folders:
for id, name in folders: # Folders is a list of tuples
path = os.path.join(conf['output_path'], sanitize_path(name)) # Use folder name to create subfolder
log.debug("Downloading %s to %s", name, path)
download_folder(conf, id, path)
log.info('Finished')
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
log.warning('Caught keyboard interrupt. Closing.')
sys.exit(1)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FoiRequest.resolution'
db.add_column(u'foirequest_foirequest', 'resolution',
self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FoiRequest.resolution'
db.delete_column(u'foirequest_foirequest', 'resolution')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
APP_MODEL_NAME: {
'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'foirequest.deferredmessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'DeferredMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mail': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'foirequest.foiattachment': {
'Meta': {'ordering': "('name',)", 'object_name': 'FoiAttachment'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'belongs_to': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiMessage']", 'null': 'True'}),
'can_approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'converted': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'original_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['foirequest.FoiAttachment']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_converted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'redacted': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unredacted_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['foirequest.FoiAttachment']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'foirequest.foievent': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'FoiEvent'},
'context_json': ('django.db.models.fields.TextField', [], {}),
'event_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'foirequest.foimessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'FoiMessage'},
'content_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_escalation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_postal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_response': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'not_publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plaintext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'plaintext_redacted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'received_messages'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['publicbody.PublicBody']"}),
'redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'send_messages'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['publicbody.PublicBody']"}),
'sender_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subject_redacted': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
u'foirequest.foirequest': {
'Meta': {'ordering': "('last_message',)", 'object_name': 'FoiRequest'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'costs': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_foi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'last_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.FoiLaw']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'refusal_reason': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'resolution': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'resolved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'same_as': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'same_as_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'secret_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'visibility': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
u'foirequest.publicbodysuggestion': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'PublicBodySuggestion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBody']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % APP_MODEL, 'null': 'True', 'on_delete': 'models.SET_NULL'})
},
u'foirequest.taggedfoirequest': {
'Meta': {'object_name': 'TaggedFoiRequest'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foirequest.FoiRequest']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'foirequest_taggedfoirequest_items'", 'to': u"orm['taggit.Tag']"})
},
u'publicbody.foilaw': {
'Meta': {'object_name': 'FoiLaw'},
'combined': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['publicbody.FoiLaw']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_response_time': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True', 'blank': 'True'}),
'max_response_time_unit': ('django.db.models.fields.CharField', [], {'default': "'day'", 'max_length': '32', 'blank': 'True'}),
'mediator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mediating_laws'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'meta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
'refusal_reasons': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'request_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'publicbody.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rank': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'publicbody.publicbody': {
'Meta': {'ordering': "('name',)", 'object_name': 'PublicBody'},
'_created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_body_creators'", 'on_delete': 'models.SET_NULL', 'default': '1', 'to': u"orm['%s']" % APP_MODEL, 'blank': 'True', 'null': 'True'}),
'_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_body_updaters'", 'on_delete': 'models.SET_NULL', 'default': '1', 'to': u"orm['%s']" % APP_MODEL, 'blank': 'True', 'null': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.Jurisdiction']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'other_names': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'request_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'descendants'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['publicbody.PublicBody']", 'blank': 'True', 'null': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['publicbody.PublicBodyTopic']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'publicbody.publicbodytopic': {
'Meta': {'object_name': 'PublicBodyTopic'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['foirequest']
# ---------------------------------------------------------------------------
import re
import logging
from optparse import make_option
from django.db import transaction, connections, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from vdw.genes.models import Gene, GenePhenotype
from vdw.literature.models import PubMed
from vdw.phenotypes.models import Phenotype
from vdw.variants.models import Variant, VariantPhenotype
BATCH_SIZE = 1000
log = logging.getLogger(__name__)
def load_hgmd_phenotypes(label, keys, cursor, using):
count = 0
total = 0
# Local storage for new instances
pmids = {}
phenotypes = {}
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
record = dict(zip(keys, row))
pubmed = gene = variant = None
new_phenotype = saved = False
# PubMed IDs
if record['pubmed_id']:
pubmed = PubMed(pk=record['pubmed_id'])
pubmed._state.db = using
            # Some records have a bogus PMID. Only process the valid ones.
elif type(record['pubmed']) is int or record['pubmed'].isdigit():
pmid = int(record['pubmed'])
if pmid in pmids:
pubmed = PubMed(pk=pmids[pmid])
pubmed._state.db = using
else:
pubmed = PubMed(pmid=pmid)
pubmed.save()
pmids[pmid] = pubmed.pk
# Phenotypes
if record['phenotype_id']:
phenotype = Phenotype(pk=record['phenotype_id'])
phenotype._state.db = using
else:
new_phenotype = True
term = record['phenotype']
if term in phenotypes:
phenotype = Phenotype(pk=phenotypes[term])
phenotype._state.db = using
else:
phenotype = Phenotype(term=term)
phenotype.save()
phenotypes[term] = phenotype.pk
# Variants
variant = Variant(pk=record['variant_id'])
variant._state.db = using
if new_phenotype or not VariantPhenotype.objects\
.filter(variant=variant, phenotype=phenotype,
hgmd_id=record['hgmd_id']).exists():
vp = VariantPhenotype(variant=variant, phenotype=phenotype,
hgmd_id=record['hgmd_id'])
vp.save()
saved = True
# Genes
if record['gene_id']:
gene = Gene(pk=record['gene_id'])
gene._state.db = using
if new_phenotype or not GenePhenotype.objects\
.filter(gene=gene, phenotype=phenotype,
hgmd_id=record['hgmd_id']).exists():
gp = GenePhenotype(gene=gene, phenotype=phenotype,
hgmd_id=record['hgmd_id'])
gp.save()
saved = True
# Associate articles with other objects
if pubmed:
phenotype.articles.add(pubmed)
if variant:
variant.articles.add(pubmed)
if gene:
gene.articles.add(pubmed)
total += 1
if saved:
count += 1
if count % BATCH_SIZE == 0:
transaction.commit()
log.debug('Loading {0}...{1}/{2}'.format(label, count, total))
# Print a newline for the terminal prompt
print
class Command(BaseCommand):
"""Cleans and loads phenotype terms from various sources into a uniform
set of tables.
"""
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'print the SQL for. Defaults to the "default" database.'),
make_option('--hpo', action='store_true', default=False,
help='Reload HPO phenotypes'),
make_option('--hgmd-snp', action='store_true', default=False,
help='Load HGMD phenotypes and associations for SNPs.'),
make_option('--hgmd-indel', action='store_true', default=False,
help='Load HGMD phenotypes and associations for INDELs.'),
)
def load_hpo(self, cursor, using):
keys = ['gene_id', 'hpo_terms']
# Fetch, parse and load only genes that cleanly map to the gene table.
# Attempting to join against the synonym table may lead to ambiguity
# as to which approved gene is the correct one.
cursor.execute('''
SELECT "gene"."id", "hpo_terms"
FROM "raw"."hpo_gene_phenotypes"
LEFT OUTER JOIN "gene"
ON ("entrez_gene_symbol" = "gene"."symbol")
WHERE "gene"."symbol" IS NOT NULL
''')
phenotype_counter = 1
gene_phenotype_counter = 1
phenotype_fout = open('phenotype.txt', 'w+')
gene_phenotype_fout = open('gene_phenotype.txt', 'w+')
phenotype_header_keys = ['id', 'term', 'description', 'hpo_id']
gene_phenotype_header_keys = ['id', 'gene_id', 'phenotype_id']
phenotype_fout.write('\t'.join(phenotype_header_keys) + '\n')
gene_phenotype_fout.write('\t'.join(gene_phenotype_header_keys) + '\n')
phenotype_ids = {}
        hpo_term_re = re.compile(r'(.*?)\(HP:(\d+)\)$')
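        # Illustrative sketch (the input shape is an assumption inferred from
        # the regex): each ';'-separated entry in "hpo_terms" looks like
        # 'Abnormal ear(HP:0000598)', and the regex splits it into the term
        # text and the zero-padded HPO id:
        #   hpo_term_re.match('Abnormal ear(HP:0000598)').groups()
        #   -> ('Abnormal ear', '0000598')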
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
source_record = dict(zip(keys, row))
hpo_terms = [hpo_term_re.match(term).groups()
for term in source_record['hpo_terms'].split(';')]
for term, hpo_id in hpo_terms:
# Write term as new
if hpo_id not in phenotype_ids:
phenotype_ids[hpo_id] = phenotype_counter
phenotype_fout.write('\t'.join([str(phenotype_counter),
term, '\\N', hpo_id]) + '\n')
phenotype_counter += 1
gene_phenotype_fout.write('\t'.join([
str(gene_phenotype_counter),
str(source_record['gene_id']),
str(phenotype_ids[hpo_id])
]) + '\n')
gene_phenotype_counter += 1
phenotype_fout.flush()
gene_phenotype_fout.flush()
phenotype_fout.seek(0)
gene_phenotype_fout.seek(0)
phenotype_fout.readline()
gene_phenotype_fout.readline()
cursor.copy_from(phenotype_fout, 'phenotype',
columns=phenotype_header_keys)
cursor.copy_from(gene_phenotype_fout, 'gene_phenotype')
return phenotype_counter
load_hpo.short_name = 'HPO'
def load_hgmd_snp(self, cursor, using=None):
cursor.execute('''
select
hgmd.acc_num,
variant.id,
hgmd.disease,
phenotype.id,
gene.id as gene_id,
pubmed.pmid,
hgmd.pmid
from (
select
v.id,
substr(ve.hgvs_c, 3) as hgvs,
c.value as chr,
t.refseq_id
from variant_effect ve
inner join transcript t on (ve.transcript_id = t.id)
inner join variant v on (ve.variant_id = v.id)
inner join chromosome c on (v.chr_id = c.id)
inner join variant_type vt on (v.type_id = vt.id)
where vt.value = 'SNP'
and ve.hgvs_c is not null
) variant inner join
(
select
mut.acc_num,
                trim(both from regexp_replace(mut.disease, '\s*\?$', '')) as disease,
_mut.hgvs,
mut.gene,
mut.pmid,
_mut."refCORE" || '.' || _mut."refVER"::text as refseq_id
from raw.hgmd_mutation mut
inner join raw.hgmd_mutnomen _mut
on (mut.acc_num = _mut.acc_num)
inner join raw.hgmd_hg19_coords hg19
on (mut.acc_num = hg19.acc_num)
where _mut.hgvs is not null
) hgmd on (hgmd.refseq_id = variant.refseq_id
and hgmd.hgvs = variant.hgvs)
left outer join gene on (hgmd.gene = gene.symbol)
left outer join pubmed on (hgmd.pmid = pubmed.pmid::text)
left outer join phenotype on (hgmd.disease = phenotype.term)
''')
keys = ['hgmd_id', 'variant_id', 'phenotype', 'phenotype_id',
'gene_id', 'pubmed_id', 'pubmed']
load_hgmd_phenotypes('HGMD SNP', keys, cursor, using)
load_hgmd_snp.short_name = 'HGMD SNP'
def load_hgmd_indel(self, cursor, using=None):
        # The local variant may only partially overlap with the HGMD indel;
        # the greater of the two lengths of ref and alt must be used.
        # Note: Postgres 9.2 introduced int8range and the && operator, which
        # would simplify this logic.
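        # A worked sketch of the join condition below (illustrative only):
        # two closed intervals [a_start, a_end] and [b_start, b_end] overlap
        # exactly when a_start <= b_end and b_start <= a_end; the four
        # disjuncts enumerate the partial-overlap and containment cases,
        # which together are equivalent to that single test:
        #   def overlaps(a_start, a_end, b_start, b_end):
        #       return a_start <= b_end and b_start <= a_end
        #   overlaps(5, 10, 8, 12)   # True  (partial overlap)
        #   overlaps(5, 10, 6, 7)    # True  (containment)
        #   overlaps(5, 10, 11, 12)  # False (disjoint)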
cursor.execute('''
select
HGMD.hgmd_id,
V.id,
HGMD.disease,
phenotype.id,
gene.id,
pubmed.pmid,
HGMD.pmid
from (
select
v.id,
c.value as chr,
pos as start,
pos + greatest(length(ref), length(alt)) as end
from variant v
inner join chromosome c on (v.chr_id = c.id)
inner join variant_type vt on (v.type_id = vt.id)
where vt.value = 'INDEL'
) V inner join
(
select
m.acc_num as hgmd_id,
trim(both from regexp_replace(m.disease, '\s*\?$', ''))
as disease,
m.gene,
m.pmid,
c.chromosome as chr,
c."coordSTART" as start,
c."coordEND" as end
from raw.hgmd_indel m
inner join raw.hgmd_hg19_coords c
on (m.acc_num = c.acc_num)
where m.disease not like '%%?'
) HGMD on (V.chr = HGMD.chr and (
-- overlap
(V.start <= HGMD.end and V.end >= HGMD.end)
-- overlap
or (HGMD.start <= V.end and HGMD.end >= V.end)
-- V contained in HGMD
or (V.start >= HGMD.start and V.end <= HGMD.end)
                -- HGMD contained in V
or (HGMD.start >= V.start and HGMD.end <= V.end)
))
left outer join gene on (HGMD.gene = gene.symbol)
left outer join pubmed on (HGMD.pmid = pubmed.pmid::text)
left outer join phenotype
on (lower(HGMD.disease) = lower(phenotype.term))
''')
keys = ['hgmd_id', 'variant_id', 'phenotype', 'phenotype_id',
'gene_id', 'pubmed_id', 'pubmed']
load_hgmd_phenotypes('HGMD INDEL', keys, cursor, using)
load_hgmd_indel.short_name = 'HGMD INDEL'
def handle(self, *args, **options):
using = options.get('database')
load_hpo = self.load_hpo \
if options.get('hpo') else None
load_hgmd_snp = self.load_hgmd_snp \
if options.get('hgmd_snp') else None
load_hgmd_indel = self.load_hgmd_indel \
if options.get('hgmd_indel') else None
connection = connections[using]
cursor = connection.cursor()
for handler in (load_hpo, load_hgmd_snp, load_hgmd_indel):
if not handler:
continue
with transaction.commit_manually(using):
try:
handler(cursor, using)
transaction.commit()
except Exception:
transaction.rollback()
log.exception(
'Failed to load {0}'.format(handler.short_name))
# ---------------------------------------------------------------------------
import time
import httplib
from urllib import urlencode
from threading import Lock
from django.conf import settings
from django.core.cache import cache
from graphite.node import LeafNode, BranchNode
from graphite.readers import FetchInProgress
from graphite.logger import log
from graphite.util import unpickle
from graphite.render.hashing import compactHash
def connector_class_selector(https_support=False):
return httplib.HTTPSConnection if https_support else httplib.HTTPConnection
class RemoteStore(object):
lastFailure = 0.0
available = property(lambda self: time.time() - self.lastFailure > settings.REMOTE_RETRY_DELAY)
def __init__(self, host):
self.host = host
def find(self, query):
request = FindRequest(self, query)
request.send()
return request
def fail(self):
self.lastFailure = time.time()
class FindRequest(object):
__slots__ = ('store', 'query', 'connection',
'failed', 'cacheKey', 'cachedResult')
def __init__(self, store, query):
self.store = store
self.query = query
self.connection = None
self.failed = False
if query.startTime:
start = query.startTime - (query.startTime % settings.FIND_CACHE_DURATION)
else:
start = ""
if query.endTime:
end = query.endTime - (query.endTime % settings.FIND_CACHE_DURATION)
else:
end = ""
self.cacheKey = "find:%s:%s:%s:%s" % (store.host, compactHash(query.pattern), start, end)
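        # Illustrative bucketing sketch (300 is an assumed example value for
        # FIND_CACHE_DURATION):
        #   1400000123 - (1400000123 % 300) == 1399999800
        #   1400000299 - (1400000299 % 300) == 1399999800   # same bucket
        # so queries landing in the same window share one cached result.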
self.cachedResult = None
def send(self):
log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))
self.cachedResult = cache.get(self.cacheKey)
if self.cachedResult is not None:
log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
return
query_params = [
('local', '1'),
('format', 'pickle'),
('query', self.query.pattern),
]
if self.query.startTime:
query_params.append( ('from', self.query.startTime) )
if self.query.endTime:
query_params.append( ('until', self.query.endTime) )
query_string = urlencode(query_params)
try:
connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
self.connection = connector_class(self.store.host)
self.connection.timeout = settings.REMOTE_FIND_TIMEOUT
self.connection.request('GET', '/metrics/find/?' + query_string)
except:
log.exception("FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query))
self.store.fail()
self.failed = True
def get_results(self):
if self.failed:
return
if self.cachedResult is not None:
results = self.cachedResult
else:
if self.connection is None:
self.send()
try:
try: # Python 2.7+, use buffering of HTTP responses
response = self.connection.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
response = self.connection.getresponse()
assert response.status == 200, "received error response %s - %s" % (response.status, response.reason)
result_data = response.read()
results = unpickle.loads(result_data)
except:
log.exception("FindRequest.get_results(host=%s, query=%s) exception processing response" % (self.store.host, self.query))
self.store.fail()
return
cache.set(self.cacheKey, results, settings.FIND_CACHE_DURATION)
for node_info in results:
if node_info.get('is_leaf'):
reader = RemoteReader(self.store, node_info, bulk_query=self.query.pattern)
node = LeafNode(node_info['path'], reader)
else:
node = BranchNode(node_info['path'])
node.local = False
yield node
class ReadResult(object):
__slots__ = ('lock', 'store', 'has_done_response_read', 'result', 'done_cb', 'connection', 'urlpath')
def __init__(self, store, urlpath, done_cb):
self.lock = Lock()
self.store = store
self.has_done_response_read = False
self.result = None
self.done_cb = done_cb
self.urlpath = urlpath
self._connect(urlpath)
def _connect(self, urlpath):
url = "http://%s%s" % (self.store.host, urlpath)
try:
log.info("ReadResult :: requesting %s" % url)
connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
self.connection = connector_class(self.store.host)
self.connection.timeout = settings.REMOTE_FETCH_TIMEOUT
self.connection.request('GET', urlpath)
except:
self.store.fail()
log.exception("Error requesting %s" % url)
raise
def get(self):
"""
First thread to call `get` will read a response from alrady established connections
Subsequent calls will get memoized response
"""
with self.lock:
if not self.has_done_response_read: # we are first one to call for result
return self.read_response()
else: # result was already read, return it
if self.result is None:
raise Exception("Passive remote fetch failed to find cached results")
return self.result
def read_response(self): # called under self.lock
try:
self.has_done_response_read = True
# safe if self.connection.timeout works as advertised
try: # Python 2.7+, use buffering of HTTP responses
response = self.connection.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
response = self.connection.getresponse()
if response.status != 200:
raise Exception("Error response %d %s from http://%s%s" % (response.status, response.reason, self.store.host, self.urlpath))
pickled_response = response.read()
self.result = {
series['name']: series
for series in unpickle.loads(pickled_response)
}
return self.result
except:
self.store.fail()
log.exception("Error requesting http://%s%s" % (self.store.host, self.urlpath))
raise
finally:
self.done_cb()
class RemoteReader(object):
__slots__ = ('store', 'metric_path', 'intervals', 'query', 'connection')
inflight_requests = {}
inflight_lock = Lock()
def __init__(self, store, node_info, bulk_query=None):
self.store = store
self.metric_path = node_info['path']
self.intervals = node_info['intervals']
self.query = bulk_query or node_info['path']
self.connection = None
def __repr__(self):
return '<RemoteReader[%x]: %s>' % (id(self), self.store.host)
def get_intervals(self):
return self.intervals
def fetch(self, startTime, endTime):
query_params = [
('target', self.query),
('format', 'pickle'),
('local', '1'),
('noCache', '1'),
('from', str( int(startTime) )),
('until', str( int(endTime) ))
]
query_string = urlencode(query_params)
urlpath = '/render/?' + query_string
url = "http://%s%s" % (self.store.host, urlpath)
fetch_result = self.get_inflight_requests(url, urlpath)
def extract_my_results():
series = fetch_result.get().get(self.metric_path, None)
if not series:
return None
time_info = (series['start'], series['end'], series['step'])
return (time_info, series['values'])
return FetchInProgress(extract_my_results)
def get_inflight_requests(self, url, urlpath):
with self.inflight_lock:
if url not in self.inflight_requests:
self.inflight_requests[url] = ReadResult(self.store, urlpath, lambda: self.done_inflight_request(url))
return self.inflight_requests[url]
def done_inflight_request(self, url):
with self.inflight_lock:
del self.inflight_requests[url]
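# Illustrative usage sketch (the host name and the FindQuery-like `query`
# object are assumptions for the example). Concurrent fetches of the same URL
# are deduplicated through RemoteReader.inflight_requests, so only one HTTP
# request is issued per render URL:
#   store = RemoteStore('graphite-cache-01:8080')
#   request = store.find(query)
#   for node in request.get_results():
#       if node.is_leaf:
#           result = node.fetch(start, end)   # returns a FetchInProgress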
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 09 21:29:20 2013
Author: Josef Perktold
"""
import os
import numpy as np
import pandas as pd
import statsmodels.discrete.discrete_model as smd
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.genmod.families import links
from statsmodels.regression.linear_model import OLS
from statsmodels.base.covtype import get_robustcov_results
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.tools import add_constant
from numpy.testing import assert_allclose, assert_equal, assert_
import statsmodels.tools._testing as smt
# get data and results as module global for now, TODO: move to class
from .results import results_count_robust_cluster as results_st
cur_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(cur_dir, "results", "ships.csv")
data_raw = pd.read_csv(filepath, index_col=False)
data = data_raw.dropna()
#mod = smd.Poisson.from_formula('accident ~ yr_con + op_75_79', data=dat)
# Do not use formula for tests against Stata because intercept needs to be last
endog = data['accident']
exog_data = data['yr_con op_75_79'.split()]
exog = add_constant(exog_data, prepend=False)
group = np.asarray(data['ship'], int)
exposure = np.asarray(data['service'])
# TODO get the test methods from regression/tests
class CheckCountRobustMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
if len(res1.params) == (len(res2.params) - 1):
# Stata includes lnalpha in table for NegativeBinomial
mask = np.ones(len(res2.params), np.bool_)
mask[-2] = False
res2_params = res2.params[mask]
res2_bse = res2.bse[mask]
else:
res2_params = res2.params
res2_bse = res2.bse
assert_allclose(res1._results.params, res2_params, 1e-4)
assert_allclose(self.bse_rob / self.corr_fact, res2_bse, 6e-5)
@classmethod
def get_robust_clu(cls):
res1 = cls.res1
cov_clu = sw.cov_cluster(res1, group)
cls.bse_rob = sw.se_cov(cov_clu)
cls.corr_fact = cls.get_correction_factor(res1)
@classmethod
def get_correction_factor(cls, results, sub_kparams=True):
mod = results.model
nobs, k_vars = mod.exog.shape
if sub_kparams:
# TODO: document why we adjust by k_params for some classes
# but not others.
k_params = len(results.params)
else:
k_params = 0
corr_fact = (nobs - 1.) / float(nobs - k_params)
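        # Illustrative numbers (not tied to the test data): with nobs=34 and
        # k_params=3, corr_fact = (34 - 1.) / (34 - 3) = 33/31, so the bse
        # are rescaled by sqrt(33/31) ~= 1.0318.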
# for bse we need sqrt of correction factor
return np.sqrt(corr_fact)
def test_oth(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1._results.llf, res2.ll, 1e-4)
assert_allclose(res1._results.llnull, res2.ll_0, 1e-4)
def test_ttest(self):
smt.check_ttest_tvalues(self.res1)
def test_waldtest(self):
smt.check_ftest_pvalues(self.res1)
class TestPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
debug = False
if debug:
# for debugging
cls.bse_nonrobust = cls.res1.bse.copy()
cls.res1 = res1 = mod.fit(disp=False)
cls.get_robust_clu()
cls.res3 = cls.res1
cls.bse_rob3 = cls.bse_rob.copy()
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
# TODO: refactor xxxFit to full testing results
class TestPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
# scaling of cov_params_default to match Stata
# TODO should the default be changed?
nobs, k_params = mod.exog.shape
# TODO: this is similar but not identical to logic in
# get_correction_factor; can we de-duplicate?
sc_fact = (nobs-1.) / float(nobs - k_params)
cls.res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
scaling_factor=1. / sc_fact,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
# backwards compatibility with inherited test methods
cls.corr_fact = 1
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.tvalues, res2.tvalues, rtol=rtol, atol=1e-8)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestPoissonHC1FitExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_hc1
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestPoissonCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluExposureGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse #sw.se_cov(cov_clu)
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
        mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
cls.get_robust_clu()
class TestGLMPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit()
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
# TODO: refactor xxxFit to full testing results
class TestGLMPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit(cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestGLMPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit(cov_type='HC1')
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1, sub_kparams=False)
class TestNegbinClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = mod.fit(disp=False, gtol=1e-7)
cls.get_robust_clu()
class TestNegbinCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
# mod_nbe = smd.NegativeBinomial(endog, exog, exposure=data['service'])
# res_nbe = mod_nbe.fit()
# mod_nb = smd.NegativeBinomial(endog, exog)
# res_nb = mod_nb.fit()
#
# cov_clu_nb = sw.cov_cluster(res_nb, group)
# k_params = k_vars + 1
# print sw.se_cov(cov_clu_nb / ((nobs-1.) / float(nobs - k_params)))
#
# wt = res_nb.wald_test(np.eye(len(res_nb.params))[1:3], cov_p=cov_clu_nb/((nobs-1.) / float(nobs - k_params)))
# print wt
#
# print dir(results_st)
class TestNegbinCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, gtol=1e-7)
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestNegbinCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
gtol=1e-7)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class TestNegbinCluExposureFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
cls.corr_fact = cls.get_correction_factor(cls.res1)
class CheckDiscreteGLM(object):
# compare GLM with other models, no verified reference results
def test_basic(self):
res1 = self.res1 # GLM model
res2 = self.res2 # comparison model, discrete or OLS
assert_equal(res1.cov_type, self.cov_type)
assert_equal(res2.cov_type, self.cov_type)
rtol = getattr(res1, 'rtol', 1e-13)
assert_allclose(res1.params, res2.params, rtol=rtol)
assert_allclose(res1.bse, res2.bse, rtol=1e-10)
def test_score_hessian(self):
res1 = self.res1
res2 = self.res2
# We need to fix scale in GLM and OLS,
# discrete MLE have it always fixed
if isinstance(res2.model, OLS):
kwds = {'scale': res2.scale}
else:
kwds = {}
if isinstance(res2.model, OLS):
sgn = + 1
else:
sgn = -1 # see #4714
score1 = res1.model.score(res1.params * 0.98, scale=res1.scale)
score2 = res2.model.score(res1.params * 0.98, **kwds)
assert_allclose(score1, score2, rtol=1e-13)
hess1 = res1.model.hessian(res1.params, scale=res1.scale)
hess2 = res2.model.hessian(res1.params, **kwds)
assert_allclose(hess1, hess2, rtol=1e-10)
if isinstance(res2.model, OLS):
# skip the rest
return
scoref1 = res1.model.score_factor(res1.params, scale=res1.scale)
scoref2 = res2.model.score_factor(res1.params, **kwds)
assert_allclose(scoref1, scoref2, rtol=1e-10)
hessf1 = res1.model.hessian_factor(res1.params, scale=res1.scale)
hessf2 = res2.model.hessian_factor(res1.params, **kwds)
assert_allclose(sgn * hessf1, hessf2, rtol=1e-10)
def test_score_test(self):
res1 = self.res1
res2 = self.res2
if isinstance(res2.model, OLS):
# skip
return
fitted = self.res1.fittedvalues
exog_extra = np.column_stack((fitted**2, fitted**3))
res_lm1 = res1.score_test(exog_extra, cov_type='nonrobust')
res_lm2 = res2.score_test(exog_extra, cov_type='nonrobust')
assert_allclose(np.hstack(res_lm1), np.hstack(res_lm2), rtol=5e-7)
class TestGLMPoisson(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
        np.random.seed(987125643)  # arbitrary seed, not intentionally chosen
endog_count = np.random.poisson(endog)
cls.cov_type = 'HC0'
mod1 = GLM(endog_count, exog, family=families.Poisson())
cls.res1 = mod1.fit(cov_type='HC0')
mod1 = smd.Poisson(endog_count, exog)
cls.res2 = mod1.fit(cov_type='HC0')
cls.res1.rtol = 1e-11
class TestGLMLogit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Logit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMLogitOffset(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
offset = np.ones(endog_bin.shape[0])
mod1 = GLM(endog_bin, exog, family=families.Binomial(), offset=offset)
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Logit(endog_bin, exog, offset=offset)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMProbit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial(link=links.probit()))
cls.res1 = mod1.fit(method='newton',
cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Probit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
cls.rtol = 1e-6
def test_score_hessian(self):
res1 = self.res1
res2 = self.res2
# Note scale is fixed at 1, so we do not need to fix it explicitly
score1 = res1.model.score(res1.params * 0.98)
score2 = res2.model.score(res1.params * 0.98)
assert_allclose(score1, score2, rtol=1e-13)
hess1 = res1.model.hessian(res1.params)
hess2 = res2.model.hessian(res1.params)
assert_allclose(hess1, hess2, rtol=1e-13)
class TestGLMProbitOffset(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
offset = np.ones(endog_bin.shape[0])
mod1 = GLM(endog_bin, exog,
family=families.Binomial(link=links.probit()),
offset=offset)
cls.res1 = mod1.fit(method='newton',
cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Probit(endog_bin, exog, offset=offset)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
cls.rtol = 1e-6
class TestGLMGaussNonRobust(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'nonrobust'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit()
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit()
class TestGLMGaussClu(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'cluster'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussHC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HC0'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HC0')
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HC0')
class TestGLMGaussHAC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'maxlags':2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds)
class TestGLMGaussHAC2(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
# check kernel specified as string
kwds = {'kernel': 'bartlett', 'maxlags': 2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
kwds2 = {'maxlags': 2}
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds2)
class TestGLMGaussHACUniform(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'kernel':sw.weights_uniform, 'maxlags':2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds)
#for debugging
cls.res3 = mod2.fit(cov_type='HAC', cov_kwds={'maxlags':2})
def test_cov_options(self):
        # check keyword `weights_func`
kwdsa = {'weights_func': sw.weights_uniform, 'maxlags': 2}
res1a = self.res1.model.fit(cov_type='HAC', cov_kwds=kwdsa)
res2a = self.res2.model.fit(cov_type='HAC', cov_kwds=kwdsa)
assert_allclose(res1a.bse, self.res1.bse, rtol=1e-12)
assert_allclose(res2a.bse, self.res2.bse, rtol=1e-12)
# regression test for bse values
bse = np.array([ 2.82203924, 4.60199596, 11.01275064])
assert_allclose(res1a.bse, bse, rtol=1e-6)
assert_(res1a.cov_kwds['weights_func'] is sw.weights_uniform)
kwdsb = {'kernel': sw.weights_bartlett, 'maxlags': 2}
res1a = self.res1.model.fit(cov_type='HAC', cov_kwds=kwdsb)
res2a = self.res2.model.fit(cov_type='HAC', cov_kwds=kwdsb)
assert_allclose(res1a.bse, res2a.bse, rtol=1e-12)
# regression test for bse values
bse = np.array([ 2.502264, 3.697807, 9.193303])
assert_allclose(res1a.bse, bse, rtol=1e-6)
class TestGLMGaussHACUniform2(TestGLMGaussHACUniform):
@classmethod
def setup_class(cls):
cls.cov_type = 'HAC'
kwds={'kernel': sw.weights_uniform, 'maxlags': 2}
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HAC', cov_kwds=kwds)
# check kernel as string
mod2 = OLS(endog, exog)
kwds2 = {'kernel': 'uniform', 'maxlags': 2}
        cls.res2 = mod2.fit(cov_type='HAC', cov_kwds=kwds2)
class TestGLMGaussHACPanel(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-panel'
# time index is just made up to have a test case
time = np.tile(np.arange(7), 5)[:-1]
mod1 = GLM(endog.copy(), exog.copy(), family=families.Gaussian())
kwds = dict(time=time,
maxlags=2,
kernel=sw.weights_uniform,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-panel', cov_kwds=kwds)
cls.res1b = mod1.fit(cov_type='nw-panel', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-panel', cov_kwds=kwds)
def test_kwd(self):
# test corrected keyword name
assert_allclose(self.res1b.bse, self.res1.bse, rtol=1e-12)
class TestGLMGaussHACPanelGroups(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-panel'
# time index is just made up to have a test case
groups = np.repeat(np.arange(5), 7)[:-1]
mod1 = GLM(endog.copy(), exog.copy(), family=families.Gaussian())
kwds = dict(groups=pd.Series(groups), # check for #3606
maxlags=2,
kernel=sw.weights_uniform,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-panel', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-panel', cov_kwds=kwds)
class TestGLMGaussHACGroupsum(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'hac-groupsum'
# time index is just made up to have a test case
time = np.tile(np.arange(7), 5)[:-1]
mod1 = GLM(endog, exog, family=families.Gaussian())
kwds = dict(time=pd.Series(time), # check for #3606
maxlags=2,
use_correction='hac',
df_correction=False)
cls.res1 = mod1.fit(cov_type='hac-groupsum', cov_kwds=kwds)
cls.res1b = mod1.fit(cov_type='nw-groupsum', cov_kwds=kwds)
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='hac-groupsum', cov_kwds=kwds)
def test_kwd(self):
# test corrected keyword name
assert_allclose(self.res1b.bse, self.res1.bse, rtol=1e-12)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
import errno
import getopt
import math
import os
import progress
import re
import select
import signal
import socket
import subprocess
import sys
import time
from xml.dom.minidom import parse, parseString
from xml.dom import DOMException
from pprint import pprint
from configtool import getConfigVal, setConfigVal
try:
import numpy
except:
sys.stderr.write("failed to import numpy\n")
#return number of cpus online
def cpuCount():
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except:
pass
try:
return os.sysconf("_SC_NPROCESSORS_ONLN")
except:
pass
try:
return int(os.environ["NUMBER_OF_PROCESSORS"])
except:
pass
try:
return int(os.environ["NUM_PROCESSORS"])
except:
sys.stderr.write("failed to get the number of processors\n")
return 1 # guess 1
def getmemorysize():
try:
return int(re.match("MemTotal: *([0-9]+) *kB", open("/proc/meminfo").read()).group(1))*1024
except:
sys.stderr.write("failed to get total memory\n")
return 8 * (1024**3) # guess 8gb
def setmemlimit(n = getmemorysize()):
try:
import resource
resource.setrlimit(resource.RLIMIT_AS, (n,n))
except:
sys.stderr.write("failed to set memory limit\n")
def parallelRunJobs(jobs, nParallelJobs=None):
#outFile=open("/tmp/parallelRunJobs.txt", "w")
class JobInfo:
def __init__(self, id, fn):
self.id=id
self.fn=fn
self.pid=None
self.fd=None
self.msg=""
self.rv=None
def __cmp__(this, that):
return this.id-that.id
def fileno(self):
return self.fd.fileno()
def forkrun(self):
self.fd, w = socket.socketpair()
self.pid = os.fork()
if self.pid == 0:
#child
progress.disable()
self.fd.close()
class Redir():
def __init__(self, fd):
self.fd=fd
def write(self, s):
self.fd.sendall(s)
#outFile.write(s)
#outFile.flush()
sys.stdout = Redir(w)
#sys.stderr = sys.stdout
try:
rv = self.fn()
except Exception, e:
#import traceback
#traceback.print_exc()
print "Exception:",e
rv = False
print exitval
if rv:
sys.exit(0)
else:
sys.exit(1)
else:
#parent
w.close()
self.fd.setblocking(0)
return self
def handleevent(self):
if self.pid is None:
return None
try:
m=self.fd.recv(1024)
if m is not None:
self.msg+=m
if self.msg.rfind(exitval) >= 0:
raise Exception("done")
except:
pid, self.rv = os.waitpid(self.pid, 0)
assert self.pid == pid
self.pid = None
self.fd.close()
self.fd = None
def kill(self):
if self.pid is not None:
os.kill(self.pid, signal.SIGKILL)
def addmsg(self, msg):
self.msg+=msg
def getmsg(self):
return self.msg.replace(exitval,"") \
.replace('\n',' ') \
.strip()
startline = progress.currentline()
if nParallelJobs is None:
# nParallelJobs=cpuCount()
nParallelJobs=max(1, cpuCount()/4)
exitval="!EXIT!"
maxprinted=[0]
jobs_pending = map(lambda id: JobInfo(id, jobs[id]), xrange(len(jobs)))
jobs_running = [] # JobInfo list
jobs_done = [] # JobInfo list
def mkstatus():
s="running jobs: "
failed=len(filter(lambda x: x.rv!=0, jobs_done))
complete=(len(jobs_done)-failed)
if complete>0:
s += "%d complete, "%complete
if failed>0:
s += "%d failed, "%failed
s += "%d running, "%len(jobs_running)
s += "%d pending"%len(jobs_pending)
return s
def updatestatus(fast=False):
progress.remaining(2*len(jobs_pending)+len(jobs_running))
if not fast:
for j in jobs_done[maxprinted[0]:]:
if j.id==maxprinted[0]:
print j.getmsg()
maxprinted[0]+=1
else:
break
progress.push()
progress.status(mkstatus)
updatestatus()
try:
while len(jobs_pending)>0 or len(jobs_running)>0:
#spawn new jobs
while len(jobs_pending)>0 and len(jobs_running)<nParallelJobs:
jobs_running.append(jobs_pending.pop(0).forkrun())
updatestatus()
#wait for an event
rj, wj, xj = select.select(jobs_running, [], jobs_running)
#handle pending data
for j in rj:
j.handleevent()
for j in wj:
j.handleevent()
for j in xj:
j.handleevent()
#move completed jobs to jobs_done list
newdone=filter(lambda x: x.pid is None, jobs_running)
jobs_running = filter(lambda x: x.pid is not None, jobs_running)
jobs_done.extend(newdone)
jobs_done.sort()
updatestatus(True)
except KeyboardInterrupt:
for j in jobs_running:
j.kill()
j.addmsg("INTERRUPTED")
jobs_done.extend(jobs_running)
jobs_done.sort()
updatestatus()
raise
updatestatus()
progress.pop()
return jobs_done
def getscriptpath():
try:
import configtool
m=re.search('''from ['"](.*)['"]''', str(configtool))
return os.path.dirname(m.group(1))
except:
return os.path.abspath(os.path.dirname(sys.argv[0]))
def chdirToPetabricksRoot():
old = os.getcwd()
new = getscriptpath()
isCurDirOk = lambda: os.path.isfile("src/compiler/pbc.cpp")
if not isCurDirOk():
os.chdir(new)
if not isCurDirOk():
os.chdir(os.pardir)
if not isCurDirOk():
os.chdir(old)
raise Exception("This script should be run from petabricks root directory")
def compilePetabricks():
cmd=["make","-sqC","src","all"]
if subprocess.call(cmd) != 0:
cmd=["make", "-j%d"%cpuCount()]
p=subprocess.Popen(cmd)
rv=p.wait()
if rv!=0:
raise Exception("pbc compile failed")
return rv
return 0
def expandBenchmarkName(name, ext):
base=re.sub("[.]pbcc$","", name)
if ext:
name=base+ext
if os.path.isfile(name):
return name
if name[0] != '/':
#Try to locate the file in the standard position
return "./examples/%s" % (name)
else:
return name
benchmarkToBin = lambda name: expandBenchmarkName(name, "")
benchmarkToSrc = lambda name: expandBenchmarkName(name, ".pbcc")
benchmarkToInfo = lambda name: expandBenchmarkName(name, ".info")
benchmarkToCfg = lambda name: expandBenchmarkName(name, ".cfg")
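# Illustrative mapping (assuming the files do not already exist relative to
# the current directory, so the "./examples" fallback is taken):
#   benchmarkToSrc('simple/add') -> './examples/simple/add.pbcc'
#   benchmarkToBin('simple/add') -> './examples/simple/add'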
class InvalidBenchmarkNameException(Exception):
def __init__(self, name):
self.name=name
def __str__(self):
return "InvalidBenchmarkNameException(%s)" % self.name
def searchBenchmarkName(n):
for root, dirs, files in os.walk("./examples"):
if n in files or n + ".pbcc" in files:
return normalizeBenchmarkName("%s/%s"%(root,n), False)
raise InvalidBenchmarkNameException(n)
def normalizeBenchmarkName(orig, search=True):
n=re.sub("^[./]*examples[/]", "", orig);
n=re.sub("[.]pbcc$","", n)
if os.path.isfile(orig+".pbcc"):
orig = os.path.abspath(orig+".pbcc")
elif os.path.isfile(orig):
orig = os.path.abspath(orig)
else:
orig = None
if os.path.isfile(benchmarkToSrc(n)) or not search:
return n
else:
try:
return searchBenchmarkName(n)
except InvalidBenchmarkNameException:
if orig is not None:
return orig
raise
def compileBenchmark(pbc, src, binary=None, info=None, jobs=None, heuristics=None):
if not os.path.isfile(src):
raise IOError()
#Build the command
cmd=[pbc]
if binary is not None:
cmd.append("--output="+binary)
if info is not None:
cmd.append("--outputinfo="+info)
if jobs is not None:
cmd.append("--jobs="+str(jobs))
if heuristics is not None:
cmd.append("--heuristics="+heuristics)
cmd.append(src)
#Remove the output file (if it exists)
    if binary is not None and os.path.isfile(binary):
os.unlink(binary)
#Execute the compiler
p = subprocess.Popen(cmd, stdout=NULL, stderr=NULL)
status = p.wait()
return status
def compileBenchmarks(benchmarks, learning=False, heuristicSetFileName=None, noLearningList=[]):
NULL=open("/dev/null","w")
pbc="./src/pbc"
libdepends=[pbc, "./src/libpbmain.a", "./src/libpbruntime.a", "./src/libpbcommon.a"]
assert os.path.isfile(pbc)
benchmarkMaxLen=0
jobs_per_pbc=max(1, 2*cpuCount() / len(benchmarks))
#from learningcompiler import LearningCompiler
#compiler = LearningCompiler(pbc, heuristicSetFileName, jobs=jobs_per_pbc)
def innerCompileBenchmark(name):
print name.ljust(benchmarkMaxLen)
src=benchmarkToSrc(name)
binary=benchmarkToBin(name)
srcModTime=max(os.path.getmtime(src), reduce(max, map(os.path.getmtime, libdepends)))
if os.path.isfile(binary) and os.path.getmtime(binary) > srcModTime:
print "compile SKIPPED"
return True
try:
#if learning and (name not in noLearningList):
# status=compiler.compileLearningHeuristics(src, finalBinary=binary)
#else:
status=compileBenchmark(pbc, src, binary=binary, jobs=jobs_per_pbc)
if status == 0:
print "compile PASSED"
return True
else:
print "compile FAILED (rc=%d)"%status
return False
except IOError:
print "invalid benchmark"
return False
newjob = lambda name, fn: lambda: innerCompileBenchmark(name) and fn()
mergejob = lambda oldfn, fn: lambda: oldfn() and fn()
jobs=[]
# build jobs list
jobsdata = dict()
for b in benchmarks:
if type(b) is type(()):
name, fn, postfn = b
else:
name, fn, postfn = b, lambda: True, lambda: True
benchmarkMaxLen=max(benchmarkMaxLen, len(name))
if not jobsdata.has_key(name):
jobsdata[name] = [newjob(name,fn), postfn]
jobs.append(name)
else:
jobsdata[name][0] = mergejob(jobsdata[name][0], fn)
jobs = map(lambda n: mergejob(*jobsdata[n]), jobs)
if learning:
#Cannot run multiple jobs in parallel: the autotuning results
#would be affected
return parallelRunJobs(jobs, nParallelJobs=1)
else:
return parallelRunJobs(jobs)
def loadAndCompileBenchmarks(file, searchterms=[], extrafn=lambda b: True, postfn=lambda b: True, learning=False, heuristicSetFileName=None, noLearningList=[]):
chdirToPetabricksRoot()
compilePetabricks()
benchmarks=open(file)
stripcomment = re.compile("([^#]*)([#].*)?")
benchmarks=map(lambda x: stripcomment.match(x).group(1).strip(), benchmarks)
benchmarks=filter(lambda x: len(x)>0, benchmarks)
ws = re.compile("[ \t]+")
benchmarks=map(lambda x: ws.split(x), benchmarks)
if len(searchterms)>0:
benchmarks=filter(lambda b: any(s in b[0] for s in searchterms), benchmarks)
for b in benchmarks:
b[0]=normalizeBenchmarkName(b[0])
return compileBenchmarks(map(lambda x: (x[0], lambda: extrafn(x), lambda: postfn(x[0])), benchmarks), learning, heuristicSetFileName, noLearningList), benchmarks
def killSubprocess(p):
if p.poll() is None:
try:
p.kill() #requires python 2.6
except:
os.kill(p.pid, signal.SIGTERM)
def tryAorB(A, B):
def tryAorBinst(x):
try:
return A(x)
except:
return B(x)
return tryAorBinst
#attempt to convert to an int or float
tryIntFloat = tryAorB(int, tryAorB(float, lambda x: x))
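# Illustrative behavior:
#   tryIntFloat("3")   -> 3       (int)
#   tryIntFloat("3.5") -> 3.5     (float)
#   tryIntFloat("N/A") -> "N/A"   (left unchanged)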
class TimingRunTimeout(Exception):
    # Raised with no arguments, so __str__ must not rely on instance state
    def __str__(self):
        return "TimingRunTimeout()"
class TimingRunFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def goodwait(p):
    '''
    Python (before PEP 475 / 3.5) does not retry system calls that are
    interrupted with EINTR, so we have to catch and retry here.
    '''
rv=None
while True:
try:
rv=p.wait()
return rv
except OSError, e:
if e.errno != errno.EINTR:
raise
def xmlToDict(xml, tag, fn=tryIntFloat, idx=0):
try:
rslt = xml.getElementsByTagName(tag)[idx].attributes
attrs=dict()
for x in xrange(rslt.length):
attrs[str(rslt.item(x).name)]=fn(rslt.item(x).nodeValue)
return attrs
except Exception,e:
return None
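# Illustrative sketch of xmlToDict (the XML shape is an assumption):
#   xml = parseString('<root><timing average="1.5" count="10"/></root>')
#   xmlToDict(xml, 'timing') -> {'average': 1.5, 'count': 10}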
NULL=open("/dev/null", "w")
def callAndWait(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
if p.returncode == -15:
raise TimingRunTimeout()
if p.returncode != 0:
raise TimingRunFailed(p.returncode)
return p
#run a command, parse the XML timing results, and retry on program crashes
def executeRun(cmd, returnTags=['timing', 'accuracy', 'outputhash'], retries=3):
p = callAndWait(cmd)
try:
xml = parse(p.stdout)
except Exception, e:
print 'program crash',e
if retries>1:
return executeRun(cmd, returnTags, retries-1)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
print p.stdout.read()
sys.exit(99)
timing = xmlToDict(xml, "timing")
if timing['average'] > 2**31:
raise TimingRunTimeout()
if type(returnTags) is type(""):
return xmlToDict(xml, returnTags)
else:
return map(lambda t: xmlToDict(xml, t), returnTags)
def executeRaceRun(_cmd, configa, configb, retries=3):
cmd = _cmd + ['--config='+configa, '--race-with='+configb]
p = callAndWait(cmd)
try:
xml = parse(p.stdout)
except Exception, e:
print 'program crash',e
if retries>1:
return executeRaceRun(_cmd, configa, configb, retries-1)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
print p.stdout.read()
sys.exit(99)
aresult = xmlToDict(xml, "testresult", tryIntFloat, 0)
bresult = xmlToDict(xml, "testresult", tryIntFloat, 1)
assert aresult['label']==0
assert bresult['label']==1
return aresult, bresult
#parse timing results with a given time limit
def executeTimingRun(prog, n, args=[], limit=None, returnTags='timing'):
cmd = [ prog, "--n=%d"%n, "--time" ]
cmd.extend(args);
if limit:
cmd.append("--max-sec=%f" % float(limit))
return executeRun(cmd, returnTags)
def collectTimingSamples(prog, n=100, step=100, maxTime=10.0, x=[], y=[], args=[], scaler=lambda x: x):
start=time.time()
left=maxTime
try:
while left>0:
ni = int(math.ceil(scaler(n)))
y.append(executeTimingRun(prog, ni, args=args, limit=int(left+1))['average'])
x.append(ni)
n+=step
left=start+maxTime-time.time()
except TimingRunTimeout:
if len(x)<1:
raise
return x,y
def binarySearchInverse(fx, y, thresh=0.001, min=0.0, max=1000000000):
y0=fx(min)
yn=fx(max)
assert y0<=yn
if y0 > y-thresh:
return min
if yn < y+thresh:
return max
guess=(min+max)/2.0
yguess=fx(guess)
#binary search
if abs(yguess-y) < thresh:
return guess
if yguess>y:
return binarySearchInverse(fx, y, thresh, min, guess)
else:
return binarySearchInverse(fx, y, thresh, guess, max)
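# Illustrative use of binarySearchInverse: numerically invert a monotonically
# increasing function, e.g. recover sqrt(2) from f(x) = x**2:
#   binarySearchInverse(lambda x: x**2, 2.0, thresh=1e-6, min=0.0, max=10.0)
#   -> ~1.41421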
#fit y = c1 * x**c2
def expFitRaw(x,y):
# shift to log scale
x=map(lambda z: math.log(z,2), x)
y=map(lambda z: math.log(z,2), y)
# and polyfit
c2,c1 = numpy.polyfit(x, y, 1)
c1=2**c1
return c1,c2
#fit y = c1 * x**c2
def expFit(x,y):
c1,c2 = expFitRaw(x,y)
return lambda x: c1*x**c2,\
lambda y: 2**(math.log(y/c1, 2)/c2), \
"%.10f * x^%.4f"%(c1,c2)
#fit y = p[0]*x**n + ... + p[n-2]*x + p[n-1]
#order is picked automatically based on expFit
def polyFit(x,y):
c1, order = expFitRaw(x,y)
p = numpy.polyfit(x, y, int(math.ceil(order)))
fx=lambda x: numpy.polyval(p,x)
invfx=lambda y: binarySearchInverse(fx, y)
return fx, invfx, repr(p)
def collectTimingSamples2(prog, maxTime=12.0, args=[]):
#make initial guess at order
x,y=collectTimingSamples(prog, 4, 1, maxTime, args=args, scaler=lambda x: 2**x)
return x,y
def testEstimation(x, y, fit, prog):
pf, pinv, pStr = fit(x,y)
print " ",pStr
print " est 10k", pf(10000) #, "actual=", executeTimingRun(prog,10000)['average']
print " est 1 sec", (pinv(1))
print " est 2 sec", (pinv(2))
print " est 3 sec", (pinv(3))
def inferGoodInputSizes(prog, desiredTimes, maxTime=5.0):
x,y=collectTimingSamples2(prog, maxTime)
efx, efy, estr = expFit(x,y)
#pfx, pfy, pstr = polyFit(x,y)
sizes=map(int, map(efy, desiredTimes))
return sizes
def getMakefileFlag(name):
r=re.compile("^"+name+"[ ]*[=][ ]*(.*)")
return r.match(filter(lambda l: r.match(l), open("src/Makefile"))[0]).group(1).strip()
getCXX = lambda: getMakefileFlag("CXX")
getCXXFLAGS = lambda: getMakefileFlag("CXXFLAGS")
def getTunables(tx, type):
return filter( lambda t: t.getAttribute("type")==type, tx.getElementsByTagName("tunable") )
getTunablesSequential=lambda tx: getTunables(tx, "system.cutoff.sequential")
getTunablesSplitSize=lambda tx: getTunables(tx, "system.cutoff.splitsize")
def mainname(bin):
    '''Ask a benchmark binary for the name of its main transform'''
    run_command = [bin, "--name"]
    p = subprocess.Popen(run_command, stdout=subprocess.PIPE, stderr=NULL)
    os.waitpid(p.pid, 0)
    lines = p.stdout.readlines()
    return lines[-1].strip()
def gitRevision(n=40):
try:
cmd=["git","log","-n","1","--pretty=format:%H"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
return p.communicate()[0][0:n]
except:
return "0"*n
if __name__ == "__main__":
chdirToPetabricksRoot()
print gitRevision()
compilePetabricks()
compileBenchmarks(map(normalizeBenchmarkName, ["add", "multiply", "transpose"]))
print "Estimating input sizes"
print inferGoodInputSizes("./examples/simple/add", [0.1,0.5,1.0], 2)
# ---------------------------------------------------------------------------
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2012 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
from typing import Sequence, Optional, TYPE_CHECKING
import aiorpcx
from .util import bh2u, TxMinedInfo, NetworkJobOnDefaultServer
from .crypto import sha256d
from .bitcoin import hash_decode, hash_encode
from .transaction import Transaction
from .blockchain import hash_header
from .interface import GracefulDisconnect
from .network import UntrustedServerReturnedError
from . import constants
if TYPE_CHECKING:
from .network import Network
from .address_synchronizer import AddressSynchronizer
class MerkleVerificationFailure(Exception): pass
class MissingBlockHeader(MerkleVerificationFailure): pass
class MerkleRootMismatch(MerkleVerificationFailure): pass
class InnerNodeOfSpvProofIsValidTx(MerkleVerificationFailure): pass
class SPV(NetworkJobOnDefaultServer):
""" Simple Payment Verification """
def __init__(self, network: 'Network', wallet: 'AddressSynchronizer'):
self.wallet = wallet
NetworkJobOnDefaultServer.__init__(self, network)
def _reset(self):
super()._reset()
self.merkle_roots = {} # txid -> merkle root (once it has been verified)
self.requested_merkle = set() # txid set of pending requests
async def _start_tasks(self):
async with self.group as group:
await group.spawn(self.main)
def diagnostic_name(self):
return self.wallet.diagnostic_name()
async def main(self):
self.blockchain = self.network.blockchain()
while True:
await self._maybe_undo_verifications()
await self._request_proofs()
await asyncio.sleep(0.1)
async def _request_proofs(self):
local_height = self.blockchain.height()
unverified = self.wallet.get_unverified_txs()
for tx_hash, tx_height in unverified.items():
# do not request merkle branch if we already requested it
if tx_hash in self.requested_merkle or tx_hash in self.merkle_roots:
continue
# or before headers are available
if tx_height <= 0 or tx_height > local_height:
continue
# if it's in the checkpoint region, we still might not have the header
header = self.blockchain.read_header(tx_height)
if header is None:
if tx_height < constants.net.max_checkpoint():
await self.group.spawn(self.network.request_chunk(tx_height, None, can_return_early=True))
continue
# request now
self.logger.info(f'requested merkle {tx_hash}')
self.requested_merkle.add(tx_hash)
await self.group.spawn(self._request_and_verify_single_proof, tx_hash, tx_height)
async def _request_and_verify_single_proof(self, tx_hash, tx_height):
try:
merkle = await self.network.get_merkle_for_transaction(tx_hash, tx_height)
except UntrustedServerReturnedError as e:
if not isinstance(e.original_exception, aiorpcx.jsonrpc.RPCError):
raise
self.logger.info(f'tx {tx_hash} not at height {tx_height}')
self.wallet.remove_unverified_tx(tx_hash, tx_height)
self.requested_merkle.discard(tx_hash)
return
# Verify the hash of the server-provided merkle branch to a
# transaction matches the merkle root of its block
if tx_height != merkle.get('block_height'):
self.logger.info('requested tx_height {} differs from received tx_height {} for txid {}'
.format(tx_height, merkle.get('block_height'), tx_hash))
tx_height = merkle.get('block_height')
pos = merkle.get('pos')
merkle_branch = merkle.get('merkle')
# we need to wait if header sync/reorg is still ongoing, hence lock:
async with self.network.bhi_lock:
header = self.network.blockchain().read_header(tx_height)
try:
verify_tx_is_in_block(tx_hash, merkle_branch, pos, header, tx_height)
except MerkleVerificationFailure as e:
if self.network.config.get("skipmerklecheck"):
self.logger.info(f"skipping merkle proof check {tx_hash}")
else:
self.logger.info(str(e))
raise GracefulDisconnect(e)
# we passed all the tests
self.merkle_roots[tx_hash] = header.get('merkle_root')
self.requested_merkle.discard(tx_hash)
self.logger.info(f"verified {tx_hash}")
header_hash = hash_header(header)
tx_info = TxMinedInfo(height=tx_height,
timestamp=header.get('timestamp'),
txpos=pos,
header_hash=header_hash)
self.wallet.add_verified_tx(tx_hash, tx_info)
#if self.is_up_to_date() and self.wallet.is_up_to_date():
# self.wallet.save_verified_tx(write=True)
@classmethod
def hash_merkle_root(cls, merkle_branch: Sequence[str], tx_hash: str, leaf_pos_in_tree: int):
"""Return calculated merkle root."""
try:
h = hash_decode(tx_hash)
merkle_branch_bytes = [hash_decode(item) for item in merkle_branch]
leaf_pos_in_tree = int(leaf_pos_in_tree) # raise if invalid
except Exception as e:
raise MerkleVerificationFailure(e)
if leaf_pos_in_tree < 0:
raise MerkleVerificationFailure('leaf_pos_in_tree must be non-negative')
index = leaf_pos_in_tree
for item in merkle_branch_bytes:
if len(item) != 32:
                raise MerkleVerificationFailure('all merkle branch items have to be 32 bytes long')
h = sha256d(item + h) if (index & 1) else sha256d(h + item)
index >>= 1
cls._raise_if_valid_tx(bh2u(h))
if index != 0:
raise MerkleVerificationFailure(f'leaf_pos_in_tree too large for branch')
return hash_encode(h)
@classmethod
def _raise_if_valid_tx(cls, raw_tx: str):
# If an inner node of the merkle proof is also a valid tx, chances are, this is an attack.
# https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-June/016105.html
# https://lists.linuxfoundation.org/pipermail/bitcoin-dev/attachments/20180609/9f4f5b1f/attachment-0001.pdf
# https://bitcoin.stackexchange.com/questions/76121/how-is-the-leaf-node-weakness-in-merkle-trees-exploitable/76122#76122
tx = Transaction(raw_tx)
try:
tx.deserialize()
except:
pass
else:
raise InnerNodeOfSpvProofIsValidTx()
async def _maybe_undo_verifications(self):
old_chain = self.blockchain
cur_chain = self.network.blockchain()
if cur_chain != old_chain:
self.blockchain = cur_chain
above_height = cur_chain.get_height_of_last_common_block_with_chain(old_chain)
self.logger.info(f"undoing verifications above height {above_height}")
tx_hashes = self.wallet.undo_verifications(self.blockchain, above_height)
for tx_hash in tx_hashes:
self.logger.info(f"redoing {tx_hash}")
self.remove_spv_proof_for_tx(tx_hash)
def remove_spv_proof_for_tx(self, tx_hash):
self.merkle_roots.pop(tx_hash, None)
self.requested_merkle.discard(tx_hash)
def is_up_to_date(self):
return not self.requested_merkle
def verify_tx_is_in_block(tx_hash: str, merkle_branch: Sequence[str],
leaf_pos_in_tree: int, block_header: Optional[dict],
block_height: int) -> None:
"""Raise MerkleVerificationFailure if verification fails."""
if not block_header:
raise MissingBlockHeader("merkle verification failed for {} (missing header {})"
.format(tx_hash, block_height))
if len(merkle_branch) > 30:
raise MerkleVerificationFailure(f"merkle branch too long: {len(merkle_branch)}")
calc_merkle_root = SPV.hash_merkle_root(merkle_branch, tx_hash, leaf_pos_in_tree)
if block_header.get('merkle_root') != calc_merkle_root:
raise MerkleRootMismatch("merkle verification failed for {} ({} != {})".format(
tx_hash, block_header.get('merkle_root'), calc_merkle_root))
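
# Hedged illustrative sketch (not part of the original module): hash_merkle_root
# above folds the transaction hash with each branch item using double-SHA256,
# picking the concatenation order from the bits of leaf_pos_in_tree. The
# standalone demo below reproduces that fold for a toy two-leaf tree using
# hashlib only; the leaves are made-up byte strings, not real transaction
# hashes, and the byte-order reversal done by hash_decode/hash_encode is
# skipped for brevity.
if __name__ == '__main__':
    import hashlib

    def _sha256d(x: bytes) -> bytes:
        return hashlib.sha256(hashlib.sha256(x).digest()).digest()

    leaf0 = _sha256d(b'tx-a')   # leaf hashes of a toy two-leaf merkle tree
    leaf1 = _sha256d(b'tx-b')
    root = _sha256d(leaf0 + leaf1)
    # verify leaf0 (position 0) against branch [leaf1]: even index -> h goes first
    h, index = leaf0, 0
    for item in [leaf1]:
        h = _sha256d(item + h) if (index & 1) else _sha256d(h + item)
        index >>= 1
    assert h == root
    print('toy merkle fold matches the block merkle root')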
# voter_guide/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import voter_guides_import_from_master_server
from .models import VoterGuide, VoterGuideListManager, VoterGuideManager
from .serializers import VoterGuideSerializer
from admin_tools.views import redirect_to_sign_in_page
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.messages import get_messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from election.models import Election, ElectionManager, TIME_SPAN_LIST
from organization.models import Organization, OrganizationListManager
from organization.views_admin import organization_edit_process_view
from position.models import PositionEntered, PositionForFriends, PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists, \
STATE_CODE_MAP
# This page does not need to be protected.
class VoterGuidesSyncOutView(APIView):
def get(self, request, format=None):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
voter_guide_list = VoterGuide.objects.all()
if positive_value_exists(google_civic_election_id):
voter_guide_list = voter_guide_list.filter(google_civic_election_id=google_civic_election_id)
serializer = VoterGuideSerializer(voter_guide_list, many=True)
return Response(serializer.data)
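
# Hedged usage sketch (not part of the original module): the sync-out endpoint
# above simply serializes VoterGuide rows, optionally filtered by election.
# Assuming it is routed at a URL such as /apis/v1/voterGuidesSyncOut/ (the real
# route lives in urls.py and is not shown here; the election id below is
# arbitrary), a consumer could fetch the guides for one election roughly like:
#
#     import requests  # third-party HTTP client, assumed available
#     r = requests.get('https://example.org/apis/v1/voterGuidesSyncOut/',
#                      params={'google_civic_election_id': 4162})
#     voter_guides = r.json()  # list of dicts produced by VoterGuideSerializer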
@login_required
def voter_guides_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = voter_guides_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Voter Guides import completed. '
'Saved: {saved}, Updated: {updated}, '
'Master data not imported (local duplicates found): '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def generate_voter_guides_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
voter_guide_stored_for_this_organization = []
# voter_guide_stored_for_this_public_figure = []
# voter_guide_stored_for_this_voter = []
voter_guide_created_count = 0
voter_guide_updated_count = 0
# What elections do we want to generate voter_guides for?
election_list = Election.objects.all()
# Cycle through organizations
organization_list = Organization.objects.all()
for organization in organization_list:
# Cycle through elections. Find out position count for this org for each election.
# If > 0, then create a voter_guide entry
if organization.id not in voter_guide_stored_for_this_organization:
for election in election_list:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and google_civic_election_id
google_civic_election_id = int(election.google_civic_election_id) # Convert VarChar to Integer
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
google_civic_election_id=google_civic_election_id).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization.we_vote_id, election.google_civic_election_id)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
for time_span in TIME_SPAN_LIST:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and time_span
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
vote_smart_time_span=time_span).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
organization.we_vote_id, time_span)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
voter_guide_stored_for_this_organization.append(organization.id)
# Cycle through public figures
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_public_figure_voter_guide(1234, 'wv02')
# Cycle through voters
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_voter_voter_guide(1234, 'wv03')
messages.add_message(request, messages.INFO,
'{voter_guide_created_count} voter guides created, '
'{voter_guide_updated_count} updated.'.format(
voter_guide_created_count=voter_guide_created_count,
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def generate_voter_guides_for_one_election_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR,
'Cannot generate voter guides for one election: google_civic_election_id missing')
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
voter_guide_stored_for_this_organization = []
# voter_guide_stored_for_this_public_figure = []
# voter_guide_stored_for_this_voter = []
voter_guide_created_count = 0
voter_guide_updated_count = 0
# What elections do we want to generate voter_guides for?
election_list = Election.objects.all()
# Cycle through organizations
organization_list = Organization.objects.all()
for organization in organization_list:
# Cycle through elections. Find out position count for this org for each election.
# If > 0, then create a voter_guide entry
if organization.id not in voter_guide_stored_for_this_organization:
# organization hasn't had voter guides stored yet in this run through.
# Search for positions with this organization_id and google_civic_election_id
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
google_civic_election_id=google_civic_election_id).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization.we_vote_id, google_civic_election_id)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
for time_span in TIME_SPAN_LIST:
# organization hasn't had voter guides stored yet.
# Search for positions with this organization_id and time_span
positions_count = PositionEntered.objects.filter(
organization_id=organization.id,
vote_smart_time_span=time_span).count()
if positions_count > 0:
voter_guide_manager = VoterGuideManager()
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
organization.we_vote_id, time_span)
if results['success']:
if results['new_voter_guide_created']:
voter_guide_created_count += 1
else:
voter_guide_updated_count += 1
voter_guide_stored_for_this_organization.append(organization.id)
# Cycle through public figures
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_public_figure_voter_guide(1234, 'wv02')
# Cycle through voters
# voter_guide_manager = VoterGuideManager()
# voter_guide_manager.update_or_create_voter_voter_guide(1234, 'wv03')
messages.add_message(request, messages.INFO,
'{voter_guide_created_count} voter guides created, '
'{voter_guide_updated_count} updated.'.format(
voter_guide_created_count=voter_guide_created_count,
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def refresh_existing_voter_guides_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
voter_guide_updated_count = 0
# Cycle through existing voter_guides
voter_guide_list_manager = VoterGuideListManager()
voter_guide_manager = VoterGuideManager()
results = voter_guide_list_manager.retrieve_all_voter_guides()
if results['voter_guide_list_found']:
voter_guide_list = results['voter_guide_list']
for voter_guide in voter_guide_list:
if positive_value_exists(voter_guide.organization_we_vote_id):
if positive_value_exists(voter_guide.google_civic_election_id):
results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
voter_guide.organization_we_vote_id, voter_guide.google_civic_election_id)
if results['success']:
voter_guide_updated_count += 1
elif positive_value_exists(voter_guide.vote_smart_time_span):
results = voter_guide_manager.update_or_create_organization_voter_guide_by_time_span(
voter_guide.organization_we_vote_id, voter_guide.vote_smart_time_span)
if results['success']:
voter_guide_updated_count += 1
messages.add_message(request, messages.INFO,
'{voter_guide_updated_count} updated.'.format(
voter_guide_updated_count=voter_guide_updated_count,
))
return HttpResponseRedirect(reverse('voter_guide:voter_guide_list', args=()))
@login_required
def voter_guide_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
voter_guide_list = []
voter_guide_list_object = VoterGuideListManager()
if positive_value_exists(google_civic_election_id):
results = voter_guide_list_object.retrieve_voter_guides_for_election(
google_civic_election_id=google_civic_election_id)
if results['success']:
voter_guide_list = results['voter_guide_list']
else:
order_by = "google_civic_election_id"
results = voter_guide_list_object.retrieve_all_voter_guides(order_by)
if results['success']:
voter_guide_list = results['voter_guide_list']
modified_voter_guide_list = []
position_list_manager = PositionListManager()
for one_voter_guide in voter_guide_list:
# How many Publicly visible positions are there in this election on this voter guide?
retrieve_public_positions = True
one_voter_guide.number_of_public_positions = position_list_manager.fetch_positions_count_for_voter_guide(
one_voter_guide.organization_we_vote_id, one_voter_guide.google_civic_election_id,
retrieve_public_positions)
# How many Friends-only visible positions are there in this election on this voter guide?
retrieve_public_positions = False
one_voter_guide.number_of_friends_only_positions = position_list_manager.fetch_positions_count_for_voter_guide(
one_voter_guide.organization_we_vote_id, one_voter_guide.google_civic_election_id,
retrieve_public_positions)
modified_voter_guide_list.append(one_voter_guide)
election_list = Election.objects.order_by('-election_day_text')
messages_on_stage = get_messages(request)
template_values = {
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'messages_on_stage': messages_on_stage,
'voter_guide_list': modified_voter_guide_list,
}
return render(request, 'voter_guide/voter_guide_list.html', template_values)
@login_required
def voter_guide_search_view(request):
"""
Before creating a voter guide, search for an existing organization
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
# A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
template_values = {
'messages_on_stage': messages_on_stage,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
'state_list': sorted_state_list,
}
return render(request, 'voter_guide/voter_guide_search.html', template_values)
@login_required
def voter_guide_search_process_view(request):
"""
Process the new or edit organization forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
add_organization_button = request.POST.get('add_organization_button', False)
if add_organization_button:
return organization_edit_process_view(request)
organization_name = request.POST.get('organization_name', '')
organization_twitter_handle = request.POST.get('organization_twitter_handle', '')
organization_facebook = request.POST.get('organization_facebook', '')
organization_website = request.POST.get('organization_website', '')
# state_served_code = request.POST.get('state_served_code', False)
# Save this variable so we have it on the "Add New Position" page
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
# Filter incoming data
organization_twitter_handle = extract_twitter_handle_from_text_string(organization_twitter_handle)
# Search for organizations that match
organization_email = ''
organization_list_manager = OrganizationListManager()
results = organization_list_manager.organization_search_find_any_possibilities(
organization_name, organization_twitter_handle, organization_website, organization_email,
organization_facebook)
if results['organizations_found']:
organizations_list = results['organizations_list']
organizations_count = len(organizations_list)
messages.add_message(request, messages.INFO, 'We found {count} existing organization(s) '
'that might match.'.format(count=organizations_count))
else:
organizations_list = []
        messages.add_message(request, messages.INFO, 'No existing organizations found with those search terms. '
                                                     'Please try again. ')
election_manager = ElectionManager()
upcoming_election_list = []
results = election_manager.retrieve_upcoming_elections()
if results['success']:
upcoming_election_list = results['election_list']
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'organizations_list': organizations_list,
'organization_name': organization_name,
'organization_twitter_handle': organization_twitter_handle,
'organization_facebook': organization_facebook,
'organization_website': organization_website,
'upcoming_election_list': upcoming_election_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'voter_guide/voter_guide_search.html', template_values)
import analysis
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn import cross_validation
from sklearn.pipeline import Pipeline
import sklearn
from sklearn.kernel_ridge import KernelRidge  # used by learn() below; leaving this commented out raises NameError
from sklearn import svm
from sklearn import gaussian_process
from scipy.special import digamma
import scipy.stats
import scipy.optimize  # scipy.optimize.minimize is used in the M-steps below
from sklearn.metrics import roc_auc_score
import copy
from scipy.stats.stats import pearsonr
import matplotlib.pyplot as plt
list_freq = [422400, 729600, 1036800, 1497600, 1958400, 2457600]
def preprocess(dic, app = 'angry_birds'):
"""
given dic: conf -> labels
return the design matrix, each row = (features, target)
"""
a = np.zeros((24, 3))
i = 0
for cores in [1,2,3,4]:
for freq in list_freq:
m = np.mean(dic[app, cores, freq, 0])
a[i,:] = np.asarray([cores, freq, m])
i+= 1
return a
def learn(a):
data = a[:,0:-1]
target = a[:,-1]
LR = linear_model.LinearRegression()
scores = cross_validation.cross_val_score(LR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'LR:', -scores.mean()
LR_poly2 = Pipeline([('poly', PolynomialFeatures(degree=3)),
('linear', linear_model.LinearRegression())])
scores = cross_validation.cross_val_score(LR_poly2, data, target, cv=4, scoring = 'mean_absolute_error')
print 'LR_poly3:', -scores.mean()
Ridge = linear_model.Ridge (alpha = 0)
scores = cross_validation.cross_val_score(Ridge, data, target, cv=4, scoring = 'mean_absolute_error')
print 'Ridge:', -scores.mean()
KR = KernelRidge()
scores = cross_validation.cross_val_score(KR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'KR:', -scores.mean()
    SVR = svm.SVR()
scores = cross_validation.cross_val_score(SVR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'SVR:', -scores.mean()
GP = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
scores = cross_validation.cross_val_score(GP, data, target, cv=4, scoring = 'mean_absolute_error')
print 'GP:', -scores.mean()
app = [ 'angry_birds', 'youtube', 'gladiator', 'chrome_cnn',
'epic_citadel', 'facebook', 'photoshop', 'compubench_rs_particles',
'compubench_rs_gaussian', 'compubench_rs_julia', 'compubench_rs_facedetection', 'compubench_rs_ambiant']
gpu = [200, 320, 389, 462.4, 578]
def create_features(conf):
f = [0]*12 + [0,0,0,1]
for i, a in enumerate(app):
if a == conf[0]:
f[i] = 1
f[12] = conf[1] * 1.0 / 4
f[13] = conf[2] * 1.0 / 2457600
f[14] = gpu[conf[3]] *1.0 / 578
f[15] = 1.0
return f
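
# Hedged illustrative check (not part of the original script): create_features
# encodes a configuration tuple (app, cores, freq, gpu_index) as a length-16
# vector -- a 12-way one-hot over `app`, cores and freq scaled by their maxima,
# the GPU clock scaled by 578, and a constant bias term. The configuration used
# below is made up for illustration.
def _demo_create_features():
    f = create_features(('youtube', 2, 1228800, 4))
    assert len(f) == 16
    assert f[1] == 1                  # one-hot slot for 'youtube'
    assert f[12] == 2 * 1.0 / 4       # cores / 4
    assert f[14] == 1.0               # gpu[4] = 578 -> 578 / 578
    assert f[15] == 1.0               # bias term
    return f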
class model:
def __init__(self, data, lambda_w = 0, lambda_v = 0, A = 0, B = 0):
"""
data is an array of [wid, question, url, rating, time, ip, application, cpu cores, cpu freq, gpu index]
e.g:
['A2XXXXXXXXXXX', 'Question Random Assignment - Text 18',
'www.youtube.com/embed/ZqD83lS8exs?wmode=transparent',
'1 - Very Dissatisfied', 'Thu Jul 31 02:56:58 GMT 2014',
'11.11.1.111', 'compubench_rs_particles', '3', '2457600', '0']
lambda: weight for regularization of W and V
A, B: weights (prior) on theta
organize:
- F: a features matrix, row i = list of features for item i
- L: crowd label, elem i = (list of W, list of L)
"""
self.data = data
self.dic_conf_wl = analysis.get_dic_conf_wl(data)
n = len(self.dic_conf_wl)
self.list_conf = self.dic_conf_wl.keys()
self.F = []
self.empi_mean = []
self.empi_var = []
for conf in self.list_conf:
f = create_features(conf)
self.F.append(f)
labels = self.dic_conf_wl[conf][1]
self.empi_mean.append( np.mean(labels) )
self.empi_var.append ( np.var(labels) )
self.F = np.asarray(self.F)
self.L = []
for conf in self.list_conf:
labels = self.dic_conf_wl[conf]
self.L.append(labels)
self.n = len(self.L) # number of items
self.m = len(self.F[0]) # number of features
# build dic_w_il
self.dic_w_il = {}
for i in range(self.n):
workers, labels = self.L[i]
for w, l in zip(workers, labels):
if w not in self.dic_w_il: self.dic_w_il[w] = []
self.dic_w_il[w].append( (i,l))
self.ep = 1e-100
self.lambda_w = lambda_w
self.lambda_v = lambda_v
self.A = A
self.B = B
def get_mean(self, i):
return self.F[i].dot(self.w)
def get_std(self, i):
return np.exp( self.F[i].dot(self.v) )
def get_var(self, i):
return pow( self.get_std(i), 2)
def spam_dist(self, l):
"""
distribution of labels from spammers
"""
return scipy.stats.norm(3,self.s).pdf(l)
def e_step(self):
"""
evaluate posterior over Z
"""
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
p1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i), scale = self.get_std(i) ) * self.theta[w]
p0 = self.spam_dist(l) * (1-self.theta[w])
p = p1 *1.0/ (p0 + p1)
self.pt[i].append(p)
def expected_ll(self, w, v):
"""
return expected log likelihood
"""
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
#theta = self.theta[worker]
ll0 = np.log( self.spam_dist(l) ) # + np.log(1-theta)
mean = self.F[i].dot(w)
std = np.exp(self.F[i].dot(v))
if std < self.ep: std = self.ep
ll1 = scipy.stats.norm.logpdf(l, loc = mean, scale = std )# + np.log(theta)
res += pt0*ll0 + pt1*ll1
#regularization
for i in range(self.m-1):
res -= self.lambda_w * w[i]*w[i] + self.lambda_v * v[i]*v[i]
return res
def grad_expected_ll(self, w, v):
gw = np.zeros( (self.m,) )
gv = np.zeros( (self.m,) )
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w)
sigma = np.exp(self.F[i].dot(v))
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
update_v = pt1*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
gv += update_v
for i in range(self.m-1):
gw[i] -= 2 * self.lambda_w * w[i]
gv[i] -= 2 * self.lambda_v * v[i]
return np.hstack( (gw, gv) )
def check_grad(self, ep = 0.0000001, check_range = None):
        if check_range is None: check_range = range(self.m)
w = np.random.rand(self.m) - 0.5
v = np.random.rand(self.m) - 0.5
a = self.expected_ll(w, v)
fw = np.zeros(self.m)
fv = np.zeros(self.m)
for i in check_range:
x = np.zeros(self.m)
x[i] = ep
fw[i] = (self.expected_ll(w + x, v) - a ) / ep
fv[i] = (self.expected_ll(w, v + x) - a ) / ep
print w
print v
print 'calculated grad = ', zip(range(self.m*2), self.grad_expected_ll(w, v))
print 'finite diff grad = ', zip(range(self.m*2), np.hstack((fw, fv)))
def m_step_theta(self):
"""
set theta_j to max expected ll
"""
for w in self.dic_w_il: self.theta[w] = 0
for i in range(self.n):
workers, labels = self.L[i]
for w, l, pt1 in zip(workers, labels, self.pt[i]):
self.theta[w] += pt1
for w in self.dic_w_il:
num = len(self.dic_w_il[w])
r = self.theta[w] * 1.0 / num
self.theta[w] = r
#if (r < 0.85 ):
# self.theta[w] = r # no regularize
#else:
# # regularize
# self.theta[w] = (self.theta[w] * 1.0 + self.A) / ( len( self.dic_w_il[w]) + self.A + self.B)
# set self.s = sd of spam dist
s = 0
sw = 0
for i in range(self.n):
workers, labels = self.L[i]
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pow(l - 3, 2)*(1-self.theta[w])
sw += 1- self.theta[w]
if sw > 0:
self.s = pow(s*1.0/sw, 0.5)
def m_step_wv(self, update_v = True):
"""
maximize w and v
"""
m = self.m
f = lambda x: -self.expected_ll(x[:m], x[m:])
fp = lambda x: -self.grad_expected_ll(x[:m], x[m:])
x0 = np.hstack( (self.w, self.v) )
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
#print res
self.w = res.x[:m]
if update_v:
self.v = res.x[m:]
def m_step(self, update_v = True):
"""
maximize expected ll of w, v, theta
"""
self.m_step_theta()
self.m_step_wv(update_v)
def init_wv(self):
"""
init the params w and v
using the results of linear regression on empirical
"""
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.empi_mean)
self.w = self.lr.coef_
self.lr.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)) )
self.v = self.lr.coef_
def init_em(self, h_theta = 0.8):
"""
init w, v, theta
"""
self.s = 2 # sd of spam distribution
self.w = np.zeros((self.m,))
self.v = np.zeros((self.m,))
self.theta = {}
dic_ul = analysis.get_dic_url_labels(self.data)
dic_w, dic_mean = analysis.agreement(self.data, dic_ul)
for w in self.dic_w_il:
#self.theta[w] = 1 - h_theta*abs(dic_mean[w])
self.theta[w] = h_theta
self.init_wv()
#self.pt = []
#for i in range(self.n):
# self.pt.append([])
# workers, labels = self.L[i]
# for w, l in zip(workers, labels):
# self.pt[i].append(0.99)
def em(self, w_it = 3, v_it = 1):
"""
"""
# iterate
for it in range(w_it):
self.e_step()
update_v = it < v_it
self.m_step(update_v)
def get_var_f(self, f):
"""
return variance for a feature vector
"""
return pow ( np.exp ( f.dot(self.v) ), 2)
def predict(self, list_conf):
"""
predict mean and var of new conf
"""
res_mean = []
res_var = []
for conf in list_conf:
f = np.asarray( create_features(conf) )
mean = f.dot(self.w)
var = self.get_var_f(f)
res_mean.append(mean)
res_var.append(var)
return (res_mean, res_var)
def spam_score(self, workers):
"""
return prob of the worker being a spammer
"""
res = []
for w in workers:
res.append(1 - self.theta[w])
return res
def get_dic(self, w, v):
"""
return dics of conf to mean and var
using prediction by w and v
"""
dic_mean = {}
dic_var = {}
for i in range(self.n):
conf = tuple(self.list_conf[i])
f = self.F[i]
mean = f.dot(w)
var = pow( np.exp(f.dot(v)), 2)
dic_mean[conf] = mean
dic_var[conf] = var
return (dic_mean, dic_var)
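
# Hedged usage sketch (not part of the original script): as the docstrings above
# suggest, the intended workflow for the EM model is init_em -> em (alternating
# e_step / m_step) -> predict / spam_score. `crowd_data` is a placeholder for
# rows shaped like the example in model.__init__'s docstring.
#
#     m = model(crowd_data, lambda_w=0.1, lambda_v=0.1)
#     m.init_em(h_theta=0.8)                  # init w, v by linear regression, and theta
#     m.em(w_it=3, v_it=1)                    # alternate E and M steps
#     means, variances = m.predict([('youtube', 2, 1228800, 4)])
#     spam_probs = m.spam_score(list(m.dic_w_il))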
class LR:
"""
baseline: linear regression
"""
def __init__(self, data, hetero = False):
self.dic_conf_wl = analysis.get_dic_conf_wl(data)
n = len(self.dic_conf_wl)
list_conf = self.dic_conf_wl.keys()
self.F = []
self.empi_mean = []
self.empi_var = []
for conf in list_conf:
f = create_features(conf)
self.F.append(f)
labels = self.dic_conf_wl[conf][1]
self.empi_mean.append( np.mean(labels) )
self.empi_var.append ( np.var(labels) )
self.lr_mean = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr_mean.fit(self.F, self.empi_mean)
self.const_var = np.sum((self.lr_mean.predict(self.F) - self.empi_mean)**2) *1.0/ (n-2)
self.lr_var = sklearn.linear_model.LinearRegression(fit_intercept = False)
#self.lr_var.fit(self.F, self.empi_var )
#self.lr_var.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)))
self.hetero = hetero
def predict(self, list_conf):
"""
predict mean and var of new conf
"""
self.tF = []
for conf in list_conf:
f = create_features(conf)
self.tF.append(f)
res_mean = self.lr_mean.predict(self.tF)
if self.hetero:
res_var = self.lr_var.predict(self.tF)
#res_var = pow( np.exp(self.lr_var.predict(self.tF)), 2)
else:
res_var = [self.const_var] * len(list_conf)
return (res_mean, res_var)
class baseline_spam(model):
"""
baselines for spam detection
"""
def __init__(self, data):
model.__init__(self, data)
#get spam score
self.ss = {}
for w in self.dic_w_il:
self.ss[w] = 0
for i, l in self.dic_w_il[w]:
# difference between label and average label
self.ss[w] += np.abs( l - np.mean(self.L[i][1]) )
self.ss[w] = self.ss[w] * 1.0 / len(self.dic_w_il[w])
#normalize:
max_score = max(self.ss.values())
for w in self.ss:
self.ss[w] = self.ss[w] * 1.0 / max_score
def spam_score(self, workers):
res = []
for w in workers:
res.append(self.ss[w])
return res
empirical_spam = [0.13, 0.25, 0.22, 0.27, 0.14]
def plot_empi_spam():
fig, ax = plt.subplots()
ax.bar(np.asarray([1,2,3,4,5]) - 0.5, empirical_spam)
ax.set_xlabel('Rating')
ax.set_ylabel('Proportion')
ax.set_xticks(np.asarray([1,2,3,4,5]))
class eval():
"""
evaluate
"""
def __init__(self, data, ptrain = 0.6, pval = 0.2, prw = 0.1, prl = 0.8, ptr = 1.0, plt = 1.0, pwk = 0.0, rand_seed = 1234, noise = 'empirical', bad_guys = []):
"""
ptrain = train set
pval = validation set
prw = proportion of random workers (spammers)
prl = proportion of random labels (how often a random worker gives a random label)
ptr = proportion of train conf
plt = proportion of labels for each conf in the train set
pwk = proportion of workers to be removed (remove the ones with high diff)
"""
self.data = copy.deepcopy(data)
self.pwk = pwk
self.del_wk()
self.dic_conf_wl = analysis.get_dic_conf_wl(self.data)
self.list_conf = self.dic_conf_wl.keys()
#self.rs = np.random.RandomState(1)
#self.rs.shuffle(self.list_conf)
self.rs = np.random.RandomState(rand_seed)
self.rs.shuffle(self.list_conf)
self.n = len(self.list_conf)
self.n_train = int(ptrain * self.n) # number of total train conf
self.n_given = int(self.n_train * ptr) # number of train conf given to method
self.train_conf = self.list_conf[:self.n_given]
self.n_val = int(pval * self.n) # number of total validation conf
self.val_conf = self.list_conf[self.n_train:self.n_train+self.n_val]
self.test_conf = self.list_conf[self.n_train+self.n_val:]
# get gold L for test
self.gold_mean = []; self.gold_var = []; self.gold_num = []
for conf in self.test_conf:
labs = self.dic_conf_wl[conf][1]
workers = self.dic_conf_wl[conf][0]
labels = []
for l, w in zip(labs, workers):
if w not in bad_guys:
labels.append(l)
self.gold_mean.append( np.mean(labels) )
self.gold_var.append ( np.var(labels) )
self.gold_num.append( len(labels) )
# also get gold L for train
self.train_mean = []; self.train_var = []; self.train_num = []
for conf in self.train_conf:
labels = self.dic_conf_wl[conf][1]
self.train_mean.append( np.mean(labels) )
self.train_var.append ( np.var(labels) )
self.train_num.append( len(labels) )
# also get gold L for valildataion
self.val_mean = []; self.val_var = []; self.val_num = []
for conf in self.val_conf:
labels = self.dic_conf_wl[conf][1]
self.val_mean.append( np.mean(labels) )
self.val_var.append ( np.var(labels) )
self.val_num.append( len(labels) )
self.plt = plt
self.get_train_data()
#inject noise
train_workers = analysis.get_list_workers(self.train_data)
self.rs.shuffle(train_workers)
self.n_random_workers = int(prw * len(train_workers))
self.random_workers = train_workers[:self.n_random_workers]
self.train_workers = train_workers
self.noise = noise
self.prl = prl
self.inject_noise()
def rand_rating(self):
if self.noise == 'uniform':
return self.rs.randint(1,6)
elif self.noise == 'empirical':
return np.nonzero(self.rs.multinomial(1, empirical_spam))[0][0]+1
else:
raise "unknown noise"
def inject_noise(self):
for i in range(len(self.train_data)):
w = self.train_data[i][0]
if w in self.random_workers:
if np.random.uniform() < self.prl:
self.train_data[i][3] = str(self.rand_rating())
def get_train_data(self):
self.train_data = []
dic_conf_num = {}# conf-> number of crowd labels this conf got
for d in self.data:
conf = analysis.get_conf(d)
if conf in self.train_conf:
if conf not in dic_conf_num: dic_conf_num[conf] = 0
if dic_conf_num[conf] > self.plt * len(self.dic_conf_wl[conf][0]): continue
dic_conf_num[conf] += 1
self.train_data.append(d)
def del_wk(self):
"""
remove workers with high deviation
remove a proportion of self.pwk workers
"""
if self.pwk == 0.0: return
dic_ul = analysis.get_dic_url_labels(self.data)
dic_w, dic_mean = analysis.agreement(self.data, dic_ul)
self.workers = sorted(dic_mean.items(), key = lambda i : abs(i[1]), reverse = False)
nwk = len(self.workers)
keep_workers = list( zip(*self.workers[:int((1-self.pwk) * nwk)])[0]) # list of workers to keep
new_data = []
for i in self.data:
if i[0] in keep_workers:
new_data.append(i)
self.data = new_data
def get_mae(self, a, b):
        if len(a) != len(b): raise ValueError("len not equal")
res = 0
for x,y in zip(a,b):
res += np.abs(x-y)
res = res * 1.0 / len(a)
return res
def eval(self, model):
"""
model has beeen trained
model has a predict method
"""
res_mean, res_var = model.predict(self.test_conf)
mae_mean = self.get_mae(res_mean, self.gold_mean)
mae_var = self.get_mae(res_var, self.gold_var)
#print "correlation: ", pearsonr(res_var, self.gold_var)
return [mae_mean, mae_var]
def eval_val(self, model):
"""
model has beeen trained
model has a predict method
evaluate on validation data
"""
res_mean, res_var = model.predict(self.val_conf)
mae_mean = self.get_mae(res_mean, self.val_mean)
mae_var = self.get_mae(res_var, self.val_var)
#print "correlation: ", pearsonr(res_var, self.gold_var)
return [mae_mean, mae_var]
def print_val(self, model):
res_mean, res_var = model.predict(self.val_conf)
mae_var = self.get_mae(res_var, self.val_var)
s = 0
for i,j in zip(res_var, self.val_var):
print i, j, i - j
s += (i-j)
print "s = ", s
def eval_all(self, em_it = 3):
"""
evaluate
"""
# LR
#lr = LR(self.train_data, hetero = True)
lr = LR(self.train_data, hetero = False)
eval_lr = self.eval(lr)
# nospam model
#ns = model_nospam(self.train_data)
#ns.init_em()
#ns.em(em_it)
#eval_ns = self.eval(ns)
# model
#new99 = model(self.train_data)
#new99.init_em(0.99)
#new99.em(em_it)
#eval_new99 = self.eval(new99)
new8 = model(self.train_data)
#new8.init_em(0.99)
new8.init_em(1)
new8.em(1,1)
eval_new8 = self.eval(new8)
# fix bias model
#fb = model_fixbias(self.train_data)
#fb.init_em()
#fb.em(em_it)
#eval_fb = self.eval(fb)
# variational model
var82 = model_var(self.train_data, 9.9, 0.1)
var82.init_em()
#var82.e_step()
var82.em(1,1)
eval_var82 = self.eval(var82)
#var191 = model_var(self.train_data, 19, 1)
#var191.init_em()
#var191.em(em_it)
#eval_var191 = self.eval(var191)
# spamer score
ss_baseline = self.detect_spammer(baseline_spam(self.train_data))
ss_new = self.detect_spammer(new8)
ss_var82 = self.detect_spammer(var82)
print "linear reg/baseline:", eval_lr, ss_baseline
#print "no spam model:", eval_ns
print "new model:", eval_new8, ss_new
#print "new model(fixbias)", eval_fb
print "var model", eval_var82, ss_var82
#return ([eval_lr, eval_new99, eval_new9, eval_ns, eval_fb, eval_var91, eval_var191], ss_baseline, ss_new)
#return ([eval_lr, eval_new8], ss_baseline, ss_new)
return ([eval_lr, eval_new8, eval_var82], ss_baseline, ss_new, ss_var82)
def detect_spammer(self, model):
"""
return AUC of the model in detecting the spammers.
model has a method spam_score(list_workers) that return the prob of being spammer
"""
# in self.train_workers, the first n_random_workers
if self.n_random_workers == 0:
return -1
score = model.spam_score(self.train_workers)
y = [1] * self.n_random_workers + [0] * (len(self.train_workers) - self.n_random_workers)
return roc_auc_score(y, score)
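
# Hedged note (not part of the original script): detect_spammer treats the
# injected random workers as the positive class and asks roc_auc_score how well
# a model's spam scores rank them above honest workers; an AUC of 1.0 means
# perfect separation, 0.5 means chance. Toy scores below are made up:
#
#     from sklearn.metrics import roc_auc_score
#     roc_auc_score([1, 1, 0, 0], [0.9, 0.6, 0.4, 0.1])   # -> 1.0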
class model_constvar(model):
"""
same model with constant variance
"""
def __init__(self, data):
model.__init__(self,data)
self.std = 1
def get_std(self, i):
return self.std
def expected_ll(self, w):
"""
return expected log likelihood
"""
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
theta = self.theta[worker]
ll0 = np.log(self.spam_dist(l)) + np.log(1-theta)
mean = self.F[i].dot(w)
std = self.std
if std < self.ep: std = self.ep
ll1 = scipy.stats.norm.logpdf(l, loc = mean, scale = std ) + np.log(theta)
res += pt0*ll0 + pt1*ll1
return res
def grad_expected_ll(self, w):
gw = np.zeros( (self.m,) )
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w)
sigma = self.std
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
return gw
def m_step_var(self):
s1 = 0
s2 = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtx = self.F[i].dot(self.w)
s1 += pt1*pow(l-wtx,2)
s2 += pt1
        self.var = s1*1.0/s2
        self.std = pow(self.var, 0.5)  # keep the constant std used by get_std in sync
def m_step(self):
"""
maximize theta, W and var
"""
self.m_step_theta()
# maximize W
m = self.m
f = lambda x: -self.expected_ll(x)
fp = lambda x: -self.grad_expected_ll(x)
x0 = self.w
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
print res
self.w = res.x
# maximize var
self.m_step_var()
class model_var(model):
"""
theta is hidden variable
inference by meanfield variational
"""
def __init__(self, data, A = 8.0, B = 2.0):
model.__init__(self, data)
self.A = A
self.B = B
def init_em(self):
"""
init params: w and v
init variational params alpha, beta, gamma
"""
self.s = 2
model.init_wv(self)
self.alpha = {}
self.beta = {}
for w in self.dic_w_il:
self.alpha[w] = self.A
self.beta[w] = self.B
self.gamma = []
for i in range(self.n):
workers, labels = self.L[i]
self.gamma.append([])
for w, l in zip(workers, labels):
self.gamma[i].append( self.A*1.0/(self.A+self.B) )
def update(self, a, b):
self.n_update += 1
self.change += np.abs(a-b)
def e_step(self, max_it = 10):
for it in range(max_it):
self.change = 0; self.n_update = 0
# update q(Z)
for i in range(self.n):
workers, labels = self.L[i]
for w, l, j in zip(workers, labels, range(len(workers))):
alpha = self.alpha[w]
beta = self.beta[w]
z0 = (self.spam_dist(l)) * np.exp( digamma(beta) - digamma(alpha+beta) )
z1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i), scale = self.get_std(i) ) * np.exp( digamma(alpha) - digamma(alpha+beta) )
g = z1*1.0 / (z0+z1)
self.update(self.gamma[i][j], g)
self.gamma[i][j] = g
# update q(theta)
new_alpha = {}
new_beta = {}
for w in self.dic_w_il:
new_alpha[w] = self.A
new_beta[w] = self.B
for i in range(self.n):
workers, labels = self.L[i]
for w, l, g in zip(workers, labels, self.gamma[i]):
new_alpha[w] += (1-g)
new_beta[w] += g
for w in self.dic_w_il:
self.update(self.alpha[w], new_alpha[w])
self.alpha[w] = new_alpha[w]
self.update(self.beta[w], new_beta[w])
self.beta[w] = new_beta[w]
#
avg_change = self.change * 1.0/self.n_update
if avg_change < 0.01:break
def m_step(self, update_v = True):
self.pt = self.gamma
model.m_step_wv(self, update_v)
def spam_score(self, workers):
"""
return prob of being a spammer
"""
res = []
for w in workers:
a = self.alpha[w]- self.A; b = self.beta[w] - self.B
res.append( a * 1.0/(a+b) )
return res
class model_nospam(model):
def __init__(self, data):
model.__init__(self, data)
def e_step(self):
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
self.pt[i].append(1.0)
class test:
def __init__(self, data, n_train = 1):
self.data = copy.deepcopy(data)
self.reduce()
self.dic_conf_wl = analysis.get_dic_conf_wl(self.data)
self.list_conf = self.dic_conf_wl.keys()
#self.rs = np.random.RandomState(rand_seed)
#self.rs.shuffle(self.list_conf)
self.n = len(self.list_conf)
self.n_train = n_train
self.train_conf = self.list_conf[:self.n_train]
self.test_conf = self.list_conf[self.n_train:]
# get gold L for test
self.gold_mean = []; self.gold_var = []; self.gold_num = []
for conf in self.test_conf:
labels = self.dic_conf_wl[conf][1]
self.gold_mean.append( np.mean(labels) )
self.gold_var.append ( np.var(labels) )
self.gold_num.append( len(labels) )
# also get gold L for train
self.train_mean = []; self.train_var = []; self.train_num = []
for conf in self.train_conf:
labels = self.dic_conf_wl[conf][1]
self.train_mean.append( np.mean(labels) )
self.train_var.append ( np.var(labels) )
self.train_num.append( len(labels) )
self.get_train_data()
def reduce(self):
"""
del label so that each conf has ~ same number of L
"""
dic_conf_wl = analysis.get_dic_conf_wl(self.data)
dic_conf_num = {}
for conf in dic_conf_wl.keys():
labels = dic_conf_wl[conf][1]
dic_conf_num[conf] = len(labels)
new_data = []
for d in self.data:
conf = analysis.get_conf(d)
if dic_conf_num[conf] > 50:
dic_conf_num[conf] -= 1
else:
new_data.append(d)
self.data = new_data
def get_train_data(self):
self.train_data = []
for d in self.data:
conf = analysis.get_conf(d)
if conf in self.train_conf:
self.train_data.append(d)
def run(self, model, n_it = 1):
self.M = model(self.train_data)
self.M.init_em()
x = self.M.predict(self.train_conf)
self.M.em(n_it)
x1 = self.M.predict(self.train_conf)
self.print_res(self.train_var, x, x1)
def print_res(self, gold, x, x1):
sum_x = 0
sum_x1 = 0
for i in range(len(gold)):
#print i, gold[i], x[1][i], x1[1][i]
sum_x += x[1][i] - gold[i]
sum_x1 += x1[1][i] - gold[i]
e0 = eval([])
print 'mae x = ' , e0.get_mae(gold, x[1])
print 'mae x1 = ', e0.get_mae(gold, x1[1])
print 'sum x = ', sum_x, ' sum x1 = ', sum_x1
class model_fixbias(model):
    def m_step_wv(self, update_v = True):  # update_v accepted so model.m_step can call this override
self.wm = []
self.wv = []
for i in range(self.n):
workers, labels = self.L[i]
m = len(labels)
s = 0
sw = 0
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pt1 * l
sw += pt1
wmean = s * 1.0 / sw
self.wm.append(wmean)
s = 0
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pt1 * pow(l - wmean, 2)
wvar = s * 1.0 / sw
self.wv.append(wvar)
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.wm)
self.w = self.lr.coef_
self.lr.fit(self.F, np.log( pow(np.asarray(self.wv), 0.5)) )
self.v = self.lr.coef_
class model_vf(model_var, model_fixbias):
def m_step(self):
self.pt = self.gamma
model_fixbias.m_step_wv(self)
class model_stoch(model):
"""
using stochastic gradient descent to optimize params
"""
def __init__(self, data, lr_w = 0.001, lr_v = 0.001):
model.__init__(self, data)
self.lr_w = lr_w
self.lr_v = lr_v
    def m_step_wv(self, update_v = True):  # update_v accepted so model.m_step can call this override
"""
maximize w and v using SGD
"""
for it in range(50):
for i in range(self.n):
gw = np.zeros( (self.m,) )
gv = np.zeros( (self.m,) )
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(self.w)
sigma = np.exp(self.F[i].dot(self.v))
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
update_v = pt1*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
gv += update_v
self.w = self.w + self.lr_w * gw
self.v = self.v + self.lr_v * gv
#for i in range(self.m-1):
# gw[i] -= 2 * self.lambda_w * w[i]
# gv[i] -= 2 * self.lambda_v * v[i]
class model_school(model):
def __init__(self, data):
model.__init__(self, data)
def get_mean(self, i, k):
return self.F[i].dot(self.w[k])
def get_std(self, i, k):
return np.exp(self.F[i].dot(self.v[k]))
def get_var(self, i, k):
return pow(self.get_std(i,k), 2)
def e_step(self):
"""
evaluate posterior over Z
"""
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
p1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i, 1), scale = self.get_std(i, 1) ) * self.theta[w]
p0 = scipy.stats.norm.pdf(l, loc = self.get_mean(i, 0), scale = self.get_std(i, 0) ) * (1-self.theta[w])
p = p1 *1.0/ (p0 + p1)
self.pt[i].append(p)
def expected_ll(self, x):
"""
return expected log likelihood
"""
l = len(x)/2
w = x[:l].reshape((2, self.m))
v = x[l:].reshape((2, self.m))
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
#theta = self.theta[worker]
ll = [0,0]
for k in [0,1]:
mean = self.F[i].dot(w[k])
std = np.exp(self.F[i].dot(v[k]))
if std < self.ep: std = self.ep
ll[k] = scipy.stats.norm.logpdf(l, loc = mean, scale = std )# + np.log(theta)
res += pt0*ll[0] + pt1*ll[1]
#regularization
#for i in range(self.m-1):
# res -= self.lambda_w * w[i]*w[i] + self.lambda_v * v[i]*v[i]
return res
def grad_expected_ll(self, x):
l = len(x)/2
w = x[:l].reshape((2, self.m))
v = x[l:].reshape((2, self.m))
gw = np.zeros( (2, self.m) )
gv = np.zeros( (2, self.m) )
for k in [0,1]:
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w[k])
sigma = np.exp(self.F[i].dot(v[k]))
if sigma < self.ep: sigma = self.ep
pt = pt1 if (k==1) else 1 - pt1
update_w = pt*(l-wtc)/pow(sigma,2)*self.F[i]
                    gw[k] += update_w
                    update_v = pt*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
                    gv[k] += update_v
# regularization
#for i in range(self.m-1):
# gw[i] -= 2 * self.lambda_w * w[i]
# gv[i] -= 2 * self.lambda_v * v[i]
return np.hstack( (gw[0,:], gw[1,:], gv[0,:], gv[1,:]) )
def m_step_wv(self):
"""
maximize w and v
"""
m = self.m
f = lambda x: -self.expected_ll(x)
fp = lambda x: -self.grad_expected_ll(x)
x0 = np.hstack( (self.w.reshape((2*m,)), self.v.reshape((2*m,))) )
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
#print res
x = res.x
l = len(x)/2
self.w = x[:l].reshape((2, self.m))
self.v = x[l:].reshape((2, self.m))
def m_step(self):
"""
maximize expected ll of w, v, theta
"""
self.m_step_theta()
self.m_step_wv()
def init_wv(self):
"""
init the params w and v
using the results of linear regression on empirical
"""
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.empi_mean)
self.w[0] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.w[1] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.lr.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)) )
self.v[0] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.v[1] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
def init_em(self, rseed = 1):
"""
init w, v, theta
"""
self.rs = np.random.RandomState(rseed)
self.w = np.zeros((2, self.m))
self.v = np.zeros((2, self.m))
self.theta = {}
for w in self.dic_w_il:
self.theta[w] = self.rs.rand()
self.init_wv()
def em(self, n_it = 3):
"""
"""
# iterate
for it in range(n_it):
self.e_step()
self.m_step()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
cds.py:
Classes to read CDS / Vizier table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import fnmatch
import itertools
import re
import os
from contextlib import suppress
from . import core
from . import fixedwidth
from astropy.units import Unit
__doctest_skip__ = ['*']
class CdsHeader(core.BaseHeader):
_subfmt = 'CDS'
col_type_map = {'e': core.FloatType,
'f': core.FloatType,
'i': core.IntType,
'a': core.StrType}
'The ReadMe file to construct header from.'
readme = None
def get_type_map_key(self, col):
match = re.match(r'\d*(\S)', col.raw_type.lower())
if not match:
raise ValueError('Unrecognized {} format "{}" for column "{}"'.format(
self._subfmt, col.raw_type, col.name))
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if line.startswith(('------', '=======')):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(r'Byte-by-byte Description of file: (?P<name>.+)$',
line, re.IGNORECASE)
if match:
                        # Split 'name' in case it contains multiple files
names = [s for s in re.split('[, ]+', match.group('name'))
if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(self.data.table_name, pattern):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError("Can't find table {} in {}".format(
self.data.table_name, self.readme))
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r'Byte-by-byte Description', line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE)
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if line.startswith(('------', '=======')):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group('name'))
col.start = int(re.sub(r'[-\s]', '',
match.group('start') or match.group('end'))) - 1
col.end = int(match.group('end'))
unit = match.group('units')
if unit == '---':
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
col.unit = Unit(unit, format='cds', parse_strict='warn')
col.description = (match.group('descr') or '').strip()
col.raw_type = match.group('format')
col.type = self.get_col_type(col)
match = re.match(
r'(?P<limits>[\[\]] \S* [\[\]])?' # Matches limits specifier (eg [])
# that may or may not be present
r'\?' # Matches '?' directly
r'((?P<equal>=)(?P<nullval> \S*))?' # Matches to nullval if and only
# if '=' is present
r'(?P<order>[-+]?[=]?)' # Matches to order specifier:
# ('+', '-', '+=', '-=')
r'(\s* (?P<descriptiontext> \S.*))?', # Matches description text even
                                                           # if no whitespace is
# present after '?'
col.description, re.VERBOSE)
if match:
col.description = (match.group('descriptiontext') or '').strip()
if issubclass(col.type, core.FloatType):
fillval = 'nan'
else:
fillval = '0'
if match.group('nullval') == '-':
col.null = '---'
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(('-' * i, fillval, col.name))
else:
col.null = match.group('nullval')
if (col.null is None):
col.null = ''
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
class CdsData(core.BaseData):
"""CDS table data reader
"""
_subfmt = 'CDS'
splitter_class = fixedwidth.FixedWidthSplitter
def process_lines(self, lines):
"""Skip over CDS/MRT header by finding the last section delimiter"""
# If the header has a ReadMe and data has a filename
# then no need to skip, as the data lines do not have header
# info. The ``read`` method adds the table_name to the ``data``
# attribute.
if self.header.readme and self.table_name:
return lines
i_sections = [i for i, x in enumerate(lines)
if x.startswith(('------', '======='))]
if not i_sections:
raise core.InconsistentTableError(f'No {self._subfmt} section delimiter found')
return lines[i_sections[-1]+1:] # noqa
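
# Hedged illustrative note (not part of astropy): with no separate ReadMe,
# process_lines keeps only the lines after the *last* run of dashes/equals,
# which is where the data section starts in a combined description-plus-data
# file. For example, for lines like
#
#     ['Title', '------', '1- 3 I3 --- Index ...', '------', '1 03 28 39.09']
#
# only ['1 03 28 39.09'] would be passed on to the fixed-width splitter.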
class Cds(core.BaseReader):
"""CDS format table.
See: http://vizier.u-strasbg.fr/doc/catstd.htx
Example::
Table: Table name here
= ==============================================================================
Catalog reference paper
Bibliography info here
================================================================================
ADC_Keywords: Keyword ; Another keyword ; etc
Description:
Catalog description here.
================================================================================
Byte-by-byte Description of file: datafile3.txt
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 3 I3 --- Index Running identification number
5- 6 I2 h RAh Hour of Right Ascension (J2000)
8- 9 I2 min RAm Minute of Right Ascension (J2000)
11- 15 F5.2 s RAs Second of Right Ascension (J2000)
--------------------------------------------------------------------------------
Note (1): A CDS file can contain sections with various metadata.
Notes can be multiple lines.
Note (2): Another note.
--------------------------------------------------------------------------------
1 03 28 39.09
2 04 18 24.11
**About parsing the CDS format**
The CDS format consists of a table description and the table data. These
can be in separate files as a ``ReadMe`` file plus data file(s), or
combined in a single file. Different subsections within the description
are separated by lines of dashes or equal signs ("------" or "======").
The table which specifies the column information must be preceded by a line
starting with "Byte-by-byte Description of file:".
In the case where the table description is combined with the data values,
the data must be in the last section and must be preceded by a section
delimiter line (dashes or equal signs only).
**Basic usage**
Use the ``ascii.read()`` function as normal, with an optional ``readme``
parameter indicating the CDS ReadMe file. If not supplied it is assumed that
the header information is at the top of the given table. Examples::
>>> from astropy.io import ascii
>>> table = ascii.read("data/cds.dat")
>>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
>>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
>>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")
The table name and the CDS ReadMe file can be entered as URLs. This can be used
to directly load tables from the Internet. For example, Vizier tables from the
CDS::
>>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")
If the header (ReadMe) and data are stored in a single file and there
is content between the header and the data (for instance Notes), then the
parsing process may fail. In this case you can instruct the reader to
guess the actual start of the data by supplying ``data_start='guess'`` in the
call to the ``ascii.read()`` function. You should verify that the output
data table matches expectation based on the input CDS file.
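For example, reusing the sample file from above (illustrative only)::
>>> table = ascii.read("data/cds.dat", data_start='guess')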
**Using a reader object**
When a ``Cds`` reader object is created with a ``readme`` parameter
passed at initialization, executing the ``read`` method with a table
filename takes the header information for the specified table from
the ``readme`` file. An
``InconsistentTableError`` is raised if the ``readme`` file does not
have header information for the given table.
>>> readme = "data/vizier/ReadMe"
>>> r = ascii.get_reader(ascii.Cds, readme=readme)
>>> table = r.read("data/vizier/table1.dat")
>>> # table5.dat has the same ReadMe file
>>> table = r.read("data/vizier/table5.dat")
If no ``readme`` parameter is specified, then the header
information is assumed to be at the top of the given table.
>>> r = ascii.get_reader(ascii.Cds)
>>> table = r.read("data/cds.dat")
>>> # The following gives InconsistentTableError, since no
>>> # readme file was given and table1.dat does not have a header.
>>> table = r.read("data/vizier/table1.dat")
Traceback (most recent call last):
...
InconsistentTableError: No CDS section delimiter found
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = 'cds'
_io_registry_format_aliases = ['cds']
_io_registry_can_write = False
_description = 'CDS format table'
data_class = CdsData
header_class = CdsHeader
def __init__(self, readme=None):
super().__init__()
self.header.readme = readme
def write(self, table=None):
"""Not available for the CDS class (raises NotImplementedError)"""
raise NotImplementedError
def read(self, table):
# If the read kwarg `data_start` is 'guess' then the table may have extraneous
# lines between the end of the header and the beginning of data.
if self.data.start_line == 'guess':
# Replicate the first part of BaseReader.read up to the point where
# the table lines are initially read in.
with suppress(TypeError):
# For strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
self.data.header = self.header
self.header.data = self.data
# Get a list of the lines (rows) in the table
lines = self.inputter.get_lines(table)
# Now try increasing data.start_line by one until the table reads successfully.
# For efficiency use the in-memory list of lines instead of `table`, which
# could be a file.
for data_start in range(len(lines)):
self.data.start_line = data_start
with suppress(Exception):
table = super().read(lines)
return table
else:
return super().read(table)
| |
# setup.py
# Copyright (c) 2013-2020 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,E0401,E0601,E1111,R0904,W0122,W0201,W0621
# Taken in large part from:
# http://www.jeffknupp.com/blog/2013/08/16/
# open-sourcing-a-python-project-the-right-way/
# With additional hints from:
# http://oddbird.net/set-your-code-free-preso/
# The function to get the version number from __init__.py is from:
# https://python-packaging-user-guide.readthedocs.org/
# en/latest/single_source_version/
# Standard library imports
from __future__ import print_function
import io
import glob
import os
import sys
# PyPI imports
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Intra-package imports
from pypkg.functions import (
get_entry_points,
get_pkg_data_files,
get_pkg_submodules,
load_requirements,
python_version,
)
###
# Supported interpreter check
###
# When installing from a tarball/zip/wheel the path is a temporary one and
# setup.py is not in a directory named after the package, so the package
# name has to be found by locating the pkgdata file
STEM = "pkgdata"
FOUND = False
START_DIR = os.path.dirname(os.path.abspath(__file__))
for (DIRPATH, _, FNAMES) in os.walk(START_DIR):
# Ignore .tox, .git and other directories
if DIRPATH[len(START_DIR) + 1 :].startswith("."):
continue
#
for FNAME in FNAMES:
if os.path.splitext(os.path.basename(FNAME))[0] == STEM:
FNAME = os.path.join(DIRPATH, FNAME)
sys.path.append(DIRPATH)
import pkgdata
PKG_NAME = os.path.basename(
os.path.dirname(os.path.abspath(sys.modules["pkgdata"].__file__))
)
FOUND = True
break
if not FOUND:
raise RuntimeError("Supported Python interpreter versions could not be found")
PYTHON_VER = python_version("{0:0x}".format(sys.hexversion & 0xFFFF0000)[:-4])
SUPPORTED_INTERPS = sorted(pkgdata.SUPPORTED_INTERPS)
if PYTHON_VER not in SUPPORTED_INTERPS:
sys.exit("Supported interpreter versions: {0}".format(", ".join(SUPPORTED_INTERPS)))
###
# Functions
###
def get_short_desc(long_desc):
"""Get first sentence of first paragraph of long description."""
found = False
olines = []
for line in [item.rstrip() for item in long_desc.split("\n")]:
if found and (((not line) and (not olines)) or (line and olines)):
olines.append(line)
elif found and olines and (not line):
return (" ".join(olines).split(".")[0]).strip()
found = line == ".. [[[end]]]" if not found else found
return ""
def read(*filenames, **kwargs):
"""Read plain text file(s)."""
encoding = kwargs.get("encoding", "utf-8")
sep = kwargs.get("sep", "\n")
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as fobj:
buf.append(fobj.read())
return sep.join(buf)
###
# Global variables
###
REPO = "http://github.com/pmacosta/{pkg_name}/".format(pkg_name=PKG_NAME)
AUTHOR = "Pablo Acosta-Serafini"
AUTHOR_EMAIL = "pmasdev@gmail.com"
LICENSE = "MIT"
PKG_DIR = os.path.abspath(os.path.dirname(__file__))
LONG_DESCRIPTION = read(
os.path.join(PKG_DIR, "README.rst"), os.path.join(PKG_DIR, "CHANGELOG.rst")
)
SHORT_DESC = get_short_desc(LONG_DESCRIPTION)
# Actual directory is os.path.join(sys.prefix, 'share', PKG_NAME)
SHARE_DIR = os.path.join("share", PKG_NAME)
INSTALL_REQUIRES = load_requirements(PKG_DIR, PYTHON_VER, "source")
TESTING_REQUIRES = load_requirements(PKG_DIR, PYTHON_VER, "testing")
if os.environ.get("MERGE_REQUIREMENTS", False):
INSTALL_REQUIRES = INSTALL_REQUIRES + TESTING_REQUIRES
try:
DATA_FILES = get_pkg_data_files(SHARE_DIR)
except IOError:
print("PKG_DIR: {0}".format(PKG_DIR))
print("Contents:")
print(glob.glob(os.path.join(PKG_DIR, "*")))
print("PKG_DIR/data")
print("Contents:")
print(glob.glob(os.path.join(PKG_DIR, "data", "*")))
raise
###
# Extract version (from coveragepy)
###
VERSION_PY = os.path.join(PKG_DIR, PKG_NAME, "pkgdata.py")
with open(VERSION_PY) as fobj:
__version__ = VERSION_INFO = ""
# Execute the code in pkgdata.py.
exec(compile(fobj.read(), VERSION_PY, "exec"))
if VERSION_INFO[3] == "alpha":
DEVSTAT = "3 - Alpha"
elif VERSION_INFO[3] in ["beta", "candidate"]:
DEVSTAT = "4 - Beta"
else:
assert VERSION_INFO[3] == "final"
DEVSTAT = "5 - Production/Stable"
###
# Classes
###
class Tox(TestCommand): # noqa
user_options = [("tox-args=", "a", "Arguments to pass to tox")]
def initialize_options(self): # noqa
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self): # noqa
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self): # noqa
# pylint: disable=C0415
import shlex
import tox
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
###
# Processing
###
# package_data is used only for binary packages, i.e.
# $ python setup.py bdist ...
# but NOT when building source packages, i.e.
# $ python setup.py sdist ...
setup(
name=PKG_NAME,
version=__version__,
url=REPO,
license=LICENSE,
author=AUTHOR,
tests_require=TESTING_REQUIRES,
install_requires=INSTALL_REQUIRES,
cmdclass={"tests": Tox},
author_email=AUTHOR_EMAIL,
description=SHORT_DESC,
long_description=LONG_DESCRIPTION,
packages=[PKG_NAME] + [PKG_NAME + "." + item for item in get_pkg_submodules()],
entry_points=get_entry_points(),
data_files=DATA_FILES,
zip_safe=False,
platforms="any",
classifiers=[
"Programming Language :: Python :: {0}".format(pyver)
for pyver in SUPPORTED_INTERPS
]
+ [
"Development Status :: " + DEVSTAT,
"Natural Language :: English",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: " + LICENSE + " License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import time
import netaddr
import six
from six import moves
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally.deployment.serverprovider import provider
from rally import exceptions
LOG = logging.getLogger(__name__)
INET_ADDR_RE = re.compile(r" *inet ((\d+\.){3}\d+)\/\d+ .*")
IPT_PORT_TEMPLATE = ("iptables -t nat -{action} PREROUTING -d {host_ip}"
" -p tcp --syn --dport {port}"
" -j DNAT --to-destination {ip}:22")
def _get_script(filename):
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"lxc", filename))
return open(path, "rb")
def _get_script_from_template(template_filename, **kwargs):
template = _get_script(template_filename).read()
return moves.StringIO(template.format(**kwargs))
class LxcHost(object):
"""Represent lxc enabled host."""
def __init__(self, server, config):
"""Initialize LxcHost object.
:param server: Server object
:param config: dictionary with following key/values:
network ipv4 network for containers
lxc_bridge bridge interface name (default lxcbr0)
tunnel_to ip address for make tunnel to
forward_ssh use ssh port forwarding (do not use for
controller nodes)
"""
self.config = config
if "network" in config:
self.network = netaddr.IPNetwork(config["network"])
else:
self.network = None
self.server = server
self.containers = []
self.path = "/var/lib/lxc/"
self._port_cache = {}
def _get_updated_server(self, **kwargs):
credentials = self.server.get_credentials()
credentials.update(kwargs)
return provider.Server.from_credentials(credentials)
@property
def backingstore(self):
if not hasattr(self, "_backingstore"):
code = self.server.ssh.execute("df -t btrfs %s" % self.path)[0]
self._backingstore = "" if code else "btrfs"
return self._backingstore
def prepare(self):
if self.network:
dhcp_start = str(self.network.network + 2)
dhcp_end = str(self.network.network + self.network.size - 2)
dhcp_range = ",".join([dhcp_start, dhcp_end])
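# E.g. for "network": "10.1.1.0/24" (size 256) this yields the range
# "10.1.1.2,10.1.1.254", with at most LXC_DHCP_MAX leases.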
values = {
"USE_LXC_BRIDGE": "true",
"LXC_BRIDGE": self.config.get("lxc_bridge", "lxcbr0"),
"LXC_ADDR": self.network.network + 1,
"LXC_NETMASK": self.network.netmask,
"LXC_NETWORK": self.network,
"LXC_DHCP_RANGE": dhcp_range,
"LXC_DHCP_MAX": self.network.size - 3,
}
config = moves.StringIO()
for name, value in six.iteritems(values):
config.write("%(name)s=\"%(value)s\"\n" % {"name": name,
"value": value})
config.seek(0)
self.server.ssh.run("cat > /tmp/.lxc_default", stdin=config)
self.server.ssh.run("/bin/sh", stdin=_get_script("lxc-install.sh"))
self.create_local_tunnels()
self.create_remote_tunnels()
def create_local_tunnels(self):
"""Create tunel on lxc host side."""
for tunnel_to in self.config["tunnel_to"]:
script = _get_script_from_template("tunnel-local.sh",
net=self.network,
local=self.server.host,
remote=tunnel_to)
self.server.ssh.run("/bin/sh", stdin=script)
def create_remote_tunnels(self):
"""Create tunel on remote side."""
for tunnel_to in self.config["tunnel_to"]:
script = _get_script_from_template("tunnel-remote.sh",
net=self.network,
local=tunnel_to,
remote=self.server.host)
server = self._get_updated_server(host=tunnel_to)
server.ssh.run("/bin/sh", stdin=script)
def delete_tunnels(self):
for tunnel_to in self.config["tunnel_to"]:
remote_server = self._get_updated_server(host=tunnel_to)
remote_server.ssh.execute("ip tun del t%s" % self.network.ip)
self.server.ssh.execute("ip tun del t%s" % tunnel_to)
def get_ip(self, name):
"""Get container's ip by name."""
cmd = "lxc-attach -n %s ip addr list dev eth0" % name
for attempt in range(1, 16):
code, stdout = self.server.ssh.execute(cmd)[:2]
if code:
continue
for line in stdout.splitlines():
m = INET_ADDR_RE.match(line)
if m:
return m.group(1)
time.sleep(attempt)
msg = _("Timeout waiting for ip address of container \"%s\"") % name
raise exceptions.TimeoutException(msg)
def get_port(self, ip):
"""Get forwarded ssh port for instance ip.
Ssh port forwarding is used for containers access from outside.
Any container is accessible by host's ip and forwarded port. E.g:
6.6.6.6:10023 -> 10.1.1.11:22
6.6.6.6:10024 -> 10.1.1.12:22
6.6.6.6:10025 -> 10.1.1.13:22
where 6.6.6.6 is host's ip.
Ip->port association is stored in self._port_cache to reduce number
of iptables calls.
"""
if not self._port_cache:
self._port_cache = {}
port_re = re.compile(r".+ tcp dpt:(\d+).*to:([\d\.]+)\:22")
cmd = "iptables -n -t nat -L PREROUTING"
code, out, err = self.server.ssh.execute(cmd)
for l in out.splitlines():
m = port_re.match(l)
if m:
self._port_cache[m.group(2)] = int(m.group(1))
port = self._port_cache.get(ip)
if port is None:
if self._port_cache:
port = max(self._port_cache.values()) + 1
else:
port = 1222
self._port_cache[ip] = port
cmd = IPT_PORT_TEMPLATE.format(host_ip=self.server.host, ip=ip,
port=port, action="I")
self.server.ssh.run(cmd)
return port
def create_container(self, name, distribution, release=None):
cmd = ["lxc-create"]
if self.backingstore == "btrfs":
cmd += ["-B", "btrfs"]
cmd += ["-n", name, "-t", distribution]
if release:
if distribution == "ubuntu":
cmd += ["--", "-r", release]
elif distribution == "debian":
cmd = ["SUITE=%s" % release] + cmd
self.server.ssh.run(" ".join(cmd))
self.configure_container(name)
self.containers.append(name)
def create_clone(self, name, source):
cmd = ["lxc-clone"]
if self.backingstore == "btrfs":
cmd.append("--snapshot")
cmd.extend(["-o", source, "-n", name])
self.server.ssh.execute(" ".join(cmd))
self.configure_container(name)
self.containers.append(name)
def configure_container(self, name):
path = os.path.join(self.path, name, "rootfs")
conf_script = _get_script("configure_container.sh")
self.server.ssh.run("/bin/sh -e -s %s" % path, stdin=conf_script)
def start_containers(self):
for name in self.containers:
self.server.ssh.run("lxc-start -d -n %s" % name)
def stop_containers(self):
for name in self.containers:
self.server.ssh.run("lxc-stop -n %s" % name)
def destroy_ports(self, ipports):
script = ""
for ip, port in ipports:
cmd = IPT_PORT_TEMPLATE.format(action="D", port=port, ip=ip,
host_ip=self.server.host)
script += cmd + "\n"
self.server.ssh.run("/bin/sh -e", stdin=script)
def destroy_containers(self):
for name in self.containers:
self.server.ssh.run("lxc-stop -n %s" % name)
self.server.ssh.run("lxc-destroy -n %s" % name)
def get_server_object(self, name, wait=True):
"""Create Server object for container."""
ip = self.get_ip(name)
if self.config.get("forward_ssh", False):
server = self._get_updated_server(port=self.get_port(ip))
else:
server = self._get_updated_server(host=ip)
if wait:
server.ssh.wait(timeout=300)
return server
def get_server_objects(self, wait=True):
"""Generate Server objects from all containers."""
for name in self.containers:
yield self.get_server_object(name, wait)
@provider.configure(name="LxcProvider")
class LxcProvider(provider.ProviderFactory):
"""Provide lxc container(s) on given host.
Sample configuration:
{
"type": "LxcProvider",
"distribution": "ubuntu",
"start_lxc_network": "10.1.1.0/24",
"containers_per_host": 32,
"tunnel_to": ["10.10.10.10"],
"forward_ssh": false,
"container_name_prefix": "rally-multinode-02",
"host_provider": {
"type": "ExistingServers",
"credentials": [{"user": "root", "host": "host.net"}]
}
}
"""
CONFIG_SCHEMA = {
"type": "object",
"properties": {
"type": {"type": "string"},
"distribution": {"type": "string"},
"release": {"type": "string"},
"start_lxc_network": {"type": "string",
"pattern": "^(\d+\.){3}\d+\/\d+$"},
"containers_per_host": {"type": "integer"},
"forward_ssh": {"type": "boolean"},
"tunnel_to": {"type": "array",
"elements": {"type": "string",
"pattern": "^(\d+\.){3}\d+$"}},
"container_name_prefix": {"type": "string"},
"host_provider": {"type": "object",
"properties": {"type": {"type": "string"}}},
},
"required": ["type", "containers_per_host",
"container_name_prefix", "host_provider"],
}
def validate(self):
super(LxcProvider, self).validate()
if "start_lxc_network" not in self.config:
return
lxc_net = netaddr.IPNetwork(self.config["start_lxc_network"])
num_containers = self.config["containers_per_host"]
if lxc_net.size - 3 < num_containers:
message = _("Network size is not enough for %d hosts.")
raise exceptions.InvalidConfigException(message % num_containers)
def get_host_provider(self):
return provider.ProviderFactory.get_provider(
self.config["host_provider"], self.deployment)
@utils.log_deploy_wrapper(LOG.info, _("Create containers on host"))
def create_servers(self):
host_provider = self.get_host_provider()
name_prefix = self.config["container_name_prefix"]
hosts = []
if "start_lxc_network" in self.config:
network = netaddr.IPNetwork(self.config["start_lxc_network"])
else:
network = None
distribution = self.config.get("distribution", "ubuntu")
release = self.config.get("release", None)
for server in host_provider.create_servers():
config = {"tunnel_to": self.config.get("tunnel_to", []),
"forward_ssh": self.config.get("forward_ssh", False)}
if network:
config["network"] = str(network)
host = LxcHost(server, config)
host.prepare()
ip = str(network.ip).replace(".", "-") if network else "0"
first_name = "%s-000-%s" % (name_prefix, ip)
host.create_container(first_name, distribution, release)
for i in range(1, self.config.get("containers_per_host", 1)):
name = "%s-%03d-%s" % (name_prefix, i, ip)
host.create_clone(name, first_name)
host.start_containers()
hosts.append(host)
if network:
network += 1
servers = []
for host in hosts:
for server in host.get_server_objects():
servers.append(server)
info = {"host": host.server.get_credentials(),
"config": host.config,
"forwarded_ports": host._port_cache.items(),
"container_names": host.containers}
self.resources.create(info)
return servers
@utils.log_deploy_wrapper(LOG.info, _("Destroy host(s)"))
def destroy_servers(self):
for resource in self.resources.get_all():
server = provider.Server.from_credentials(resource["info"]["host"])
lxc_host = LxcHost(server, resource["info"]["config"])
lxc_host.containers = resource["info"]["container_names"]
lxc_host.destroy_containers()
lxc_host.destroy_ports(resource["info"]["forwarded_ports"])
lxc_host.delete_tunnels()
self.resources.delete(resource["id"])
host_provider = self.get_host_provider()
host_provider.destroy_servers()
| |
import attr
from plumbum import local
import benchbuild as bb
from benchbuild.source import HTTP
from benchbuild.utils.cmd import sh, tar
@attr.s
class RodiniaGroup(bb.Project):
"""Generic handling of Rodinia benchmarks."""
DOMAIN = 'rodinia'
GROUP = 'rodinia'
SOURCE = [
HTTP(remote={
'3.1': 'http://www.cs.virginia.edu/'
'~kw5na/lava/Rodinia/Packages/Current/3.1/'
'rodinia_3.1.tar.bz2'
},
local='rodinia.tar.bz2')
]
CONFIG = {}
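# Each subclass overrides CONFIG; attr.Factory with takes_self=True
# resolves type(self).CONFIG per instance, so self.config always
# reflects the concrete benchmark's settings.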
config = attr.ib(
default=attr.Factory(lambda self: type(self).CONFIG, takes_self=True))
def compile(self):
tar('xf', 'rodinia.tar.bz2')
rodinia_version = self.version_of('rodinia.tar.bz2')
unpack_dir = local.path(f'rodinia_{rodinia_version}')
c_compiler = bb.compiler.cc(self)
cxx_compiler = bb.compiler.cxx(self)
config_dir = self.config['dir']
config_src = self.config['src']
config_flags = self.config['flags']
with local.cwd(unpack_dir / config_dir):
for outfile, srcfiles in config_src.items():
cls = type(self)
_cc = cls.select_compiler(c_compiler, cxx_compiler)
if "flags" in self.config:
_cc = _cc[config_flags]
_cc = _cc[srcfiles]
_cc = _cc["-o", outfile]
_cc = bb.watch(_cc)
_cc()
@staticmethod
def select_compiler(c_compiler, _):
return c_compiler
def run_tests(self):
rodinia_version = self.version_of('rodinia.tar.bz2')
unpack_dir = local.path(f'rodinia_{rodinia_version}')
in_src_dir = unpack_dir / self.config['dir']
for outfile in self.config['src']:
bb.wrap(in_src_dir / outfile, self)
with local.cwd(in_src_dir):
sh_ = bb.watch(sh)
sh_('./run')
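# Sketch of the effective compile step for Backprop below, derived from its
# CONFIG (benchbuild's compiler wrapping elided; paths are relative to the
# unpacked tarball):
#   cd rodinia_3.1/openmp/backprop
#   cc -fopenmp -lm backprop_kernel.c imagenet.c facetrain.c backprop.c \
#      -o backprop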
class Backprop(RodiniaGroup):
NAME = 'backprop'
CONFIG = {
"dir": "openmp/backprop",
"src": {
NAME: [
"backprop_kernel.c", "imagenet.c", "facetrain.c", "backprop.c"
]
},
"flags": ["-fopenmp", "-lm"]
}
class BFS(RodiniaGroup):
NAME = 'bfs'
CONFIG = {
"dir": "openmp/bfs",
"src": {
NAME: ["bfs.cpp"]
},
"flags": ["-fopenmp", "-UOPEN"]
}
@staticmethod
def select_compiler(_, cc):
return cc
class BPlusTree(RodiniaGroup):
NAME = 'b+tree'
CONFIG = {
"dir": "openmp/b+tree",
"src": {
"b+tree.out": [
"./main.c", "./kernel/kernel_cpu.c", "./kernel/kernel_cpu_2.c",
"./util/timer/timer.c", "./util/num/num.c"
]
},
"flags": ["-fopenmp", "-lm"]
}
class CFD(RodiniaGroup):
NAME = 'cfd'
CONFIG = {"dir": "openmp/cfd", "src": {"euler3d_cpu": ["euler3d_cpu.cpp"]}}
@staticmethod
def select_compiler(_, cc):
return cc
class HeartWall(RodiniaGroup):
NAME = 'heartwall'
CONFIG = {
"dir": "openmp/heartwall",
"src": {
NAME: ["./AVI/avimod.c", "./AVI/avilib.c", "./main.c"]
},
"flags": ["-I./AVI", "-fopenmp", "-lm"]
}
class Hotspot(RodiniaGroup):
NAME = 'hotspot'
CONFIG = {
"dir": "openmp/hotspot",
"src": {
NAME: ["hotspot_openmp.cpp"]
},
"flags": ["-fopenmp"]
}
@staticmethod
def select_compiler(_, cc):
return cc
class Hotspot3D(RodiniaGroup):
NAME = 'hotspot3D'
CONFIG = {
"dir": "openmp/hotspot3D",
"src": {
"3D": ["./3D.c"]
},
"flags": ["-fopenmp", "-lm"]
}
class KMeans(RodiniaGroup):
NAME = 'kmeans'
CONFIG = {
"dir": "openmp/kmeans",
"src": {
"./kmeans_serial/kmeans": [
"./kmeans_serial/kmeans_clustering.c",
"./kmeans_serial/kmeans.c", "./kmeans_serial/getopt.c",
"./kmeans_serial/cluster.c"
],
"./kmeans_openmp/kmeans": [
"./kmeans_openmp/kmeans_clustering.c",
"./kmeans_openmp/kmeans.c", "./kmeans_openmp/getopt.c",
"./kmeans_openmp/cluster.c"
]
},
"flags": ["-lm", "-fopenmp"]
}
class LavaMD(RodiniaGroup):
NAME = 'lavaMD'
CONFIG = {
"dir": "openmp/lavaMD",
"src": {
NAME: [
"./main.c", "./util/timer/timer.c", "./util/num/num.c",
"./kernel/kernel_cpu.c"
]
},
"flags": ["-lm", "-fopenmp"]
}
class Leukocyte(RodiniaGroup):
NAME = 'leukocyte'
CONFIG = {
"dir":
"openmp/leukocyte",
"src": {
NAME: [
"./meschach_lib/memstat.c", "./meschach_lib/meminfo.c",
"./meschach_lib/version.c", "./meschach_lib/ivecop.c",
"./meschach_lib/matlab.c", "./meschach_lib/machine.c",
"./meschach_lib/otherio.c", "./meschach_lib/init.c",
"./meschach_lib/submat.c", "./meschach_lib/pxop.c",
"./meschach_lib/matop.c", "./meschach_lib/vecop.c",
"./meschach_lib/memory.c", "./meschach_lib/matrixio.c",
"./meschach_lib/err.c", "./meschach_lib/copy.c",
"./meschach_lib/bdfactor.c", "./meschach_lib/mfunc.c",
"./meschach_lib/fft.c", "./meschach_lib/svd.c",
"./meschach_lib/schur.c", "./meschach_lib/symmeig.c",
"./meschach_lib/hessen.c", "./meschach_lib/norm.c",
"./meschach_lib/update.c", "./meschach_lib/givens.c",
"./meschach_lib/hsehldr.c", "./meschach_lib/solve.c",
"./meschach_lib/qrfactor.c", "./meschach_lib/chfactor.c",
"./meschach_lib/bkpfacto.c", "./meschach_lib/lufactor.c",
"./meschach_lib/iternsym.c", "./meschach_lib/itersym.c",
"./meschach_lib/iter0.c", "./meschach_lib/spswap.c",
"./meschach_lib/spbkp.c", "./meschach_lib/splufctr.c",
"./meschach_lib/spchfctr.c", "./meschach_lib/sparseio.c",
"./meschach_lib/sprow.c", "./meschach_lib/sparse.c",
"./meschach_lib/zfunc.c", "./meschach_lib/znorm.c",
"./meschach_lib/zmatop.c", "./meschach_lib/zvecop.c",
"./meschach_lib/zmemory.c", "./meschach_lib/zmatio.c",
"./meschach_lib/zcopy.c", "./meschach_lib/zmachine.c",
"./meschach_lib/zschur.c", "./meschach_lib/zhessen.c",
"./meschach_lib/zgivens.c", "./meschach_lib/zqrfctr.c",
"./meschach_lib/zhsehldr.c", "./meschach_lib/zmatlab.c",
"./meschach_lib/zsolve.c", "./meschach_lib/zlufctr.c",
"./OpenMP/detect_main.c", "./OpenMP/misc_math.c",
"./OpenMP/track_ellipse.c", "./OpenMP/find_ellipse.c",
"./OpenMP/avilib.c"
]
},
"flags": [
"-DSPARSE", "-DCOMPLEX", "-DREAL_FLT", "-DREAL_DBL",
"-I./meschach_lib", "-lm", "-lpthread", "-fopenmp"
]
}
class LUD(RodiniaGroup):
NAME = 'lud'
CONFIG = {
"dir": "openmp/lud",
"src": {
"./omp/lud_omp": [
"./common/common.c", "./omp/lud_omp.c", "./omp/lud.c"
]
},
"flags": ["-I./common", "-lm", "-fopenmp"]
}
class Myocyte(RodiniaGroup):
NAME = 'myocyte'
CONFIG = {
"dir": "openmp/myocyte",
"src": {
"./myocyte.out": ["main.c"]
},
"flags": ["-lm", "-fopenmp"]
}
class NN(RodiniaGroup):
NAME = 'nn'
CONFIG = {
"dir": "openmp/nn",
"src": {
NAME: ["./nn_openmp.c"]
},
"flags": ["-lm", "-fopenmp"]
}
class NW(RodiniaGroup):
NAME = 'nw'
CONFIG = {
"dir": "openmp/nw",
"src": {
"needle": ["./needle.cpp"]
},
"flags": ["-lm", "-fopenmp"]
}
@staticmethod
def select_compiler(_, cc):
return cc
class ParticleFilter(RodiniaGroup):
NAME = 'particlefilter'
CONFIG = {
"dir": "openmp/particlefilter",
"src": {
"particle_filter": ["./ex_particle_OPENMP_seq.c"]
},
"flags": ["-lm", "-fopenmp"]
}
class PathFinder(RodiniaGroup):
NAME = 'pathfinder'
CONFIG = {
"dir": "openmp/pathfinder",
"src": {
"pathfinder": ["./pathfinder.cpp"]
},
"flags": ["-fopenmp"]
}
@staticmethod
def select_compiler(_, cc):
return cc
class SRAD1(RodiniaGroup):
NAME = 'srad-1'
CONFIG = {
"dir": "openmp/srad/srad_v1",
"src": {
"srad": ["./main.c"]
},
"flags": ["-I.", "-lm", "-fopenmp"]
}
class SRAD2(RodiniaGroup):
NAME = 'srad-2'
CONFIG = {
"dir": "openmp/srad/srad_v2",
"src": {
"srad": ["./srad.cpp"]
},
"flags": ["-lm", "-fopenmp"]
}
@staticmethod
def select_compiler(_, cc):
return cc
class StreamCluster(RodiniaGroup):
NAME = 'streamcluster'
CONFIG = {
"dir": "openmp/streamcluster",
"src": {
"./sc_omp": ["./streamcluster_omp.cpp"]
},
"flags": ["-lpthread", "-fopenmp"]
}
@staticmethod
def select_compiler(_, cc):
return cc
| |
import sys, os
import types
from . import model, ffiplatform
class VGenericEngine(object):
_class_key = 'g'
_gen_python_module = False
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self.export_symbols = []
self._struct_pending_verification = {}
def patch_extension_kwds(self, kwds):
# add 'export_symbols' to the dictionary. Note that we add the
# list before filling it. When we fill it, it will thus also show
# up in kwds['export_symbols'].
kwds.setdefault('export_symbols', self.export_symbols)
def find_module(self, module_name, path, so_suffixes):
for so_suffix in so_suffixes:
basename = module_name + so_suffix
if path is None:
path = sys.path
for dirname in path:
filename = os.path.join(dirname, basename)
if os.path.isfile(filename):
return filename
def collect_types(self):
pass # not needed in the generic engine
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self):
prnt = self._prnt
# first paste some standard set of lines that are mostly '#include'
prnt(cffimod_header)
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
#
# call generate_gen_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate('decl')
#
# on Windows, distutils insists on putting init_cffi_xyz in
# 'export_symbols', so instead of fighting it, just give up and
# give it one
if sys.platform == 'win32':
if sys.version_info >= (3,):
prefix = 'PyInit_'
else:
prefix = 'init'
modname = self.verifier.get_module_name()
prnt("void %s%s(void) { }\n" % (prefix, modname))
def load_library(self):
# import it with the CFFI backend
backend = self.ffi._backend
# needs to make a path that contains '/', on Posix
filename = os.path.join(os.curdir, self.verifier.modulefilename)
module = backend.load_library(filename)
#
# call loading_gen_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
# build the FFILibrary class and instance, this is a module subclass
# because modules are expected to have usually-constant-attributes and
# in PyPy this means the JIT is able to treat attributes as constant,
# which we want.
class FFILibrary(types.ModuleType):
_cffi_generic_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir
library = FFILibrary("")
#
# finally, call the loaded_gen_xxx() functions. This will set
# up the 'library' object.
self._load(module, 'loaded', library=library)
return library
def _get_declarations(self):
return sorted(self.ffi._parser._declarations.items())
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_gen_%s_%s' % (kind,
step_name))
except AttributeError:
raise ffiplatform.VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_gen_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
# typedefs: generates no code so far
_generate_gen_typedef_decl = _generate_nothing
_loading_gen_typedef = _loaded_noop
_loaded_gen_typedef = _loaded_noop
# ----------
# function declarations
def _generate_gen_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no _cffi_f_%s wrapper)
self._generate_gen_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
argnames = []
for i, type in enumerate(tp.args):
indirection = ''
if isinstance(type, model.StructOrUnion):
indirection = '*'
argnames.append('%sx%d' % (indirection, i))
context = 'argument of %s' % name
arglist = [type.get_c_name(' %s' % arg, context)
for type, arg in zip(tp.args, argnames)]
arglist = ', '.join(arglist) or 'void'
wrappername = '_cffi_f_%s' % name
self.export_symbols.append(wrappername)
funcdecl = ' %s(%s)' % (wrappername, arglist)
context = 'result of %s' % name
prnt(tp.result.get_c_name(funcdecl, context))
prnt('{')
#
if not isinstance(tp.result, model.VoidType):
result_code = 'return '
else:
result_code = ''
prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames)))
prnt('}')
prnt()
_loading_gen_function = _loaded_noop
def _loaded_gen_function(self, tp, name, module, library):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
newfunction = self._load_constant(False, tp, name, module)
else:
indirections = []
base_tp = tp
if any(isinstance(typ, model.StructOrUnion) for typ in tp.args):
indirect_args = []
for i, typ in enumerate(tp.args):
if isinstance(typ, model.StructOrUnion):
typ = model.PointerType(typ)
indirections.append((i, typ))
indirect_args.append(typ)
tp = model.FunctionPtrType(tuple(indirect_args),
tp.result, tp.ellipsis)
BFunc = self.ffi._get_cached_btype(tp)
wrappername = '_cffi_f_%s' % name
newfunction = module.load_function(BFunc, wrappername)
for i, typ in indirections:
newfunction = self._make_struct_wrapper(newfunction, i, typ,
base_tp)
setattr(library, name, newfunction)
type(library)._cffi_dir.append(name)
def _make_struct_wrapper(self, oldfunc, i, tp, base_tp):
backend = self.ffi._backend
BType = self.ffi._get_cached_btype(tp)
def newfunc(*args):
args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:]
return oldfunc(*args)
newfunc._cffi_base_type = base_tp
return newfunc
# ----------
# named structs
def _generate_gen_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _loading_gen_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_gen_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_gen_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _loading_gen_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_gen_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname), fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
self.export_symbols.append(layoutfuncname)
prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static intptr_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' return nums[i];')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0]
function = module.load_function(BFunc, layoutfuncname)
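# The generated layout function returns one value per index: total size,
# total alignment, then an (offset, size) pair per field, and finally -1
# as a terminator (see _generate_struct_or_union_decl above).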
layout = []
num = 0
while True:
x = function(num)
if x < 0: break
layout.append(x)
num += 1
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise ffiplatform.VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
def _generate_gen_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_gen_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _loading_gen_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_gen_enum(tp, name, module, '')
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_gen_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_gen_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_gen_const(self, is_int, name, tp=None, category='const'):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
self.export_symbols.append(funcname)
if is_int:
assert category == 'const'
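# The generated helper stores the value through 'out_value' and returns
# whether the constant is <= 0, so the loader can detect unsigned
# values that wrapped around 'long long'.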
prnt('int %s(long long *out_value)' % funcname)
prnt('{')
prnt(' *out_value = (long long)(%s);' % (name,))
prnt(' return (%s) <= 0;' % (name,))
prnt('}')
else:
assert tp is not None
prnt(tp.get_c_name(' %s(void)' % funcname, name),)
prnt('{')
if category == 'var':
ampersand = '&'
else:
ampersand = ''
prnt(' return (%s%s);' % (ampersand, name))
prnt('}')
prnt()
def _generate_gen_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_gen_const(is_int, name, tp)
_loading_gen_constant = _loaded_noop
def _load_constant(self, is_int, tp, name, module):
funcname = '_cffi_const_%s' % name
if is_int:
BType = self.ffi._typeof_locked("long long*")[0]
BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0]
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType)
negative = function(p)
value = int(p[0])
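# function() returned true if the constant is <= 0; if we read back a
# negative value for a constant known to be positive, it overflowed
# 'long long', so undo the two's-complement wrap-around below.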
if value < 0 and not negative:
BLongLong = self.ffi._typeof_locked("long long")[0]
value += (1 << (8*self.ffi.sizeof(BLongLong)))
else:
BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0]
function = module.load_function(BFunc, funcname)
value = function()
return value
def _loaded_gen_constant(self, tp, name, module, library):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
value = self._load_constant(is_int, tp, name, module)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# enums
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_gen_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_gen_const(True, enumerator)
return
#
funcname = self._enum_funcname(prefix, name)
self.export_symbols.append(funcname)
prnt = self._prnt
prnt('int %s(char *out_error)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
if enumvalue < 0:
prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % (
enumerator, enumerator, enumvalue))
else:
prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % (
enumerator, enumerator, enumvalue))
prnt(' char buf[64];')
prnt(' if ((%s) < 0)' % enumerator)
prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator)
prnt(' else')
prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' %
enumerator)
prnt(' sprintf(out_error,'
' "%s has the real value %s, not %s",')
prnt(' "%s", buf, "%d");' % (
enumerator[:100], enumvalue))
prnt(' return -1;')
prnt(' }')
prnt(' return 0;')
prnt('}')
prnt()
def _loading_gen_enum(self, tp, name, module, prefix='enum'):
if tp.partial:
enumvalues = [self._load_constant(True, tp, enumerator, module)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
else:
BType = self.ffi._typeof_locked("char[]")[0]
BFunc = self.ffi._typeof_locked("int(*)(char*)")[0]
funcname = self._enum_funcname(prefix, name)
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType, 256)
if function(p) < 0:
error = self.ffi.string(p)
if sys.version_info >= (3,):
error = str(error, 'utf-8')
raise ffiplatform.VerificationError(error)
def _loaded_gen_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
type(library)._cffi_dir.append(enumerator)
# ----------
# macros: for now only for integers
def _generate_gen_macro_decl(self, tp, name):
assert tp == '...'
self._generate_gen_const(True, name)
_loading_gen_macro = _loaded_noop
def _loaded_gen_macro(self, tp, name, module, library):
value = self._load_constant(True, tp, name, module)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# global variables
def _generate_gen_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
if tp.length == '...':
prnt = self._prnt
funcname = '_cffi_sizeof_%s' % (name,)
self.export_symbols.append(funcname)
prnt("size_t %s(void)" % funcname)
prnt("{")
prnt(" return sizeof(%s);" % (name,))
prnt("}")
tp_ptr = model.PointerType(tp.item)
self._generate_gen_const(False, name, tp_ptr)
else:
tp_ptr = model.PointerType(tp)
self._generate_gen_const(False, name, tp_ptr, category='var')
_loading_gen_variable = _loaded_noop
def _loaded_gen_variable(self, tp, name, module, library):
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
funcname = '_cffi_sizeof_%s' % (name,)
BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0]
function = module.load_function(BFunc, funcname)
size = function()
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise ffiplatform.VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
tp_ptr = model.PointerType(tp.item)
value = self._load_constant(False, tp_ptr, name, module)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
funcname = '_cffi_var_%s' % name
BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0]
function = module.load_function(BFunc, funcname)
ptr = function()
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
cffimod_header = r'''
#include <stdio.h>
#include <stddef.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/types.h> /* XXX for ssize_t on some platforms */
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
'''
| |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for
use in PyLoris (http://pyloris.sourceforge.net/).
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge.
"""
import base64
import socket
import struct
import sys
if getattr(socket, "socket", None) is None:
raise ImportError("socket.socket missing, proxy support unusable")
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception):
pass
class GeneralProxyError(ProxyError):
pass
class Socks5AuthError(ProxyError):
pass
class Socks5Error(ProxyError):
pass
class Socks4Error(ProxyError):
pass
class HTTPError(ProxyError):
pass
_generalerrors = (
"success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input",
)
_socks5errors = (
"succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error",
)
_socks5autherrors = (
"succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error",
)
_socks4errors = (
"request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different "
"user-ids",
"unknown error",
)
def setdefaultproxy(
proxytype=None, addr=None, port=None, rdns=True, username=None, password=None
):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the
namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
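# Example usage (hypothetical host names):
#   import ftplib
#   setdefaultproxy(PROXY_TYPE_SOCKS5, "proxy.example.com", 1080)
#   wrapmodule(ftplib)
#   ftplib.FTP("ftp.example.com")  # now tunneled through the SOCKS proxy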
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(
self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None
):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count - len(data))
if not d:
raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
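# E.g. (illustrative) a request line "GET /index.html HTTP/1.1" with
# header "Host: example.com" is rewritten to
# "GET http://example.com/index.html HTTP/1.1", with Host (and optional
# Proxy-Authorization) re-inserted at the top of the header block.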
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if self.__proxy[4] != None and self.__proxy[5] != None:
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + ":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(
self,
proxytype=None,
addr=None,
port=None,
rdns=True,
username=None,
password=None,
headers=None,
):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
headers - Additional or modified headers for the proxy connect
request.
"""
self.__proxy = (
proxytype,
addr,
port,
rdns,
username.encode() if username else None,
password.encode() if password else None,
headers,
)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4] != None) and (self.__proxy[5] != None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(
chr(0x01).encode()
+ chr(len(self.__proxy[4]))
+ self.__proxy[4]
+ chr(len(self.__proxy[5]))
+ self.__proxy[5]
)
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack("BBB", 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = (
req
+ chr(0x03).encode()
+ chr(len(destaddr)).encode()
+ destaddr.encode()
)
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2]) <= 8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self, destaddr, destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (
socket.inet_ntoa(resp[4:]),
struct.unpack(">H", resp[2:4])[0],
)
if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
wrote_host_header = False
wrote_auth_header = False
if self.__proxy[6] != None:
for key, val in self.__proxy[6].iteritems():
headers += [key, ": ", val, "\r\n"]
wrote_host_header = key.lower() == "host"
wrote_auth_header = key.lower() == "proxy-authorization"
if not wrote_host_header:
headers += ["Host: ", destaddr, "\r\n"]
if not wrote_auth_header:
if self.__proxy[4] != None and self.__proxy[5] != None:
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (
(not type(destpair) in (list, tuple))
or (len(destpair) < 2)
or (not isinstance(destpair[0], basestring))
or (type(destpair[1]) != int)
):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0], destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
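# Example usage (a sketch; assumes the enclosing class is this module's
# socksocket and that a SOCKS5 proxy is listening on 127.0.0.1:1080; both are
# assumptions made only for this illustration):
#
#   s = socksocket()
#   s.setproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
#   s.connect(("example.com", 80))
#   s.sendall("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")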
| |
#!/usr/bin/env python
# Copyright 2012 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import json
import logging
import os
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import isolated_format
import run_isolated
from utils import file_path
import isolateserver_mock
import test_utils
CONTENTS = {
'check_files.py': """if True:
import os, sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
expected = [
'check_files.py', 'file1.txt', 'file1_copy.txt', 'file2.txt',
'repeated_files.py',
]
actual = sorted(os.listdir(ROOT_DIR))
if expected != actual:
print >> sys.stderr, 'Expected list doesn\\'t match:'
print >> sys.stderr, '%s\\n%s' % (','.join(expected), ','.join(actual))
sys.exit(1)
# Check that file2.txt is in reality file3.txt.
with open(os.path.join(ROOT_DIR, 'file2.txt'), 'rb') as f:
if f.read() != 'File3\\n':
print >> sys.stderr, 'file2.txt should be file3.txt in reality'
sys.exit(2)
print('Success')""",
'file1.txt': 'File1\n',
'file2.txt': 'File2.txt\n',
'file3.txt': 'File3\n',
'repeated_files.py': """if True:
import os, sys
expected = ['file1.txt', 'file1_copy.txt', 'repeated_files.py']
actual = sorted(os.listdir(os.path.dirname(os.path.abspath(__file__))))
if expected != actual:
print >> sys.stderr, 'Expected list doesn\\'t match:'
print >> sys.stderr, '%s\\n%s' % (','.join(expected), ','.join(actual))
sys.exit(1)
print('Success')""",
}
def file_meta(filename):
return {
'h': isolateserver_mock.hash_content(CONTENTS[filename]),
's': len(CONTENTS[filename]),
}
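# The .isolated blobs defined below use the compact wire-format keys:
# 'h' is the content hash, 's' the size in bytes, 'l' a symlink target, and
# 'includes' lists hashes of other .isolated files whose entries get merged in.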
CONTENTS['download.isolated'] = json.dumps(
{
'command': ['python', 'repeated_files.py'],
'files': {
'file1.txt': file_meta('file1.txt'),
'file1_symlink.txt': {'l': 'files1.txt'},
'new_folder/file1.txt': file_meta('file1.txt'),
'repeated_files.py': file_meta('repeated_files.py'),
},
})
CONTENTS['file_with_size.isolated'] = json.dumps(
{
'command': [ 'python', '-V' ],
'files': {'file1.txt': file_meta('file1.txt')},
'read_only': 1,
})
CONTENTS['manifest1.isolated'] = json.dumps(
{'files': {'file1.txt': file_meta('file1.txt')}})
CONTENTS['manifest2.isolated'] = json.dumps(
{
'files': {'file2.txt': file_meta('file2.txt')},
'includes': [
isolateserver_mock.hash_content(CONTENTS['manifest1.isolated']),
],
})
CONTENTS['repeated_files.isolated'] = json.dumps(
{
'command': ['python', 'repeated_files.py'],
'files': {
'file1.txt': file_meta('file1.txt'),
'file1_copy.txt': file_meta('file1.txt'),
'repeated_files.py': file_meta('repeated_files.py'),
},
})
CONTENTS['check_files.isolated'] = json.dumps(
{
'command': ['python', 'check_files.py'],
'files': {
'check_files.py': file_meta('check_files.py'),
# Mapping another file.
'file2.txt': file_meta('file3.txt'),
},
'includes': [
isolateserver_mock.hash_content(CONTENTS[i])
for i in ('manifest2.isolated', 'repeated_files.isolated')
]
})
def list_files_tree(directory):
"""Returns the list of all the files in a tree."""
actual = []
for root, _dirs, files in os.walk(directory):
actual.extend(os.path.join(root, f)[len(directory)+1:] for f in files)
return sorted(actual)
def read_content(filepath):
with open(filepath, 'rb') as f:
return f.read()
def write_content(filepath, content):
with open(filepath, 'wb') as f:
f.write(content)
def tree_modes(root):
"""Returns the dict of files in a directory with their filemode.
Includes |root| as '.'.
"""
out = {}
offset = len(root.rstrip('/\\')) + 1
out['.'] = oct(os.stat(root).st_mode)
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
p = os.path.join(dirpath, filename)
out[p[offset:]] = oct(os.stat(p).st_mode)
for dirname in dirnames:
p = os.path.join(dirpath, dirname)
out[p[offset:]] = oct(os.stat(p).st_mode)
return out
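# tree_modes() returns a flat mapping of relative path -> oct(st_mode) string,
# e.g. (hypothetical values): {'.': '040755', 'state.json': '0100644'}.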
class RunIsolatedTest(unittest.TestCase):
def setUp(self):
super(RunIsolatedTest, self).setUp()
self.tempdir = run_isolated.make_temp_dir(
'run_isolated_smoke_test', ROOT_DIR)
logging.debug(self.tempdir)
# run_isolated.zip executable package.
self.run_isolated_zip = os.path.join(self.tempdir, 'run_isolated.zip')
run_isolated.get_as_zip_package().zip_into_file(
self.run_isolated_zip, compress=False)
# The run_isolated local cache.
self.cache = os.path.join(self.tempdir, 'cache')
self.server = isolateserver_mock.MockIsolateServer()
def tearDown(self):
try:
self.server.close_start()
file_path.rmtree(self.tempdir)
self.server.close_end()
finally:
super(RunIsolatedTest, self).tearDown()
def _run(self, args):
cmd = [sys.executable, self.run_isolated_zip]
cmd.extend(args)
pipe = subprocess.PIPE
logging.debug(' '.join(cmd))
proc = subprocess.Popen(
cmd,
stdout=pipe,
stderr=pipe,
universal_newlines=True,
cwd=self.tempdir)
out, err = proc.communicate()
return out, err, proc.returncode
def _store_isolated(self, data):
"""Stores an isolated file and returns its hash."""
return self.server.add_content('default', json.dumps(data, sort_keys=True))
def _store(self, filename):
"""Stores a test data file in the table and returns its hash."""
return self.server.add_content('default', CONTENTS[filename])
def _cmd_args(self, hash_value):
"""Generates the standard arguments used with |hash_value| as the hash.
Returns a list of the required arguments.
"""
return [
'--isolated', hash_value,
'--cache', self.cache,
'--isolate-server', self.server.url,
'--namespace', 'default',
]
def assertTreeModes(self, root, expected):
"""Compares the file modes of everything in |root| with |expected|.
Arguments:
root: directory to list its tree.
expected: dict(relpath: (linux_mode, mac_mode, win_mode)) where each mode
is the expected file mode on this OS. For practical purposes,
linux is "anything but OSX or Windows". The modes should be
ints.
"""
actual = tree_modes(root)
if sys.platform == 'win32':
index = 2
elif sys.platform == 'darwin':
index = 1
else:
index = 0
expected_mangled = dict((k, oct(v[index])) for k, v in expected.iteritems())
self.assertEqual(expected_mangled, actual)
def test_normal(self):
# Loads the .isolated from the store as a hash.
# Load an isolated file with the same content (same SHA-1), listed under two
# different names and ensure both are created.
isolated_hash = self._store('repeated_files.isolated')
expected = [
'state.json',
isolated_hash,
self._store('file1.txt'),
self._store('repeated_files.py'),
]
out, err, returncode = self._run(self._cmd_args(isolated_hash))
self.assertEqual('', err)
self.assertEqual('Success\n', out, out)
self.assertEqual(0, returncode)
actual = list_files_tree(self.cache)
self.assertEqual(sorted(set(expected)), actual)
def test_fail_empty_isolated(self):
isolated_hash = self._store_isolated({})
expected = ['state.json', isolated_hash]
out, err, returncode = self._run(self._cmd_args(isolated_hash))
self.assertEqual('', out)
self.assertIn('No command to run\n', err)
self.assertEqual(1, returncode)
actual = list_files_tree(self.cache)
self.assertEqual(sorted(expected), actual)
def test_includes(self):
# Loads an .isolated that includes another one.
# References manifest2.isolated and repeated_files.isolated. Maps file3.txt
# as file2.txt.
isolated_hash = self._store('check_files.isolated')
expected = [
'state.json',
isolated_hash,
self._store('check_files.py'),
self._store('file1.txt'),
self._store('file3.txt'),
# Maps file1.txt.
self._store('manifest1.isolated'),
      # References manifest1.isolated. Maps file2.txt but it is overridden.
self._store('manifest2.isolated'),
self._store('repeated_files.py'),
self._store('repeated_files.isolated'),
]
out, err, returncode = self._run(self._cmd_args(isolated_hash))
self.assertEqual('', err)
self.assertEqual('Success\n', out)
self.assertEqual(0, returncode)
actual = list_files_tree(self.cache)
self.assertEqual(sorted(expected), actual)
def _test_corruption_common(self, new_content):
isolated_hash = self._store('file_with_size.isolated')
file1_hash = self._store('file1.txt')
# Run the test once to generate the cache.
_out, _err, returncode = self._run(self._cmd_args(isolated_hash))
self.assertEqual(0, returncode)
expected = {
'.': (040707, 040707, 040777),
'state.json': (0100606, 0100606, 0100666),
# The reason for 0100666 on Windows is that the file node had to be
# modified to delete the hardlinked node. The read only bit is reset on
# load.
file1_hash: (0100400, 0100400, 0100666),
isolated_hash: (0100400, 0100400, 0100444),
}
self.assertTreeModes(self.cache, expected)
# Modify one of the files in the cache to be invalid.
cached_file_path = os.path.join(self.cache, file1_hash)
previous_mode = os.stat(cached_file_path).st_mode
os.chmod(cached_file_path, 0600)
write_content(cached_file_path, new_content)
os.chmod(cached_file_path, previous_mode)
logging.info('Modified %s', cached_file_path)
# Ensure that the cache has an invalid file.
self.assertNotEqual(CONTENTS['file1.txt'], read_content(cached_file_path))
# Rerun the test and make sure the cache contains the right file afterwards.
_out, _err, returncode = self._run(self._cmd_args(isolated_hash))
self.assertEqual(0, returncode)
expected = {
'.': (040700, 040700, 040777),
'state.json': (0100600, 0100600, 0100666),
file1_hash: (0100400, 0100400, 0100666),
isolated_hash: (0100400, 0100400, 0100444),
}
self.assertTreeModes(self.cache, expected)
return cached_file_path
def test_corrupted_cache_entry_different_size(self):
# Test that an entry with an invalid file size properly gets removed and
    # fetched again. This test case also checks for file modes.
cached_file_path = self._test_corruption_common(
CONTENTS['file1.txt'] + ' now invalid size')
self.assertEqual(CONTENTS['file1.txt'], read_content(cached_file_path))
def test_corrupted_cache_entry_same_size(self):
# Test that an entry with an invalid file content but same size is NOT
    # detected properly.
cached_file_path = self._test_corruption_common(
CONTENTS['file1.txt'][:-1] + ' ')
# TODO(maruel): This corruption is NOT detected.
# This needs to be fixed.
self.assertNotEqual(CONTENTS['file1.txt'], read_content(cached_file_path))
if __name__ == '__main__':
test_utils.main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import eligibilityresponse
from .fhirdate import FHIRDate
class EligibilityResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("EligibilityResponse", js["resourceType"])
return eligibilityresponse.EligibilityResponse(js)
def testEligibilityResponse1(self):
inst = self.instantiate_from("eligibilityresponse-example-benefits-2.json")
        self.assertIsNotNone(inst, "Must have instantiated an EligibilityResponse instance")
self.implEligibilityResponse1(inst)
js = inst.as_json()
self.assertEqual("EligibilityResponse", js["resourceType"])
inst2 = eligibilityresponse.EligibilityResponse(js)
self.implEligibilityResponse1(inst2)
def implEligibilityResponse1(self, inst):
self.assertEqual(inst.contained[0].id, "patient-1")
self.assertEqual(inst.contained[1].id, "coverage-1")
self.assertEqual(inst.created.date, FHIRDate("2014-09-16").date)
self.assertEqual(inst.created.as_json(), "2014-09-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.form.coding[0].code, "ELRSP/2017/01")
self.assertEqual(inst.form.coding[0].system, "http://national.org/form")
self.assertEqual(inst.id, "E2502")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/eligibilityresponse")
self.assertEqual(inst.identifier[0].value, "8812342")
self.assertTrue(inst.inforce)
self.assertEqual(inst.insurance[0].benefitBalance[0].category.coding[0].code, "medical")
self.assertEqual(inst.insurance[0].benefitBalance[0].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.code, "USD")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.value, 500000)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].usedMoney.code, "USD")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].usedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].usedMoney.value, 3748.0)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.code, "USD")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.value, 100)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].type.coding[0].code, "copay-maximum")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[2].allowedUnsignedInt, 20)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[2].type.coding[0].code, "copay-percent")
self.assertEqual(inst.insurance[0].benefitBalance[0].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[0].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].code, "30")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].display, "Health Benefit Plan Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[0].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[0].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[0].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[0].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[1].category.coding[0].code, "medical")
self.assertEqual(inst.insurance[0].benefitBalance[1].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.code, "USD")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.value, 15000)
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[1].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[1].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].code, "69")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].display, "Maternity")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[1].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[1].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[1].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[1].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[2].category.coding[0].code, "oral")
self.assertEqual(inst.insurance[0].benefitBalance[2].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.code, "USD")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.value, 2000)
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[2].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[2].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].code, "F3")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].display, "Dental Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[2].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[2].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[2].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[2].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[3].category.coding[0].code, "vision")
self.assertEqual(inst.insurance[0].benefitBalance[3].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[3].description, "Vision products and services such as exams, glasses and contatc lenses.")
self.assertTrue(inst.insurance[0].benefitBalance[3].excluded)
self.assertEqual(inst.insurance[0].benefitBalance[3].name, "Vision")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].code, "F6")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].display, "Vision Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.outcome.coding[0].code, "complete")
self.assertEqual(inst.outcome.coding[0].system, "http://hl7.org/fhir/remittance-outcome")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the EligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testEligibilityResponse2(self):
inst = self.instantiate_from("eligibilityresponse-example-benefits.json")
        self.assertIsNotNone(inst, "Must have instantiated an EligibilityResponse instance")
self.implEligibilityResponse2(inst)
js = inst.as_json()
self.assertEqual("EligibilityResponse", js["resourceType"])
inst2 = eligibilityresponse.EligibilityResponse(js)
self.implEligibilityResponse2(inst2)
def implEligibilityResponse2(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.id, "E2501")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/eligibilityresponse")
self.assertEqual(inst.identifier[0].value, "881234")
self.assertTrue(inst.inforce)
self.assertEqual(inst.insurance[0].benefitBalance[0].category.coding[0].code, "medical")
self.assertEqual(inst.insurance[0].benefitBalance[0].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].allowedMoney.value, 500000)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].allowedMoney.value, 100)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[1].type.coding[0].code, "copay-maximum")
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[2].allowedUnsignedInt, 20)
self.assertEqual(inst.insurance[0].benefitBalance[0].financial[2].type.coding[0].code, "copay-percent")
self.assertEqual(inst.insurance[0].benefitBalance[0].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[0].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].code, "30")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].display, "Health Benefit Plan Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[0].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[0].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[0].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[0].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[0].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[1].category.coding[0].code, "medical")
self.assertEqual(inst.insurance[0].benefitBalance[1].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].allowedMoney.value, 15000)
self.assertEqual(inst.insurance[0].benefitBalance[1].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[1].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[1].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].code, "69")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].display, "Maternity")
self.assertEqual(inst.insurance[0].benefitBalance[1].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[1].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[1].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[1].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[1].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[2].category.coding[0].code, "oral")
self.assertEqual(inst.insurance[0].benefitBalance[2].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].allowedMoney.value, 2000)
self.assertEqual(inst.insurance[0].benefitBalance[2].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[2].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[2].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].code, "F3")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].display, "Dental Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[2].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[2].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[2].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[2].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[2].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[3].category.coding[0].code, "vision")
self.assertEqual(inst.insurance[0].benefitBalance[3].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[3].financial[0].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[3].financial[0].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[3].financial[0].allowedMoney.value, 400)
self.assertEqual(inst.insurance[0].benefitBalance[3].financial[0].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[3].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[3].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].code, "F6")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].display, "Vision Coverage")
self.assertEqual(inst.insurance[0].benefitBalance[3].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[3].term.coding[0].code, "annual")
self.assertEqual(inst.insurance[0].benefitBalance[3].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[3].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[3].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.insurance[0].benefitBalance[4].category.coding[0].code, "vision")
self.assertEqual(inst.insurance[0].benefitBalance[4].category.coding[0].system, "http://hl7.org/fhir/benefit-category")
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[0].allowedString, "shared")
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[0].type.coding[0].code, "room")
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[1].allowedMoney.code, "SAR")
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[1].allowedMoney.system, "urn:iso:std:iso:4217")
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[1].allowedMoney.value, 600)
self.assertEqual(inst.insurance[0].benefitBalance[4].financial[1].type.coding[0].code, "benefit")
self.assertEqual(inst.insurance[0].benefitBalance[4].network.coding[0].code, "in")
self.assertEqual(inst.insurance[0].benefitBalance[4].network.coding[0].system, "http://hl7.org/fhir/benefit-network")
self.assertEqual(inst.insurance[0].benefitBalance[4].subCategory.coding[0].code, "49")
self.assertEqual(inst.insurance[0].benefitBalance[4].subCategory.coding[0].display, "Hospital Room and Board")
self.assertEqual(inst.insurance[0].benefitBalance[4].subCategory.coding[0].system, "http://hl7.org/fhir/benefit-subcategory")
self.assertEqual(inst.insurance[0].benefitBalance[4].term.coding[0].code, "day")
self.assertEqual(inst.insurance[0].benefitBalance[4].term.coding[0].system, "http://hl7.org/fhir/benefit-term")
self.assertEqual(inst.insurance[0].benefitBalance[4].unit.coding[0].code, "individual")
self.assertEqual(inst.insurance[0].benefitBalance[4].unit.coding[0].system, "http://hl7.org/fhir/benefit-unit")
self.assertEqual(inst.outcome.coding[0].code, "complete")
self.assertEqual(inst.outcome.coding[0].system, "http://hl7.org/fhir/remittance-outcome")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the EligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testEligibilityResponse3(self):
inst = self.instantiate_from("eligibilityresponse-example-error.json")
        self.assertIsNotNone(inst, "Must have instantiated an EligibilityResponse instance")
self.implEligibilityResponse3(inst)
js = inst.as_json()
self.assertEqual("EligibilityResponse", js["resourceType"])
inst2 = eligibilityresponse.EligibilityResponse(js)
self.implEligibilityResponse3(inst2)
def implEligibilityResponse3(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-09-16").date)
self.assertEqual(inst.created.as_json(), "2014-09-16")
self.assertEqual(inst.disposition, "Eligibiliy request could not be processed, please address errors before submitting.")
self.assertEqual(inst.error[0].code.coding[0].code, "a001")
self.assertEqual(inst.error[0].code.coding[0].system, "http://hl7.org/fhir/adjudication-error")
self.assertEqual(inst.form.coding[0].code, "ELRSP/2017/01")
self.assertEqual(inst.form.coding[0].system, "http://national.org/form")
self.assertEqual(inst.id, "E2503")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/eligibilityresponse")
self.assertEqual(inst.identifier[0].value, "8812343")
self.assertEqual(inst.outcome.coding[0].code, "error")
self.assertEqual(inst.outcome.coding[0].system, "http://hl7.org/fhir/remittance-outcome")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the EligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
def testEligibilityResponse4(self):
inst = self.instantiate_from("eligibilityresponse-example.json")
        self.assertIsNotNone(inst, "Must have instantiated an EligibilityResponse instance")
self.implEligibilityResponse4(inst)
js = inst.as_json()
self.assertEqual("EligibilityResponse", js["resourceType"])
inst2 = eligibilityresponse.EligibilityResponse(js)
self.implEligibilityResponse4(inst2)
def implEligibilityResponse4(self, inst):
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.disposition, "Policy is currently in-force.")
self.assertEqual(inst.id, "E2500")
self.assertEqual(inst.identifier[0].system, "http://www.BenefitsInc.com/fhir/eligibilityresponse")
self.assertEqual(inst.identifier[0].value, "881234")
self.assertTrue(inst.inforce)
self.assertEqual(inst.outcome.coding[0].code, "complete")
self.assertEqual(inst.outcome.coding[0].system, "http://hl7.org/fhir/remittance-outcome")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the EligibilityResponse.</div>")
self.assertEqual(inst.text.status, "generated")
| |
# pset7.py
# sys.path.append('..')
import midi
from common.audio import *
from common.clock import *
from common.core import *
from common.gfxutil import *
from common.mixer import *
from common.synth import *
from common.wavegen import *
from common.wavesrc import *
BEAT_LEN = 160 * 6 * 2
class MidiController(object):
def __init__(self, song_path, level_update, start_spot=0):
super(MidiController, self).__init__()
self.audio = Audio(2)
self.mixer = Mixer()
self.synth = Synth('music/The_Nes_Soundfont.sf2')
self.mid = midi.read_midifile(song_path)
messages = list(self.mid.iterevents())
self.mid_messages = [m for m in messages if m.channel == 0]
self.platform_messages = [m for m in messages if m.channel == 1]
# create TempoMap, AudioScheduler
self.tempo_map = SimpleTempoMap(200)
self.sched = AudioScheduler(self.tempo_map)
# connect scheduler into audio system
self.mixer.add(self.sched)
self.sched.set_generator(self.synth)
self.audio.set_generator(self.mixer)
self.mixer.set_gain(1)
# save the level
self.level_update = level_update
# current index in self.mid_messages
self.current_idx = 0
self.paused = False
# amount of time that has passed in-song before next scheduled note
self.cumulative_time = 0.
# whether we're going to reverse at the end of the current measure
self.reverse_pending = False
# whether we're currently moving backwards
self.reversed = False
# thing to call when reversing
self.reverse_callback = None
# this keeps track of the number of reverses we've done so that when we flip we don't keep playing notes from the old path.
self.num_reverses = 0
# this keeps track of all the notes currently playing so we can stop them if we get reset.
self.playing_notes = set([])
# print "SR= ",Audio.sample_rate
# self.beat = WaveFile("music/12911_sweet_trip_mm_hat_cl.wav")
self.current_offset = 0
# current notes on and their velocities. Helps with reversing
self.current_values = dict()
self.started = False
self.lose_tick = None
        self.start_spot = start_spot*2*BEAT_LEN
        # handles for pending scheduler callbacks; initialized here so that
        # reset() and _reverse() can run safely before start() is called
        self.schedule_cmd = None
        self.schedule_action = None
def toggle(self):
"""pauses or plays the music."""
self.paused ^= True
if self.paused:
pass
# TODO: cancel all currently playing notes but save when they were
# TODO: cancel all scheduled notes but store when they were
else:
pass
# TODO: resume playing the paused notes
# TODO: reschedule all scheduled notes
def start(self, start_callback=None):
next_beat = quantize_tick_up(self.sched.get_tick(), BEAT_LEN)
self.current_offset = next_beat - self.start_spot
print self.start_spot, self.current_offset
self.schedule_cmd = self.sched.post_at_tick(next_beat, self._midi_schedule_next_note, self.num_reverses)
def callback(*args):
if start_callback is not None:
start_callback()
def reset(self, lost=False, tick=None):
if self.schedule_cmd:
self.sched.remove(self.schedule_cmd)
if self.schedule_action:
self.sched.remove(self.schedule_action)
for channel, note in self.playing_notes:
self.synth.noteoff(channel, note)
self.playing_notes.clear()
self.reversed = False
# so we don't keep playing scheduled notes
self.num_reverses += 10
if not lost:
self.started = False
            self.lose_tick = -tick if tick is not None else None
self.current_idx = 0
if tick is None:
return
self.start_spot = -tick
while self.mid_messages[self.current_idx].tick <= tick:
self.current_idx += 1
else:
self.lose_tick = self.convert_tick(self.sched.get_tick())
self.current_idx = 0
# reverse music
def reverse(self, callback=None):
"""Reverses the music at the next barline. callback will be called when that happens."""
self.reverse_pending ^= True
if not self.reverse_pending:
# cancel the pending reverse
self.sched.remove(self.reverse_cmd)
# we just cancelled a pending reverse
return
next_beat = quantize_tick_up(self.sched.get_tick(), BEAT_LEN)
self.reverse_cmd = self.sched.post_at_tick(next_beat, self._reverse)
self.reverse_callback = callback
def _reverse(self, tick, arg):
self.reverse_pending = False
self.reversed ^= True
# cancel all calls to _midi_schedule_next_note
if self.schedule_cmd:
self.sched.remove(self.schedule_cmd)
if self.schedule_action:
self.sched.remove(self.schedule_action)
# start with the same index as we last executed.
if self.reversed:
self.current_idx -= 1
else:
self.current_idx += 1
# call the first action in the opposite direction
# print "REVERSE", tick, self.current_offset
self.num_reverses += 1
        # Derivation of the new offset: while playing forward the scheduler tick
        # is old_offset + song_tick; after reversing it must equal
        # new_offset - song_tick.  At the moment of the flip song_tick is
        # tick - old_offset, so new_offset = 2 * tick - old_offset, i.e.
        # current_offset += 2 * (tick - current_offset).
self.current_offset += 2 * (tick - self.current_offset)
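        # Worked example (hypothetical numbers): with current_offset == 100 and
        # a reverse at tick == 340, the song position is 240, so the offset
        # becomes 100 + 2 * (340 - 100) == 580 and 580 - 240 == 340, as required.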
self._midi_schedule_next_note(tick, self.num_reverses)
if self.reverse_callback is not None:
self.reverse_callback()
def convert_tick(self, song_tick):
if self.reversed:
return self.current_offset - song_tick
else:
return self.current_offset + song_tick
def convert_tick_for_level(self, song_tick):
if self.lose_tick is not None:
return self.lose_tick
elif not self.started:
return 0
elif self.reversed:
return self.current_offset - song_tick
else:
return -(self.current_offset - song_tick)
def _midi_schedule_next_note(self, tick, num_reverses):
# to prevent multiple streams of notes from going at once
if num_reverses != self.num_reverses:
return
self.started = True
self.lose_tick = None
        # Stop cleanly at either end of the message list; indexing past the end
        # (or wrapping around with a negative index) was never the intent here.
        if self.reversed:
            if self.current_idx < 0:
                print "end of song"
                return
            to_schedule = self.mid_messages[self.current_idx]
            self.current_idx -= 1
        else:
            if self.current_idx >= len(self.mid_messages):
                print "end of song"
                return
            to_schedule = self.mid_messages[self.current_idx]
            self.current_idx += 1
next_tick = self.convert_tick(to_schedule.tick)
# print "NOTE",next_tick, to_schedule, self.current_idx, self.current_offset
self.schedule_action = self.sched.post_at_tick(next_tick, self._midi_action, to_schedule)
# schedule another call to this function at the same time.
self.schedule_cmd = self.sched.post_at_tick(next_tick, self._midi_schedule_next_note, num_reverses)
def _midi_action(self, tick=0.0, message=None):
self.schedule_action = None
if (message.type == 'NoteOnEvent' and not self.reversed) or (self.reversed and message.type == 'NoteOffEvent'):
self.synth.noteon(message.channel, message.pitch, message.velocity)
if not self.reversed:
self.current_values[(message.channel, message.pitch)] = message.velocity
# self.playing_notes.add((message.channel, message.pitch))
print '%snote on: %d' % ((message.pitch - 20) * ' ', message.pitch)
elif message.type == 'ControlChangeEvent':
self.synth.cc(message.channel, message.control, message.value)
elif (message.type == 'NoteOffEvent' and not self.reversed) or (
self.reversed and message.type == 'NoteOnEvent'):
self.synth.noteoff(message.channel, message.pitch)
if not self.reversed:
message.velocity = self.current_values[(message.channel, message.pitch)]
try:
self.playing_notes.remove((message.channel, message.pitch))
except KeyError:
pass
print "%snote off: %d" % ((message.pitch - 20) * ' ', message.pitch)
else:
print message
# TODO: figure out what to do with program changes
# needed to update audio
def on_update(self):
self.audio.on_update()
# print self.sched.get_tick(), self.current_offset, self.convert_tick_for_level(self.sched.get_tick())
self.level_update(loc=-self.convert_tick_for_level(self.sched.get_tick()))
if __name__ == '__main__':
run(MainWidget)
| |
import pytest
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from nbgrader.tests.formgrader.base import BaseTestFormgrade
@pytest.mark.js
@pytest.mark.usefixtures("formgrader")
class TestFormgraderJS(BaseTestFormgrade):
def _send_keys_to_body(self, *keys):
body = self.browser.find_element_by_tag_name("body")
body.send_keys(*keys)
def _click_element(self, name):
self.browser.find_element_by_css_selector(name).click()
def _get_next_arrow(self):
return self.browser.find_element_by_css_selector(".next a")
def _get_comment_box(self, index):
return self.browser.find_elements_by_css_selector(".comment")[index]
def _get_score_box(self, index):
return self.browser.find_elements_by_css_selector(".score")[index]
def _save_comment(self, index):
self._send_keys_to_body(Keys.ESCAPE)
glyph = self.browser.find_elements_by_css_selector(".comment-saved")[index]
WebDriverWait(self.browser, 30).until(lambda browser: glyph.is_displayed())
WebDriverWait(self.browser, 30).until(lambda browser: not glyph.is_displayed())
def _save_score(self, index):
self._send_keys_to_body(Keys.ESCAPE)
glyph = self.browser.find_elements_by_css_selector(".score-saved")[index]
WebDriverWait(self.browser, 30).until(lambda browser: glyph.is_displayed())
WebDriverWait(self.browser, 30).until(lambda browser: not glyph.is_displayed())
def _get_needs_manual_grade(self, name):
return self.browser.execute_script(
'return formgrader.grades.findWhere({name: "%s"}).get("needs_manual_grade");' % name)
def _flag(self):
self._send_keys_to_body(Keys.SHIFT, Keys.CONTROL, "f")
message = self.browser.find_element_by_id("statusmessage")
WebDriverWait(self.browser, 10).until(lambda browser: message.is_displayed())
WebDriverWait(self.browser, 10).until(lambda browser: not message.is_displayed())
return self.browser.execute_script("return $('#statusmessage').text();")
def _get_active_element(self):
return self.browser.execute_script("return document.activeElement;")
def _get_index(self):
return self.browser.execute_script("return formgrader.getIndex(document.activeElement);")
def _load_formgrade(self):
problem = self.gradebook.find_notebook("Problem 1", "Problem Set 1")
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
self._load_gradebook_page("assignments/Problem Set 1/Problem 1")
self._click_link("Submission #1")
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
def test_start(self):
# This is just a fake test, since starting up the browser and formgrader
# can take a little while. So if anything goes wrong there, this test
# will fail, rather than having it fail on some other test.
pass
def test_next_prev_assignments(self):
problem = self.gradebook.find_notebook("Problem 1", "Problem Set 1")
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
# test navigating both with the arrow keys and with clicking the
# next/previous links
next_functions = [
(self._send_keys_to_body, Keys.CONTROL, "."),
(self._click_element, ".next a")
]
prev_functions = [
(self._send_keys_to_body, Keys.CONTROL, ","),
(self._click_element, ".previous a")
]
for n, p in zip(next_functions, prev_functions):
# first element is the function, the other elements are the arguments
# to that function
next_function = lambda: n[0](*n[1:])
prev_function = lambda: p[0](*p[1:])
# Load the first submission
self.browser.get(self.formgrade_url("submissions/{}".format(submissions[0].id)))
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Move to the next submission
next_function()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Move to the next submission (should return to notebook list)
next_function()
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Move to the previous submission
prev_function()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Move to the previous submission (should return to the notebook list)
prev_function()
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
def test_next_prev_failed_assignments(self):
problem = self.gradebook.find_notebook("Problem 1", "Problem Set 1")
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
# verify that we have the right number of submissions, and that one
# failed tests and the other didn't
assert len(submissions) == 2
if submissions[0].failed_tests:
assert not submissions[1].failed_tests
else:
assert submissions[1].failed_tests
# Load the first submission
self.browser.get(self.formgrade_url("submissions/{}".format(submissions[0].id)))
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
if submissions[0].failed_tests:
# Go to the next failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ".")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Go to the previous failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ",")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Go to the other notebook
self._send_keys_to_body(Keys.CONTROL, ".")
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Go to the next failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ".")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Go to the previous failed submission
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ",")
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
else:
# Go to the next failed submission
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ".")
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Go to the previous failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ",")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Go to the other notebook
self._send_keys_to_body(Keys.CONTROL, ".")
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Go to the next failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ".")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
# Go back
self.browser.back()
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[1].id))
# Go to the previous failed submission (should return to the notebook list)
self._send_keys_to_body(Keys.CONTROL, Keys.SHIFT, ",")
self._wait_for_gradebook_page("assignments/Problem Set 1/Problem 1")
def test_tabbing(self):
self._load_formgrade()
# check that the next arrow is selected
assert self._get_active_element() == self._get_next_arrow()
assert self._get_index() == 0
# check that the first comment box is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_comment_box(0)
assert self._get_index() == 1
# tab to the next and check that the first points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(0)
assert self._get_index() == 2
# tab to the next and check that the second points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(1)
assert self._get_index() == 3
# tab to the next and check that the second comment is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_comment_box(1)
assert self._get_index() == 4
# tab to the next and check that the third points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(2)
assert self._get_index() == 5
# tab to the next and check that the fourth points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(3)
assert self._get_index() == 6
# tab to the next and check that the fifth points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(4)
assert self._get_index() == 7
# tab to the next and check that the third comment is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_comment_box(2)
assert self._get_index() == 8
# tab to the next and check that the sixth points is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_score_box(5)
assert self._get_index() == 9
# tab to the next and check that the fourth comment is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_comment_box(3)
assert self._get_index() == 10
# tab to the next and check that the next arrow is selected
self._send_keys_to_body(Keys.TAB)
assert self._get_active_element() == self._get_next_arrow()
assert self._get_index() == 0
@pytest.mark.parametrize("index", range(4))
def test_save_comment(self, index):
self._load_formgrade()
elem = self._get_comment_box(index)
if elem.get_attribute("value") != "":
elem.click()
elem.clear()
self._save_comment(index)
self._load_formgrade()
elem = self._get_comment_box(index)
assert elem.get_attribute("value") == ""
elem.click()
elem.send_keys("this comment has index {}".format(index))
elem.send_keys(Keys.ENTER)
elem.send_keys("blah blah blah")
self._save_comment(index)
self._load_formgrade()
elem = self._get_comment_box(index)
assert elem.get_attribute("value") == "this comment has index {}\nblah blah blah".format(index)
@pytest.mark.parametrize("index", range(6))
def test_save_score(self, index):
self._load_formgrade()
elem = self._get_score_box(index)
if elem.get_attribute("value") != "":
elem.click()
elem.clear()
self._save_score(index)
self._load_formgrade()
elem = self._get_score_box(index)
assert elem.get_attribute("value") == ""
# check whether it needs manual grading
if elem.get_attribute("placeholder") != "":
assert not self._get_needs_manual_grade(elem.get_attribute("id"))
assert "needs_manual_grade" not in elem.get_attribute("class").split(" ")
else:
assert self._get_needs_manual_grade(elem.get_attribute("id"))
assert "needs_manual_grade" in elem.get_attribute("class").split(" ")
# set the grade
elem.click()
elem.send_keys("{}".format((index + 1) / 10.0))
self._save_score(index)
self._load_formgrade()
elem = self._get_score_box(index)
assert elem.get_attribute("value") == "{}".format((index + 1) / 10.0)
# check whether it needs manual grading
assert not self._get_needs_manual_grade(elem.get_attribute("id"))
assert "needs_manual_grade" not in elem.get_attribute("class").split(" ")
# clear the grade
elem.click()
elem.clear()
self._save_score(index)
self._load_formgrade()
elem = self._get_score_box(index)
assert elem.get_attribute("value") == ""
# check whether it needs manual grading
if elem.get_attribute("placeholder") != "":
assert not self._get_needs_manual_grade(elem.get_attribute("id"))
assert "needs_manual_grade" not in elem.get_attribute("class").split(" ")
else:
assert self._get_needs_manual_grade(elem.get_attribute("id"))
assert "needs_manual_grade" in elem.get_attribute("class").split(" ")
def test_same_part_navigation(self):
problem = self.gradebook.find_notebook("Problem 1", "Problem Set 1")
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
# Load the first submission
self.browser.get(self.formgrade_url("submissions/{}".format(submissions[0].id)))
self._wait_for_formgrader("submissions/{}/?index=0".format(submissions[0].id))
# Click the second comment box and navigate to the next submission
self._get_comment_box(1).click()
self._send_keys_to_body(Keys.CONTROL, ".")
self._wait_for_formgrader("submissions/{}/?index=4".format(submissions[1].id))
assert self._get_active_element() == self._get_comment_box(1)
# Click the third score box and navigate to the previous submission
self._get_score_box(2).click()
self._send_keys_to_body(Keys.CONTROL, ",")
self._wait_for_formgrader("submissions/{}/?index=5".format(submissions[0].id))
assert self._get_active_element() == self._get_score_box(2)
# Click the third comment box and navigate to the next submission
self._get_comment_box(2).click()
self._send_keys_to_body(Keys.CONTROL, ".")
self._wait_for_formgrader("submissions/{}/?index=7".format(submissions[1].id))
assert self._get_active_element() == self._get_score_box(4)
# Navigate to the previous submission
self._send_keys_to_body(Keys.CONTROL, ",")
self._wait_for_formgrader("submissions/{}/?index=7".format(submissions[0].id))
assert self._get_active_element() == self._get_score_box(4)
def test_keyboard_help(self):
self._load_formgrade()
# show the help dialog
self._click_element(".help")
self._wait_for_element("help-dialog")
WebDriverWait(self.browser, 30).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#help-dialog button.btn-primary")))
# close it
self._click_element("#help-dialog button.btn-primary")
modal_not_present = lambda browser: browser.execute_script("""return $("#help-dialog").length === 0;""")
WebDriverWait(self.browser, 30).until(modal_not_present)
def test_flag(self):
self._load_formgrade()
# mark as flagged
assert self._flag() == "Submission flagged"
# mark as unflagged
assert self._flag() == "Submission unflagged"
# mark as flagged
assert self._flag() == "Submission flagged"
# mark as unflagged
assert self._flag() == "Submission unflagged"
| |
#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
"""
Python bindings for MPI
"""
import sys
import os
import re
try:
import setuptools
except ImportError:
setuptools = None
pyver = sys.version_info[:2]
if pyver < (2, 6) or (3, 0) <= pyver < (3, 2):
raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required")
if (hasattr(sys, 'pypy_version_info') and
sys.pypy_version_info[:2] < (2, 0)):
raise RuntimeError("PyPy version >= 2.0 required")
topdir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(topdir, 'conf'))
# --------------------------------------------------------------------
# Metadata
# --------------------------------------------------------------------
def name():
return 'mpi4py'
def version():
with open(os.path.join(topdir, 'src', '__init__.py')) as f:
m = re.search(r"__version__\s*=\s*'(.*)'", f.read())
return m.groups()[0]
def description():
with open(os.path.join(topdir, 'DESCRIPTION.rst')) as f:
return f.read()
name = name()
version = version()
url = 'https://bitbucket.org/mpi4py/%(name)s/' % vars()
download = url + 'downloads/%(name)s-%(version)s.tar.gz' % vars()
classifiers = """
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: POSIX :: Linux
Operating System :: POSIX :: SunOS/Solaris
Operating System :: Unix
Programming Language :: C
Programming Language :: Cython
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Scientific/Engineering
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Distributed Computing
"""
keywords = """
scientific computing
parallel computing
message passing interface
MPI
"""
platforms = """
Mac OS X
Linux
Solaris
Unix
Windows
"""
metadata = {
'name' : name,
'version' : version,
'description' : __doc__.strip(),
'long_description' : description(),
'url' : url,
'download_url' : download,
'classifiers' : [c for c in classifiers.split('\n') if c],
'keywords' : [k for k in keywords.split('\n') if k],
'platforms' : [p for p in platforms.split('\n') if p],
'license' : 'BSD',
'author' : 'Lisandro Dalcin',
'author_email' : 'dalcinl@gmail.com',
'maintainer' : 'Lisandro Dalcin',
'maintainer_email' : 'dalcinl@gmail.com',
}
metadata['provides'] = ['mpi4py', 'mpi4py.MPI']
# --------------------------------------------------------------------
# Extension modules
# --------------------------------------------------------------------
def run_command(exe, args):
from distutils.spawn import find_executable
from distutils.util import split_quoted
cmd = find_executable(exe)
if not cmd: return []
if not isinstance(args, str):
args = ' '.join(args)
try:
with os.popen(cmd + ' ' + args) as f:
return split_quoted(f.read())
except Exception:
return []
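# Hedged usage sketch (not part of the original script): run_command() probes a
# compiler wrapper for the flags it would pass to the underlying compiler. The
# wrapper name below is an assumption; any executable on PATH works the same way.
#
#     flags = run_command('mpicc', '-show')   # e.g. ['gcc', '-I/usr/include/mpi', '-lmpi']
#     libdirs = [f[2:] for f in flags if f.startswith('-L')]
#
# An empty list is returned when the executable is missing or the call fails.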
linux = sys.platform.startswith('linux')
solaris = sys.platform.startswith('sunos')
darwin = sys.platform.startswith('darwin')
if linux:
def whole_archive(compiler, name, library_dirs=[]):
return ['-Wl,-whole-archive',
'-l' + name,
'-Wl,-no-whole-archive',
]
elif darwin:
def darwin_linker_dirs(compiler):
from distutils.util import split_quoted
linker_cmd = compiler.linker_so + ['-show']
linker_cmd = run_command(linker_cmd[0], linker_cmd[1:])
library_dirs = compiler.library_dirs[:]
library_dirs += [flag[2:] for flag in linker_cmd
if flag.startswith('-L')]
library_dirs += ['/usr/lib']
library_dirs += ['/usr/local/lib']
return library_dirs
def whole_archive(compiler, name, library_dirs=[]):
library_dirs = library_dirs[:]
library_dirs += darwin_linker_dirs(compiler)
for libdir in library_dirs:
libpath = os.path.join(libdir, 'lib%s.a' % name)
if os.path.isfile(libpath):
return ['-force_load', libpath]
return ['-l%s' % name]
elif solaris:
def whole_archive(compiler, name, library_dirs=[]):
return ['-Wl,-zallextract',
'-l' + name,
'-Wl,-zdefaultextract',
]
else:
whole_archive = None
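# Illustrative summary (comments only, not original code): whole_archive()
# returns extra linker arguments that force every object of a static library
# into the link, which the PMPI wrapper libraries below rely on. For a library
# named 'mpe' the per-platform results are:
#
#     linux   -> ['-Wl,-whole-archive', '-lmpe', '-Wl,-no-whole-archive']
#     darwin  -> ['-force_load', '/path/to/libmpe.a']   (falls back to ['-lmpe'])
#     solaris -> ['-Wl,-zallextract', '-lmpe', '-Wl,-zdefaultextract']
#
# On other platforms whole_archive is None and callers fall back to plain -l linking.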
def configure_dl(ext, config_cmd):
from distutils import log
log.info("checking for dlopen() availability ...")
ok = config_cmd.check_header('dlfcn.h')
if ok : ext.define_macros += [('HAVE_DLFCN_H', 1)]
ok = config_cmd.check_library('dl')
if ok: ext.libraries += ['dl']
ok = config_cmd.check_function('dlopen',
libraries=['dl'],
decl=1, call=1)
if ok: ext.define_macros += [('HAVE_DLOPEN', 1)]
def configure_mpi(ext, config_cmd):
from textwrap import dedent
from distutils import log
from distutils.errors import DistutilsPlatformError
headers = ['stdlib.h', 'mpi.h']
#
log.info("checking for MPI compile and link ...")
ConfigTest = dedent("""\
int main(int argc, char **argv)
{
(void)MPI_Init(&argc, &argv);
(void)MPI_Finalize();
return 0;
}
""")
errmsg = "Cannot %s MPI programs. Check your configuration!!!"
ok = config_cmd.try_compile(ConfigTest, headers=headers)
if not ok: raise DistutilsPlatformError(errmsg % "compile")
ok = config_cmd.try_link(ConfigTest, headers=headers)
if not ok: raise DistutilsPlatformError(errmsg % "link")
#
log.info("checking for missing MPI functions/symbols ...")
tests = ["defined(%s)" % macro for macro in
("OPEN_MPI", "MPICH2", "DEINO_MPI", "MSMPI_VER",)]
tests += ["(defined(MPICH_NAME)&&(MPICH_NAME==3))"]
ConfigTest = dedent("""\
#if !(%s)
#error "Unknown MPI implementation"
#endif
""") % "||".join(tests)
ok = config_cmd.try_compile(ConfigTest, headers=headers)
if not ok:
from mpidistutils import ConfigureMPI
configure = ConfigureMPI(config_cmd)
results = configure.run()
configure.dump(results)
ext.define_macros += [('HAVE_CONFIG_H', 1)]
else:
for function, arglist in (
('MPI_Type_create_f90_integer', '0,(MPI_Datatype*)0'),
('MPI_Type_create_f90_real', '0,0,(MPI_Datatype*)0'),
('MPI_Type_create_f90_complex', '0,0,(MPI_Datatype*)0'),
('MPI_Status_c2f', '(MPI_Status*)0,(MPI_Fint*)0'),
('MPI_Status_f2c', '(MPI_Fint*)0,(MPI_Status*)0'),
):
ok = config_cmd.check_function_call(
function, arglist, headers=headers)
if not ok:
macro = 'PyMPI_MISSING_' + function
ext.define_macros += [(macro, 1)]
#
if os.name == 'posix':
configure_dl(ext, config_cmd)
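# Outcome sketch (comments only): for an MPI library lacking, say,
# MPI_Status_c2f, the loop above adds ('PyMPI_MISSING_MPI_Status_c2f', 1) to
# ext.define_macros, presumably so the C sources can guard fallbacks on it.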
def configure_libmpe(lib, config_cmd):
#
mpecc = os.environ.get('MPECC') or 'mpecc'
command = run_command(mpecc, '-mpilog -show')
for arg in command:
if arg.startswith('-L'):
libdir = arg[2:]
lib.library_dirs.append(libdir)
lib.runtime_library_dirs.append(libdir)
#
log_lib = 'lmpe'
dep_libs = ('pthread', 'mpe')
ok = config_cmd.check_library(log_lib, lib.library_dirs)
if not ok: return
libraries = []
for libname in dep_libs:
if config_cmd.check_library(
libname, lib.library_dirs,
other_libraries=libraries):
libraries.insert(0, libname)
if whole_archive:
cc = config_cmd.compiler
dirs = lib.library_dirs[:]
lib.extra_link_args += whole_archive(cc, log_lib, dirs)
lib.extra_link_args += ['-l' + libname
for libname in libraries]
else:
lib.libraries += [log_lib] + libraries
def configure_libvt(lib, config_cmd):
#
vtcc = os.environ.get('VTCC') or 'vtcc'
command = run_command(vtcc, '-vt:showme')
for arg in command:
if arg.startswith('-L'):
libdir = arg[2:]
lib.library_dirs.append(libdir)
lib.runtime_library_dirs.append(libdir)
# modern VampirTrace
if lib.name == 'vt':
log_lib = 'vt-mpi'
else:
log_lib = lib.name
ok = config_cmd.check_library(log_lib, lib.library_dirs)
if ok: lib.libraries = [log_lib]
if ok: return
# older VampirTrace, Open MPI <= 1.4
if lib.name == 'vt-hyb':
log_lib = 'vt.ompi'
else:
log_lib = 'vt.mpi'
dep_libs = ('dl', 'z', 'otf',)
ok = config_cmd.check_library(log_lib, lib.library_dirs)
if not ok: return
libraries = []
for libname in dep_libs:
if config_cmd.check_library(
libname, lib.library_dirs,
other_libraries=libraries):
libraries.insert(0, libname)
if whole_archive:
cc = config_cmd.compiler
dirs = lib.library_dirs[:]
lib.extra_link_args += whole_archive(cc, log_lib, dirs)
lib.extra_link_args += ['-l' + libname
for libname in libraries]
else:
lib.libraries += [log_lib] + libraries
lib.define_macros.append(('LIBVT_LEGACY', 1))
if lib.name == 'vt-hyb':
openmp_flag = '-fopenmp' # GCC, Intel
lib.extra_compile_args.append(openmp_flag)
lib.extra_link_args.append(openmp_flag)
def configure_pyexe(exe, config_cmd):
from distutils import sysconfig
if sys.platform.startswith('win'):
return
if (sys.platform == 'darwin' and
('Anaconda' in sys.version or
'Continuum Analytics' in sys.version)):
py_version = sysconfig.get_python_version()
py_abiflags = getattr(sys, 'abiflags', '')
exe.libraries += ['python' + py_version + py_abiflags]
return
#
from distutils.util import split_quoted
cfg_vars = sysconfig.get_config_vars()
libraries = []
library_dirs = []
link_args = []
if not sysconfig.get_config_var('Py_ENABLE_SHARED'):
py_version = sysconfig.get_python_version()
py_abiflags = getattr(sys, 'abiflags', '')
libraries = ['python' + py_version + py_abiflags]
if sys.platform == 'darwin':
fwkdir = cfg_vars.get('PYTHONFRAMEWORKDIR')
if (fwkdir and fwkdir != 'no-framework' and
fwkdir in cfg_vars.get('LINKFORSHARED', '')):
del libraries[:]
for var in ('LIBDIR', 'LIBPL'):
library_dirs += split_quoted(cfg_vars.get(var, ''))
for var in ('LDFLAGS',
'LIBS', 'MODLIBS', 'SYSLIBS',
'LDLAST'):
link_args += split_quoted(cfg_vars.get(var, ''))
exe.libraries += libraries
exe.library_dirs += library_dirs
exe.extra_link_args += link_args
def ext_modules():
modules = []
# MPI extension module
from glob import glob
MPI = dict(
name='mpi4py.MPI',
sources=['src/MPI.c'],
depends=(['src/mpi4py.MPI.c'] +
glob('src/*.h') +
glob('src/lib-mpi/*.h') +
glob('src/lib-mpi/config/*.h') +
glob('src/lib-mpi/compat/*.h')
),
configure=configure_mpi,
)
modules.append(MPI)
# custom dl extension module
dl = dict(
name='mpi4py.dl',
optional=True,
sources=['src/dynload.c'],
depends=['src/dynload.h'],
configure=configure_dl,
)
if os.name == 'posix':
modules.append(dl)
#
return modules
def libraries():
# MPE logging
pmpi_mpe = dict(
name='mpe', kind='dylib',
optional=True,
package='mpi4py',
dest_dir='lib-pmpi',
sources=['src/lib-pmpi/mpe.c'],
configure=configure_libmpe,
)
# VampirTrace logging
pmpi_vt = dict(
name='vt', kind='dylib',
optional=True,
package='mpi4py',
dest_dir='lib-pmpi',
sources=['src/lib-pmpi/vt.c'],
configure=configure_libvt,
)
pmpi_vt_mpi = dict(
name='vt-mpi', kind='dylib',
optional=True,
package='mpi4py',
dest_dir='lib-pmpi',
sources=['src/lib-pmpi/vt-mpi.c'],
configure=configure_libvt,
)
pmpi_vt_hyb = dict(
name='vt-hyb', kind='dylib',
optional=True,
package='mpi4py',
dest_dir='lib-pmpi',
sources=['src/lib-pmpi/vt-hyb.c'],
configure=configure_libvt,
)
#
return [
pmpi_mpe,
pmpi_vt,
pmpi_vt_mpi,
pmpi_vt_hyb,
]
def executables():
# MPI-enabled Python interpreter
pyexe = dict(name='python-mpi',
optional=True,
package='mpi4py',
dest_dir='bin',
sources=['src/python.c'],
configure=configure_pyexe,
)
#
if hasattr(sys, 'pypy_version_info'):
return []
return [pyexe]
# --------------------------------------------------------------------
# Setup
# --------------------------------------------------------------------
from mpidistutils import setup
from mpidistutils import Extension as Ext
from mpidistutils import Library as Lib
from mpidistutils import Executable as Exe
CYTHON = '0.22'
def run_setup():
"""
Call setup(*args, **kargs)
"""
setup_args = metadata.copy()
if setuptools:
setup_args['zip_safe'] = False
if setuptools:
src = os.path.join('src', 'mpi4py.MPI.c')
has_src = os.path.exists(os.path.join(topdir, src))
has_git = os.path.isdir(os.path.join(topdir, '.git'))
has_hg = os.path.isdir(os.path.join(topdir, '.hg'))
if not has_src or has_git or has_hg:
setup_args['setup_requires'] = ['Cython>='+CYTHON]
#
setup(packages = ['mpi4py'],
package_dir = {'mpi4py' : 'src'},
package_data = {'mpi4py' : ['include/mpi4py/*.h',
'include/mpi4py/*.pxd',
'include/mpi4py/*.pyx',
'include/mpi4py/*.pxi',
'include/mpi4py/*.i',
'MPI.pxd',
'libmpi.pxd',]},
ext_modules = [Ext(**ext) for ext in ext_modules()],
libraries = [Lib(**lib) for lib in libraries() ],
executables = [Exe(**exe) for exe in executables()],
**setup_args)
def chk_cython(VERSION):
from distutils import log
from distutils.version import LooseVersion
from distutils.version import StrictVersion
warn = lambda msg='': sys.stderr.write(msg+'\n')
#
try:
import Cython
except ImportError:
warn("*"*80)
warn()
warn(" You need to generate C source files with Cython!!")
warn(" Download and install Cython <http://www.cython.org>")
warn()
warn("*"*80)
return False
#
try:
CYTHON_VERSION = Cython.__version__
except AttributeError:
from Cython.Compiler.Version import version as CYTHON_VERSION
REQUIRED = VERSION
m = re.match(r"(\d+\.\d+(?:\.\d+)?).*", CYTHON_VERSION)
if m:
Version = StrictVersion
AVAILABLE = m.groups()[0]
else:
Version = LooseVersion
AVAILABLE = CYTHON_VERSION
if (REQUIRED is not None and
Version(AVAILABLE) < Version(REQUIRED)):
warn("*"*80)
warn()
warn(" You need to install Cython %s (you have version %s)"
% (REQUIRED, CYTHON_VERSION))
warn(" Download and install Cython <http://www.cython.org>")
warn()
warn("*"*80)
return False
#
return True
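# Minimal sketch of the version gate above (assumes distutils.version is
# importable, which holds for the Python versions this script supports):
#
#     from distutils.version import StrictVersion
#     StrictVersion('0.21.2') < StrictVersion('0.22')   # True -> warn and return False
#
# LooseVersion is only the fallback for version strings from which the regex
# cannot extract a plain X.Y[.Z] prefix.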
def run_cython(source, depends=(), includes=(),
destdir_c=None, destdir_h=None,
wdir=None, force=False, VERSION=None):
from glob import glob
from distutils import log
from distutils import dep_util
from distutils.errors import DistutilsError
target = os.path.splitext(source)[0]+'.c'
cwd = os.getcwd()
try:
if wdir: os.chdir(wdir)
alldeps = [source]
for dep in depends:
alldeps += glob(dep)
if not (force or dep_util.newer_group(alldeps, target)):
log.debug("skipping '%s' -> '%s' (up-to-date)",
source, target)
return
finally:
os.chdir(cwd)
if not chk_cython(VERSION):
raise DistutilsError("requires Cython>=%s" % VERSION)
log.info("cythonizing '%s' -> '%s'", source, target)
from cythonize import cythonize
err = cythonize(source,
includes=includes,
destdir_c=destdir_c,
destdir_h=destdir_h,
wdir=wdir)
if err:
raise DistutilsError(
"Cython failure: '%s' -> '%s'" % (source, target))
def build_sources(cmd):
from distutils.errors import DistutilsError
has_src = os.path.exists(os.path.join(
topdir, 'src', 'mpi4py.MPI.c'))
has_vcs = (os.path.isdir(os.path.join(topdir, '.git')) or
os.path.isdir(os.path.join(topdir, '.hg' )))
if (has_src and not has_vcs and not cmd.force): return
# mpi4py.MPI
source = 'mpi4py.MPI.pyx'
depends = ['include/*/*.pxi',
'include/*/*.pxd',
'MPI/*.pyx',
'MPI/*.pxi',]
includes = ['include']
destdir_h = os.path.join('include', 'mpi4py')
run_cython(source, depends, includes,
destdir_c=None, destdir_h=destdir_h,
wdir='src', force=cmd.force, VERSION=CYTHON)
from mpidistutils import build_src
build_src.run = build_sources
def run_testsuite(cmd):
from distutils.errors import DistutilsError
sys.path.insert(0, 'test')
try:
from runtests import main
finally:
del sys.path[0]
if cmd.dry_run:
return
args = cmd.args[:] or []
if cmd.verbose < 1:
args.insert(0,'-q')
if cmd.verbose > 1:
args.insert(0,'-v')
err = main(args)
if err:
raise DistutilsError("test")
from mpidistutils import test
test.run = run_testsuite
def main():
run_setup()
if __name__ == '__main__':
main()
# --------------------------------------------------------------------
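# Typical invocations of this script (illustrative; 'build' and 'install' are
# standard distutils commands, 'test' is the custom command wired to
# run_testsuite() above):
#
#     python setup.py build
#     python setup.py test
#     python setup.py install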
| |
from pkg_resources import require
require("cothread==2.14")
from cothread.catools import *
import cothread
from Generic_BPMDevice import *
from subprocess import Popen, PIPE
import numpy as np
class SparkERXR_EPICS_BPMDevice(Generic_BPMDevice):
"""Libera BPM Device class that uses Epics to communicate with PVs.
All of the methods here will attempt to be generic enough to work for most Libera devices.
Some libera BPM devices will have extra functionality. To implement this make a child class
that will extend this one.
Attributes:
epicsID (str): Channel identifier string that will be used to access PVs.
"""
def _trigger_epics(self):
"""Private method to update the EPICS variables
This will write a value to the .PROC record that will update all of the
process variables on that database.
Args:
Returns:
"""
caput(self.epicsID + ".PROC", 1) # Write to the .PROC data base to update all of the values
def _read_epics_pv(self, pv):
"""Private method to read an Epics process variable.
Args:
pv (str): Name of the Epics process variable to read.
Returns:
variant: Value of requested process variable.
"""
self._trigger_epics() # Update all values before reading
return caget(self.epicsID + pv) # Read selected epics PV
def _write_epics_pv(self, pv, value):
"""Private method to read an Epics process variable.
Args:
pv (str): Name of the Epics process variable to read.
value (variant): The value to be written to the epics variable
Returns:
variant: Value of requested process variable after writing to it
"""
caput(self.epicsID+pv, value) # Write to EPICs PV
return self._read_epics_pv(pv)
def __init__(self, database, daq_type):
"""Initializes the Libera BPM device object and assigns it an ID.
Args:
dev_ID (str/int): The two digit ID number assigned to that specific BPM device.
Returns:
.
"""
if not isinstance(database, str) or not isinstance(daq_type, str):
raise TypeError("database and daq_type must both be strings")
self.epicsID = database+":signals:"+daq_type # Different signal types can be used
self._write_epics_pv(".SCAN", 0) # Required so that values can be read from he database
self._trigger_epics() # Triggers the first count
pv = ".X" # Pick a PV that is hosted on the device
node = connect(self.epicsID + pv, cainfo=True).host.split(":")[0] # Get the IP address of the host
host_info = Popen(["arp", "-n", node], stdout=PIPE).communicate()[0] # Get info about the host using arp
host_info = host_info.split("\n")[1] # Split the info sent back
index = host_info.find(":") # Find the first ":", used in the MAC address
host_info = host_info[index - 2:index + 15] # Get the MAC address
self.macaddress = host_info
print "Opened link with" + self.get_device_ID() # Tells the user they have connected to the device
def __del__(self):
print "Closed link with " + self.get_device_ID() # Tells the user the link to the device has been closed
def get_X_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: X position in mm
"""
self._trigger_epics() # Triggers the acquisition
x = self._read_epics_pv(".X") # Gets the PV value
x = np.mean(x) # Gets the mean PV value
x = x/1000000.0 # Converts from nm to mm
return x
def get_Y_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: Y position in mm
"""
self._trigger_epics() # Triggers the acquisition
y = self._read_epics_pv(".Y") # Gets the PV value
y = np.mean(y) # Gets the mean PV value
y = y/1000000.0 # Converts from nm to mm
return y
def get_beam_current(self):
"""Override method, gets the beam current read by the BPMs.
Args:
Returns:
float: Current in mA
"""
# This function is not finished; it needs to convert from ADC counts to mA
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum") # Gets the PV value
daq_sum = np.mean(daq_sum) # Gets the mean PV value
return daq_sum
def get_input_power(self):
"""Override method, gets the input power of the signals input to the device
Args:
Returns:
float: Input power in dBm
"""
# This function is not finished; it needs to convert from ADC counts to dBm
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum") # Gets the PV value
daq_sum = np.mean(daq_sum) # Gets the mean PV value
return daq_sum
def get_ADC_sum(self):
"""Override method, gets the input power of the signals input to the device
Args:
Returns:
int: Input power in dBm
"""
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum")
daq_sum = np.mean(daq_sum) # Gets the PV value
daq_sum = np.round(daq_sum) # Rounds the mean to the nearest integer
return daq_sum
def get_raw_BPM_buttons(self):
"""Override method, gets the raw signal from each BPM.
Args:
Returns:
int: Raw signal from BPM A
int: Raw signal from BPM B
int: Raw signal from BPM C
int: Raw signal from BPM D
"""
self._trigger_epics() # triggers the acquisition
a = self._read_epics_pv(".A") # gets the PV value
b = self._read_epics_pv(".B")
c = self._read_epics_pv(".C")
d = self._read_epics_pv(".D")
a = np.mean(a) # gets the mean PV value
b = np.mean(b)
c = np.mean(c)
d = np.mean(d)
a = np.round(a) # Round the PV to the nearest integer
b = np.round(b)
c = np.round(c)
d = np.round(d)
return a, b, c, d
def get_normalised_BPM_buttons(self):
"""Override method, gets the normalised signal from each BPM.
Args:
Returns:
float: Normalised signal from BPM A
float: Normalised signal from BPM B
float: Normalised signal from BPM C
float: Normalised signal from BPM D
"""
self._trigger_epics() # Triggers the acquisition
a, b, c, d = self.get_raw_BPM_buttons() # Gets the RAW bpm buttons
sum_button = a + b + c + d # Calculates the BPM sum
sum_button = sum_button/4.0 # Gets the average BPM sum
a = a/sum_button # Normalises the A button
b = b/sum_button # Normalises the B button
c = c/sum_button # Normalises the C button
d = d/sum_button # Normalises the D button
return (a,b,c,d)
def get_device_ID(self):
"""Override method, gets the device's epics ID and MAC address
Args:
Returns:
str: Device with epics channel ID and MAC address
"""
return "Libera BPM with the Epics ID " + "\"" + self.epicsID + "\" and the MAC Address \"" + self.macaddress + "\""
def get_input_tolerance(self):
"""Override method, gets the maximum input power the device can take
The device will break if the input power is too high; each device has its
own tolerance, which this function returns. Use it to ensure that the power
put into the device is not high enough to damage it.
Args:
Returns:
float: max input power in dBm
"""
return -40 # The max continuous input the spark can withstand in dBm
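# Illustrative usage (comments only; the database prefix and DAQ type below are
# assumptions, as both are site-specific):
#
#     bpm = SparkERXR_EPICS_BPMDevice("TEST-BPM-01", "sa")
#     print bpm.get_device_ID()
#     x_mm = bpm.get_X_position()                # mean of .X, converted nm -> mm
#     a, b, c, d = bpm.get_normalised_BPM_buttons()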
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
module: iworkflow_bigip_connector
short_description: Manipulate cloud BIG-IP connectors in iWorkflow.
description:
- Manipulate cloud BIG-IP connectors in iWorkflow.
version_added: 2.4
options:
name:
description:
- Name of the connector to create.
required: True
state:
description:
- When C(present), ensures that the cloud connector exists. When
C(absent), ensures that the cloud connector does not exist.
required: false
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create cloud connector named Private Cloud
iworkflow_bigip_connector:
name: "Private Cloud"
password: "secret"
server: "iwf.mydomain.com"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
F5ModuleError,
HAS_F5SDK,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
returnables = ['name']
api_attributes = ['description']
updatables = ['description']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'iworkflow'
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.changes.to_return())
result.update(dict(changed=changed))
return result
def exists(self):
"""Checks to see if a connector exists.
This method does not use ODATA queries because that functionality
is broken in iWorkflow. Therefore, we iterate over all connectors
until we find the one we're interested in.
:return:
"""
collection = self.client.api.cm.cloud.connectors.locals.get_collection()
for item in collection:
if item.displayName != "BIG-IP":
continue
if item.name != self.want.name:
continue
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
pass
def read_current_from_device(self):
connector = None
collection = self.client.api.cm.cloud.connectors.locals.get_collection()
for item in collection:
if item.displayName != "BIG-IP":
continue
if item.name != self.want.name:
continue
connector = item
break
if not connector:
return None
result = connector.attrs
return Parameters(result)
def create_on_device(self):
params = self.want.api_params()
self.client.api.cm.cloud.connectors.locals.local.create(
name=self.want.name,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the BIG-IP connector")
return True
def remove_from_device(self):
resource = None
collection = self.client.api.cm.cloud.connectors.locals.get_collection()
for item in collection:
if item.displayName != "BIG-IP":
continue
if item.name != self.want.name:
continue
resource = item
break
if resource:
resource.delete()
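# Minimal sketch of the want/have diff done by _update_changed_options() above
# (standalone illustration, not part of the module; the values are made up):
#
#     want = Parameters(dict(name='Private Cloud', description='new text'))
#     have = Parameters(dict(name='Private Cloud', description='old text'))
#     # For each key in Parameters.updatables, a change is recorded only when
#     # want differs from have, so only 'description' would be pushed here.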
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| |
import math
import OpenGL
from OpenGL.GL import *
import coordinate_system
import vector
class Camera:
"""
camera = Camera()
camera.mode = Camera.ORTHOGONAL
camera.set_window_size(800, 600)
camera.set_opengl_projection()
.. draw 3d stuff ..
camera.set_opengl_pixel_projection()
sx, sy, sz = camera.screenspace(vector)
.. draw text/icons at pixel (sx, sy) with depth sz ..
-----------------------
forward-z should be positive here, unlike in OpenGL; methods such as
screenspace and window_ray assume this and may not work otherwise.
"""
ORTHOGONAL = 1 # screen center is (0, 0)
PERSPECTIVE = 2
def __init__(self):
self.mode = self.PERSPECTIVE
self.ocs = coordinate_system.CoordinateSystem()
self.pixel_aspect_w_h = 1.
#--------------------
# read-only from here
#--------------------
self.fovx = 80.
self.fovy = 80.
self.orthox = 5000. # window width in opengl units
self.orthoy = 5000.
self.z_near = 50.
self.z_far = 500. * 1000.
self.w_pixels = 1
self.h_pixels = 1
self._tanfovx_2 = 0.
self._tanfovy_2 = 0.
self.set_fovx(self.fovx)
self.set_fovy(self.fovy)
def set_window_size(self, w_pixels, h_pixels):
self.w_pixels = float(w_pixels)
self.h_pixels = float(h_pixels)
def set_pixel_aspect(self, pixel_aspect_w_h = 1.):
self.pixel_aspect_w_h = float(pixel_aspect_w_h)
def set_z(self, z_near = 50., z_far = 500 * 1000.):
self.z_near = float(z_near)
self.z_far = float(z_far)
def set_fovx(self, fovx):
self.fovx = fovx
self._tanfovx_2 = math.tan(math.radians(self.fovx / 2.))
def set_fovy(self, fovy):
self.fovy = fovy
self._tanfovy_2 = math.tan(math.radians(self.fovy / 2.))
def update_fovx(self):
"""
keep fovy and orthoy as they are and recalculate fovx and orthox
according to window size and pixel aspect ratio.
"""
physical_window_w_h = self.w_pixels / self.h_pixels * self.pixel_aspect_w_h
self._tanfovx_2 = self._tanfovy_2 * physical_window_w_h
self.fovx = math.degrees(2. * math.atan(self._tanfovx_2))
self.orthox = self.orthoy * physical_window_w_h
def update_fovy(self):
physical_window_w_h = self.w_pixels / self.h_pixels * self.pixel_aspect_w_h
self._tanfovy_2 = self._tanfovx_2 / physical_window_w_h
self.fovy = math.degrees(2. * math.atan(self._tanfovy_2))
self.orthoy = self.orthox / physical_window_w_h
def set_orthox(self, orthox):
self.orthox = orthox
def set_orthoy(self, orthoy):
self.orthoy = orthoy
def set_opengl_projection(self):
"""
setup opengl projection matrix with camera settings
"""
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if self.mode == self.ORTHOGONAL:
glOrtho(-self.orthox / 2., self.orthox / 2., \
-self.orthoy / 2., self.orthoy / 2., self.z_near, self.z_far)
elif self.mode == self.PERSPECTIVE:
#gluPerspective(self.fovy, self._tanfovx_2 / self._tanfovy_2, self.z_near, self.z_far)
glFrustum(-self.z_near * self._tanfovx_2, self.z_near * self._tanfovx_2, -self.z_near * self._tanfovy_2, self.z_near * self._tanfovy_2, self.z_near, self.z_far)
def set_opengl_pixel_projection(self, z_near = None, z_far = None):
"""
top-left is (0, 0). (top-left tip of the top-left pixel)
"""
if z_near is None: z_near = self.z_near
if z_far is None: z_far = self.z_far
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0., self.w_pixels, self.h_pixels, 0., z_near, z_far)
def look_direction(self, direction_vector, up_hint_vector):
"""
turn camera to look in direction_vector, up points to
up_hint_vector.
"""
self.ocs.a_frame.look_direction2(direction_vector, up_hint_vector)
def screenspace(self, vect):
"""
NB! beware that vect.z has to be positive ( vect[2] > 0 )
values are computed according to self.mode (the projection currently set
in OpenGL does not have to match).
project vect (has to be already in camera-space) to screen-space
return: (x, y, z) in pixels, where (0, 0) is the top-left of the window.
the returned z: vect.z if the camera is in orthogonal projection
mode. if in perspective mode, return modified vect.z that generates
the same z-buffer values in pixel-projection that vect.z would
generate in perspective projection mode.
(glOrtho & glFrustum (gluPerspective) use z-buffer differently)
"""
if self.mode == self.ORTHOGONAL:
return self.w_pixels / self.orthox * vect[0] + self.w_pixels / 2., \
-self.h_pixels / self.orthoy * vect[1] + self.h_pixels / 2., vect[2]
elif self.mode == self.PERSPECTIVE:
sx = vect[0] * (self.w_pixels / 2.) / vect[2] / self._tanfovx_2 + self.w_pixels / 2.
sy = -vect[1] * (self.h_pixels / 2.) / vect[2] / self._tanfovy_2 * self.pixel_aspect_w_h + self.h_pixels / 2.
sz = self.z_far + self.z_near - self.z_far * self.z_near / vect[2]
if vect[2] < 0.: sz = -sz
return sx, sy, sz
def screenspace_z(self, z):
"""
return only the z-component of self.screenspace(..)
z has to be positive, greater than zero, here!
"""
if self.mode == self.ORTHOGONAL:
return z
elif self.mode == self.PERSPECTIVE:
return self.z_far + self.z_near - self.z_far * self.z_near / z
def window_ray(self, x, y):
"""
return a ray that goes through the given pixel-coordinate,
in camera-space. NOT normalized.
return: start, direction (vectors. world-space coordinates)
("start" is necessary in case of orthogonal projection)
"""
if self.mode == self.ORTHOGONAL:
# TODO: untested
xx = self.orthox * (float(x) / self.w_pixels - .5)
yy = self.orthoy * (float(y) / self.h_pixels - .5)
return vector.Vector((xx, -yy, 0.)), vector.Vector((0., 0., 1.))
elif self.mode == self.PERSPECTIVE:
w, h = self.w_pixels, self.h_pixels
# TODO: aspect ratio.. or already in tanfov*?
xx = x - w / 2.
yy = (y - h / 2.) * w / h * self._tanfovy_2 / self._tanfovx_2
zz = w / 2. / self._tanfovx_2
return vector.Vector(), vector.Vector((xx, -yy, zz))
return None, None
def get_ocs_to(self, dest_object = None):
"""
TODO: move this to object3d subclass. and make it work with hierarchies deeper than 1 :)
return an ocs where projv_out projects vectors into dest_object space.
dest_object None is the root object. the purest world-space there is..
"""
assert dest_object is None, "not implemented :("
ocs = coordinate_system.CoordinateSystem()
ocs.set(self.ocs)
return ocs
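# Illustrative round-trip (comments only; assumes any sequence indexable as
# v[0..2] for the camera-space point, numbers are arbitrary):
#
#     cam = Camera()                                # PERSPECTIVE by default
#     cam.set_window_size(800, 600)
#     cam.update_fovx()                             # derive fovx from fovy + aspect
#     sx, sy, sz = cam.screenspace((1., 2., 100.))  # camera-space point, +z forward
#     start, direction = cam.window_ray(sx, sy)     # ray back through that pixel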
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Data Objects
###################################
Data objects are used to store typed data coming from an external source (for
example a file on disk). There are three primary data objects provided by
this module, :class:`~exatomic.exa.core.numerical.Series`, :class:`~exatomic.exa.core.numerical.DataFrame`,
and :class:`~exatomic.exa.core.numerical.Field`. The purpose of these objects is to facilitate
conversion of data into "traits" used in visualization and enforce relationships
between data objects in a given container. Any of the objects provided by this
module may be extended.
"""
import logging
import warnings
import numpy as np
import pandas as pd
from exatomic.exa.core.error import RequiredColumnError
class Numerical(object):
"""
Base class for :class:`~exatomic.exa.core.numerical.Series`,
:class:`~exatomic.exa.core.numerical.DataFrame`, and :class:`~exatomic.exa.numerical.Field`
objects, providing default trait functionality and clean representations
when present as part of containers.
"""
@property
def log(self):
name = '.'.join([self.__module__, self.__class__.__name__])
return logging.getLogger(name)
def slice_naive(self, key):
"""
Slice a data object based on its index, either by value (.loc) or
position (.iloc).
Args:
key: Single index value, slice, tuple, or list of indices/positionals
Returns:
data: Slice of self
"""
cls = self.__class__
key = check_key(self, key)
return cls(self.loc[key])
def __repr__(self):
name = '.'.join([self.__module__, self.__class__.__name__])
return '{0}{1}'.format(name, self.shape)
def __str__(self):
return self.__repr__()
class BaseSeries(Numerical):
"""
Base class for dense and sparse series objects (labeled arrays).
Attributes:
_sname (str): May have a required name (default None)
_iname (str): May have a required index name
_stype (type): May have a required value type
_itype (type): May have a required index type
"""
_metadata = ['name', 'meta']
# These attributes may be set when subclassing Series
_sname = None # Series may have a required name
_iname = None # Series may have a required index name
_stype = None # Series may have a required value type
_itype = None # Series may have a required index type
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseSeries, self).__init__(*args, **kwargs)
if hasattr(self, "name") and hasattr(self, "_sname") and hasattr(self, "_iname"):
if self._sname is not None and self.name != self._sname:
if self.name is not None:
warnings.warn("Object's name changed")
self.name = self._sname
if self._iname is not None and self.index.name != self._iname:
if self.index.name is not None:
warnings.warn("Object's index name changed")
self.index.name = self._iname
self.meta = meta
class BaseDataFrame(Numerical):
"""
Base class for dense and sparse dataframe objects (labeled matrices).
Note:
If the _cardinal attribute is populated, it will automatically be added
to the _categories and _columns attributes.
Attributes:
_cardinal (tuple): Tuple of column name and raw type that acts as foreign key to index of another table
_index (str): Name of index (may be used as foreign key in another table)
_columns (list): Required columns
_categories (dict): Dict of column names, raw types that if present will be converted to and from categoricals automatically
"""
_metadata = ['name', 'meta']
_cardinal = None # Tuple of column name and raw type that acts as foreign key to index of another table
_index = None # Name of index (may be used as foreign key in another table)
_columns = [] # Required columns
_categories = {} # Dict of column names, raw types that if present will be converted to and from categoricals automatically
def cardinal_groupby(self):
"""
Group this object on its cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal)
"""
g, t = self._cardinal
self[g] = self[g].astype(t)
grpby = self.groupby(g)
self[g] = self[g].astype('category')
return grpby
def slice_cardinal(self, key):
"""
Get the slice of this object by the value or values of the cardinal
dimension.
"""
cls = self.__class__
key = check_key(self, key, cardinal=True)
return cls(self[self[self._cardinal[0]].isin(key)])
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseDataFrame, self).__init__(*args, **kwargs)
self.meta = meta
class Series(BaseSeries, pd.Series):
"""
A labeled array.
.. code-block:: Python
class MySeries(exatomic.exa.core.numerical.Series):
_sname = 'data' # series default name
_iname = 'data_index' # series default index name
seri = MySeries(np.random.rand(10**5))
"""
@property
def _constructor(self):
return Series
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.Series(self).copy(*args, **kwargs))
class DataFrame(BaseDataFrame, pd.DataFrame):
"""
A data table
.. code-block:: Python
class MyDF(exatomic.exa.core.numerical.DataFrame):
_cardinal = ('cardinal', int)
_index = 'mydf_index'
_columns = ['x', 'y', 'z', 'symbol']
_categories = {'symbol': str}
"""
_constructor_sliced = Series
@property
def _constructor(self):
return DataFrame
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.DataFrame(self).copy(*args, **kwargs))
def _revert_categories(self):
"""
Inplace conversion to categories.
"""
for column, dtype in self._categories.items():
if column in self.columns:
self[column] = self[column].astype(dtype)
def _set_categories(self):
"""
Inplace conversion from categories.
"""
for column, _ in self._categories.items():
if column in self.columns:
self[column] = self[column].astype('category')
def __init__(self, *args, **kwargs):
super(DataFrame, self).__init__(*args, **kwargs)
self.log.debug('shape: {}'.format(self.shape))
if self._cardinal is not None:
self._categories[self._cardinal[0]] = self._cardinal[1]
self._columns.append(self._cardinal[0])
self._set_categories()
if len(self) > 0:
name = self.__class__.__name__
if self._columns:
missing = set(self._columns).difference(self.columns)
if missing:
raise RequiredColumnError(missing, name)
if self.index.name != self._index and self._index is not None:
if self.index.name is not None and self.index.name != self._index:
warnings.warn("Object's index name changed from {} to {}".format(self.index.name, self._index))
self.index.name = self._index
class Field(DataFrame):
"""
A field is defined by field data and field values. Field data defines the
discretization of the field (i.e. its origin in a given space, number of
steps/step spacing, and endpoint for example). Field values can be scalar
(series) and/or vector (dataframe) data defining the magnitude and/or direction
at each given point.
Note:
The convention for generating the discrete field data and ordering of
the field values must be the same (e.g. discrete field points are
generated x, y, then z and scalar field values are a series object
ordered looping first over x then y, then z).
In addition to the :class:`~exatomic.exa.core.numerical.DataFrame` attributes, this object
has the following:
"""
@property
def _constructor(self):
return Field
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
Note:
Copies both field data and field values.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
data = pd.DataFrame(self).copy(*args, **kwargs)
values = [field.copy() for field in self.field_values]
return cls(data, field_values=values)
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data
def slice_naive(self, key):
"""
Naively (on index) slice the field data and values.
Args:
key: Int, slice, or iterable to select data and values
Returns:
field: Sliced field object
"""
cls = self.__class__
key = check_key(self, key)
enum = pd.Series(range(len(self)))
enum.index = self.index
values = self.field_values[enum[key].values]
data = self.loc[key]
return cls(data, field_values=values)
#def slice_cardinal(self, key):
# cls = self.__class__
# grpby = self.cardinal_groupby()
def __init__(self, *args, **kwargs):
# The following check allows creation of a single field (whose field data
# comes from a series object and field values from another series object).
field_values = kwargs.pop("field_values", None)
if args and isinstance(args[0], pd.Series):
args = (args[0].to_frame().T, )
super(Field, self).__init__(*args, **kwargs)
self._metadata = ['field_values']
if isinstance(field_values, (list, tuple, np.ndarray)):
self.field_values = [Series(v) for v in field_values] # Convert type for nice repr
elif field_values is None:
self.field_values = []
elif isinstance(field_values, pd.Series):
self.field_values = [Series(field_values)]
else:
raise TypeError("Wrong type for field_values with type {}".format(type(field_values)))
for i in range(len(self.field_values)):
self.field_values[i].name = i
self.log.info('contains {} fields'.format(len(self.field_values)))
class Field3D(Field):
"""
Dataframe for storing dimensions of a scalar or vector field of 3D space.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| nx | int | number of grid points in x |
+-------------------+----------+-------------------------------------------+
| ny | int | number of grid points in y |
+-------------------+----------+-------------------------------------------+
| nz | int | number of grid points in z |
+-------------------+----------+-------------------------------------------+
| ox | float | field origin point in x |
+-------------------+----------+-------------------------------------------+
| oy | float | field origin point in y |
+-------------------+----------+-------------------------------------------+
| oz | float | field origin point in z |
+-------------------+----------+-------------------------------------------+
| xi | float | First component in x |
+-------------------+----------+-------------------------------------------+
| xj | float | Second component in x |
+-------------------+----------+-------------------------------------------+
| xk | float | Third component in x |
+-------------------+----------+-------------------------------------------+
| yi | float | First component in y |
+-------------------+----------+-------------------------------------------+
| yj | float | Second component in y |
+-------------------+----------+-------------------------------------------+
| yk | float | Third component in y |
+-------------------+----------+-------------------------------------------+
| zi | float | First component in z |
+-------------------+----------+-------------------------------------------+
| zj | float | Second component in z |
+-------------------+----------+-------------------------------------------+
| zk | float | Third component in z |
+-------------------+----------+-------------------------------------------+
Note:
Each field should be flattened into an N x 1 (scalar) or N x 3 (vector)
series or dataframe respectively. The orientation of the flattening
should have x as the outer loop and z values as the inner loop (for both
cases). This is commonly called row-major or C-style order, and has
the last index changing the fastest and the first index changing the
slowest.
See Also:
:class:`~exatomic.exa.core.numerical.Field`
"""
_columns = ['nx', 'ny', 'nz', 'ox', 'oy', 'oz', 'xi', 'xj', 'xk',
'yi', 'yj', 'yk', 'zi', 'zj', 'zk']
@property
def _constructor(self):
return Field3D
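# Hedged construction sketch (comments only): a single 2x2x2 scalar field on a
# unit grid, with values flattened z-fastest per the Note above. The column
# values are illustrative.
#
#     data = pd.Series({'nx': 2, 'ny': 2, 'nz': 2,
#                       'ox': 0., 'oy': 0., 'oz': 0.,
#                       'xi': 1., 'xj': 0., 'xk': 0.,
#                       'yi': 0., 'yj': 1., 'yk': 0.,
#                       'zi': 0., 'zj': 0., 'zk': 1.})
#     values = pd.Series(np.arange(8, dtype=float))
#     field = Field3D(data, field_values=values)  # single-row field data, one scalar field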
def check_key(data_object, key, cardinal=False):
"""
Update the value of an index key by matching values or getting positionals.
"""
itype = (int, np.int32, np.int64)
if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
raise KeyError("Unknown key type {} for key {}".format(type(key), key))
keys = data_object.index.values
if cardinal and data_object._cardinal is not None:
keys = data_object[data_object._cardinal[0]].unique()
elif isinstance(key, itype) and (key in keys or key < 0):
key = keys[key]
if isinstance(key, itype):
key = [key]
else:
key = list(sorted(key))
elif isinstance(key, itype):
key = [key]
elif isinstance(key, slice):
key = list(sorted(keys[key]))
elif isinstance(key, (tuple, list, pd.Index)) and not all(k in keys for k in key):
key = list(sorted(keys[key]))
return key
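# check_key() normalization in brief (illustrative, assumes a default
# RangeIndex):
#
#     df = DataFrame({'x': [1., 2., 3.]})
#     check_key(df, 1)            # -> [1]      single value wrapped in a list
#     check_key(df, slice(0, 2))  # -> [0, 1]   slice resolved against the index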
class SparseDataFrame(BaseDataFrame):
@property
def _constructor(self):
return SparseDataFrame
| |
import wx
import wx.webview as webview
import webbrowser
import templates
import util
import threading
import time
from settings import settings
BLANK = 'about:blank'
COMMAND_CLOSE = 'http://close/'
COMMAND_NEXT = 'http://next/'
COMMAND_PREVIOUS = 'http://previous/'
COMMAND_FIRST = 'http://first/'
COMMAND_LAST = 'http://last/'
COMMAND_PLAY = 'http://play/'
COMMAND_PAUSE = 'http://pause/'
class Event(wx.PyEvent):
def __init__(self, event_object, type):
super(Event, self).__init__()
self.SetEventType(type.typeId)
self.SetEventObject(event_object)
EVT_LINK = wx.PyEventBinder(wx.NewEventType())
EVT_POPUP_CLOSE = wx.PyEventBinder(wx.NewEventType())
class BrowserControl(webview.WebView):
def __init__(self, parent):
super(BrowserControl, self).__init__(parent, -1)
self.lock = threading.Lock()
self.Bind(webview.EVT_WEBVIEW_BEFORE_LOAD, self.on_before_load)
self.Bind(webview.EVT_WEBVIEW_LOAD, self.on_load)
def load_src(self, html):
self.lock.acquire()
path = util.abspath('.')
self.SetPageSource(html, path)
while True:
if self.lock.acquire(False):
self.lock.release()
break
else:
time.sleep(0.01)
wx.SafeYield()
self.update_size()
def update_size(self):
width = int(self.RunScript('document.body.scrollWidth'))
height = int(self.RunScript('document.body.scrollHeight'))
self.SetSize((width, height))
def on_before_load(self, event):
e = Event(self, EVT_LINK)
e.link = event.GetURL()
self.ProcessEvent(e)
if not e.GetSkipped():
event.Cancel()
def on_load(self, event):
if event.GetState() == webview.WEBVIEW_LOAD_ONLOAD_HANDLED:
self.lock.release()
class PopupFrame(wx.Frame):
def __init__(self):
title = settings.APP_NAME
style = wx.STAY_ON_TOP | wx.FRAME_NO_TASKBAR | wx.BORDER_NONE
super(PopupFrame, self).__init__(None, -1, title, style=style)
self.SetTransparent(0)
self.control = BrowserControl(self)
def load_src(self, html):
self.control.load_src(html)
self.update_size()
def update_size(self):
self.Fit()
x, y, w, h = wx.ClientDisplayRect()
cw, ch = self.GetSize()
pad = 10
x1 = x + pad
y1 = y + pad
x2 = x + w - cw - pad
y2 = y + h - ch - pad
x3 = x + w / 2 - cw / 2
y3 = y + h / 2 - ch / 2
lookup = {
(-1, -1): (x1, y1),
(1, -1): (x2, y1),
(-1, 1): (x1, y2),
(1, 1): (x2, y2),
(0, 0): (x3, y3),
}
self.SetPosition(lookup[settings.POPUP_POSITION])
class PopupManager(wx.EvtHandler):
def __init__(self):
super(PopupManager, self).__init__()
self.timer = None
self.auto = settings.POPUP_AUTO_PLAY
self.cache = {}
def set_items(self, items, index=0):
self.items = list(items)
self.index = index
self.count = len(self.items)
self.clear_cache(keep_current_item=True)
self.update()
self.set_timer()
def update(self, focus=False):
item = self.items[self.index]
if item in self.cache:
self.show_frame(focus)
self.update_cache()
else:
self.update_cache(True)
self.show_frame(focus)
self.update_cache()
def update_cache(self, current_only=False):
indexes = set()
indexes.add(self.index)
if not current_only:
indexes.add(self.index - 1)
indexes.add(self.index + 1)
#indexes.add(0)
#indexes.add(self.count - 1)
items = set(self.items[index] for index in indexes if index >= 0 and index < self.count)
for item in items:
if item in self.cache:
continue
frame = self.create_frame(item)
self.cache[item] = frame
for item, frame in list(self.cache.items()): # snapshot, since entries are deleted during iteration
if item not in items:
frame.Close()
del self.cache[item]
def clear_cache(self, keep_current_item=False):
current_item = self.items[self.index]
for item, frame in list(self.cache.items()): # snapshot, since entries are deleted during iteration
if keep_current_item and item == current_item:
continue
frame.Close()
del self.cache[item]
def show_frame(self, focus=False):
current_item = self.items[self.index]
current_item.read = True
for item, frame in self.cache.items():
if item == current_item:
if focus:
frame.Show()
else:
frame.Disable()
frame.Show()
frame.Enable()
frame.Update()
frame.SetTransparent(settings.POPUP_TRANSPARENCY)
for item, frame in self.cache.items():
if item != current_item:
frame.Hide()
def create_frame(self, item):
html = self.render_item(item)
frame = PopupFrame()
frame.control.Bind(EVT_LINK, self.on_link)
frame.load_src(html)
return frame
def render_item(self, item):
context = {}
count = str(self.count)
index = str(self.items.index(item) + 1)
index = '%s%s' % ('0' * (len(count) - len(index)), index)
context['item_index'] = index
context['item_count'] = count
context['is_playing'] = self.auto
context['is_paused'] = not self.auto
context['POPUP_WIDTH'] = settings.POPUP_WIDTH
context['COMMAND_CLOSE'] = COMMAND_CLOSE
context['COMMAND_NEXT'] = COMMAND_NEXT
context['COMMAND_PREVIOUS'] = COMMAND_PREVIOUS
context['COMMAND_FIRST'] = COMMAND_FIRST
context['COMMAND_LAST'] = COMMAND_LAST
context['COMMAND_PLAY'] = COMMAND_PLAY
context['COMMAND_PAUSE'] = COMMAND_PAUSE
html = templates.render(settings.POPUP_THEME, item, context)
return html
def set_timer(self):
if self.timer and self.timer.IsRunning():
return
duration = settings.POPUP_DURATION * 1000
self.timer = wx.CallLater(duration, self.on_timer)
def stop_timer(self):
if self.timer and self.timer.IsRunning():
self.timer.Stop()
self.timer = None
def on_link(self, event):
link = event.link
# track the click
item = self.items[self.index]
feed = item.feed
if link == item.link or link == feed.link:
feed.clicks += 1
# handle the click
if link == BLANK:
event.Skip()
elif link == COMMAND_CLOSE:
wx.CallAfter(self.on_close)
elif link == COMMAND_FIRST:
self.auto = False
wx.CallAfter(self.on_first)
elif link == COMMAND_LAST:
self.auto = False
wx.CallAfter(self.on_last)
elif link == COMMAND_NEXT:
self.auto = False
wx.CallAfter(self.on_next)
elif link == COMMAND_PREVIOUS:
self.auto = False
wx.CallAfter(self.on_previous)
elif link == COMMAND_PLAY:
if not self.auto:
self.auto = True
self.stop_timer()
wx.CallAfter(self.on_timer)
elif link == COMMAND_PAUSE:
self.auto = False
else:
webbrowser.open(link)
def on_first(self):
self.index = 0
self.update(True)
def on_last(self):
self.index = self.count - 1
self.update(True)
def on_next(self, focus=True):
if self.index < self.count - 1:
self.index += 1
self.update(focus)
else:
self.on_close()
def on_previous(self):
if self.index > 0:
self.index -= 1
self.update(True)
def on_close(self):
self.stop_timer()
self.clear_cache()
event = Event(self, EVT_POPUP_CLOSE)
wx.PostEvent(self, event)
def on_timer(self):
self.timer = None
if not self.auto:
return
if self.index == self.count - 1:
self.on_close()
else:
self.on_next(False)
self.set_timer()
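# Illustrative wiring (comments only; 'items' is an assumption and stands for
# the caller's feed items, which must expose .read, .link and .feed as used above):
#
#     manager = PopupManager()
#     manager.Bind(EVT_POPUP_CLOSE, on_popups_closed)   # hypothetical handler
#     manager.set_items(items)   # shows the first popup and arms the auto-play timer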
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from heat.common import context
from heat.common import service_utils
from heat.engine import service
from heat.engine import worker
from heat.objects import service as service_objects
from heat.rpc import worker_api
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class ServiceEngineTest(common.HeatTestCase):
def setUp(self):
super(ServiceEngineTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.engine_id = 'engine-fake-uuid'
def test_make_sure_rpc_version(self):
self.assertEqual(
'1.36',
service.EngineService.RPC_API_VERSION,
('RPC version is changed, please update this test to new version '
'and make sure additional test cases are added for RPC APIs '
'added in new version'))
@mock.patch.object(service_objects.Service, 'get_all')
@mock.patch.object(service_utils, 'format_service')
def test_service_get_all(self, mock_format_service, mock_get_all):
mock_get_all.return_value = [mock.Mock()]
mock_format_service.return_value = mock.Mock()
self.assertEqual(1, len(self.eng.list_services(self.ctx)))
self.assertTrue(mock_get_all.called)
mock_format_service.assert_called_once_with(mock.ANY)
@mock.patch.object(service_objects.Service, 'update_by_id')
@mock.patch.object(service_objects.Service, 'create')
@mock.patch.object(context, 'get_admin_context')
def test_service_manage_report_start(self,
mock_admin_context,
mock_service_create,
mock_service_update):
self.eng.service_id = None
mock_admin_context.return_value = self.ctx
srv = dict(id='mock_id')
mock_service_create.return_value = srv
self.eng.service_manage_report()
mock_admin_context.assert_called_once_with()
mock_service_create.assert_called_once_with(
self.ctx,
dict(host=self.eng.host,
hostname=self.eng.hostname,
binary=self.eng.binary,
engine_id=self.eng.engine_id,
topic=self.eng.topic,
report_interval=cfg.CONF.periodic_interval))
self.assertEqual(srv['id'], self.eng.service_id)
mock_service_update.assert_called_once_with(
self.ctx,
self.eng.service_id,
dict(deleted_at=None))
@mock.patch.object(service_objects.Service, 'get_all_by_args')
@mock.patch.object(service_objects.Service, 'delete')
@mock.patch.object(context, 'get_admin_context')
def test_service_manage_report_cleanup(self,
mock_admin_context,
mock_service_delete,
mock_get_all):
mock_admin_context.return_value = self.ctx
ages_a_go = timeutils.utcnow() - datetime.timedelta(
seconds=4000)
mock_get_all.return_value = [{'id': 'foo',
'deleted_at': None,
'updated_at': ages_a_go}]
self.eng.service_manage_cleanup()
mock_admin_context.assert_called_once_with()
mock_get_all.assert_called_once_with(self.ctx,
self.eng.host,
self.eng.binary,
self.eng.hostname)
mock_service_delete.assert_called_once_with(
self.ctx, 'foo')
@mock.patch.object(service_objects.Service, 'update_by_id')
@mock.patch.object(context, 'get_admin_context')
def test_service_manage_report_update(self, mock_admin_context,
mock_service_update):
self.eng.service_id = 'mock_id'
mock_admin_context.return_value = self.ctx
self.eng.service_manage_report()
mock_admin_context.assert_called_once_with()
mock_service_update.assert_called_once_with(
self.ctx,
'mock_id',
dict(deleted_at=None))
@mock.patch.object(service_objects.Service, 'update_by_id')
@mock.patch.object(context, 'get_admin_context')
def test_service_manage_report_update_fail(self, mock_admin_context,
mock_service_update):
self.eng.service_id = 'mock_id'
mock_admin_context.return_value = self.ctx
mock_service_update.side_effect = Exception()
self.eng.service_manage_report()
msg = 'Service %s update failed' % self.eng.service_id
self.assertIn(msg, self.LOG.output)
def test_stop_rpc_server(self):
with mock.patch.object(self.eng,
'_rpc_server') as mock_rpc_server:
self.eng._stop_rpc_server()
mock_rpc_server.stop.assert_called_once_with()
mock_rpc_server.wait.assert_called_once_with()
def _test_engine_service_start(
self,
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method):
self.patchobject(self.eng, 'service_manage_cleanup')
self.patchobject(self.eng, 'reset_stack_status')
self.eng.start()
# engine id
sample_uuid_method.assert_called_once_with()
sample_uuid = sample_uuid_method.return_value
self.assertEqual(sample_uuid,
self.eng.engine_id,
'Failed to generate engine_id')
# Thread group manager
thread_group_manager_class.assert_called_once_with()
thread_group_manager = thread_group_manager_class.return_value
self.assertEqual(thread_group_manager,
self.eng.thread_group_mgr,
'Failed to create Thread Group Manager')
# Engine Listener
engine_listener_class.assert_called_once_with(
self.eng.host,
self.eng.engine_id,
self.eng.thread_group_mgr
)
engine_listener = engine_listener_class.return_value
engine_listener.start.assert_called_once_with()
# Worker Service
if cfg.CONF.convergence_engine:
worker_service_class.assert_called_once_with(
host=self.eng.host,
topic=worker_api.TOPIC,
engine_id=self.eng.engine_id,
thread_group_mgr=self.eng.thread_group_mgr
)
worker_service = worker_service_class.return_value
worker_service.start.assert_called_once_with()
# RPC Target
target_class.assert_called_once_with(
version=service.EngineService.RPC_API_VERSION,
server=self.eng.host,
topic=self.eng.topic)
# RPC server
target = target_class.return_value
rpc_server_method.assert_called_once_with(target,
self.eng)
rpc_server = rpc_server_method.return_value
self.assertEqual(rpc_server,
self.eng._rpc_server,
"Failed to create RPC server")
rpc_server.start.assert_called_once_with()
# RPC client
rpc_client = rpc_client_class.return_value
rpc_client_class.assert_called_once_with(
version=service.EngineService.RPC_API_VERSION)
self.assertEqual(rpc_client,
self.eng._client,
"Failed to create RPC client")
# Manage Thread group
thread_group_class.assert_called_once_with()
manage_thread_group = thread_group_class.return_value
manage_thread_group.add_timer.assert_called_once_with(
cfg.CONF.periodic_interval,
self.eng.service_manage_report
)
@mock.patch('heat.common.messaging.get_rpc_server',
return_value=mock.Mock())
@mock.patch('oslo_messaging.Target',
return_value=mock.Mock())
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
@mock.patch('heat.common.service_utils.generate_engine_id',
return_value='sample-uuid')
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch('heat.engine.service.EngineListener',
return_value=mock.Mock())
@mock.patch('heat.engine.worker.WorkerService',
return_value=mock.Mock())
@mock.patch('oslo_service.threadgroup.ThreadGroup',
return_value=mock.Mock())
@mock.patch.object(service.EngineService, '_configure_db_conn_pool_size')
def test_engine_service_start_in_non_convergence_mode(
self,
configure_db_conn_pool_size,
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method):
cfg.CONF.set_override('convergence_engine', False)
self._test_engine_service_start(
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method
)
@mock.patch('heat.common.messaging.get_rpc_server',
return_value=mock.Mock())
@mock.patch('oslo_messaging.Target',
return_value=mock.Mock())
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
@mock.patch('heat.common.service_utils.generate_engine_id',
return_value=mock.Mock())
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch('heat.engine.service.EngineListener',
return_value=mock.Mock())
@mock.patch('heat.engine.worker.WorkerService',
return_value=mock.Mock())
@mock.patch('oslo_service.threadgroup.ThreadGroup',
return_value=mock.Mock())
@mock.patch.object(service.EngineService, '_configure_db_conn_pool_size')
def test_engine_service_start_in_convergence_mode(
self,
configure_db_conn_pool_size,
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method):
cfg.CONF.set_override('convergence_engine', True)
self._test_engine_service_start(
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method
)
def _test_engine_service_stop(
self,
service_delete_method,
admin_context_method):
cfg.CONF.set_default('periodic_interval', 60)
self.patchobject(self.eng, 'service_manage_cleanup')
self.patchobject(self.eng, 'reset_stack_status')
self.patchobject(self.eng, 'service_manage_report')
self.eng.start()
# Add dummy thread groups to test that thread_group_mgr.stop() is executed
dtg1 = tools.DummyThreadGroup()
dtg2 = tools.DummyThreadGroup()
self.eng.thread_group_mgr.groups['sample-uuid1'] = dtg1
self.eng.thread_group_mgr.groups['sample-uuid2'] = dtg2
self.eng.service_id = 'sample-service-uuid'
self.patchobject(self.eng.manage_thread_grp, 'stop',
new=mock.Mock(wraps=self.eng.manage_thread_grp.stop))
self.patchobject(self.eng, '_stop_rpc_server',
new=mock.Mock(wraps=self.eng._stop_rpc_server))
orig_stop = self.eng.thread_group_mgr.stop
with mock.patch.object(self.eng.thread_group_mgr, 'stop') as stop:
stop.side_effect = orig_stop
self.eng.stop()
# RPC server
self.eng._stop_rpc_server.assert_called_once_with()
if cfg.CONF.convergence_engine:
# WorkerService
self.eng.worker_service.stop.assert_called_once_with()
# Wait for all active threads to be finished
calls = [mock.call('sample-uuid1', True),
mock.call('sample-uuid2', True)]
self.eng.thread_group_mgr.stop.assert_has_calls(calls, True)
# Manage Thread group
self.eng.manage_thread_grp.stop.assert_called_with()
# Service delete
admin_context_method.assert_called_once_with()
ctxt = admin_context_method.return_value
service_delete_method.assert_called_once_with(
ctxt,
self.eng.service_id
)
@mock.patch.object(worker.WorkerService,
'stop')
@mock.patch('heat.common.context.get_admin_context',
return_value=mock.Mock())
@mock.patch('heat.objects.service.Service.delete',
return_value=mock.Mock())
def test_engine_service_stop_in_convergence_mode(
self,
service_delete_method,
admin_context_method,
worker_service_stop):
cfg.CONF.set_default('convergence_engine', True)
self._test_engine_service_stop(
service_delete_method,
admin_context_method
)
@mock.patch('heat.common.context.get_admin_context',
return_value=mock.Mock())
@mock.patch('heat.objects.service.Service.delete',
return_value=mock.Mock())
def test_engine_service_stop_in_non_convergence_mode(
self,
service_delete_method,
admin_context_method):
cfg.CONF.set_default('convergence_engine', False)
self._test_engine_service_stop(
service_delete_method,
admin_context_method
)
@mock.patch('oslo_log.log.setup')
def test_engine_service_reset(self, setup_logging_mock):
self.eng.reset()
setup_logging_mock.assert_called_once_with(cfg.CONF, 'heat')
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
@mock.patch('heat.common.service_utils.generate_engine_id',
return_value=mock.Mock())
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch('heat.engine.service.EngineListener',
return_value=mock.Mock())
@mock.patch('heat.engine.worker.WorkerService',
return_value=mock.Mock())
@mock.patch('oslo_service.threadgroup.ThreadGroup',
return_value=mock.Mock())
def test_engine_service_configures_connection_pool(
self,
thread_group_class,
worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class):
self.addCleanup(self.eng._stop_rpc_server)
self.eng.start()
self.assertEqual(cfg.CONF.executor_thread_pool_size,
cfg.CONF.database.max_overflow)
| |
#
# GtkMain.py -- pygtk threading help routines.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
GUI threading help routines.
Usage:
import GtkMain
# See constructor for GtkMain for options
self.mygtk = GtkMain.GtkMain()
# NOT THIS
#gtk.main()
# INSTEAD, main thread calls this:
self.mygtk.mainloop()
# (asynchronous call)
self.mygtk.gui_do(method, arg1, arg2, ... argN, kwd1=val1, ..., kwdN=valN)
# OR
# (synchronous call)
res = self.mygtk.gui_call(method, arg1, arg2, ... argN, kwd1=val1, ..., kwdN=valN)
# To cause the GUI thread to terminate the mainloop
self.mygtk.gui_quit()
"""
import sys, traceback
import threading
import logging
import ginga.util.six as six
if six.PY2:
import thread
import Queue
else:
import _thread as thread
import queue as Queue
import gtk
from ginga.misc import Task, Future
class GtkMain(object):
def __init__(self, queue=None, logger=None, ev_quit=None):
# You can pass in a queue if you prefer to do so
if not queue:
queue = Queue.Queue()
self.gui_queue = queue
# You can pass in a logger if you prefer to do so
if logger is None:
logger = logging.getLogger('GtkHelper')
self.logger = logger
if not ev_quit:
ev_quit = threading.Event()
self.ev_quit = ev_quit
self.gui_thread_id = None
try:
screen = gtk.gdk.screen_get_default()
self.screen_ht = screen.get_height()
self.screen_wd = screen.get_width()
except Exception:
self.screen_wd = 1600
self.screen_ht = 1200
#print "screen dimensions %dx%d" % (self.screen_wd, self.screen_ht)
def get_screen_size(self):
return (self.screen_wd, self.screen_ht)
def update_pending(self, timeout=0.0):
"""Process all pending GTK events and return. _timeout_ is a tuning
parameter for performance.
"""
# Process "out-of-band" GTK events
try:
while gtk.events_pending():
#gtk.main_iteration(False)
gtk.main_iteration()
finally:
pass
done = False
while not done:
# Process "in-band" GTK events
try:
future = self.gui_queue.get(block=True,
timeout=timeout)
# Execute the GUI method
try:
try:
res = future.thaw(suppress_exception=False)
except Exception as e:
future.resolve(e)
self.logger.error("gui error: %s" % str(e))
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception as e:
self.logger.error("Traceback information unavailable.")
finally:
pass
except Queue.Empty:
done = True
except Exception as e:
self.logger.error("Main GUI loop error: %s" % str(e))
# Process "out-of-band" GTK events again
try:
while gtk.events_pending():
#gtk.main_iteration(False)
gtk.main_iteration()
finally:
pass
def gui_do(self, method, *args, **kwdargs):
"""General method for asynchronously calling into the GUI.
It creates a future that calls the given (method) with the given (args)
and (kwdargs) inside the GUI thread. If the calling thread is not the
GUI thread, the future is returned so the caller can wait on it.
"""
future = Future.Future()
future.freeze(method, *args, **kwdargs)
self.gui_queue.put(future)
my_id = thread.get_ident()
if my_id != self.gui_thread_id:
return future
def gui_call(self, method, *args, **kwdargs):
"""General method for synchronously calling into the GUI.
This waits until the method has completed before returning.
"""
my_id = thread.get_ident()
if my_id == self.gui_thread_id:
return method(*args, **kwdargs)
else:
future = self.gui_do(method, *args, **kwdargs)
return future.wait()
def gui_do_future(self, future):
self.gui_queue.put(future)
return future
def nongui_do(self, method, *args, **kwdargs):
task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
return self.nongui_do_task(task)
def nongui_do_cb(self, tup, method, *args, **kwdargs):
task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
task.register_callback(tup[0], args=tup[1:])
return self.nongui_do_task(task)
def nongui_do_future(self, future):
task = Task.FuncTask(future.thaw, (), {}, logger=self.logger)
return self.nongui_do_task(task)
def nongui_do_task(self, task):
try:
task.init_and_start(self)
return task
except Exception as e:
self.logger.error("Error starting task: %s" % (str(e)))
raise(e)
def assert_gui_thread(self):
my_id = thread.get_ident()
assert my_id == self.gui_thread_id, \
Exception("Non-GUI thread (%d) is executing GUI code!" % (
my_id))
def assert_nongui_thread(self):
my_id = thread.get_ident()
assert my_id != self.gui_thread_id, \
Exception("GUI thread (%d) is executing non-GUI code!" % (
my_id))
def mainloop(self, timeout=0.001):
# Mark our thread id
self.gui_thread_id = thread.get_ident()
while not self.ev_quit.isSet():
self.update_pending(timeout=timeout)
def gui_quit(self):
"Call this to cause the GUI thread to quit the mainloop."""
self.ev_quit.set()
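# --- Minimal usage sketch (not part of the original module) ---
# Hedged example only: it assumes a hypothetical `update_label` callback and
# shows one way a worker thread could hand work to the GUI thread via
# gui_do()/gui_call() while the main thread runs mainloop().  Guarded so that
# importing this module stays side-effect free.
if __name__ == '__main__':
    import time

    gtk_main = GtkMain()

    def update_label(text):
        # Hypothetical GUI-side callback; a real app would update a widget here.
        gtk_main.assert_gui_thread()
        print("GUI thread got: %s" % text)
        return len(text)

    def worker():
        # Asynchronous call from a non-GUI thread returns a Future...
        future = gtk_main.gui_do(update_label, "hello from worker")
        print("async result: %s" % future.wait())
        # ...while gui_call() blocks until the GUI thread produces the result.
        print("sync result: %s" % gtk_main.gui_call(update_label, "synchronous call"))
        time.sleep(0.5)
        gtk_main.gui_quit()

    threading.Thread(target=worker).start()
    gtk_main.mainloop()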
# END
| |
from __future__ import print_function
import re
import jwt
import boto3
# import json
print('Loading function')
def lambda_handler(event, context):
print('Client token: ' + event['authorizationToken'])
print('Method ARN: ' + event['methodArn'])
'''validate the incoming token'''
'''and produce the principal user identifier associated with the token'''
encoded = event['authorizationToken']
try:
payload = jwt.decode(encoded, verify=False)
except jwt.InvalidTokenError:
raise Exception('Unauthorized')
username = payload["username"]
print('username: ' + username)
dynamo = boto3.resource('dynamodb').Table('auth_user')
response = dynamo.get_item(Key={'username':username})
# print('response: ' + json.dumps(response))
secret = response["Item"]["secret"]
# print('secret: ' + secret)
try:
payload = jwt.decode(encoded, secret, algorithms=['HS256'])
except jwt.InvalidTokenError:
raise Exception('Unauthorized')
'''this could be accomplished in a number of ways:'''
'''1. Call out to OAuth provider'''
'''2. Decode a JWT token inline'''
'''3. Lookup in a self-managed DB'''
principalId = 'user|a1b2c3d4'
'''you can send a 401 Unauthorized response to the client by failing like so:'''
'''raise Exception('Unauthorized')'''
'''if the token is valid, a policy must be generated which will allow or deny access to the client'''
'''if access is denied, the client will receive a 403 Access Denied response'''
'''if access is allowed, API Gateway will proceed with the backend integration configured on the method that was called'''
'''this function must generate a policy that is associated with the recognized principal user identifier.'''
'''depending on your use case, you might store policies in a DB, or generate them on the fly'''
'''keep in mind, the policy is cached for 5 minutes by default (TTL is configurable in the authorizer)'''
'''and will apply to subsequent calls to any method/resource in the RestApi'''
'''made with the same token'''
'''the example policy below allows access to the method ARN that was called'''
tmp = event['methodArn'].split(':')
apiGatewayArnTmp = tmp[5].split('/')
awsAccountId = tmp[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = tmp[3]
policy.stage = apiGatewayArnTmp[1]
'''policy.denyAllMethods()'''
policy.allowMethod(apiGatewayArnTmp[2], '/' + apiGatewayArnTmp[3])
'''finally, build the policy and exit the function using return'''
return policy.build()
class HttpVerb:
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
PATCH = 'PATCH'
HEAD = 'HEAD'
DELETE = 'DELETE'
OPTIONS = 'OPTIONS'
ALL = '*'
class AuthPolicy(object):
awsAccountId = ''
'''The AWS account id the policy will be generated for. This is used to create the method ARNs.'''
principalId = ''
'''The principal used for the policy, this should be a unique identifier for the end user.'''
version = '2012-10-17'
'''The policy version used for the evaluation. This should always be '2012-10-17' '''
pathRegex = r'^[/.a-zA-Z0-9-\*_]+$'
'''The regular expression used to validate resource paths for the policy'''
'''these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
the build method processes these lists and generates the appropriate
statements for the final policy'''
allowMethods = []
denyMethods = []
restApiId = '*'
'''The API Gateway API id. By default this is set to '*' '''
region = '*'
'''The region where the API is deployed. By default this is set to '*' '''
stage = '*'
'''The name of the stage used in the policy. By default this is set to '*' '''
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
'''Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null.'''
if verb != '*' and not hasattr(HttpVerb, verb):
raise NameError('Invalid HTTP verb ' + verb + '. Allowed verbs are defined in the HttpVerb class')
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError('Invalid resource path: ' + resource + '. Path should match ' + self.pathRegex)
if resource[:1] == '/':
resource = resource[1:]
resourceArn = ('arn:aws:execute-api:' +
self.region + ':' +
self.awsAccountId + ':' +
self.restApiId + '/' +
self.stage + '/' +
verb + '/' +
resource)
if effect.lower() == 'allow':
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == 'deny':
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
def _getEmptyStatement(self, effect):
'''Returns an empty statement object prepopulated with the correct action and the
desired effect.'''
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _getStatementForEffect(self, effect, methods):
'''This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy.'''
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement['Resource'].append(curMethod['resourceArn'])
conditionalStatement['Condition'] = curMethod['conditions']
statements.append(conditionalStatement)
if statement['Resource']:
statements.append(statement)
return statements
def allowAllMethods(self):
'''Adds a '*' allow to the policy to authorize access to all methods of an API'''
self._addMethod('Allow', HttpVerb.ALL, '*', [])
def denyAllMethods(self):
'''Adds a '*' deny to the policy to deny access to all methods of an API'''
self._addMethod('Deny', HttpVerb.ALL, '*', [])
def allowMethod(self, verb, resource):
'''Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy'''
self._addMethod('Allow', verb, resource, [])
def denyMethod(self, verb, resource):
'''Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy'''
self._addMethod('Deny', verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
'''Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition'''
self._addMethod('Allow', verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
'''Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition'''
self._addMethod('Deny', verb, resource, conditions)
def build(self):
'''Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that include conditions will have their own statement in the policy.'''
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError('No statements defined for the policy')
policy = {
'principalId' : self.principalId,
'policyDocument' : {
'Version' : self.version,
'Statement' : []
}
}
policy['policyDocument']['Statement'].extend(self._getStatementForEffect('Allow', self.allowMethods))
policy['policyDocument']['Statement'].extend(self._getStatementForEffect('Deny', self.denyMethods))
return policy
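# --- Minimal local sketch (not part of the AWS blueprint above) ---
# Hedged example only: it builds a policy outside of Lambda to show the
# AuthPolicy flow end to end.  The account id, API id, region, stage and
# resource paths below are made-up values.
if __name__ == '__main__':
    import json
    demo_policy = AuthPolicy('user|a1b2c3d4', '123456789012')
    demo_policy.restApiId = 'abcdef1234'
    demo_policy.region = 'us-east-1'
    demo_policy.stage = 'dev'
    # Allow GET on /hello and deny everything under /admin.
    demo_policy.allowMethod(HttpVerb.GET, '/hello')
    demo_policy.denyMethod(HttpVerb.ALL, '/admin/*')
    # Prints an IAM policy document with one Allow and one Deny statement.
    print(json.dumps(demo_policy.build(), indent=2))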
| |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import base64
import os
import zlib
import itertools
import hashlib
import datetime
import six
from six.moves import zip as izip
from . import environment
from .console import log
from .machine import Machine
from . import statistics
from . import util
def iter_results_paths(results):
"""
Iterate over all of the result file paths.
"""
skip_files = set([
'machine.json', 'benchmarks.json'
])
for root, dirs, files in os.walk(results):
# Iterate over files only if machine.json is valid json
machine_json = os.path.join(root, "machine.json")
try:
data = util.load_json(machine_json, api_version=Machine.api_version)
machine_name = data.get('machine')
if not isinstance(machine_name, six.text_type):
raise util.UserError("malformed {0}".format(machine_json))
except util.UserError as err:
machine_json_err = "Skipping results: {0}".format(six.text_type(err))
except IOError as err:
machine_json_err = "Skipping results: could not load {0}".format(
machine_json)
else:
machine_json_err = None
# Iterate over files
for filename in files:
if filename not in skip_files and filename.endswith('.json'):
if machine_json_err is not None:
# Show the warning only if there are some files to load
log.warn(machine_json_err)
break
yield (root, filename, machine_name)
def iter_results(results):
"""
Iterate over all of the result files.
"""
for (root, filename, machine_name) in iter_results_paths(results):
try:
yield Results.load(os.path.join(root, filename), machine_name=machine_name)
except util.UserError as exc:
log.warn(six.text_type(exc))
def iter_results_for_machine(results, machine_name):
"""
Iterate over all of the result files for a particular machine.
"""
return iter_results(os.path.join(results, machine_name))
def iter_results_for_machine_and_hash(results, machine_name, commit):
"""
Iterate over all of the result files with a given hash for a
particular machine.
"""
full_commit = get_result_hash_from_prefix(results, machine_name, commit)
for (root, filename, machine_name) in iter_results_paths(
os.path.join(results, machine_name)):
results_commit = filename.split('-')[0]
if results_commit == full_commit:
try:
yield Results.load(os.path.join(root, filename), machine_name=machine_name)
except util.UserError as exc:
log.warn(six.text_type(exc))
def iter_existing_hashes(results):
"""
Iterate over all of the result commit hashes, yielding each commit hash.
May return duplicates. Use `get_existing_hashes` if that matters.
"""
for result in iter_results(results):
yield result.commit_hash
def get_existing_hashes(results):
"""
Get a list of the commit hashes that have already been tested.
"""
log.info("Getting existing hashes")
hashes = list(set(iter_existing_hashes(results)))
return hashes
def get_result_hash_from_prefix(results, machine_name, commit_prefix):
"""
Get the 8-char result commit identifier from a potentially shorter
prefix. Only considers the set of commits that have had
results computed.
Returns None if there are no matches. Raises a UserError
if the prefix is non-unique.
"""
commits = set([])
path = os.path.join(results, machine_name)
for (root, filename, r_machine_name) in iter_results_paths(path):
if r_machine_name != machine_name:
log.warn("Skipping results '{0}': machine name is not '{1}'".format(
os.path.join(root, filename), machine_name))
continue
results_commit = filename.split('-')[0]
cmp_len = min(len(commit_prefix), len(results_commit))
if results_commit[:cmp_len] == commit_prefix[:cmp_len]:
commits.add(results_commit)
if len(commits) > 1:
commit_list_str = ', '.join(sorted(commits))
raise util.UserError('Git hash prefix could represent one of ' +
'multiple commits: {0}'.format(commit_list_str))
elif len(commits) == 1:
return list(commits)[0]
else:
return None
def get_filename(machine, commit_hash, env_name):
"""
Get the result filename for a given machine, commit_hash and
environment.
If the environment name is too long, use its hash instead.
"""
if env_name and len(env_name) >= 128:
env_name = "env-" + hashlib.md5(env_name.encode('utf-8')).hexdigest()
return os.path.join(
machine,
"{0}-{1}.json".format(
commit_hash[:8],
env_name))
def _compatible_results(result, result_params, params):
"""
For parameterized benchmarks, obtain values from *result* that
are compatible with parameters of *benchmark*
"""
if result is None:
# All results missing, e.g. build failure
return [None for param in itertools.product(*params)]
# Pick results for those parameters that also appear in the
# current benchmark
old_results = {}
for param, value in izip(itertools.product(*result_params), result):
old_results[param] = value
new_results = []
for param in itertools.product(*params):
new_results.append(old_results.get(param))
return new_results
class Results(object):
"""
Manage a set of benchmark results for a single machine and commit
hash.
"""
api_version = 1
def __init__(self, params, requirements, commit_hash, date, python, env_name):
"""
Parameters
----------
params : dict
Parameters describing the environment in which the
benchmarks were run.
requirements : list
Requirements of the benchmarks being run.
commit_hash : str
The commit hash for the benchmark run.
date : int
JavaScript timestamp for when the commit was merged into
the repository.
python : str
A Python version specifier.
env_name : str
Environment name
"""
self._params = params
self._requirements = requirements
self._commit_hash = commit_hash
self._date = date
self._results = {}
self._samples = {}
self._stats = {}
self._benchmark_params = {}
self._profiles = {}
self._python = python
self._env_name = env_name
self._started_at = {}
self._ended_at = {}
self._benchmark_version = {}
# Note: stderr and errcode are not saved to files
self._stderr = {}
self._errcode = {}
if commit_hash is not None:
self._filename = get_filename(
params['machine'], self._commit_hash, env_name)
else:
self._filename = None
@classmethod
def unnamed(cls):
return cls({}, {}, None, None, None, None)
@property
def commit_hash(self):
return self._commit_hash
@property
def date(self):
return self._date
@property
def params(self):
return self._params
@property
def started_at(self):
return self._started_at
@property
def ended_at(self):
return self._ended_at
@property
def benchmark_version(self):
return self._benchmark_version
@property
def stderr(self):
return self._stderr
@property
def errcode(self):
return self._errcode
def get_all_result_keys(self):
"""
Return all available result keys.
"""
return six.iterkeys(self._results)
def get_result_keys(self, benchmarks):
"""
Return result keys corresponding to benchmarks.
Parameters
----------
benchmarks : Benchmarks
Benchmarks to return results for.
Used for checking benchmark versions.
Returns
-------
keys : set
Set of benchmark result keys
"""
keys = set()
for key in six.iterkeys(self._results):
if key not in benchmarks:
continue
version = self._benchmark_version.get(key)
bench_version = benchmarks[key].get('version')
if version is not None and version != bench_version:
continue
keys.add(key)
return keys
def get_result_value(self, key, params):
"""
Return the value of benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
value : {float, list of float}
Benchmark result value. If the benchmark is parameterized, return
a list of values.
"""
return _compatible_results(self._results[key],
self._benchmark_params[key],
params)
def get_result_stats(self, key, params):
"""
Return the statistical information of a benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
stats : {None, dict, list of dict}
Result statistics. If the benchmark is parameterized,
return a list of values.
"""
return _compatible_results(self._stats[key],
self._benchmark_params[key],
params)
def get_result_samples(self, key, params):
"""
Return the raw data points of a benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
samples : {None, list}
Raw result samples. If the benchmark is parameterized,
return a list of values.
"""
return _compatible_results(self._samples[key],
self._benchmark_params[key],
params)
def get_result_params(self, key):
"""
Return the benchmark parameters of the given result
"""
return self._benchmark_params[key]
def remove_result(self, key):
"""
Remove results corresponding to a given benchmark.
"""
del self._results[key]
del self._benchmark_params[key]
del self._samples[key]
del self._stats[key]
# Remove profiles (may be missing)
self._profiles.pop(key, None)
# Remove run times (may be missing in old files)
self._started_at.pop(key, None)
self._ended_at.pop(key, None)
# Remove version (may be missing)
self._benchmark_version.pop(key, None)
def remove_samples(self, key, selected_idx=None):
"""
Remove measurement samples from the selected benchmark.
"""
if key not in self._results:
raise ValueError(key)
if selected_idx is None:
self._samples[key] = None
elif self._samples[key] is not None:
for j in selected_idx:
self._samples[key][j] = None
def add_result(self, benchmark, result,
started_at=None, ended_at=None,
record_samples=False,
append_samples=False,
selected_idx=None):
"""
Add benchmark result.
Parameters
----------
benchmark : dict
Benchmark object
result : runner.BenchmarkResult
Result of the benchmark.
started_at : datetime.datetime, optional
Benchmark start time.
ended_at : datetime.datetime, optional
Benchmark end time.
record_samples : bool, optional
Whether to save samples.
append_samples : bool, optional
Whether to combine new samples with old.
selected_idx : set, optional
Which indices in a parametrized benchmark to update
"""
new_result = list(result.result)
new_samples = list(result.samples)
new_number = result.number
benchmark_name = benchmark['name']
benchmark_version = benchmark['version']
if started_at is None:
started_at = datetime.datetime.utcnow()
if ended_at is None:
ended_at = started_at
new_stats = [None] * len(new_result)
if (benchmark_name in self._results and
benchmark_version == self._benchmark_version.get(benchmark_name)):
# Append to old samples, if requested
if append_samples:
old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
for j in range(len(new_samples)):
if old_samples[j] is not None and new_samples[j] is not None:
new_samples[j] = old_samples[j] + new_samples[j]
# Retain old result where requested
merge_idx = [j for j in range(len(new_result))
if selected_idx is not None and j not in selected_idx]
if merge_idx:
old_result = self.get_result_value(benchmark_name, benchmark['params'])
old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
old_stats = self.get_result_stats(benchmark_name, benchmark['params'])
for j in merge_idx:
new_result[j] = old_result[j]
new_samples[j] = old_samples[j]
new_stats[j] = old_stats[j]
# Recompute stats for updated entries (and drop unnecessary data)
for j, (r, s, n) in enumerate(zip(new_result, new_samples, new_number)):
if util.is_na(r):
new_samples[j] = None
new_stats[j] = None
continue
if n is not None:
new_result[j], new_stats[j] = statistics.compute_stats(s, n)
# Compress None lists to just None
if all(x is None for x in new_result):
new_result = None
if all(x is None for x in new_samples):
new_samples = None
if all(x is None for x in new_stats):
new_stats = None
# Drop samples if requested
if not record_samples:
new_samples = None
# Store result
self._results[benchmark_name] = new_result
self._stats[benchmark_name] = new_stats
self._samples[benchmark_name] = new_samples
self._benchmark_params[benchmark_name] = benchmark['params'] if benchmark['params'] else []
self._started_at[benchmark_name] = util.datetime_to_js_timestamp(started_at)
self._ended_at[benchmark_name] = util.datetime_to_js_timestamp(ended_at)
self._benchmark_version[benchmark_name] = benchmark_version
self._stderr[benchmark_name] = result.stderr
self._errcode[benchmark_name] = result.errcode
if result.profile:
profile_data = base64.b64encode(zlib.compress(result.profile))
if sys.version_info[0] >= 3:
profile_data = profile_data.decode('ascii')
self._profiles[benchmark_name] = profile_data
def get_profile(self, benchmark_name):
"""
Get the profile data for the given benchmark name.
Parameters
----------
benchmark_name : str
Name of benchmark
Returns
-------
profile_data : bytes
Raw profile data
"""
profile_data = self._profiles[benchmark_name]
if sys.version_info[0] >= 3:
profile_data = profile_data.encode('ascii')
return zlib.decompress(base64.b64decode(profile_data))
def has_profile(self, benchmark_name):
"""
Does the given benchmark data have profiling information?
"""
return benchmark_name in self._profiles
def save(self, result_dir):
"""
Save the results to disk, replacing existing results.
Parameters
----------
result_dir : str
Path to root of results tree.
"""
if self._filename is None:
raise ValueError("Cannot save unnamed Results")
path = os.path.join(result_dir, self._filename)
results = {}
for key in six.iterkeys(self._samples):
# Save omitting default values
value = {'result': self._results[key]}
if self._samples[key] and any(x is not None for x in self._samples[key]):
value['samples'] = self._samples[key]
if self._stats[key] and any(x is not None for x in self._stats[key]):
value['stats'] = self._stats[key]
if self._benchmark_params[key]:
value['params'] = self._benchmark_params[key]
if list(value.keys()) == ['result']:
value = value['result']
if isinstance(value, list) and len(value) == 1:
value = value[0]
results[key] = value
data = {
'results': results,
'params': self._params,
'requirements': self._requirements,
'commit_hash': self._commit_hash,
'date': self._date,
'env_name': self._env_name,
'python': self._python,
'profiles': self._profiles,
'started_at': self._started_at,
'ended_at': self._ended_at,
'benchmark_version': self._benchmark_version,
}
util.write_json(path, data, self.api_version)
def load_data(self, result_dir):
"""
Load previous results for the current parameters (if any).
"""
if self._filename is None:
raise ValueError("Cannot load unnamed Results")
path = os.path.join(result_dir, self._filename)
if os.path.isfile(path):
old = self.load(path)
for dict_name in ('_results', '_samples', '_stats',
'_benchmark_params', '_profiles', '_started_at',
'_ended_at', '_benchmark_version'):
setattr(self, dict_name, getattr(old, dict_name))
@classmethod
def load(cls, path, machine_name=None):
"""
Load results from disk.
Parameters
----------
path : str
Path to results file.
machine_name : str, optional
If given, check that the results file is for the given machine.
"""
d = util.load_json(path, cls.api_version, cleanup=False)
try:
obj = cls(
d['params'],
d['requirements'],
d['commit_hash'],
d['date'],
d['python'],
d.get('env_name',
environment.get_env_name('', d['python'], d['requirements']))
)
obj._results = {}
obj._samples = {}
obj._stats = {}
obj._benchmark_params = {}
for key, value in six.iteritems(d['results']):
# Backward compatibility
if not isinstance(value, dict):
value = {'result': [value], 'samples': None,
'stats': None, 'params': []}
if not isinstance(value['result'], list):
value['result'] = [value['result']]
if 'stats' in value and not isinstance(value['stats'], list):
value['stats'] = [value['stats']]
value.setdefault('samples', None)
value.setdefault('stats', None)
value.setdefault('params', [])
# Assign results
obj._results[key] = value['result']
obj._samples[key] = value['samples']
obj._stats[key] = value['stats']
obj._benchmark_params[key] = value['params']
if 'profiles' in d:
obj._profiles = d['profiles']
obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
obj._started_at = d.get('started_at', {})
obj._ended_at = d.get('ended_at', {})
obj._benchmark_version = d.get('benchmark_version', {})
except KeyError as exc:
raise util.UserError(
"Error loading results file '{0}': missing key {1}".format(
path, six.text_type(exc)))
if machine_name is not None and obj.params.get('machine') != machine_name:
raise util.UserError(
"Error loading results file '{0}': machine name is not '{1}'".format(
path, machine_name))
return obj
def rm(self, result_dir):
if self._filename is None:
raise ValueError("Cannot remove unnamed Results")
path = os.path.join(result_dir, self._filename)
os.remove(path)
@classmethod
def update(cls, path):
util.update_json(cls, path, cls.api_version, cleanup=False)
@property
def env_name(self):
return self._env_name
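# --- Minimal sanity-check sketch (not part of asv itself) ---
# Hedged example only: it exercises _compatible_results() with made-up
# parameter grids to show how old results are re-aligned when a benchmark's
# parameter set changes between runs.
if __name__ == '__main__':
    old_params = [['10', '20']]   # benchmark previously ran for n=10 and n=20
    old_values = [1.5, 2.5]       # one result per old parameter combination
    new_params = [['20', '30']]   # benchmark now runs for n=20 and n=30
    remapped = _compatible_results(old_values, old_params, new_params)
    # n=20 is carried over from the old run; n=30 has no old result -> None
    assert remapped == [2.5, None]
    print(remapped)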
| |
# -*- coding: utf-8 -*-
import json
import os
import random
import time
import uuid
import pyrax
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
from pyrax.client import BaseClient
from pyrax.clouddatabases import CloudDatabaseClient
from pyrax.clouddatabases import CloudDatabaseDatabaseManager
from pyrax.clouddatabases import CloudDatabaseInstance
from pyrax.clouddatabases import CloudDatabaseManager
from pyrax.clouddatabases import CloudDatabaseUser
from pyrax.clouddatabases import CloudDatabaseUserManager
from pyrax.clouddatabases import CloudDatabaseVolume
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageManager
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudloadbalancers import CloudLoadBalancer
from pyrax.cloudloadbalancers import CloudLoadBalancerManager
from pyrax.cloudloadbalancers import CloudLoadBalancerClient
from pyrax.cloudloadbalancers import Node
from pyrax.cloudloadbalancers import VirtualIP
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import CloudDNSPTRRecord
from pyrax.cloudnetworks import CloudNetwork
from pyrax.cloudnetworks import CloudNetworkClient
from pyrax.cloudmonitoring import CloudMonitorClient
from pyrax.cloudmonitoring import CloudMonitorEntity
from pyrax.cloudmonitoring import CloudMonitorCheck
from pyrax.cloudmonitoring import CloudMonitorNotification
from pyrax.image import Image
from pyrax.image import ImageClient
from pyrax.image import ImageManager
from pyrax.image import ImageMemberManager
from pyrax.image import ImageTagManager
from pyrax.object_storage import BulkDeleter
from pyrax.object_storage import Container
from pyrax.object_storage import ContainerManager
from pyrax.object_storage import FolderUploader
from pyrax.object_storage import StorageClient
from pyrax.object_storage import StorageObject
from pyrax.object_storage import StorageObjectManager
from pyrax.queueing import Queue
from pyrax.queueing import QueueClaim
from pyrax.queueing import QueueMessage
from pyrax.queueing import QueueClient
from pyrax.queueing import QueueManager
import pyrax.exceptions as exc
from pyrax.base_identity import BaseIdentity
from pyrax.base_identity import Endpoint
from pyrax.base_identity import Service
from pyrax.identity.rax_identity import RaxIdentity
from pyrax.identity.keystone_identity import KeystoneIdentity
import pyrax.utils as utils
example_uri = "http://example.com"
class FakeResponse(object):
headers = {}
body = ""
status_code = 200
reason = "Oops"
content = "Oops"
@property
def status(self):
# TEMPORARY - until the cf_wrapper code is removed.
return self.status_code
@status.setter
def status(self, val):
# TEMPORARY - until the cf_wrapper code is removed.
self.status_code = val
def getheaders(self):
return self.headers
def read(self):
return "Line1\nLine2"
def get(self, arg):
return self.headers.get(arg)
def json(self):
return self.content
class FakeIterator(utils.ResultsIterator):
def _init_methods(self):
pass
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
def __init__(self, *args, **kwargs):
self.identity = FakeIdentity()
class FakeStorageClient(StorageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeStorageClient, self).__init__(identity, *args, **kwargs)
def create(self, name):
return FakeContainer(self._manager, {"name": name})
class FakeContainerManager(ContainerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
super(FakeContainerManager, self).__init__(api, *args, **kwargs)
class FakeContainer(Container):
def __init__(self, *args, **kwargs):
super(FakeContainer, self).__init__(*args, **kwargs)
self.object_manager = FakeStorageObjectManager(self.manager.api,
uri_base=self.name)
self.object_manager._container = self
class FakeStorageObjectManager(StorageObjectManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
if "uri_base" not in kwargs:
kwargs["uri_base"] = utils.random_ascii()
super(FakeStorageObjectManager, self).__init__(api, *args, **kwargs)
class FakeStorageObject(StorageObject):
def __init__(self, manager, name=None, total_bytes=None, content_type=None,
last_modified=None, etag=None, attdict=None):
"""
The object can either be initialized with individual params, or by
passing the dict that is returned by swiftclient.
"""
self.manager = manager
self.name = name
self.bytes = total_bytes or 0
self.content_type = content_type
self.last_modified = last_modified
self.hash = etag
if attdict:
self._read_attdict(attdict)
fake_attdict = {"name": "fake",
"content-length": 42,
"content-type": "text/html",
"etag": "ABC",
"last-modified": "Tue, 01 Jan 2013 01:02:03 GMT",
}
class FakeServer(object):
id = utils.random_unicode()
class FakeService(object):
user_agent = "FakeService"
USER_AGENT = "FakeService"
def __init__(self, *args, **kwargs):
self.client = FakeClient()
self.Node = FakeNode
self.VirtualIP = FakeVirtualIP
self.loadbalancers = FakeLoadBalancer()
self.id = utils.random_unicode()
def authenticate(self):
pass
def get_protocols(self):
return ["HTTP"]
def get_algorithms(self):
return ["RANDOM"]
def get_usage(self):
pass
class FakeCSClient(FakeService):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCSClient, self).__init__(ident, *args, **kwargs)
def dummy(self):
pass
self.servers = FakeService()
utils.add_method(self.servers, dummy, "list")
self.images = FakeService()
utils.add_method(self.images, dummy, "list")
self.flavors = FakeService()
utils.add_method(self.flavors, dummy, "list")
class FakeFolderUploader(FolderUploader):
def __init__(self, *args, **kwargs):
super(FakeFolderUploader, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
pass
class FakeBulkDeleter(BulkDeleter):
def __init__(self, *args, **kwargs):
super(FakeBulkDeleter, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
time.sleep(0.0001)
self.results = {}
self.completed = True
class FakeManager(object):
def __init__(self, *args, **kwargs):
super(FakeManager, self).__init__(*args, **kwargs)
self.api = FakeClient()
def list(self):
pass
def get(self, item):
pass
def delete(self, item):
pass
def create(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
pass
def action(self, item, action_type, body=None):
pass
class FakeException(BaseException):
pass
class FakeKeyring(object):
password_set = False
def get_password(self, *args, **kwargs):
return "FAKE_TOKEN|FAKE_URL"
def set_password(self, *args, **kwargs):
self.password_set = True
class FakeEntity(object):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
def get(self, *args, **kwargs):
pass
def list(self, *args, **kwargs):
pass
class FakeDatabaseUser(CloudDatabaseUser):
pass
class FakeDatabaseVolume(CloudDatabaseVolume):
def __init__(self, instance, *args, **kwargs):
self.instance = instance
self.size = 1
self.used = 0.2
class FakeDatabaseInstance(CloudDatabaseInstance):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeDatabaseManager()
self.manager.api = FakeDatabaseClient()
self._database_manager = CloudDatabaseDatabaseManager(
FakeDatabaseClient())
self._user_manager = CloudDatabaseUserManager(FakeDatabaseClient())
self.volume = FakeDatabaseVolume(self)
class FakeDatabaseManager(CloudDatabaseManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDatabaseClient()
super(FakeDatabaseManager, self).__init__(api, *args, **kwargs)
self.uri_base = "instances"
class FakeDatabaseClient(CloudDatabaseClient):
def __init__(self, *args, **kwargs):
self._manager = FakeDatabaseManager(self)
self._flavor_manager = FakeManager()
ident = FakeIdentity()
super(FakeDatabaseClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeNovaVolumeClient(BaseClient):
def __init__(self, *args, **kwargs):
pass
class FakeBlockStorageManager(CloudBlockStorageManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeBlockStorageManager, self).__init__(api, *args, **kwargs)
class FakeBlockStorageVolume(CloudBlockStorageVolume):
def __init__(self, *args, **kwargs):
volname = utils.random_unicode(8)
self.id = utils.random_unicode()
self.manager = FakeBlockStorageManager()
self._nova_volumes = FakeNovaVolumeClient()
class FakeBlockStorageSnapshot(CloudBlockStorageSnapshot):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeManager()
self.status = "available"
class FakeBlockStorageClient(CloudBlockStorageClient):
def __init__(self, *args, **kwargs):
self._types_manager = FakeManager()
self._snapshot_manager = FakeManager()
ident = FakeIdentity()
super(FakeBlockStorageClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeSnapshotManager(CloudBlockStorageSnapshotManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeSnapshotManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancerClient(CloudLoadBalancerClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeLoadBalancerClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeLoadBalancerManager(CloudLoadBalancerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeLoadBalancerClient()
super(FakeLoadBalancerManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancer(CloudLoadBalancer):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake"}
super(FakeLoadBalancer, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.port = random.randint(1, 256)
self.manager = FakeLoadBalancerManager()
class FakeNode(Node):
def __init__(self, address=None, port=None, condition=None, weight=None,
status=None, parent=None, type=None, id=None):
if address is None:
address = "0.0.0.0"
if port is None:
port = 80
if id is None:
id = utils.random_unicode()
super(FakeNode, self).__init__(address=address, port=port,
condition=condition, weight=weight, status=status,
parent=parent, type=type, id=id)
class FakeVirtualIP(VirtualIP):
pass
class FakeStatusChanger(object):
check_count = 0
id = utils.random_unicode()
@property
def status(self):
if self.check_count < 2:
self.check_count += 1
return "changing"
return "ready"
class FakeDNSClient(CloudDNSClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeDNSClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeDNSManager(CloudDNSManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDNSClient()
super(FakeDNSManager, self).__init__(api, *args, **kwargs)
self.resource_class = FakeDNSDomain
self.response_key = "domain"
self.plural_response_key = "domains"
self.uri_base = "domains"
class FakeDNSDomain(CloudDNSDomain):
def __init__(self, *args, **kwargs):
self.id = utils.random_ascii()
self.name = utils.random_unicode()
self.manager = FakeDNSManager()
class FakeDNSRecord(CloudDNSRecord):
def __init__(self, mgr, info, *args, **kwargs):
super(FakeDNSRecord, self).__init__(mgr, info, *args, **kwargs)
class FakeDNSPTRRecord(CloudDNSPTRRecord):
pass
class FakeDNSDevice(FakeLoadBalancer):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
class FakeCloudNetworkClient(CloudNetworkClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudNetworkClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudNetwork(CloudNetwork):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
label = kwargs.pop("label", kwargs.pop("name", utils.random_unicode()))
info["label"] = label
super(FakeCloudNetwork, self).__init__(manager=None, info=info, *args,
**kwargs)
self.id = uuid.uuid4().hex
class FakeAutoScaleClient(AutoScaleClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
self._manager = FakeManager()
super(FakeAutoScaleClient, self).__init__(ident, *args, **kwargs)
class FakeAutoScalePolicy(AutoScalePolicy):
def __init__(self, *args, **kwargs):
super(FakeAutoScalePolicy, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeAutoScaleWebhook(AutoScaleWebhook):
def __init__(self, *args, **kwargs):
super(FakeAutoScaleWebhook, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroupManager(ScalingGroupManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeAutoScaleClient()
super(FakeScalingGroupManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroup(ScalingGroup):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake", "scalingPolicies": []}
self.groupConfiguration = {}
super(FakeScalingGroup, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.name = name
self.manager = FakeScalingGroupManager()
class FakeCloudMonitorClient(CloudMonitorClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudMonitorClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudMonitorEntity(CloudMonitorEntity):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["id"] = utils.random_ascii()
super(FakeCloudMonitorEntity, self).__init__(FakeManager(), info=info,
*args, **kwargs)
self.manager.api = FakeCloudMonitorClient()
class FakeCloudMonitorCheck(CloudMonitorCheck):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
entity = kwargs.pop("entity", None)
info["id"] = utils.random_ascii()
super(FakeCloudMonitorCheck, self).__init__(FakeManager(), info, *args,
**kwargs)
self.set_entity(entity)
self.id = uuid.uuid4()
class FakeCloudMonitorNotification(CloudMonitorNotification):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
super(FakeCloudMonitorNotification, self).__init__(manager=None,
info=info, *args, **kwargs)
self.id = uuid.uuid4()
class FakeQueue(Queue):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueue, self).__init__(manager=mgr, info=info, *args, **kwargs)
class FakeQueueClaim(QueueClaim):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueueClaim, self).__init__(manager=mgr, info=info, *args,
**kwargs)
class FakeQueueClient(QueueClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeQueueClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeQueueManager(QueueManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeQueueClient()
super(FakeQueueManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImage(Image):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
info["id"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeImageManager())
kwargs["member_manager_class"] = FakeImageMemberManager
kwargs["tag_manager_class"] = FakeImageTagManager
super(FakeImage, self).__init__(mgr, info, *args, **kwargs)
class FakeImageClient(ImageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeImageClient, self).__init__(identity, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeImageMemberManager(ImageMemberManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageMemberManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageTagManager(ImageTagManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageTagManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageManager(ImageManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageManager, self).__init__(api, *args, **kwargs)
self.plural_response_key = "images"
self.resource_class = FakeImage
self.id = utils.random_ascii()
class FakeIdentityService(Service):
def __init__(self, identity=None, *args, **kwargs):
self.identity = identity or FakeIdentity()
self.name = "fake"
self.prefix = ""
self.service_type = "fake"
self.clients = {}
self.endpoints = utils.DotDict()
class FakeEndpoint(Endpoint):
def __init__(self, ep_dict=None, service=None, region=None, identity=None):
if ep_dict is None:
ep_dict = {}
if identity is None:
identity = FakeIdentity()
if service is None:
service = FakeIdentityService(identity)
if region is None:
region = "fake_region"
super(FakeEndpoint, self).__init__(ep_dict, service, region, identity)
class FakeRaxIdentity(RaxIdentity):
pass
class FakeIdentity(BaseIdentity):
"""Class that returns canned authentication responses."""
def __init__(self, *args, **kwargs):
super(FakeIdentity, self).__init__(*args, **kwargs)
self._good_username = "fakeuser"
self._good_password = "fakeapikey"
self._default_region = random.choice(("DFW", "ORD"))
self.services = {"fake": FakeIdentityService(self)}
def authenticate(self, connect=False):
if ((self.username == self._good_username) and
(self.password == self._good_password)):
self._parse_response(self.fake_response())
self.authenticated = True
else:
self.authenticated = False
raise exc.AuthenticationFailed("No match for '%s'/'%s' "
"username/password" % (self.username, self.password))
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
self.token = token
self.tenant_id = tenant_id
self.tenant_name = tenant_name
self.authenticated = True
def get_token(self, force=False):
return self.token
def fake_response(self):
return fake_identity_response
fake_config_file = """[settings]
identity_type = rackspace
keyring_username =
region = FAKE
custom_user_agent = FAKE
http_debug =
"""
# This will handle both singular and plural responses.
fake_identity_user_response = {
"users": [{"name": "fake", "id": "fake"},
{"name": "faker", "id": "faker"}],
"user": {"name": "fake", "id": "fake"},
"roles": [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}
fake_identity_tenant_response = {"name": "fake", "id": "fake",
"description": "fake", "enabled": True}
fake_identity_tenants_response = {
"tenants": [
{"name": "fake", "id": "fake", "description": "fake",
"enabled": True},
{"name": "faker", "id": "faker", "description": "faker",
"enabled": True},
]}
fake_identity_tokens_response = {"access":
{'metadata': {u'is_admin': 0,
'roles': [u'asdfgh',
'sdfghj',
'dfghjk']},
'serviceCatalog': [{u'endpoints': [
{u'adminURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'id': 'dddddddddd',
'publicURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'internalURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'nova',
'type': 'compute'},
{u'endpoints': [{u'adminURL': 'http://10.0.0.0:35357/v2.0',
'id': 'qweqweqwe',
'internalURL': 'http://10.0.0.0:5000/v2.0',
'publicURL': 'http://10.0.0.0:5000/v2.0',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'keystone',
'type': 'identity'}],
'token': {u'expires': '1999-05-04T16:45:05Z',
'id': 'qweqweqwe',
'tenant': {u'description': 'admin Tenant',
'enabled': True,
'id': 'qweqweqwe',
'name': 'admin'}},
'user': {u'id': 'qweqweqwe',
'name': 'admin',
'roles': [{u'id': 'qweqweqwe', 'name': 'admin'},
{u'id': 'qweqweqwe', 'name': 'KeystoneAdmin'},
{u'id': 'qweqweqwe',
'name': 'KeystoneServiceAdmin'}],
'roles_links': [],
'username': 'admin'}}}
fake_identity_endpoints_response = {"access": {
"endpoints": ["fake", "faker", "fakest"]}}
fake_identity_response = {u'access':
{u'serviceCatalog': [
{u'endpoints': [{u'publicURL':
'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'},
{u'publicURL':
'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'}],
'name': 'cloudLoadBalancers',
'type': 'rax:load-balancer'},
{u'endpoints': [{u'internalURL':
'https://snet-aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.syd1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.syd1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFiles',
'type': 'object-store'},
{u'endpoints': [{u'publicURL':
'https://dfw.servers.api.rackspacecloud.com/v2/000000',
'region': 'DFW',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://dfw.servers.api.rackspacecloud.com/v2',
'versionList': 'https://dfw.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://ord.servers.api.rackspacecloud.com/v2/000000',
'region': 'ORD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://ord.servers.api.rackspacecloud.com/v2',
'versionList': 'https://ord.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://syd.servers.api.rackspacecloud.com/v2/000000',
'region': 'SYD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://syd.servers.api.rackspacecloud.com/v2',
'versionList': 'https://syd.servers.api.rackspacecloud.com/'}],
'name': 'cloudServersOpenStack',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://dns.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudDNS',
'type': 'rax:dns'},
{u'endpoints': [{u'publicURL':
'https://dfw.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'},
{u'publicURL':
'https://ord.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'}],
'name': 'cloudDatabases',
'type': 'rax:database'},
{u'endpoints': [{u'publicURL':
'https://servers.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000',
'versionId': '1.0',
'versionInfo': 'https://servers.api.rackspacecloud.com/v1.0',
'versionList': 'https://servers.api.rackspacecloud.com/'}],
'name': 'cloudServers',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn2.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFilesCDN',
'type': 'rax:object-cdn'},
{u'endpoints': [{u'publicURL':
'https://monitoring.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudMonitoring',
'type': 'rax:monitor'}],
u'token': {u'expires': '2222-02-22T22:22:22.000-02:00',
'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'tenant': {u'id': '000000', 'name': '000000'}},
u'user': {u'id': '123456',
'name': 'fakeuser',
'RAX-AUTH:defaultRegion': 'DFW',
'roles': [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}}}
class FakeIdentityResponse(FakeResponse):
status_code = 200
response_type = "auth"
responses = {"auth": fake_identity_response,
"users": fake_identity_user_response,
"tenant": fake_identity_tenant_response,
"tenants": fake_identity_tenants_response,
"tokens": fake_identity_tokens_response,
"endpoints": fake_identity_endpoints_response,
}
@property
def content(self):
return self.responses.get(self.response_type)
def json(self):
return self.content
def read(self):
return json.dumps(self.content)
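# Usage sketch (illustrative, not part of the original fixtures): FakeIdentity
# only accepts the canned "fakeuser"/"fakeapikey" pair, parsing the canned
# fake_identity_response on success and raising exc.AuthenticationFailed
# otherwise. Assumes BaseIdentity exposes username/password as attributes.
def _example_fake_identity():
    ident = FakeIdentity()
    ident.username = "fakeuser"
    ident.password = "fakeapikey"
    ident.authenticate()  # parses fake_identity_response
    assert ident.authenticated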
# ---------------------------------------------------------------------------
"""
Tests that the specified index column (a.k.a "index_col")
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
# TODO(1.4): Change me to xfails at release time
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
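# Quick illustration (not one of the fixture-driven tests below): index_col
# names the column that becomes the row index during parsing.
def _example_index_col():
    import pandas as pd
    data = "a,b,c\n1,2,3\n4,5,6"
    result = pd.read_csv(StringIO(data), index_col="a")
    expected = DataFrame({"b": [2, 5], "c": [3, 6]},
                         index=Index([1, 4], name="a"))
    tm.assert_frame_equal(result, expected)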
@pytest.mark.parametrize("with_header", [True, False])
def test_index_col_named(all_parsers, with_header):
parser = all_parsers
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
if with_header:
data = header + no_header
result = parser.read_csv(StringIO(data), index_col="ID")
expected = parser.read_csv(StringIO(data), header=0).set_index("ID")
tm.assert_frame_equal(result, expected)
else:
data = no_header
msg = "Index ID invalid"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), index_col="ID")
def test_index_col_named2(all_parsers):
parser = all_parsers
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
expected = DataFrame(
{"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},
index=Index(["hello", "world", "foo"], name="message"),
)
names = ["a", "b", "c", "d", "message"]
result = parser.read_csv(StringIO(data), names=names, index_col=["message"])
tm.assert_frame_equal(result, expected)
def test_index_col_is_true(all_parsers):
# see gh-9798
data = "a,b\n1,2"
parser = all_parsers
msg = "The value of index_col couldn't be 'True'"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), index_col=True)
@skip_pyarrow
def test_infer_index_col(all_parsers):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"index_col,kwargs",
[
(None, {"columns": ["x", "y", "z"]}),
(False, {"columns": ["x", "y", "z"]}),
(0, {"columns": ["y", "z"], "index": Index([], name="x")}),
(1, {"columns": ["x", "z"], "index": Index([], name="y")}),
("x", {"columns": ["y", "z"], "index": Index([], name="x")}),
("y", {"columns": ["x", "z"], "index": Index([], name="y")}),
(
[0, 1],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
},
),
(
["x", "y"],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
},
),
(
[1, 0],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
},
),
(
["y", "x"],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
},
),
],
)
def test_index_col_empty_data(all_parsers, index_col, kwargs):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=index_col)
expected = DataFrame(**kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_empty_with_index_col_false(all_parsers):
# see gh-10413
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame(columns=["x", "y"])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"index_names",
[
["", ""],
["foo", ""],
["", "bar"],
["foo", "bar"],
["NotReallyUnnamed", "Unnamed: 0"],
],
)
def test_multi_index_naming(all_parsers, index_names):
parser = all_parsers
# We don't want empty index names being replaced with "Unnamed: 0"
data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])
result = parser.read_csv(StringIO(data), index_col=[0, 1])
expected = DataFrame(
{"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])
)
expected.index.names = [name if name else None for name in index_names]
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_multi_index_naming_not_all_at_beginning(all_parsers):
parser = all_parsers
data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"
result = parser.read_csv(StringIO(data), index_col=[0, 2])
expected = DataFrame(
{"Unnamed: 2": ["c", "d", "c", "d"]},
index=MultiIndex(
levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]
),
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_no_multi_index_level_names_empty(all_parsers):
# GH 10984
parser = all_parsers
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
expected = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
with tm.ensure_clean() as path:
expected.to_csv(path)
result = parser.read_csv(path, index_col=[0, 1, 2])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_with_index_col(all_parsers):
# GH 33476
parser = all_parsers
data = """
I11,A,A
I12,B,B
I2,1,3
"""
midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
idx = Index(["I2"])
expected = DataFrame([[1, 3]], index=idx, columns=midx)
result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1])
tm.assert_frame_equal(result, expected)
col_idx = Index(["A", "A.1"])
idx = Index(["I12", "I2"], name="I11")
expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx)
result = parser.read_csv(StringIO(data), index_col="I11", header=0)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_index_col_large_csv(all_parsers):
# https://github.com/pandas-dev/pandas/issues/37094
parser = all_parsers
N = 1_000_001
df = DataFrame({"a": range(N), "b": np.random.randn(N)})
with tm.ensure_clean() as path:
df.to_csv(path, index=False)
result = parser.read_csv(path, index_col=[0])
tm.assert_frame_equal(result, df.set_index("a"))
@skip_pyarrow
def test_index_col_multiindex_columns_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(
StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0
)
expected = DataFrame(
[],
columns=MultiIndex.from_arrays(
[["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
),
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_index_col_header_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], index_col=0)
expected = DataFrame(
[],
columns=["a1", "a2"],
index=Index([], name="a0"),
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_multiindex_columns_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1])
expected = DataFrame(
[], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]])
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_multiindex_columns_index_col_with_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(
StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0
)
expected = DataFrame(
[["data", "data"]],
columns=MultiIndex.from_arrays(
[["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
),
index=Index(["data"]),
)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------------
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from google.api_core.iam import _DICT_ACCESS_MSG, InvalidOperationException
class TestPolicy:
@staticmethod
def _get_target_class():
from google.api_core.iam import Policy
return Policy
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
empty = frozenset()
policy = self._make_one()
assert policy.etag is None
assert policy.version is None
assert policy.owners == empty
assert policy.editors == empty
assert policy.viewers == empty
assert len(policy) == 0
assert dict(policy) == {}
def test_ctor_explicit(self):
VERSION = 1
ETAG = "ETAG"
empty = frozenset()
policy = self._make_one(ETAG, VERSION)
assert policy.etag == ETAG
assert policy.version == VERSION
assert policy.owners == empty
assert policy.editors == empty
assert policy.viewers == empty
assert len(policy) == 0
assert dict(policy) == {}
def test___getitem___miss(self):
policy = self._make_one()
assert policy["nonesuch"] == set()
def test___getitem___version3(self):
policy = self._make_one("DEADBEEF", 3)
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
policy["role"]
def test___getitem___with_conditions(self):
USER = "user:phred@example.com"
CONDITION = {"expression": "2 > 1"}
policy = self._make_one("DEADBEEF", 1)
policy.bindings = [
{"role": "role/reader", "members": [USER], "condition": CONDITION}
]
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
policy["role/reader"]
def test___setitem__(self):
USER = "user:phred@example.com"
PRINCIPALS = set([USER])
policy = self._make_one()
policy["rolename"] = [USER]
assert policy["rolename"] == PRINCIPALS
assert len(policy) == 1
assert dict(policy) == {"rolename": PRINCIPALS}
def test___setitem___overwrite(self):
GROUP = "group:test@group.com"
USER = "user:phred@example.com"
ALL_USERS = "allUsers"
MEMBERS = set([ALL_USERS])
GROUPS = set([GROUP])
policy = self._make_one()
policy["first"] = [GROUP]
policy["second"] = [USER]
policy["second"] = [ALL_USERS]
assert policy["second"] == MEMBERS
assert len(policy) == 2
assert dict(policy) == {"first": GROUPS, "second": MEMBERS}
def test___setitem___version3(self):
policy = self._make_one("DEADBEEF", 3)
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
policy["role/reader"] = ["user:phred@example.com"]
def test___setitem___with_conditions(self):
USER = "user:phred@example.com"
CONDITION = {"expression": "2 > 1"}
policy = self._make_one("DEADBEEF", 1)
policy.bindings = [
{"role": "role/reader", "members": set([USER]), "condition": CONDITION}
]
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
policy["role/reader"] = ["user:phred@example.com"]
def test___delitem___hit(self):
policy = self._make_one()
policy.bindings = [
{"role": "to/keep", "members": set(["phred@example.com"])},
{"role": "to/remove", "members": set(["phred@example.com"])}
]
del policy["to/remove"]
assert len(policy) == 1
assert dict(policy) == {"to/keep": set(["phred@example.com"])}
def test___delitem___miss(self):
policy = self._make_one()
with pytest.raises(KeyError):
del policy["nonesuch"]
def test___delitem___version3(self):
policy = self._make_one("DEADBEEF", 3)
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
del policy["role/reader"]
def test___delitem___with_conditions(self):
USER = "user:phred@example.com"
CONDITION = {"expression": "2 > 1"}
policy = self._make_one("DEADBEEF", 1)
policy.bindings = [
{"role": "role/reader", "members": set([USER]), "condition": CONDITION}
]
with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
del policy["role/reader"]
def test_bindings_property(self):
USER = "user:phred@example.com"
CONDITION = {"expression": "2 > 1"}
policy = self._make_one()
BINDINGS = [{"role": "role/reader", "members": set([USER]), "condition": CONDITION}]
policy.bindings = BINDINGS
assert policy.bindings == BINDINGS
def test_owners_getter(self):
from google.api_core.iam import OWNER_ROLE
MEMBER = "user:phred@example.com"
expected = frozenset([MEMBER])
policy = self._make_one()
policy[OWNER_ROLE] = [MEMBER]
assert policy.owners == expected
def test_owners_setter(self):
import warnings
from google.api_core.iam import OWNER_ROLE
MEMBER = "user:phred@example.com"
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
policy.owners = [MEMBER]
(warning,) = warned
assert warning.category is DeprecationWarning
assert policy[OWNER_ROLE] == expected
def test_editors_getter(self):
from google.api_core.iam import EDITOR_ROLE
MEMBER = "user:phred@example.com"
expected = frozenset([MEMBER])
policy = self._make_one()
policy[EDITOR_ROLE] = [MEMBER]
assert policy.editors == expected
def test_editors_setter(self):
import warnings
from google.api_core.iam import EDITOR_ROLE
MEMBER = "user:phred@example.com"
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
policy.editors = [MEMBER]
(warning,) = warned
assert warning.category is DeprecationWarning
assert policy[EDITOR_ROLE] == expected
def test_viewers_getter(self):
from google.api_core.iam import VIEWER_ROLE
MEMBER = "user:phred@example.com"
expected = frozenset([MEMBER])
policy = self._make_one()
policy[VIEWER_ROLE] = [MEMBER]
assert policy.viewers == expected
def test_viewers_setter(self):
import warnings
from google.api_core.iam import VIEWER_ROLE
MEMBER = "user:phred@example.com"
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
policy.viewers = [MEMBER]
(warning,) = warned
assert warning.category is DeprecationWarning
assert policy[VIEWER_ROLE] == expected
def test_user(self):
import warnings
EMAIL = "phred@example.com"
MEMBER = "user:%s" % (EMAIL,)
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.user(EMAIL) == MEMBER
(warning,) = warned
assert warning.category is DeprecationWarning
def test_service_account(self):
import warnings
EMAIL = "phred@example.com"
MEMBER = "serviceAccount:%s" % (EMAIL,)
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.service_account(EMAIL) == MEMBER
(warning,) = warned
assert warning.category is DeprecationWarning
def test_group(self):
import warnings
EMAIL = "phred@example.com"
MEMBER = "group:%s" % (EMAIL,)
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.group(EMAIL) == MEMBER
(warning,) = warned
assert warning.category is DeprecationWarning
def test_domain(self):
import warnings
DOMAIN = "example.com"
MEMBER = "domain:%s" % (DOMAIN,)
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.domain(DOMAIN) == MEMBER
(warning,) = warned
assert warning.category is DeprecationWarning
def test_all_users(self):
import warnings
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.all_users() == "allUsers"
(warning,) = warned
assert warning.category is DeprecationWarning
def test_authenticated_users(self):
import warnings
policy = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert policy.authenticated_users() == "allAuthenticatedUsers"
(warning,) = warned
assert warning.category is DeprecationWarning
def test_from_api_repr_only_etag(self):
empty = frozenset()
RESOURCE = {"etag": "ACAB"}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
assert policy.etag == "ACAB"
assert policy.version is None
assert policy.owners == empty
assert policy.editors == empty
assert policy.viewers == empty
assert dict(policy) == {}
def test_from_api_repr_complete(self):
from google.api_core.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE
OWNER1 = "group:cloud-logs@google.com"
OWNER2 = "user:phred@example.com"
EDITOR1 = "domain:google.com"
EDITOR2 = "user:phred@example.com"
VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
VIEWER2 = "user:phred@example.com"
RESOURCE = {
"etag": "DEADBEEF",
"version": 1,
"bindings": [
{"role": OWNER_ROLE, "members": [OWNER1, OWNER2]},
{"role": EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
{"role": VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
assert policy.etag == "DEADBEEF"
assert policy.version == 1
assert policy.owners == frozenset([OWNER1, OWNER2])
assert policy.editors == frozenset([EDITOR1, EDITOR2])
assert policy.viewers == frozenset([VIEWER1, VIEWER2])
assert dict(policy) == {
OWNER_ROLE: set([OWNER1, OWNER2]),
EDITOR_ROLE: set([EDITOR1, EDITOR2]),
VIEWER_ROLE: set([VIEWER1, VIEWER2]),
}
assert policy.bindings == [
{"role": OWNER_ROLE, "members": set([OWNER1, OWNER2])},
{"role": EDITOR_ROLE, "members": set([EDITOR1, EDITOR2])},
{"role": VIEWER_ROLE, "members": set([VIEWER1, VIEWER2])},
]
def test_from_api_repr_unknown_role(self):
USER = "user:phred@example.com"
GROUP = "group:cloud-logs@google.com"
RESOURCE = {
"etag": "DEADBEEF",
"version": 1,
"bindings": [{"role": "unknown", "members": [USER, GROUP]}],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
assert policy.etag == "DEADBEEF"
assert policy.version == 1
assert dict(policy), {"unknown": set([GROUP == USER])}
def test_to_api_repr_defaults(self):
policy = self._make_one()
assert policy.to_api_repr() == {}
def test_to_api_repr_only_etag(self):
policy = self._make_one("DEADBEEF")
assert policy.to_api_repr() == {"etag": "DEADBEEF"}
def test_to_api_repr_binding_wo_members(self):
policy = self._make_one()
policy["empty"] = []
assert policy.to_api_repr() == {}
def test_to_api_repr_binding_w_duplicates(self):
import warnings
from google.api_core.iam import OWNER_ROLE
OWNER = "group:cloud-logs@google.com"
policy = self._make_one()
with warnings.catch_warnings(record=True):
policy.owners = [OWNER, OWNER]
assert policy.to_api_repr() == {
"bindings": [{"role": OWNER_ROLE, "members": [OWNER]}]
}
def test_to_api_repr_full(self):
import operator
from google.api_core.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE
OWNER1 = "group:cloud-logs@google.com"
OWNER2 = "user:phred@example.com"
EDITOR1 = "domain:google.com"
EDITOR2 = "user:phred@example.com"
VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
VIEWER2 = "user:phred@example.com"
CONDITION = {
"title": "title",
"description": "description",
"expression": "true"
}
BINDINGS = [
{"role": OWNER_ROLE, "members": [OWNER1, OWNER2]},
{"role": EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
{"role": VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
{"role": VIEWER_ROLE, "members": [VIEWER1, VIEWER2], "condition": CONDITION},
]
policy = self._make_one("DEADBEEF", 1)
policy.bindings = BINDINGS
resource = policy.to_api_repr()
assert resource["etag"] == "DEADBEEF"
assert resource["version"] == 1
key = operator.itemgetter("role")
assert sorted(resource["bindings"], key=key) == sorted(BINDINGS, key=key)
# ---------------------------------------------------------------------------
import random
from koon.input import Mouse
from tiles import *
from pickups import *
import ai
import world
from event import Event
class GroundControl:
"""The main control center that interacts with the model
"""
def __init__( self, playfield ):
self.playfield = playfield
self.controllers = []
self.prediction_trees = []
self.contains_ai = False
def game_tick( self, indev ):
# for debugging
if hasattr(self, "views"):
self.views[0].game_tick( indev )
# check each goldcar's keys
for controller in self.controllers:
controller.do_tick( indev )
# check mouse switching
if indev.mouse.went_down( Mouse.LEFT ):
mouse_x, mouse_y = pygame.mouse.get_pos()
X_OFFSET, Y_OFFSET = 20, 300
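# Invert the isometric screen->tile projection (assumption: the board uses
# 64x32-pixel diamond tiles shifted by X_OFFSET/Y_OFFSET); Python 2 integer
# division keeps the result on the tile grid.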
tile_x = (-mouse_y + (mouse_x+32)/2 - X_OFFSET/2 + Y_OFFSET) / 32
tile_y = (mouse_y + (mouse_x-32)/2 - X_OFFSET/2 - Y_OFFSET) / 32
tile = self.playfield.level.get_tile( tile_x, tile_y )
if tile is not None and tile.is_switch():
i = 0
had_it = False
for goldcar in self.playfield.goldcars:
if tile is goldcar.switch:
if i == 0:
tile.switch_it(goldcar.switch_dir)
Event.switch_trail()
had_it = True
else:
pass # Only free and goldcar1 locked tiles can be switched!
had_it = True
i += 1
if not had_it:
tile.switch_it()
if self.contains_ai:
self._update_prediction_trees()
def add_controllers( self, controllers ):
"""Add the controllers to this ground control
controllers - a list of controllers (in sequence with playfield goldcars)
"""
i = 0
for controller in controllers:
controller.set_goldcar( self.playfield.goldcars[i] )
controller.set_ground_control( self )
self.controllers.append( controller )
prediction_tree = ai.PredictionTree(256*2, 256/4)
self.prediction_trees.append( prediction_tree )
controller.prediction_tree = prediction_tree
if not isinstance( controller, HumanController ):
self.contains_ai = True
i += 1
def _update_prediction_trees( self ):
i = 0
for prediction_tree in self.prediction_trees:
car = self.playfield.goldcars[i]
if car.pos is not None:
car_node = ai.AiNode_create( GoldcarNodeState(car), TrailNode(car.pos.tile, car.pos.get_in_direction()) )
car_node.set_playfield( self.playfield )
car_node.set_other_trees( self._get_other_prediction_trees(car) )
if prediction_tree.root_node is None:
prediction_tree.set_root( ai.Node(car_node) )
elif prediction_tree.root_node.smartnode.nequals(car_node):
prediction_tree.set_root( ai.Node(car_node) )
prediction_tree.root_node.smartnode.set_playfield( self.playfield )
prediction_tree.root_node.smartnode.carstate = GoldcarNodeState(car)
prediction_tree.update()
## if prediction_tree.root_node is not None:
## print prediction_tree.root_node._best_score,
i += 1
def _get_other_prediction_trees( self, car ):
    """Return all prediction trees that are not from car.
    """
    return [tree for tree, car_it
            in zip( self.prediction_trees, self.playfield.goldcars )
            if car_it is not car]
def get_tree( self, car ):
    """Return the prediction tree that belongs to car, or None."""
    for tree, car_it in zip( self.prediction_trees, self.playfield.goldcars ):
        if car_it is car:
            return tree
    return None
class Controller:
"""Controller of a goldcar
public members:
- prediction_tree: the prediction tree of the goldcar
"""
def __init__( self, goldcar ):
"""goldcar can be None"""
self.goldcar = goldcar
self.prediction_tree = None
def set_goldcar( self, goldcar ):
self.goldcar = goldcar
def do_tick( self, indev ):
pass
def set_ground_control( self, ground_control ):
self.ground_control = ground_control
class HumanController( Controller ):
def __init__( self, goldcar, action_button ):
"""goldcar can be None"""
Controller.__init__( self, goldcar )
self.action_button = action_button
def do_tick( self, indev ):
if self.action_button.dev.went_down( self.action_button.button ):
self.goldcar.keydown()
class GoldcarNodeState:
def __init__( self, goldcar ):
self.goldcar = goldcar
self.collectible = goldcar.collectible
self.modifier = goldcar.modifier
class AiController( Controller ):
def __init__( self, goldcar, iq = 1.0 ):
"""goldcar can be None"""
Controller.__init__( self, goldcar )
self.prev_switch = None
self.best_dir = None
self.iq = iq
def do_tick( self, indev ):
if self.goldcar.switch is not None:
switch_node = self.find_switch_node()
# find best direction for switch
if switch_node is not None:
best_childs = switch_node.get_best_childs()
if best_childs is not None and len(best_childs) == 1:
self.best_dir = best_childs[0].smartnode.trailnode.in_dir.get_opposite()
else:
self.best_dir = None
else:
self.best_dir = None
self.handle_switching()
# TODO: improve algorithm here!
def find_switch_node( self ):
node_it = self.prediction_tree.root_node
while node_it is not None and \
node_it.smartnode.nequals(ai.AiNode_create( None, TrailNode(self.goldcar.switch, self.goldcar.switch_dir) ) ):
children = node_it.get_childeren()  # (method name spelled this way in the ai module)
if children is not None and len( children ) > 0:
best_childs = node_it.get_best_childs()
if len(best_childs) > 0:
node_it = best_childs[0]
## if node_it._best_score < 0:
## print node_it._best_score
else:
node_it = None
else:
node_it = None
return node_it
def handle_switching( self ):
if random.random() < self.iq: # Smart move
if self.best_dir is None:
if random.randint(0,16) == 0:
self.goldcar.keydown()
elif self.best_dir not in [self.goldcar.switch.trail.get_out_direction(), \
self.goldcar.switch.trail.get_in_direction()]:
if random.random() < self.iq:
self.goldcar.keydown()
else: # Stupid move
if random.randint(0,32) == 0:
self.goldcar.keydown()
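# Wiring sketch (illustrative, not from the original module): GroundControl
# pairs each controller with the goldcar at the same index in
# playfield.goldcars, so this assumes the playfield has two goldcars. An iq
# below 1.0 lets the AI occasionally take the "stupid move" branch of
# handle_switching().
def _example_ground_control( playfield, action_button ):
    ground_control = GroundControl( playfield )
    ground_control.add_controllers( [HumanController( None, action_button ),
            AiController( None, iq = 0.75 )] )
    return ground_control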
# ---------------------------------------------------------------------------
"""Tests for letsencrypt.cli."""
from __future__ import print_function
import argparse
import functools
import itertools
import os
import shutil
import traceback
import tempfile
import unittest
import mock
import six
from six.moves import reload_module # pylint: disable=import-error
from acme import jose
from letsencrypt import account
from letsencrypt import cli
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt import main
from letsencrypt import renewal
from letsencrypt import storage
from letsencrypt.plugins import disco
from letsencrypt.plugins import manual
from letsencrypt.tests import storage_test
from letsencrypt.tests import test_util
CERT = test_util.vector_path('cert.pem')
CSR = test_util.vector_path('csr.der')
KEY = test_util.vector_path('rsa256_key.pem')
class CLITest(unittest.TestCase): # pylint: disable=too-many-public-methods
"""Tests for different commands."""
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.config_dir = os.path.join(self.tmp_dir, 'config')
self.work_dir = os.path.join(self.tmp_dir, 'work')
self.logs_dir = os.path.join(self.tmp_dir, 'logs')
self.standard_args = ['--config-dir', self.config_dir,
'--work-dir', self.work_dir,
'--logs-dir', self.logs_dir, '--text']
def tearDown(self):
shutil.rmtree(self.tmp_dir)
# Reset globals in cli
# pylint: disable=protected-access
cli._parser = cli.set_by_cli.detector = None
def _call(self, args, stdout=None):
"Run the cli with output streams and actual client mocked out"
with mock.patch('letsencrypt.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
def _call_no_clientmock(self, args, stdout=None):
"Run the client with output streams mocked out"
args = self.standard_args + args
toy_stdout = stdout if stdout else six.StringIO()
with mock.patch('letsencrypt.main.sys.stdout', new=toy_stdout):
with mock.patch('letsencrypt.main.sys.stderr') as stderr:
ret = main.main(args[:]) # NOTE: parser can alter its args!
return ret, toy_stdout, stderr
def test_no_flags(self):
with mock.patch('letsencrypt.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
def _help_output(self, args):
"Run a command, and return the ouput string for scrutiny"
output = six.StringIO()
self.assertRaises(SystemExit, self._call, args, output)
out = output.getvalue()
return out
def test_help(self):
self.assertRaises(SystemExit, self._call, ['--help'])
self.assertRaises(SystemExit, self._call, ['--help', 'all'])
plugins = disco.PluginsRegistry.find_all()
out = self._help_output(['--help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a cert is deployed" in out)
self.assertTrue("--manual-test-mode" in out)
out = self._help_output(['-h', 'nginx'])
if "nginx" in plugins:
# may be false while building distributions without plugins
self.assertTrue("--nginx-ctl" in out)
self.assertTrue("--manual-test-mode" not in out)
self.assertTrue("--checkpoints" not in out)
out = self._help_output(['-h'])
self.assertTrue("letsencrypt-auto" not in out) # test cli.cli_command
if "nginx" in plugins:
self.assertTrue("Use the Nginx plugin" in out)
else:
self.assertTrue("(nginx support is experimental" in out)
out = self._help_output(['--help', 'plugins'])
self.assertTrue("--manual-test-mode" not in out)
self.assertTrue("--prepare" in out)
self.assertTrue("Plugin options" in out)
out = self._help_output(['--help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['--help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['-h', 'config_changes'])
self.assertTrue("--cert-path" not in out)
self.assertTrue("--key-path" not in out)
out = self._help_output(['-h'])
self.assertTrue(cli.usage_strings(plugins)[0] in out)
def _cli_missing_flag(self, args, message):
"Ensure that a particular error raises a missing cli flag error containing message"
exc = None
try:
    with mock.patch('letsencrypt.main.sys.stderr'):
        main.main(self.standard_args + args[:])  # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as err:
    # bind to a separate name: an "as exc" target is unset after the block on Py3
    self.assertTrue(message in str(err))
    exc = err
self.assertTrue(exc is not None)
def test_noninteractive(self):
args = ['-n', 'certonly']
self._cli_missing_flag(args, "specify a plugin")
args.extend(['--standalone', '-d', 'eg.is'])
self._cli_missing_flag(args, "register before running")
with mock.patch('letsencrypt.main._auth_from_domains'):
with mock.patch('letsencrypt.main.client.acme_from_config_key'):
args.extend(['--email', 'io@io.is'])
self._cli_missing_flag(args, "--agree-tos")
@mock.patch('letsencrypt.main.client.acme_client.Client')
@mock.patch('letsencrypt.main._determine_account')
@mock.patch('letsencrypt.main.client.Client.obtain_and_enroll_certificate')
@mock.patch('letsencrypt.main._auth_from_domains')
def test_user_agent(self, afd, _obt, det, _client):
# Normally the client is totally mocked out, but here we need more
# arguments to automate it...
args = ["--standalone", "certonly", "-m", "none@none.com",
"-d", "example.com", '--agree-tos'] + self.standard_args
det.return_value = mock.MagicMock(), None
afd.return_value = mock.MagicMock(), "newcert"
with mock.patch('letsencrypt.main.client.acme_client.ClientNetwork') as acme_net:
self._call_no_clientmock(args)
os_ver = " ".join(le_util.get_os_info())
ua = acme_net.call_args[1]["user_agent"]
self.assertTrue(os_ver in ua)
import platform
plat = platform.platform()
if "linux" in plat.lower():
self.assertTrue(platform.linux_distribution()[0] in ua)
with mock.patch('letsencrypt.main.client.acme_client.ClientNetwork') as acme_net:
ua = "bandersnatch"
args += ["--user-agent", ua]
self._call_no_clientmock(args)
acme_net.assert_called_once_with(mock.ANY, verify_ssl=True, user_agent=ua)
def test_install_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('letsencrypt.main.install') as mock_install:
self._call(['install', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
args = mock_install.call_args[0][0]
self.assertEqual(args.cert_path, os.path.abspath(cert))
self.assertEqual(args.key_path, os.path.abspath(key))
self.assertEqual(args.chain_path, os.path.abspath(chain))
self.assertEqual(args.fullchain_path, os.path.abspath(fullchain))
@mock.patch('letsencrypt.main.plug_sel.record_chosen_plugins')
@mock.patch('letsencrypt.main.plug_sel.pick_installer')
def test_installer_selection(self, mock_pick_installer, _rec):
self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
'--key-path', 'key', '--chain-path', 'chain'])
self.assertEqual(mock_pick_installer.call_count, 1)
@mock.patch('letsencrypt.le_util.exe_exists')
def test_configurator_selection(self, mock_exe_exists):
mock_exe_exists.return_value = True
real_plugins = disco.PluginsRegistry.find_all()
args = ['--apache', '--authenticator', 'standalone']
# This needed two calls to find_all(), which we're avoiding for now
# because of possible side effects:
# https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855
#with mock.patch('letsencrypt.cli.plugins_testable') as plugins:
# plugins.return_value = {"apache": True, "nginx": True}
# ret, _, _, _ = self._call(args)
# self.assertTrue("Too many flags setting" in ret)
args = ["install", "--nginx", "--cert-path", "/tmp/blah", "--key-path", "/tmp/blah",
"--nginx-server-root", "/nonexistent/thing", "-d",
"example.com", "--debug"]
if "nginx" in real_plugins:
# Sending nginx a non-existent conf dir will simulate misconfiguration
# (we can only do that if letsencrypt-nginx is actually present)
ret, _, _, _ = self._call(args)
self.assertTrue("The nginx plugin is not working" in ret)
self.assertTrue("MisconfigurationError" in ret)
self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
with mock.patch("letsencrypt.main._init_le_client") as mock_init:
with mock.patch("letsencrypt.main._auth_from_domains") as mock_afd:
mock_afd.return_value = (mock.MagicMock(), mock.MagicMock())
self._call(["certonly", "--manual", "-d", "foo.bar"])
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
with mock.patch('letsencrypt.main.obtain_cert') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
def test_rollback(self):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
def test_config_changes(self):
_, _, _, client = self._call(['config_changes'])
self.assertEqual(1, client.view_config_changes.call_count)
def test_plugins(self):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in range(len(flags)))):
self._call(['plugins'] + list(args))
@mock.patch('letsencrypt.main.plugins_disco')
@mock.patch('letsencrypt.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('letsencrypt.main.plugins_disco')
@mock.patch('letsencrypt.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_init(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins', '--init'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
self.assertEqual(stdout.getvalue().strip(), str(verified))
@mock.patch('letsencrypt.main.plugins_disco')
@mock.patch('letsencrypt.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_prepare(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins', '--init', '--prepare'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
verified.prepare.assert_called_once_with()
verified.available.assert_called_once_with()
available = verified.available()
self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('letsencrypt.main.obtain_cert') as mock_obtaincert:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
config, unused_plugins = mock_obtaincert.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in str(e))
def test_check_config_sanity_domain(self):
# Punycode
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'this.is.xn--ls8h.tld'])
# FQDN
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'comma,gotwrong.tld'])
# FQDN 2
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'illegal.character=.tld'])
# Wildcard
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '*.wildcard.tld'])
# Bare IP address (this is actually a different error message now)
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
args = ["--csr", CSR, "--allow-subset-of-names"]
self.assertRaises(errors.Error, self._call, args)
def test_run_with_csr(self):
# This is an error because you can only use --csr with certonly
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def _get_argument_parser(self):
plugins = disco.PluginsRegistry.find_all()
return functools.partial(cli.prepare_and_parse_args, plugins)
def test_parse_domains(self):
parse = self._get_argument_parser()
short_args = ['-d', 'example.com']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['example.com'])
short_args = ['-d', 'trailing.period.com.']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
short_args = ['-d', 'example.com,another.net,third.org,example.com']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net',
'third.org'])
long_args = ['--domains', 'example.com']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['example.com'])
long_args = ['--domains', 'trailing.period.com.']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
long_args = ['--domains', 'example.com,another.net,example.com']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net'])
def test_server_flag(self):
parse = self._get_argument_parser()
namespace = parse('--server example.com'.split())
self.assertEqual(namespace.server, 'example.com')
def _check_server_conflict_message(self, parser_args, conflicting_args):
parse = self._get_argument_parser()
try:
parse(parser_args)
self.fail( # pragma: no cover
"The following flags didn't conflict with "
'--server: {0}'.format(', '.join(conflicting_args)))
except errors.Error as error:
self.assertTrue('--server' in str(error))
for arg in conflicting_args:
    self.assertTrue(arg in str(error))
def test_staging_flag(self):
parse = self._get_argument_parser()
short_args = ['--staging']
namespace = parse(short_args)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
short_args += '--server example.com'.split()
self._check_server_conflict_message(short_args, ['--staging'])
def _assert_dry_run_flag_worked(self, namespace, existing_account):
self.assertTrue(namespace.dry_run)
self.assertTrue(namespace.break_my_certs)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
if existing_account:
self.assertTrue(namespace.tos)
self.assertTrue(namespace.register_unsafely_without_email)
else:
self.assertFalse(namespace.tos)
self.assertFalse(namespace.register_unsafely_without_email)
def test_dry_run_flag(self):
parse = self._get_argument_parser()
config_dir = tempfile.mkdtemp()
short_args = '--dry-run --config-dir {0}'.format(config_dir).split()
self.assertRaises(errors.Error, parse, short_args)
self._assert_dry_run_flag_worked(
parse(short_args + ['auth']), False)
self._assert_dry_run_flag_worked(
parse(short_args + ['certonly']), False)
self._assert_dry_run_flag_worked(
parse(short_args + ['renew']), False)
account_dir = os.path.join(config_dir, constants.ACCOUNTS_DIR)
os.mkdir(account_dir)
os.mkdir(os.path.join(account_dir, 'fake_account_dir'))
self._assert_dry_run_flag_worked(parse(short_args + ['auth']), True)
self._assert_dry_run_flag_worked(parse(short_args + ['renew']), True)
short_args += ['certonly']
self._assert_dry_run_flag_worked(parse(short_args), True)
short_args += '--server example.com'.split()
conflicts = ['--dry-run']
self._check_server_conflict_message(short_args, conflicts)
short_args += ['--staging']
conflicts += ['--staging']
self._check_server_conflict_message(short_args, conflicts)
def _certonly_new_request_common(self, mock_client, args=None):
with mock.patch('letsencrypt.main._treat_as_renewal') as mock_renewal:
mock_renewal.return_value = ("newcert", None)
with mock.patch('letsencrypt.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
if args is None:
args = []
args += '-d foo.bar -a standalone certonly'.split()
self._call(args)
@mock.patch('letsencrypt.main.zope.component.getUtility')
def test_certonly_dry_run_new_request_success(self, mock_get_utility):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = None
self._certonly_new_request_common(mock_client, ['--dry-run'])
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
# Asserts we don't suggest donating after a successful dry run
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch('letsencrypt.crypto_util.notAfter')
@mock.patch('letsencrypt.main.zope.component.getUtility')
def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter):
cert_path = '/etc/letsencrypt/live/foo.bar'
date = '1970-01-01'
mock_notAfter().date.return_value = date
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path)
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
self._certonly_new_request_common(mock_client)
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue(cert_path in cert_msg)
self.assertTrue(date in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
def test_certonly_new_request_failure(self):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False):
# pylint: disable=too-many-locals,too-many-arguments
cert_path = 'letsencrypt/tests/testdata/cert.pem'
chain_path = '/etc/letsencrypt/live/foo.bar/fullchain.pem'
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = None
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
try:
with mock.patch('letsencrypt.main._find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('letsencrypt.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
get_utility_path = 'letsencrypt.main.zope.component.getUtility'
with mock.patch(get_utility_path) as mock_get_utility:
with mock.patch('letsencrypt.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Fake fake"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('letsencrypt.main.renewal.crypto_util'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'])
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
def test_certonly_renewal(self):
lineage, get_utility, _ = self._test_renewal_common(True, [])
self.assertEqual(lineage.save_successor.call_count, 1)
lineage.update_all_links_to.assert_called_once_with(
lineage.latest_common_version())
cert_msg = get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
def test_certonly_renewal_triggers(self):
# --dry-run should force renewal
_, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
log_out="simulating renewal")
self.assertEqual(get_utility().add_message.call_count, 1)
self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
self.assertEqual(get_utility().add_message.call_count, 1)
self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
log_out="not yet due", should_renew=False)
def _dump_log(self):
with open(os.path.join(self.logs_dir, "letsencrypt.log")) as lf:
print("Logs:")
print(lf.read())
def _make_test_renewal_conf(self, testfile):
with open(test_util.vector_path(testfile)) as src:
# put the correct path for cert.pem, chain.pem etc in the renewal conf
renewal_conf = src.read().replace("MAGICDIR", test_util.vector_path())
rd = os.path.join(self.config_dir, "renewal")
if not os.path.exists(rd):
os.makedirs(rd)
rc = os.path.join(rd, "sample-renewal.conf")
with open(rc, "w") as dest:
dest.write(renewal_conf)
return rc
def test_renew_verb(self):
self._make_test_renewal_conf('sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_quiet_renew(self):
self._make_test_renewal_conf('sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertEqual("", out)
@mock.patch("letsencrypt.cli.set_by_cli")
def test_ancient_webroot_renewal_conf(self, mock_set_by_cli):
mock_set_by_cli.return_value = False
rc_path = self._make_test_renewal_conf('sample-renewal-ancient.conf')
args = mock.MagicMock(account=None, email=None, webroot_path=None)
config = configuration.NamespaceConfig(args)
lineage = storage.RenewableCert(rc_path,
configuration.RenewerConfiguration(config))
renewalparams = lineage.configuration["renewalparams"]
# pylint: disable=protected-access
renewal._restore_webroot_config(config, renewalparams)
self.assertEqual(config.webroot_path, ["/var/www/"])
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config_dir, 'renewal')
if not os.path.exists(rd):
os.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def _make_dummy_renewal_config(self):
renewer_configs_dir = os.path.join(self.config_dir, 'renewal')
os.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
def _test_renew_common(self, renewalparams=None, names=None,
assert_oc_called=None, **kwargs):
self._make_dummy_renewal_config()
with mock.patch('letsencrypt.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somepath/fullchain.pem"
if renewalparams is not None:
mock_lineage.configuration = {'renewalparams': renewalparams}
if names is not None:
mock_lineage.names.return_value = names
mock_rc.return_value = mock_lineage
with mock.patch('letsencrypt.main.obtain_cert') as mock_obtain_cert:
kwargs.setdefault('args', ['renew'])
self._test_renewal_common(True, None, should_renew=False, **kwargs)
if assert_oc_called is not None:
if assert_oc_called:
self.assertTrue(mock_obtain_cert.called)
else:
self.assertFalse(mock_obtain_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['*.example.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
def test_renew_with_configurator(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_reconstitute_error(self):
# pylint: disable=protected-access
with mock.patch('letsencrypt.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_obtain_cert_error(self):
self._make_dummy_renewal_config()
with mock.patch('letsencrypt.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somewhere/fullchain.pem"
mock_rc.return_value = mock_lineage
mock_lineage.configuration = {
'renewalparams': {'authenticator': 'webroot'}}
with mock.patch('letsencrypt.main.obtain_cert') as mock_obtain_cert:
mock_obtain_cert.side_effect = Exception
self._test_renewal_common(True, None, error_expected=True,
args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
@mock.patch('letsencrypt.main.zope.component.getUtility')
@mock.patch('letsencrypt.main._treat_as_renewal')
@mock.patch('letsencrypt.main._init_le_client')
def test_certonly_reinstall(self, mock_init, mock_renewal, mock_get_utility):
mock_renewal.return_value = ('reinstall', mock.MagicMock())
mock_init.return_value = mock_client = mock.MagicMock()
self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
self.assertFalse(mock_client.obtain_certificate.called)
self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
self.assertEqual(mock_get_utility().add_message.call_count, 0)
#self.assertTrue('donate' not in mock_get_utility().add_message.call_args[0][0])
def _test_certonly_csr_common(self, extra_args=None):
certr = 'certr'
chain = 'chain'
mock_client = mock.MagicMock()
mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
cert_path = '/etc/letsencrypt/live/example.com/cert.pem'
mock_client.save_certificate.return_value = cert_path, None, None
with mock.patch('letsencrypt.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
get_utility_path = 'letsencrypt.main.zope.component.getUtility'
with mock.patch(get_utility_path) as mock_get_utility:
chain_path = '/etc/letsencrypt/live/example.com/chain.pem'
full_path = '/etc/letsencrypt/live/example.com/fullchain.pem'
args = ('-a standalone certonly --csr {0} --cert-path {1} '
'--chain-path {2} --fullchain-path {3}').format(
CSR, cert_path, chain_path, full_path).split()
if extra_args:
args += extra_args
with mock.patch('letsencrypt.main.crypto_util'):
self._call(args)
if '--dry-run' in args:
self.assertFalse(mock_client.save_certificate.called)
else:
mock_client.save_certificate.assert_called_once_with(
certr, chain, cert_path, chain_path, full_path)
return mock_get_utility
def test_certonly_csr(self):
mock_get_utility = self._test_certonly_csr_common()
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('cert.pem' in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
@mock.patch('letsencrypt.main.client.acme_client')
def test_revoke_with_key(self, mock_acme_client):
server = 'foo.bar'
self._call_no_clientmock(['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
with open(KEY) as f:
mock_acme_client.Client.assert_called_once_with(
server, key=jose.JWK.load(f.read()), net=mock.ANY)
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = mock_acme_client.Client().revoke
mock_revoke.assert_called_once_with(jose.ComparableX509(cert))
@mock.patch('letsencrypt.main._determine_account')
def test_revoke_without_key(self, mock_determine_account):
mock_determine_account.return_value = (mock.MagicMock(), None)
_, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = client.acme_from_config_key().revoke
mock_revoke.assert_called_once_with(jose.ComparableX509(cert))
@mock.patch('letsencrypt.main.sys')
def test_handle_exception(self, mock_sys):
# pylint: disable=protected-access
from acme import messages
config = mock.MagicMock()
mock_open = mock.mock_open()
with mock.patch('letsencrypt.main.open', mock_open, create=True):
exception = Exception('detail')
config.verbose_count = 1
main._handle_exception(
Exception, exc_value=exception, trace=None, config=None)
mock_open().write.assert_called_once_with(''.join(
traceback.format_exception_only(Exception, exception)))
error_msg = mock_sys.exit.call_args_list[0][0][0]
self.assertTrue('unexpected error' in error_msg)
with mock.patch('letsencrypt.main.open', mock_open, create=True):
mock_open.side_effect = [KeyboardInterrupt]
error = errors.Error('detail')
main._handle_exception(
errors.Error, exc_value=error, trace=None, config=None)
# assert_any_call used because sys.exit doesn't exit in cli.py
mock_sys.exit.assert_any_call(''.join(
traceback.format_exception_only(errors.Error, error)))
exception = messages.Error(detail='alpha', typ='urn:acme:error:triffid',
title='beta')
config = mock.MagicMock(debug=False, verbose_count=-3)
main._handle_exception(
messages.Error, exc_value=exception, trace=None, config=config)
error_msg = mock_sys.exit.call_args_list[-1][0][0]
self.assertTrue('unexpected error' in error_msg)
self.assertTrue('acme:error' not in error_msg)
self.assertTrue('alpha' in error_msg)
self.assertTrue('beta' in error_msg)
config = mock.MagicMock(debug=False, verbose_count=1)
main._handle_exception(
messages.Error, exc_value=exception, trace=None, config=config)
error_msg = mock_sys.exit.call_args_list[-1][0][0]
self.assertTrue('unexpected error' in error_msg)
self.assertTrue('acme:error' in error_msg)
self.assertTrue('alpha' in error_msg)
interrupt = KeyboardInterrupt('detail')
main._handle_exception(
KeyboardInterrupt, exc_value=interrupt, trace=None, config=None)
mock_sys.exit.assert_called_with(''.join(
traceback.format_exception_only(KeyboardInterrupt, interrupt)))
def test_read_file(self):
rel_test_path = os.path.relpath(os.path.join(self.tmp_dir, 'foo'))
self.assertRaises(
argparse.ArgumentTypeError, cli.read_file, rel_test_path)
test_contents = 'bar\n'
with open(rel_test_path, 'w') as f:
f.write(test_contents)
path, contents = cli.read_file(rel_test_path)
self.assertEqual(path, os.path.abspath(path))
self.assertEqual(contents, test_contents)
def test_agree_dev_preview_config(self):
with mock.patch('letsencrypt.main.run') as mocked_run:
self._call(['-c', test_util.vector_path('cli.ini')])
self.assertTrue(mocked_run.called)
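# The account-selection tests below swap an in-memory account store in for
# AccountFileStorage, so each case controls exactly which saved accounts
# _determine_account can see and how registration gets triggered.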
class DetermineAccountTest(unittest.TestCase):
"""Tests for letsencrypt.cli._determine_account."""
def setUp(self):
self.args = mock.MagicMock(account=None, email=None,
register_unsafely_without_email=False)
self.config = configuration.NamespaceConfig(self.args)
self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
self.account_storage = account.AccountMemoryStorage()
def _call(self):
# pylint: disable=protected-access
from letsencrypt.main import _determine_account
with mock.patch('letsencrypt.main.account.AccountFileStorage') as mock_storage:
mock_storage.return_value = self.account_storage
return _determine_account(self.config)
def test_args_account_set(self):
self.account_storage.save(self.accs[1])
self.config.account = self.accs[1].id
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
def test_single_account(self):
self.account_storage.save(self.accs[0])
self.assertEqual((self.accs[0], None), self._call())
self.assertEqual(self.accs[0].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('letsencrypt.client.display_ops.choose_account')
def test_multiple_accounts(self, mock_choose_accounts):
for acc in self.accs:
self.account_storage.save(acc)
mock_choose_accounts.return_value = self.accs[1]
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(
set(mock_choose_accounts.call_args[0][0]), set(self.accs))
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('letsencrypt.client.display_ops.get_email')
def test_no_accounts_no_email(self, mock_get_email):
mock_get_email.return_value = 'foo@bar.baz'
with mock.patch('letsencrypt.main.client') as client:
client.register.return_value = (
self.accs[0], mock.sentinel.acme)
self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
client.register.assert_called_once_with(
self.config, self.account_storage, tos_cb=mock.ANY)
self.assertEqual(self.accs[0].id, self.config.account)
self.assertEqual('foo@bar.baz', self.config.email)
def test_no_accounts_email(self):
self.config.email = 'other email'
with mock.patch('letsencrypt.main.client') as client:
client.register.return_value = (self.accs[1], mock.sentinel.acme)
self._call()
self.assertEqual(self.accs[1].id, self.config.account)
self.assertEqual('other email', self.config.email)
class DuplicativeCertsTest(storage_test.BaseRenewableCertTest):
"""Test to avoid duplicate lineages."""
def setUp(self):
super(DuplicativeCertsTest, self).setUp()
self.config.write()
self._write_out_ex_kinds()
def tearDown(self):
shutil.rmtree(self.tempdir)
@mock.patch('letsencrypt.le_util.make_or_verify_dir')
def test_find_duplicative_names(self, unused_makedir):
from letsencrypt.main import _find_duplicative_certs
test_cert = test_util.load_vector('cert-san.pem')
with open(self.test_rc.cert, 'w') as f:
f.write(test_cert)
# No overlap at all
result = _find_duplicative_certs(
self.cli_config, ['wow.net', 'hooray.org'])
self.assertEqual(result, (None, None))
# Totally identical
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'www.example.com'])
self.assertTrue(result[0].configfile.filename.endswith('example.org.conf'))
self.assertEqual(result[1], None)
# Superset
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'www.example.com', 'something.new'])
self.assertEqual(result[0], None)
self.assertTrue(result[1].configfile.filename.endswith('example.org.conf'))
# Partial overlap doesn't count
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'something.new'])
self.assertEqual(result, (None, None))
class DefaultTest(unittest.TestCase):
"""Tests for letsencrypt.cli._Default."""
def setUp(self):
# pylint: disable=protected-access
self.default1 = cli._Default()
self.default2 = cli._Default()
def test_boolean(self):
self.assertFalse(self.default1)
self.assertFalse(self.default2)
def test_equality(self):
self.assertEqual(self.default1, self.default2)
def test_hash(self):
self.assertEqual(hash(self.default1), hash(self.default2))
class SetByCliTest(unittest.TestCase):
"""Tests for letsencrypt.set_by_cli and related functions."""
def setUp(self):
reload_module(cli)
def test_webroot_map(self):
args = '-w /var/www/html -d example.com'.split()
verb = 'renew'
self.assertTrue(_call_set_by_cli('webroot_map', args, verb))
def test_report_config_interaction_str(self):
cli.report_config_interaction('manual_public_ip_logging_ok',
'manual_test_mode')
cli.report_config_interaction('manual_test_mode', 'manual')
self._test_report_config_interaction_common()
def test_report_config_interaction_iterable(self):
cli.report_config_interaction(('manual_public_ip_logging_ok',),
('manual_test_mode',))
cli.report_config_interaction(('manual_test_mode',), ('manual',))
self._test_report_config_interaction_common()
def _test_report_config_interaction_common(self):
"""Tests implied interaction between manual flags.
--manual implies --manual-test-mode
These interactions don't actually
exist in the client, but are used here for testing purposes.
"""
args = ['--manual']
verb = 'renew'
for v in ('manual', 'manual_test_mode', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
cli.set_by_cli.detector = None
args = ['--manual-test-mode']
for v in ('manual_test_mode', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
self.assertFalse(_call_set_by_cli('manual', args, verb))
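# Module-level helper: set_by_cli() consults the global helpful_parser, so the
# tests patch it with the desired args/verb before asking whether a variable
# was set on the command line, e.g.
# _call_set_by_cli('webroot_map', '-w /var/www/html -d example.com'.split(), 'renew').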
def _call_set_by_cli(var, args, verb):
with mock.patch('letsencrypt.cli.helpful_parser') as mock_parser:
mock_parser.args = args
mock_parser.verb = verb
return cli.set_by_cli(var)
if __name__ == '__main__':
unittest.main() # pragma: no cover
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
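# BaseWhatChangedTest registers the BUILD-file aliases these tests rely on and
# provides workspace(), which builds a Workspace whose touched_files() and
# changes_in() assert the expected parent/diffspec and return the canned file
# lists, so WhatChanged can be exercised without a real SCM checkout.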
class BaseWhatChangedTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'java_library': JavaLibrary,
'python_library': PythonLibrary,
'jar_library': JarLibrary,
'resources': Resources,
'java_thrift_library': JavaThriftLibrary,
'python_thrift_library': PythonThriftLibrary,
},
context_aware_object_factories={
'source_root': SourceRoot.factory,
},
objects={
'jar': JarDependency,
}
)
@classmethod
def task_type(cls):
return WhatChanged
def assert_console_output(self, *output, **kwargs):
options = {'spec_excludes': [], 'exclude_target_regexp': []}
if 'options' in kwargs:
options.update(kwargs['options'])
kwargs['options'] = options
super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)
def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
class MockWorkspace(Workspace):
def touched_files(_, p):
self.assertEqual(parent or 'HEAD', p)
return files or []
def changes_in(_, ds):
self.assertEqual(diffspec, ds)
return diff_files or []
return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
def test_nochanges(self):
self.assert_console_output(workspace=self.workspace())
def test_parent(self):
self.assert_console_output(options={'changes_since': '42'},
workspace=self.workspace(parent='42'))
def test_files(self):
self.assert_console_output(
'a/b/c',
'd',
'e/f',
options={'files': True},
workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
)
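# WhatChangedTest sets up a small tree of BUILD files and then checks which
# targets WhatChanged reports as owning the files the mocked workspace says
# were touched.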
class WhatChangedTest(BaseWhatChangedTest):
def setUp(self):
super(WhatChangedTest, self).setUp()
self.add_to_build_file('root', dedent("""
source_root('src/py', python_library, resources)
source_root('resources/a1', resources)
"""))
self.add_to_build_file('root/src/py/a', dedent("""
python_library(
name='alpha',
sources=['b/c', 'd'],
resources=['test.resources']
)
jar_library(
name='beta',
jars=[
jar(org='gamma', name='ray', rev='1.137.bruce_banner')
]
)
"""))
self.add_to_build_file('root/src/py/1', dedent("""
python_library(
name='numeric',
sources=['2']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
python_library(
name='a',
sources=['a.py'],
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
python_library(
name='b',
sources=['b.py'],
dependencies=['root/src/py/dependency_tree/a']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
python_library(
name='c',
sources=['c.py'],
dependencies=['root/src/py/dependency_tree/b']
)
"""))
self.add_to_build_file('root/src/thrift', dedent("""
java_thrift_library(
name='thrift',
sources=['a.thrift']
)
python_thrift_library(
name='py-thrift',
sources=['a.thrift']
)
"""))
self.add_to_build_file('root/resources/a', dedent("""
resources(
name='a_resources',
sources=['a.resources']
)
"""))
self.add_to_build_file('root/src/java/a', dedent("""
java_library(
name='a_java',
sources=['a.java'],
)
"""))
self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
jar_library(
name='dummy',
jars=[
jar(org='foo', name='ray', rev='1.45')
])
"""))
self.add_to_build_file('root/3rdparty/BUILD', dedent("""
jar_library(
name='dummy1',
jars=[
jar(org='foo1', name='ray', rev='1.45')
])
"""))
# This is a directory that might confuse case-insensitive file systems (on Macs, for example).
# It should not be treated as a BUILD file.
self.create_dir('root/scripts/a/build')
self.add_to_build_file('root/scripts/BUILD', dedent("""
java_library(
name='scripts',
sources=['a/build/scripts.java'],
)
"""))
def test_spec_excludes(self):
self.assert_console_output(
'root/src/py/a:alpha',
options={'spec_excludes': ['root/src/py/1']},
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
)
def test_owned(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
)
def test_multiply_owned(self):
self.assert_console_output(
'root/src/thrift:thrift',
'root/src/thrift:py-thrift',
workspace=self.workspace(files=['root/src/thrift/a.thrift'])
)
def test_build(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/a:beta',
workspace=self.workspace(files=['root/src/py/a/BUILD'])
)
def test_resource_changed(self):
self.assert_console_output(
'root/src/py/a:alpha',
workspace=self.workspace(files=['root/src/py/a/test.resources'])
)
def test_resource_changed_for_java_lib(self):
self.assert_console_output(
'root/resources/a:a_resources',
workspace=self.workspace(files=['root/resources/a/a.resources'])
)
def test_build_sibling(self):
self.assert_console_output(
'root/3rdparty:dummy',
workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
)
def test_resource_type_error(self):
self.add_to_build_file('root/resources/a1', dedent("""
java_library(
name='a1',
sources=['a1.test'],
resources=[1]
)
"""))
self.assert_console_raises(
Exception,
workspace=self.workspace(files=['root/resources/a1/a1.test'])
)
def test_build_directory(self):
# This ensures that a directory whose name matches a BUILD file does not cause an exception.
self.assert_console_output(
'root/scripts:scripts',
workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
)
def test_fast(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'fast': True},
workspace=self.workspace(
files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_diffspec(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'diffspec': '42'},
workspace=self.workspace(
diffspec='42',
diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_include_dependees(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
options={'include_dependees': 'direct'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
def test_exclude(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
| |
"""
dj-stripe Event Handler tests
"""
import decimal
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from djstripe.models import (
Card, Charge, Coupon, Customer, Dispute, DjstripePaymentMethod,
Event, Invoice, InvoiceItem, Plan, Subscription, Transfer
)
from . import (
FAKE_CARD, FAKE_CHARGE, FAKE_CHARGE_II, FAKE_COUPON, FAKE_CUSTOMER,
FAKE_CUSTOMER_II, FAKE_DISPUTE, FAKE_EVENT_ACCOUNT_APPLICATION_DEAUTHORIZED,
FAKE_EVENT_CHARGE_SUCCEEDED, FAKE_EVENT_CUSTOMER_CREATED,
FAKE_EVENT_CUSTOMER_DELETED, FAKE_EVENT_CUSTOMER_DISCOUNT_CREATED,
FAKE_EVENT_CUSTOMER_DISCOUNT_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_CREATED,
FAKE_EVENT_CUSTOMER_SOURCE_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE,
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED, FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED,
FAKE_EVENT_DISPUTE_CREATED, FAKE_EVENT_INVOICE_CREATED, FAKE_EVENT_INVOICE_DELETED,
FAKE_EVENT_INVOICE_UPCOMING, FAKE_EVENT_INVOICEITEM_CREATED,
FAKE_EVENT_INVOICEITEM_DELETED, FAKE_EVENT_PLAN_CREATED, FAKE_EVENT_PLAN_DELETED,
FAKE_EVENT_PLAN_REQUEST_IS_OBJECT, FAKE_EVENT_TRANSFER_CREATED,
FAKE_EVENT_TRANSFER_DELETED, FAKE_INVOICE, FAKE_INVOICE_II, FAKE_INVOICEITEM,
FAKE_PLAN, FAKE_SUBSCRIPTION, FAKE_SUBSCRIPTION_III, FAKE_TRANSFER, default_account
)
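# The handler tests below share one pattern: build a fake Stripe event payload,
# run it through Event.sync_from_stripe_data(), then call
# event.invoke_webhook_handlers() so dj-stripe's registered handlers update the
# local models that the assertions inspect. EventTestCase wraps the
# stripe.Event.retrieve mocking that this flow needs.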
class EventTestCase(TestCase):
#
# Helpers
#
@patch("stripe.Event.retrieve")
def _create_event(self, event_data, event_retrieve_mock, patch_data=None):
event_data = deepcopy(event_data)
if patch_data:
event_data.update(patch_data)
event_retrieve_mock.return_value = event_data
event = Event.sync_from_stripe_data(event_data)
return event
class TestAccountEvents(EventTestCase):
@patch("stripe.Event.retrieve")
def test_account_deauthorized_event(self, event_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_ACCOUNT_APPLICATION_DEAUTHORIZED)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
class TestChargeEvents(EventTestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Charge.retrieve")
@patch("stripe.Event.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
def test_charge_created(
self,
subscription_retrieve_mock,
invoice_retrieve_mock,
event_retrieve_mock,
charge_retrieve_mock,
account_mock,
):
FAKE_CUSTOMER.create_for_user(self.user)
fake_stripe_event = deepcopy(FAKE_EVENT_CHARGE_SUCCEEDED)
event_retrieve_mock.return_value = fake_stripe_event
charge_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
account_mock.return_value = default_account()
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
charge = Charge.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(
charge.amount, fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100")
)
self.assertEqual(charge.status, fake_stripe_event["data"]["object"]["status"])
class TestCustomerEvents(EventTestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
self.customer = FAKE_CUSTOMER.create_for_user(self.user)
@patch("stripe.Customer.retrieve", return_value=FAKE_CUSTOMER)
@patch("stripe.Event.retrieve")
def test_customer_created(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
customer = Customer.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(
customer.account_balance, fake_stripe_event["data"]["object"]["account_balance"]
)
self.assertEqual(customer.currency, fake_stripe_event["data"]["object"]["currency"])
@patch("stripe.Customer.retrieve", return_value=FAKE_CUSTOMER)
def test_customer_deleted(self, customer_retrieve_mock):
FAKE_CUSTOMER.create_for_user(self.user)
event = self._create_event(FAKE_EVENT_CUSTOMER_CREATED)
event.invoke_webhook_handlers()
event = self._create_event(FAKE_EVENT_CUSTOMER_DELETED)
event.invoke_webhook_handlers()
customer = Customer.objects.get(id=FAKE_CUSTOMER["id"])
self.assertIsNotNone(customer.date_purged)
@patch("stripe.Coupon.retrieve", return_value=FAKE_COUPON)
@patch("stripe.Event.retrieve", return_value=FAKE_EVENT_CUSTOMER_DISCOUNT_CREATED)
def test_customer_discount_created(self, event_retrieve_mock, coupon_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_DISCOUNT_CREATED)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
self.assertIsNotNone(event.customer)
self.assertEqual(event.customer.id, FAKE_CUSTOMER["id"])
self.assertIsNotNone(event.customer.coupon)
@patch("stripe.Coupon.retrieve", return_value=FAKE_COUPON)
@patch("stripe.Event.retrieve", return_value=FAKE_EVENT_CUSTOMER_DISCOUNT_DELETED)
def test_customer_discount_deleted(self, event_retrieve_mock, coupon_retrieve_mock):
coupon = Coupon.sync_from_stripe_data(FAKE_COUPON)
self.customer.coupon = coupon
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_DISCOUNT_DELETED)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
self.assertIsNotNone(event.customer)
self.assertEqual(event.customer.id, FAKE_CUSTOMER["id"])
self.assertIsNone(event.customer.coupon)
@patch("stripe.Customer.retrieve", return_value=FAKE_CUSTOMER)
@patch("stripe.Event.retrieve")
def test_customer_card_created(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
card = Card.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertIn(card, self.customer.legacy_cards.all())
self.assertEqual(card.brand, fake_stripe_event["data"]["object"]["brand"])
self.assertEqual(card.last4, fake_stripe_event["data"]["object"]["last4"])
@patch("stripe.Event.retrieve")
def test_customer_unknown_source_created(self, event_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
fake_stripe_event["data"]["object"]["object"] = "unknown"
fake_stripe_event["data"]["object"][
"id"
] = "card_xxx_test_customer_unk_source_created"
event_retrieve_mock.return_value = fake_stripe_event
FAKE_CUSTOMER.create_for_user(self.user)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
self.assertFalse(
Card.objects.filter(id=fake_stripe_event["data"]["object"]["id"]).exists()
)
def test_customer_default_source_deleted(self):
self.customer.default_source = DjstripePaymentMethod.objects.get(id=FAKE_CARD["id"])
self.customer.save()
self.assertIsNotNone(self.customer.default_source)
self.assertTrue(self.customer.has_valid_source())
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED)
event.invoke_webhook_handlers()
customer = Customer.objects.get(id=FAKE_CUSTOMER["id"])
self.assertIsNone(customer.default_source)
self.assertFalse(customer.has_valid_source())
def test_customer_source_double_delete(self):
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED)
event.invoke_webhook_handlers()
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE)
event.invoke_webhook_handlers()
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Event.retrieve")
def test_customer_subscription_created(
self, event_retrieve_mock, subscription_retrieve_mock, plan_retrieve_mock
):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
subscription = Subscription.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertIn(subscription, self.customer.subscriptions.all())
self.assertEqual(subscription.status, fake_stripe_event["data"]["object"]["status"])
self.assertEqual(
subscription.quantity, fake_stripe_event["data"]["object"]["quantity"]
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
def test_customer_subscription_deleted(
self, customer_retrieve_mock, subscription_retrieve_mock, plan_retrieve_mock
):
event = self._create_event(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED)
event.invoke_webhook_handlers()
Subscription.objects.get(id=FAKE_SUBSCRIPTION["id"])
event = self._create_event(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED)
event.invoke_webhook_handlers()
with self.assertRaises(Subscription.DoesNotExist):
Subscription.objects.get(id=FAKE_SUBSCRIPTION["id"])
@patch("stripe.Customer.retrieve")
@patch("stripe.Event.retrieve")
def test_customer_bogus_event_type(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
fake_stripe_event["data"]["object"]["customer"] = fake_stripe_event["data"][
"object"
]["id"]
fake_stripe_event["type"] = "customer.praised"
event_retrieve_mock.return_value = fake_stripe_event
customer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
class TestDisputeEvents(EventTestCase):
@patch("stripe.Dispute.retrieve", return_value=deepcopy(FAKE_DISPUTE))
@patch("stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_DISPUTE_CREATED))
def test_dispute_created(self, event_retrieve_mock, dispute_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_DISPUTE_CREATED)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
dispute = Dispute.objects.get()
self.assertEqual(dispute.id, FAKE_DISPUTE["id"])
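# Invoice (and invoice item) syncs pull in their related objects, so these
# tests stub stripe.Customer/Charge/Subscription retrieval alongside the
# invoice itself to keep everything offline.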
class TestInvoiceEvents(EventTestCase):
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Event.retrieve")
def test_invoice_created_no_existing_customer(
self,
event_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
customer_retrieve_mock,
subscription_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = default_account()
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoice_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
self.assertEqual(Customer.objects.count(), 1)
customer = Customer.objects.get()
self.assertEqual(customer.subscriber, None)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve")
@patch("stripe.Event.retrieve")
def test_invoice_created(
self,
event_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
customer_retrieve_mock,
subscription_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = default_account()
user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
FAKE_CUSTOMER.create_for_user(user)
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoice_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
invoice = Invoice.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(
invoice.amount_due,
fake_stripe_event["data"]["object"]["amount_due"] / decimal.Decimal("100"),
)
self.assertEqual(invoice.paid, fake_stripe_event["data"]["object"]["paid"])
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
def test_invoice_deleted(
self,
invoice_retrieve_mock,
charge_retrieve_mock,
subscription_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = default_account()
user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
FAKE_CUSTOMER.create_for_user(user)
event = self._create_event(FAKE_EVENT_INVOICE_CREATED)
event.invoke_webhook_handlers()
Invoice.objects.get(id=FAKE_INVOICE["id"])
event = self._create_event(FAKE_EVENT_INVOICE_DELETED)
event.invoke_webhook_handlers()
with self.assertRaises(Invoice.DoesNotExist):
Invoice.objects.get(id=FAKE_INVOICE["id"])
def test_invoice_upcoming(self):
# Ensure that invoice upcoming events are processed - no actual
# processing occurs, so the operation is effectively a no-op.
event = self._create_event(FAKE_EVENT_INVOICE_UPCOMING)
event.invoke_webhook_handlers()
class TestInvoiceItemEvents(EventTestCase):
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION_III))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE_II))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE_II))
@patch("stripe.InvoiceItem.retrieve")
@patch("stripe.Event.retrieve")
def test_invoiceitem_created(
self,
event_retrieve_mock,
invoiceitem_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
subscription_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = default_account()
user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
FAKE_CUSTOMER_II.create_for_user(user)
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICEITEM_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoiceitem_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
invoiceitem = InvoiceItem.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(
invoiceitem.amount,
fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100"),
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION_III))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE_II))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE_II))
@patch("stripe.InvoiceItem.retrieve", return_value=deepcopy(FAKE_INVOICEITEM))
def test_invoiceitem_deleted(
self,
invoiceitem_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
subscription_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = default_account()
user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
FAKE_CUSTOMER_II.create_for_user(user)
event = self._create_event(FAKE_EVENT_INVOICEITEM_CREATED)
event.invoke_webhook_handlers()
InvoiceItem.objects.get(id=FAKE_INVOICEITEM["id"])
event = self._create_event(FAKE_EVENT_INVOICEITEM_DELETED)
event.invoke_webhook_handlers()
with self.assertRaises(InvoiceItem.DoesNotExist):
InvoiceItem.objects.get(id=FAKE_INVOICEITEM["id"])
class TestPlanEvents(EventTestCase):
@patch("stripe.Plan.retrieve")
@patch("stripe.Event.retrieve")
def test_plan_created(self, event_retrieve_mock, plan_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_PLAN_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
plan_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
plan = Plan.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(plan.name, fake_stripe_event["data"]["object"]["name"])
@patch("stripe.Plan.retrieve", return_value=FAKE_PLAN)
@patch("stripe.Event.retrieve", return_value=FAKE_EVENT_PLAN_REQUEST_IS_OBJECT)
def test_plan_updated_request_object(self, event_retrieve_mock, plan_retrieve_mock):
plan_retrieve_mock.return_value = FAKE_EVENT_PLAN_REQUEST_IS_OBJECT["data"]["object"]
event = Event.sync_from_stripe_data(FAKE_EVENT_PLAN_REQUEST_IS_OBJECT)
event.invoke_webhook_handlers()
plan = Plan.objects.get(id=FAKE_EVENT_PLAN_REQUEST_IS_OBJECT["data"]["object"]["id"])
self.assertEqual(
plan.name, FAKE_EVENT_PLAN_REQUEST_IS_OBJECT["data"]["object"]["name"]
)
@patch("stripe.Plan.retrieve", return_value=FAKE_PLAN)
def test_plan_deleted(self, plan_retrieve_mock):
event = self._create_event(FAKE_EVENT_PLAN_CREATED)
event.invoke_webhook_handlers()
Plan.objects.get(id=FAKE_PLAN["id"])
event = self._create_event(FAKE_EVENT_PLAN_DELETED)
event.invoke_webhook_handlers()
with self.assertRaises(Plan.DoesNotExist):
Plan.objects.get(id=FAKE_PLAN["id"])
class TestTransferEvents(EventTestCase):
@patch("stripe.Transfer.retrieve")
@patch("stripe.Event.retrieve")
def test_transfer_created(self, event_retrieve_mock, transfer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
transfer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.invoke_webhook_handlers()
transfer = Transfer.objects.get(id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(
transfer.amount,
fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100"),
)
@patch("stripe.Transfer.retrieve", return_value=FAKE_TRANSFER)
def test_transfer_deleted(self, transfer_retrieve_mock):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
event.invoke_webhook_handlers()
Transfer.objects.get(id=FAKE_TRANSFER["id"])
event = self._create_event(FAKE_EVENT_TRANSFER_DELETED)
event.invoke_webhook_handlers()
with self.assertRaises(Transfer.DoesNotExist):
Transfer.objects.get(id=FAKE_TRANSFER["id"])
event = self._create_event(FAKE_EVENT_TRANSFER_DELETED)
event.invoke_webhook_handlers()
|