gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
CORE MARKDOWN BLOCKPARSER
===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import re
from . import util
from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN')
def build_block_parser(md_instance, **kwargs):
    """ Build the default block parser used by Markdown. """
    parser = BlockParser(md_instance)
    # Registration order matters: the parser tries processors in the order
    # they are added, so 'empty' runs first and 'paragraph' is the fallback.
    default_processors = [
        ('empty', EmptyBlockProcessor),
        ('indent', ListIndentProcessor),
        ('code', CodeBlockProcessor),
        ('hashheader', HashHeaderProcessor),
        ('setextheader', SetextHeaderProcessor),
        ('hr', HRProcessor),
        ('olist', OListProcessor),
        ('ulist', UListProcessor),
        ('quote', BlockQuoteProcessor),
        ('paragraph', ParagraphProcessor),
    ]
    for name, proc_class in default_processors:
        parser.blockprocessors[name] = proc_class(parser)
    return parser
class BlockProcessor:
    """ Base class for block processors.

    A block processor handles one kind of Markdown block.  Subclasses must
    implement ``test`` and ``run``: ``test`` reports whether a given block of
    text belongs to this processor, and when it returns True the parser calls
    ``run`` to turn that block into etree elements.
    """

    def __init__(self, parser):
        self.parser = parser
        self.tab_length = parser.markdown.tab_length

    def lastChild(self, parent):
        """ Return the last child of an etree element, or None if childless. """
        return parent[-1] if len(parent) else None

    def detab(self, text):
        """ Remove a tab from the front of each line of the given text.

        Returns a two-tuple ``(detabbed, remainder)``: the leading run of
        indented (or blank) lines with one indent level stripped, and the
        unprocessed tail of ``text`` starting at the first unindented,
        non-blank line.
        """
        indent = ' ' * self.tab_length
        lines = text.split('\n')
        stripped = []
        for line in lines:
            if line.startswith(indent):
                stripped.append(line[self.tab_length:])
            elif not line.strip():
                # Blank lines stay blank but do not terminate the run.
                stripped.append('')
            else:
                break
        return '\n'.join(stripped), '\n'.join(lines[len(stripped):])

    def looseDetab(self, text, level=1):
        """ Remove ``level`` tabs from indented lines; keep other lines as-is. """
        prefix = ' ' * (self.tab_length * level)
        result = []
        for line in text.split('\n'):
            if line.startswith(prefix):
                result.append(line[len(prefix):])
            else:
                result.append(line)
        return '\n'.join(result)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        Called for each processor in turn while the parser looks for a
        handler; must return a boolean.  The test may be as cheap as a
        ``startswith`` check or a full regular expression, and may consult
        ``parent`` since block meaning can depend on context (e.g. inside a
        list).

        Keywords:

        * ``parent``: An etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        Invoked when ``test`` accepted the block.  Both ``parent`` and
        ``blocks`` are edited in place — there is no mechanism to return
        replacements — so implementations add SubElements/text to ``parent``
        and ``pop``/``insert`` entries on ``blocks``.

        Keywords:

        * ``parent``: An etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:
        * a list item
            process this part

            or this part
    """

    # Tags which may directly hold an indented child block.
    ITEM_TYPES = ['li']
    # List container tags; their last ``li`` may adopt an indented block.
    LIST_TYPES = ['ul', 'ol']

    def __init__(self, *args):
        BlockProcessor.__init__(self, *args)
        # Matches one or more leading indent units of tab_length spaces each.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)'% self.tab_length)

    def test(self, parent, block):
        # An indented block, not already being detabbed, attached to a list
        # context: either the parent is an li, or its last child is a list.
        # Otherwise the indented block is a code block.
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or \
                (len(parent) and parent[-1] and \
                    (parent[-1].tag in self.LIST_TYPES)
                )
            )

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)
        # Prevent this processor from re-matching while the child is parsed.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a 'ul' or 'ol' child list
            # with a member. If that is the case, then that should be the
            # parent. This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see OListProcessor
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a li. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                # The p must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sublist.
                p = util.etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = util.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level.

        :returns: a ``(level, parent)`` tuple, where ``parent`` is the etree
            element found at the computed indent level.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # True division (the module imports ``division`` from __future__);
            # the result is compared against the integer ``level`` below.
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child is not None and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Process indented code blocks. """

    def test(self, parent, block):
        return block.startswith(' ' * self.tab_length)

    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        theRest = ''
        continues_code = (sibling is not None and sibling.tag == "pre"
                          and len(sibling) and sibling[0].tag == "code")
        if continues_code:
            # The previous block was a code block.  Blank lines never start a
            # new code block, so glue this block onto the previous one,
            # restoring the newlines that the blank-line split removed.
            code = sibling[0]
            block, theRest = self.detab(block)
            code.text = util.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
        else:
            # A fresh code block: build <pre><code> and insert the text.
            pre = util.etree.SubElement(parent, 'pre')
            code = util.etree.SubElement(pre, 'code')
            block, theRest = self.detab(block)
            code.text = util.AtomicString('%s\n' % block.rstrip())
        if theRest:
            # Unindented trailing line(s) belong to whatever comes next;
            # queue them at the front of the master block list.
            blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes: lines prefixed with ``>``. """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            # Anything before the quote marker is parsed first, recursively.
            before = block[:m.start()]
            self.parser.parseBlocks(parent, [before])
            # Strip the ``> `` prefix from every quoted line.
            quoted = block[m.start():].split('\n')
            block = '\n'.join([self.clean(line) for line in quoted])
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag == "blockquote":
            # Continue the previous blockquote instead of opening a new one.
            quote = sibling
        else:
            quote = util.etree.SubElement(parent, 'blockquote')
        # Recursively parse the quoted content with the blockquote as parent.
        # The state change makes blockquotes embedded in lists use p tags.
        self.parser.state.set('blockquote')
        self.parser.parseChunk(quote, block)
        self.parser.state.reset()

    def clean(self, line):
        """ Remove a leading ``>`` (plus one optional space) from a line. """
        if line.strip() == ">":
            return ""
        m = self.RE.match(line)
        return m.group(2) if m else line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. They can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type.
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
    # The integer (as a python string) with which the list starts (default=1).
    # Eg: If the list is initialized as:
    #   3. Item
    # the ol tag will get a starts="3" attribute.
    STARTSWITH = '1'
    # List of allowed sibling tags.
    SIBLING_TAGS = ['ol', 'ul']

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent.
            lst = sibling
            # Make sure previous item is in a p - if the item has text,
            # then it isn't in a p.
            if lst[-1].text:
                # Since it's possible there are other children for this
                # sibling, we can't just SubElement the p; we need to
                # insert it as the first item.
                p = util.etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # If the last item has a tail, then the tail needs to be put in
            # a p - likely only when a header is not followed by a blank line.
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = util.etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''
            # Parse first block differently as it gets wrapped in a p.
            li = util.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # This catches the edge case of a multi-item indented list whose
            # first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also ListIndentProcessor
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = util.etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set.
            if not self.parser.markdown.lazy_ol and self.STARTSWITH !='1':
                lst.attrib['start'] = self.STARTSWITH
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*self.tab_length):
                # Item is indented. Parse with last item as parent.
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent.
                li = util.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items.

        Also records the first item's integer in ``STARTSWITH`` (for ``ol``
        lists) so ``run`` can emit the ``start`` attribute.
        """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item.
                # Check first item for the start index.
                if not items and self.TAG=='ol':
                    # Detect the integer value of first list item.
                    INTEGER_RE = re.compile('(\d+)')
                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
                # Append to the list.
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """

    TAG = 'ul'
    # Detect an item (``* item``, ``+ item`` or ``- item``).
    # ``group(1)`` contains the contents of the item.
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# H1`` through ``###### H6``). """

    # Detect a header at start of any line in block.  ``level`` is the run of
    # 1-6 leading hashes, ``header`` the text; trailing hashes are discarded.
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]  # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse these lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE.
            h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen, but just in case...
            # Use warning() - warn() is a deprecated alias - and pass the
            # argument lazily so formatting only happens if the record is
            # actually emitted.
            logger.warning("We've got a problem header: %r", block)
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style Headers (text underlined with ``=`` or ``-``). """

    # A setext header must occupy the first two lines of the block.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # In this parser an ``=`` underline maps to h2 and ``-`` to h3.
        level = 2 if lines[1].startswith('=') else 3
        h = util.etree.SubElement(parent, 'h%d' % level)
        h.text = lines[0].strip()
        if len(lines) > 2:
            # Anything after the underline is requeued for later parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules. """

    RE = r'^[ ]{0,3}((-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,})[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        m = self.SEARCH_RE.search(block)
        # Python regexes lack atomic grouping, so emulate it for performance:
        # the pattern matches only the HR itself, and we then insist the
        # match ends the block or is followed immediately by a newline.
        if m and (m.end() == len(block) or block[m.end()] == '\n'):
            # Stash the match for run(), which is called right after test().
            self.match = m
            return True
        return False

    def run(self, parent, blocks):
        block = blocks.pop(0)
        # Lines above the rule are parsed first, recursively.
        leading = block[:self.match.start()].rstrip('\n')
        if leading:
            self.parser.parseBlocks(parent, [leading])
        # Create the hr element itself.
        util.etree.SubElement(parent, 'hr')
        # Lines below the rule are queued for later parsing.
        trailing = block[self.match.end():].lstrip('\n')
        if trailing:
            blocks.insert(0, trailing)
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that are empty or start with an empty line. """

    def test(self, parent, block):
        return not block or block.startswith('\n')

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block:
            # A completely empty block stands for a full blank separation.
            filler = '\n\n'
        else:
            # The block merely starts with an empty line: consume only that
            # single line and requeue the remainder for later parsing.
            filler = '\n'
            remainder = block[1:]
            if remainder:
                blocks.insert(0, remainder)
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag == 'pre' and len(sibling) and sibling[0].tag == 'code':
            # The last block is a code block, where blank lines matter;
            # append the filler to preserve the whitespace.
            sibling[0].text = util.AtomicString('%s%s' % (sibling[0].text, filler))
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks: the catch-all, always-matching processor. """

    def test(self, parent, block):
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block.strip():
            # Blank block: throw it away.
            return
        if self.parser.state.isstate('list'):
            # Tight list: no p element is created; the text hangs directly
            # off the parent, or off the last child's tail.  A child can
            # exist here when e.g. a header inside a list item is not
            # followed by a blank line:
            #
            #     * # Header
            #     Line 2 of list item - not part of header.
            sibling = self.lastChild(parent)
            if sibling is not None:
                # Attach the text after the existing child.
                if sibling.tail:
                    sibling.tail = '%s\n%s' % (sibling.tail, block)
                else:
                    sibling.tail = '\n%s' % block
            elif parent.text:
                # Append to parent.text.
                parent.text = '%s\n%s' % (parent.text, block)
            else:
                parent.text = block.lstrip()
        else:
            # Create a regular paragraph.
            p = util.etree.SubElement(parent, 'p')
            p.text = block.lstrip()
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import random
import math
import time
import shutil
import uuid
import errno
import re
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import simplejson
import swift.common.db
from swift.common.direct_client import quote
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_ip
from swift.common import ring
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted, ConnectionTimeout
from swift.common.daemon import Daemon
from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
HTTPAccepted, HTTPBadRequest
DEBUG_TIMINGS_THRESHOLD = 10
def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area to
    allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    object_dir = os.path.dirname(object_file)
    # Target: quarantined/{server_type}s/<hash dir name>, four levels up
    # from the db's hash directory.
    quarantine_dir = os.path.abspath(
        os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
                     server_type + 's', os.path.basename(object_dir)))
    try:
        renamer(object_dir, quarantine_dir)
    except OSError, e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # The destination already exists; retry under a unique suffixed name.
        quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
        renamer(object_dir, quarantine_dir)
def roundrobin_datadirs(datadirs):
    """
    Generator to walk the data dirs in a round robin manner, evenly
    hitting each device on the system, and yielding any .db files
    found (in their proper places). The partitions within each data
    dir are walked randomly, however.

    :param datadirs: a list of (path, node_id) to walk
    :returns: A generator of (partition, path_to_db_file, node_id)
    """

    def walk_datadir(datadir, node_id):
        partitions = os.listdir(datadir)
        # Randomize partition order so concurrent walkers don't herd.
        random.shuffle(partitions)
        for partition in partitions:
            part_dir = os.path.join(datadir, partition)
            if not os.path.isdir(part_dir):
                continue
            suffixes = os.listdir(part_dir)
            for suffix in suffixes:
                suff_dir = os.path.join(part_dir, suffix)
                if not os.path.isdir(suff_dir):
                    continue
                hashes = os.listdir(suff_dir)
                for hsh in hashes:
                    hash_dir = os.path.join(suff_dir, hsh)
                    if not os.path.isdir(hash_dir):
                        continue
                    # The db file is named after its hash directory.
                    object_file = os.path.join(hash_dir, hsh + '.db')
                    if os.path.exists(object_file):
                        yield (partition, object_file, node_id)

    its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs]
    while its:
        # Iterate over a snapshot: the original removed exhausted iterators
        # from ``its`` while looping over it directly, which skips the
        # iterator following each removal within a pass.  Also use the
        # next() builtin (2.6+) instead of the Python-2-only it.next().
        for it in list(its):
            try:
                yield next(it)
            except StopIteration:
                its.remove(it)
class ReplConnection(BufferedHTTPConnection):
    """
    Helper to simplify REPLICATEing to a remote server.
    """

    def __init__(self, node, partition, hash_, logger):
        """
        :param node: node dict from the ring; supplies replication ip/port
                     and device name
        :param partition: partition of the database being replicated
        :param hash_: hash of the database (the db filename's stem)
        :param logger: logger used to record request failures
        """
        self.logger = logger
        self.node = node
        host = "%s:%s" % (node['replication_ip'], node['replication_port'])
        BufferedHTTPConnection.__init__(self, host)
        self.path = '/%s/%s/%s' % (node['device'], partition, hash_)

    def replicate(self, *args):
        """
        Make an HTTP REPLICATE request

        :param args: list of json-encodable objects
        :returns: httplib response object, or None on any error or timeout
        """
        try:
            body = simplejson.dumps(args)
            self.request('REPLICATE', self.path, body,
                         {'Content-Type': 'application/json'})
            response = self.getresponse()
            # Read the body now and stash it on the response for callers.
            response.data = response.read()
            return response
        except (Exception, Timeout):
            self.logger.exception(
                _('ERROR reading HTTP response from %s'), self.node)
            return None
class Replicator(Daemon):
"""
Implements the logic for directing db replication.
"""
def __init__(self, conf):
    """
    :param conf: configuration dict for the daemon
    """
    self.conf = conf
    self.logger = get_logger(conf, log_route='replicator')
    # Base devices directory on this host.
    self.root = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    # NOTE(review): ``default_port``, ``server_type`` and the broker class
    # are not defined in this method - presumably supplied by subclasses;
    # verify against the account/container replicators.
    self.port = int(conf.get('bind_port', self.default_port))
    concurrency = int(conf.get('concurrency', 8))
    self.cpool = GreenPool(size=concurrency)
    swift_dir = conf.get('swift_dir', '/etc/swift')
    self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
    # Max rows shipped per merge_items request (see _usync_db).
    self.per_diff = int(conf.get('per_diff', 1000))
    # Cap on merge_items round trips per db per pass (see _usync_db).
    self.max_diffs = int(conf.get('max_diffs') or 100)
    # ``run_pause`` is accepted as an alternate name for ``interval``.
    self.interval = int(conf.get('interval') or
                        conf.get('run_pause') or 30)
    self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
    self.node_timeout = int(conf.get('node_timeout', 10))
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    # Age (seconds) past which deleted rows are reclaimed; default one week.
    self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
    swift.common.db.DB_PREALLOCATION = \
        config_true_value(conf.get('db_preallocation', 'f'))
    self._zero_stats()
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.recon_replicator = '%s.recon' % self.server_type
    self.rcache = os.path.join(self.recon_cache_path,
                               self.recon_replicator)
    # Extracts the device name from a path directly under ``self.root``.
    self.extract_device_re = re.compile('%s%s([^%s]+)' % (
        self.root, os.path.sep, os.path.sep))
def _zero_stats(self):
    """Zero out the stats and restart the pass timer."""
    counters = ('attempted', 'success', 'failure', 'ts_repl', 'no_change',
                'hashmatch', 'rsync', 'diff', 'remove', 'empty',
                'remote_merge', 'diff_capped')
    self.stats = dict.fromkeys(counters, 0)
    # 'start' records when this replication pass began.
    self.stats['start'] = time.time()
def _report_stats(self):
    """Report the current stats to the logs and dump them to recon cache."""
    self.logger.info(
        _('Attempted to replicate %(count)d dbs in %(time).5f seconds '
          '(%(rate).5f/s)'),
        {'count': self.stats['attempted'],
         'time': time.time() - self.stats['start'],
         # Tiny epsilon avoids division by zero on an instantaneous pass.
         'rate': self.stats['attempted'] /
            (time.time() - self.stats['start'] + 0.0000001)})
    self.logger.info(_('Removed %(remove)d dbs') % self.stats)
    self.logger.info(_('%(success)s successes, %(failure)s failures')
                     % self.stats)
    # Persist pass results for swift-recon to pick up.
    dump_recon_cache(
        {'replication_stats': self.stats,
         'replication_time': time.time() - self.stats['start'],
         'replication_last': time.time()},
        self.rcache, self.logger)
    # Log the per-outcome counters in a single compact line.
    self.logger.info(' '.join(['%s:%s' % item for item in
                               self.stats.items() if item[0] in
                               ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
                                'empty', 'diff_capped')]))
def _rsync_file(self, db_file, remote_file, whole_file=True):
    """
    Sync a single file using rsync. Used by _rsync_db to handle syncing.

    :param db_file: file to be synced
    :param remote_file: remote location to sync the DB file to
    :param whole_file: if True, uses rsync's --whole-file flag
    :returns: True if the sync was successful, False otherwise
    """
    popen_args = [
        'rsync', '--quiet', '--no-motd',
        '--timeout=%s' % int(math.ceil(self.node_timeout)),
        '--contimeout=%s' % int(math.ceil(self.conn_timeout)),
    ]
    if whole_file:
        popen_args.append('--whole-file')
    popen_args.extend([db_file, remote_file])
    proc = subprocess.Popen(popen_args)
    proc.communicate()
    if proc.returncode != 0:
        self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
                          {'code': proc.returncode, 'args': popen_args})
    return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
              replicate_method='complete_rsync', replicate_timeout=None):
    """
    Sync a whole db using rsync.

    :param broker: DB broker object of DB to be synced
    :param device: device to sync to
    :param http: ReplConnection object
    :param local_id: unique ID of the local database replica
    :param replicate_method: remote operation to perform after rsync
    :param replicate_timeout: timeout to wait in seconds
    :returns: True if both the rsync and the follow-up REPLICATE succeed
    """
    device_ip = rsync_ip(device['replication_ip'])
    if self.vm_test_mode:
        # In vm test mode the replication port is part of the rsync module
        # name, so multiple "devices" on one box get distinct targets.
        remote_file = '%s::%s%s/%s/tmp/%s' % (
            device_ip, self.server_type, device['replication_port'],
            device['device'], local_id)
    else:
        remote_file = '%s::%s/%s/tmp/%s' % (
            device_ip, self.server_type, device['device'], local_id)
    mtime = os.path.getmtime(broker.db_file)
    if not self._rsync_file(broker.db_file, remote_file):
        return False
    # perform block-level sync if the db was modified during the first sync
    if os.path.exists(broker.db_file + '-journal') or \
            os.path.getmtime(broker.db_file) > mtime:
        # grab a lock so nobody else can modify it
        with broker.lock():
            if not self._rsync_file(broker.db_file, remote_file, False):
                return False
    # Ask the remote end to move the synced file into its final place.
    with Timeout(replicate_timeout or self.node_timeout):
        response = http.replicate(replicate_method, local_id)
    return response and response.status >= 200 and response.status < 300
def _usync_db(self, point, broker, http, remote_id, local_id):
    """
    Sync a db by sending all records since the last sync.

    :param point: synchronization high water mark between the replicas
    :param broker: database broker object
    :param http: ReplConnection object for the remote server
    :param remote_id: database id for the remote replica
    :param local_id: database id for the local replica
    :returns: boolean indicating completion and success
    """
    self.stats['diff'] += 1
    self.logger.increment('diffs')
    self.logger.debug(_('Syncing chunks with %s'), http.host)
    sync_table = broker.get_syncs()
    objects = broker.get_items_since(point, self.per_diff)
    diffs = 0
    # Ship rows in batches of ``per_diff``, at most ``max_diffs`` round
    # trips per pass.
    while len(objects) and diffs < self.max_diffs:
        diffs += 1
        with Timeout(self.node_timeout):
            response = http.replicate('merge_items', objects, local_id)
        if not response or response.status >= 300 or response.status < 200:
            if response:
                self.logger.error(_('ERROR Bad response %(status)s from '
                                    '%(host)s'),
                                  {'status': response.status,
                                   'host': http.host})
            return False
        # Advance the sync point past the rows just sent.
        point = objects[-1]['ROWID']
        objects = broker.get_items_since(point, self.per_diff)
    if objects:
        # Hit the max_diffs cap with rows still pending: give up for this
        # pass rather than monopolizing the replicator on one db.
        self.logger.debug(_(
            'Synchronization for %s has fallen more than '
            '%s rows behind; moving on and will try again next pass.') %
            (broker.db_file, self.max_diffs * self.per_diff))
        self.stats['diff_capped'] += 1
        self.logger.increment('diff_caps')
    else:
        # All rows sent; record the new sync point on the remote and,
        # on success, mirror it locally as an outgoing sync.
        with Timeout(self.node_timeout):
            response = http.replicate('merge_syncs', sync_table)
        if response and response.status >= 200 and response.status < 300:
            broker.merge_syncs([{'remote_id': remote_id,
                                 'sync_point': point}],
                               incoming=False)
            return True
    return False
def _in_sync(self, rinfo, info, broker, local_sync):
    """
    Determine whether or not two replicas of a databases are considered
    to be in sync.

    :param rinfo: remote database info
    :param info: local database info
    :param broker: database broker object
    :param local_sync: cached last sync point between replicas
    :returns: True when in sync; falls off the end (returning None, which
              is falsy) when the replicas differ
    """
    # The remote already has every local row.
    if max(rinfo['point'], local_sync) >= info['max_row']:
        self.stats['no_change'] += 1
        self.logger.increment('no_changes')
        return True
    # Same content hash: record the remote's sync point (outgoing) so the
    # next pass can short-circuit, and report in sync.
    if rinfo['hash'] == info['hash']:
        self.stats['hashmatch'] += 1
        self.logger.increment('hashmatches')
        broker.merge_syncs([{'remote_id': rinfo['id'],
                             'sync_point': rinfo['point']}],
                           incoming=False)
        return True
def _http_connect(self, node, partition, db_file):
    """
    Make an http_connection using ReplConnection

    :param node: node dictionary from the ring
    :param partition: partition to send in the url
    :param db_file: DB file
    :returns: ReplConnection object
    """
    # The replication hash is the db filename without its extension.
    hash_ = os.path.basename(db_file).split('.', 1)[0]
    return ReplConnection(node, partition, hash_, self.logger)
def _repl_to_node(self, node, broker, partition, info):
    """
    Replicate a database to a node.

    :param node: node dictionary from the ring to be replicated to
    :param broker: DB broker for the DB to be replication
    :param partition: partition on the node to replicate to
    :param info: DB info as a dictionary of {'max_row', 'hash', 'id',
                 'created_at', 'put_timestamp', 'delete_timestamp',
                 'metadata'}
    :returns: True if successful, False otherwise
    :raises DriveNotMounted: if the remote reports insufficient storage
    """
    with ConnectionTimeout(self.conn_timeout):
        http = self._http_connect(node, partition, broker.db_file)
    if not http:
        self.logger.error(
            _('ERROR Unable to connect to remote server: %s'), node)
        return False
    # Ask the remote for its view of this db.
    with Timeout(self.node_timeout):
        response = http.replicate(
            'sync', info['max_row'], info['hash'], info['id'],
            info['created_at'], info['put_timestamp'],
            info['delete_timestamp'], info['metadata'])
    if not response:
        return False
    elif response.status == HTTP_NOT_FOUND:  # completely missing, rsync
        self.stats['rsync'] += 1
        self.logger.increment('rsyncs')
        return self._rsync_db(broker, node, http, info['id'])
    elif response.status == HTTP_INSUFFICIENT_STORAGE:
        raise DriveNotMounted()
    elif response.status >= 200 and response.status < 300:
        rinfo = simplejson.loads(response.data)
        local_sync = broker.get_sync(rinfo['id'], incoming=False)
        if self._in_sync(rinfo, info, broker, local_sync):
            return True
        # if the difference in rowids between the two differs by
        # more than 50%, rsync then do a remote merge.
        if rinfo['max_row'] / float(info['max_row']) < 0.5:
            self.stats['remote_merge'] += 1
            self.logger.increment('remote_merges')
            return self._rsync_db(broker, node, http, info['id'],
                                  replicate_method='rsync_then_merge',
                                  replicate_timeout=(info['count'] / 2000))
        # else send diffs over to the remote server
        return self._usync_db(max(rinfo['point'], local_sync),
                              broker, http, rinfo['id'], info['id'])
    def _replicate_object(self, partition, object_file, node_id):
        """
        Replicate the db, choosing method based on whether or not it
        already exists on peers.

        :param partition: partition to be replicated to
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        start_time = time.time()
        self.logger.debug(_('Replicating db %s'), object_file)
        self.stats['attempted'] += 1
        self.logger.increment('attempts')
        shouldbehere = True
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            # Purge rows deleted long enough ago that peers must have seen
            # the deletes already.
            broker.reclaim(time.time() - self.reclaim_age,
                           time.time() - (self.reclaim_age * 2))
            info = broker.get_replication_info()
            full_info = broker.get_info()
            bpart = self.ring.get_part(
                full_info['account'], full_info.get('container'))
            if bpart != int(partition):
                # The DB is in the wrong partition (e.g. after a ring
                # change); replicate it out to where it belongs, then remove.
                partition = bpart
                # Important to set this false here since the later check only
                # checks if it's on the proper device, not partition.
                shouldbehere = False
                name = '/' + quote(full_info['account'])
                if 'container' in full_info:
                    name += '/' + quote(full_info['container'])
                self.logger.error(
                    'Found %s for %s when it should be on partition %s; will '
                    'replicate out and remove.' % (object_file, name, bpart))
        except (Exception, Timeout), e:
            if 'no such table' in str(e):
                # A missing table indicates a corrupt DB file.
                self.logger.error(_('Quarantining DB %s'), object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception(_('ERROR reading db %s'), object_file)
            self.stats['failure'] += 1
            self.logger.increment('failures')
            return
        # The db is considered deleted if the delete_timestamp value is greater
        # than the put_timestamp, and there are no objects.
        delete_timestamp = 0
        try:
            delete_timestamp = float(info['delete_timestamp'])
        except ValueError:
            pass
        put_timestamp = 0
        try:
            put_timestamp = float(info['put_timestamp'])
        except ValueError:
            pass
        # Only remove the local copy once the delete has aged past
        # reclaim_age, so peers have had a chance to see it.
        if delete_timestamp < (time.time() - self.reclaim_age) and \
                delete_timestamp > put_timestamp and \
                info['count'] in (None, '', 0, '0'):
            if self.report_up_to_date(full_info):
                self.delete_db(object_file)
            self.logger.timing_since('timing', start_time)
            return
        responses = []
        nodes = self.ring.get_part_nodes(int(partition))
        if shouldbehere:
            shouldbehere = bool([n for n in nodes if n['id'] == node_id])
        # See Footnote [1] for an explanation of the repl_nodes assignment.
        i = 0
        while i < len(nodes) and nodes[i]['id'] != node_id:
            i += 1
        repl_nodes = nodes[i + 1:] + nodes[:i]
        more_nodes = self.ring.get_more_nodes(int(partition))
        for node in repl_nodes:
            success = False
            try:
                success = self._repl_to_node(node, broker, partition, info)
            except DriveNotMounted:
                # Substitute a handoff node for the unmounted primary.
                repl_nodes.append(more_nodes.next())
                self.logger.error(_('ERROR Remote drive not mounted %s'), node)
            except (Exception, Timeout):
                self.logger.exception(_('ERROR syncing %(file)s with node'
                                        ' %(node)s'),
                                      {'file': object_file, 'node': node})
            self.stats['success' if success else 'failure'] += 1
            self.logger.increment('successes' if success else 'failures')
            responses.append(success)
        if not shouldbehere and all(responses):
            # If the db shouldn't be on this node and has been successfully
            # synced to all of its peers, it can be removed.
            self.delete_db(object_file)
        self.logger.timing_since('timing', start_time)
    def delete_db(self, object_file):
        """
        Remove the local copy of a database, plus its (now empty) suffix
        directory when possible.

        :param object_file: path to the database file to remove
        """
        hash_dir = os.path.dirname(object_file)
        suf_dir = os.path.dirname(hash_dir)
        with lock_parent_directory(object_file):
            # ignore_errors=True: best effort removal of the hash dir
            shutil.rmtree(hash_dir, True)
        try:
            os.rmdir(suf_dir)
        except OSError, err:
            # ENOENT/ENOTEMPTY are expected (already gone, or siblings
            # remain); anything else is worth logging.
            if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                self.logger.exception(
                    _('ERROR while trying to clean up %s') % suf_dir)
        self.stats['remove'] += 1
        device_name = self.extract_device(object_file)
        self.logger.increment('removes.' + device_name)
def extract_device(self, object_file):
"""
Extract the device name from an object path. Returns "UNKNOWN" if the
path could not be extracted successfully for some reason.
:param object_file: the path to a database file.
"""
match = self.extract_device_re.match(object_file)
if match:
return match.groups()[0]
return "UNKNOWN"
    def report_up_to_date(self, full_info):
        """
        Hook for subclasses to decide whether a deleted DB's stats have been
        fully reported before it is removed; the base class always says yes.

        :param full_info: dict from broker.get_info()
        :returns: True
        """
        return True
    def run_once(self, *args, **kwargs):
        """
        Run a replication pass once.

        Finds the local devices this daemon is responsible for, cleans up
        stale tmp files, then replicates every DB found under each device's
        datadir using the greenthread pool.
        """
        self._zero_stats()
        dirs = []
        ips = whataremyips()
        if not ips:
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return
        for node in self.ring.devs:
            # Only process devices assigned to this host:port.
            if (node and node['replication_ip'] in ips and
                    node['replication_port'] == self.port):
                if self.mount_check and not os.path.ismount(
                        os.path.join(self.root, node['device'])):
                    self.logger.warn(
                        _('Skipping %(device)s as it is not mounted') % node)
                    continue
                # Remove leftover rsync tmp files older than reclaim_age.
                unlink_older_than(
                    os.path.join(self.root, node['device'], 'tmp'),
                    time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    dirs.append((datadir, node['id']))
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info(_('Replication run OVER'))
        self._report_stats()
    def run_forever(self, *args, **kwargs):
        """
        Replicate dbs under the given root in an infinite loop.

        Each pass is paced to run no more often than ``self.interval``
        seconds; errors are logged and the loop continues.
        """
        # Random initial jitter so daemons across the cluster don't all
        # start their passes at the same moment.
        sleep(random.random() * self.interval)
        while True:
            begin = time.time()
            try:
                self.run_once()
            except (Exception, Timeout):
                self.logger.exception(_('ERROR trying to replicate'))
            elapsed = time.time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)
class ReplicatorRpc(object):
    """
    Handle Replication RPC calls.

    ``dispatch`` routes an incoming replication request to one of the verb
    methods (``sync``, ``merge_syncs``, ``merge_items``, ``complete_rsync``,
    ``rsync_then_merge``).
    """

    def __init__(self, root, datadir, broker_class, mount_check=True,
                 logger=None):
        """
        :param root: base path under which device directories live
        :param datadir: name of the data directory for this server type
        :param broker_class: DB broker class used to open database files
        :param mount_check: if True, require the drive to be mounted
        :param logger: optional logger; one is created if not given
        """
        self.root = root
        self.datadir = datadir
        self.broker_class = broker_class
        self.mount_check = mount_check
        self.logger = logger or get_logger({}, log_route='replicator-rpc')

    def dispatch(self, replicate_args, args):
        """
        Route a replication request to the named operation.

        :param replicate_args: (drive, partition, hash) from the request path
        :param args: list whose first element is the operation name; the
                     remainder are operation-specific arguments
        :returns: a Response object
        """
        # args must support pop() so we can pull off the operation name.
        if not hasattr(args, 'pop'):
            return HTTPBadRequest(body='Invalid object type')
        op = args.pop(0)
        drive, partition, hsh = replicate_args
        if self.mount_check and \
                not os.path.ismount(os.path.join(self.root, drive)):
            return Response(status='507 %s is not mounted' % drive)
        db_file = os.path.join(self.root, drive,
                               storage_directory(self.datadir, partition, hsh),
                               hsh + '.db')
        if op == 'rsync_then_merge':
            return self.rsync_then_merge(drive, db_file, args)
        if op == 'complete_rsync':
            return self.complete_rsync(drive, db_file, args)
        else:
            # someone might be about to rsync a db to us,
            # make sure there's a tmp dir to receive it.
            mkdirs(os.path.join(self.root, drive, 'tmp'))
            if not os.path.exists(db_file):
                return HTTPNotFound()
            # Remaining ops take an open broker for the local DB file.
            return getattr(self, op)(self.broker_class(db_file), args)

    def sync(self, broker, args):
        """
        Handle an incoming 'sync' request: compare the remote DB's state
        against the local one and report back where the local copy stands.

        :param broker: open DB broker for the local database
        :param args: (remote_sync, hash, id, created_at, put_timestamp,
                      delete_timestamp, metadata)
        :returns: Response whose body is the local replication info as JSON
        """
        (remote_sync, hash_, id_, created_at, put_timestamp,
         delete_timestamp, metadata) = args
        timemark = time.time()
        try:
            info = broker.get_replication_info()
        except (Exception, Timeout), e:
            if 'no such table' in str(e):
                # Missing table means a corrupt DB; quarantine it.
                self.logger.error(_("Quarantining DB %s") % broker.db_file)
                quarantine_db(broker.db_file, broker.db_type)
                return HTTPNotFound()
            raise
        # The timing debug blocks below only log when an operation took
        # longer than DEBUG_TIMINGS_THRESHOLD.
        timespan = time.time() - timemark
        if timespan > DEBUG_TIMINGS_THRESHOLD:
            self.logger.debug(_('replicator-rpc-sync time for info: %.02fs') %
                              timespan)
        if metadata:
            timemark = time.time()
            broker.update_metadata(simplejson.loads(metadata))
            timespan = time.time() - timemark
            if timespan > DEBUG_TIMINGS_THRESHOLD:
                self.logger.debug(_('replicator-rpc-sync time for '
                                    'update_metadata: %.02fs') % timespan)
        if info['put_timestamp'] != put_timestamp or \
                info['created_at'] != created_at or \
                info['delete_timestamp'] != delete_timestamp:
            timemark = time.time()
            broker.merge_timestamps(
                created_at, put_timestamp, delete_timestamp)
            timespan = time.time() - timemark
            if timespan > DEBUG_TIMINGS_THRESHOLD:
                self.logger.debug(_('replicator-rpc-sync time for '
                                    'merge_timestamps: %.02fs') % timespan)
        timemark = time.time()
        info['point'] = broker.get_sync(id_)
        timespan = time.time() - timemark
        if timespan > DEBUG_TIMINGS_THRESHOLD:
            self.logger.debug(_('replicator-rpc-sync time for get_sync: '
                                '%.02fs') % timespan)
        if hash_ == info['hash'] and info['point'] < remote_sync:
            # Hashes match, so just record the advanced sync point.
            timemark = time.time()
            broker.merge_syncs([{'remote_id': id_,
                                 'sync_point': remote_sync}])
            info['point'] = remote_sync
            timespan = time.time() - timemark
            if timespan > DEBUG_TIMINGS_THRESHOLD:
                self.logger.debug(_('replicator-rpc-sync time for '
                                    'merge_syncs: %.02fs') % timespan)
        return Response(simplejson.dumps(info))

    def merge_syncs(self, broker, args):
        """Merge a list of incoming sync points into the local DB."""
        broker.merge_syncs(args[0])
        return HTTPAccepted()

    def merge_items(self, broker, args):
        """Merge a batch of rows (args[0]) from remote id args[1]."""
        broker.merge_items(args[0], args[1])
        return HTTPAccepted()

    def complete_rsync(self, drive, db_file, args):
        """
        Finish an rsync transfer: move the rsynced tmp file (named by
        args[0]) into its final location, giving it a new DB id.
        """
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        # Refuse if a DB already exists at the destination.
        if os.path.exists(db_file):
            return HTTPNotFound()
        if not os.path.exists(old_filename):
            return HTTPNotFound()
        broker = self.broker_class(old_filename)
        broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()

    def rsync_then_merge(self, drive, db_file, args):
        """
        Finish an rsync_then_merge transfer: merge the existing local DB's
        rows into the freshly rsynced copy, then move it into place.
        """
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if not os.path.exists(db_file) or not os.path.exists(old_filename):
            return HTTPNotFound()
        new_broker = self.broker_class(old_filename)
        existing_broker = self.broker_class(db_file)
        point = -1
        # Copy rows over in batches of 1000, yielding between batches so
        # other greenthreads can run.
        objects = existing_broker.get_items_since(point, 1000)
        while len(objects):
            new_broker.merge_items(objects)
            point = objects[-1]['ROWID']
            objects = existing_broker.get_items_since(point, 1000)
            sleep()
        new_broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
# Footnote [1]:
# This orders the nodes so that, given nodes a b c, a will contact b then c,
# b will contact c then a, and c will contact a then b -- in other words, each
# node will always contact the next node in the list first.
# This helps in the case where databases are all way out of sync, so each
# node is likely to be sending to a different node than it's receiving from,
# rather than two nodes talking to each other, starving out the third.
# If the third didn't even have a copy and the first two nodes were way out
# of sync, such starvation would mean the third node wouldn't get any copy
# until the first two nodes finally got in sync, which could take a while.
# This new ordering ensures such starvation doesn't occur, making the data
# more durable.
| |
#!/usr/bin/python
#
# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_rediscachefirewallrule
version_added: "2.8"
short_description: Manage Azure Cache for Redis Firewall rules
description:
- Create, update and delete Azure Cache for Redis Firewall rules.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
cache_name:
description:
- Name of the Azure Cache for Redis.
required: True
name:
description:
- Name of the Firewall rule.
required: True
start_ip_address:
description:
- The start IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format.
- Required when creating Firewall rule.
end_ip_address:
description:
- The end IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format.
- Required when creating Firewall rule.
state:
description:
- Assert the state of the Firewall rule of Azure Cache for Redis.
- Use C(present) to create or update Firewall rule of Azure Cache for Redis and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Yunge Zhu(@yungezz)
'''
EXAMPLES = '''
- name: Create a Firewall rule for Azure Cache for Redis
azure_rm_rediscachefirewallrule:
resource_group: myResourceGroup
cache_name: myRedisCache
name: myRule
start_ip_address: 192.168.1.1
end_ip_address: 192.168.1.4
- name: Update a Firewall rule for Azure Cache for Redis
azure_rm_rediscachefirewallrule:
resource_group: myResourceGroup
cache_name: myRedisCache
name: myRule
end_ip_address: 192.168.1.5
'''
RETURN = '''
id:
description:
        - Id of the Azure Cache for Redis firewall rule.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/redis/myRedis/firewallRules/myRule"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from msrest.serialization import Model
from azure.mgmt.redis import RedisManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
def firewall_rule_to_dict(rule):
    """Serialize an Azure Redis firewall rule SDK object into a plain dict."""
    return {
        'id': rule.id,
        'name': rule.name,
        'start_ip_address': rule.start_ip,
        'end_ip_address': rule.end_ip,
        'type': rule.type,
    }
class Actions:
    """Enumeration of the operations this module may decide to perform."""
    NoAction = 0
    CreateUpdate = 1
    Delete = 2
class AzureRMRedisCacheFirewallRule(AzureRMModuleBase):
    """Configuration class for an Azure RM Cache for Redis Firewall Rule resource"""

    def __init__(self):
        # Ansible argument spec; mirrors the DOCUMENTATION options block.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            cache_name=dict(
                type='str',
                required=True
            ),
            start_ip_address=dict(
                type='str'
            ),
            end_ip_address=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # RedisManagementClient, created in exec_module.
        self._client = None

        self.resource_group = None
        self.name = None
        self.cache_name = None

        self.start_ip_address = None
        self.end_ip_address = None

        self.results = dict(
            changed=False,
            id=None
        )
        self.state = None

        # Which operation to perform; decided in exec_module.
        self.to_do = Actions.NoAction

        super(AzureRMRedisCacheFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                            supports_check_mode=True,
                                                            supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy each declared module argument onto self.
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])

        old_response = None
        response = None

        # get management client
        self._client = self.get_mgmt_svc_client(RedisManagementClient,
                                                base_url=self._cloud_environment.endpoints.resource_manager,
                                                api_version='2018-03-01')

        # check if the firewall rule exists
        old_response = self.get()
        if old_response:
            self.results['id'] = old_response['id']

        if self.state == 'present':
            # if firewall rule not exists
            if not old_response:
                self.log("Firewall Rule of Azure Cache for Redis doesn't exist")
                self.to_do = Actions.CreateUpdate
            else:
                # redis exists already, do update
                self.log("Firewall Rule of Azure Cache for Redis already exists")
                # Fill unspecified addresses from the existing rule so a
                # partial update keeps the other end of the range.
                if self.start_ip_address is None:
                    self.start_ip_address = old_response['start_ip_address']
                if self.end_ip_address is None:
                    self.end_ip_address = old_response['end_ip_address']
                # check if update
                if self.check_update(old_response):
                    self.to_do = Actions.CreateUpdate
        elif self.state == 'absent':
            if old_response:
                self.log("Delete Firewall Rule of Azure Cache for Redis")
                self.results['id'] = old_response['id']
                self.to_do = Actions.Delete
            else:
                self.results['changed'] = False
                self.log("Azure Cache for Redis {0} doesn't exist.".format(self.name))

        if self.to_do == Actions.CreateUpdate:
            self.log('Need to Create/Update Firewall rule of Azure Cache for Redis')
            self.results['changed'] = True

            # In check mode, report the would-be change without calling Azure.
            if self.check_mode:
                return self.results

            response = self.create_or_update()
            self.results['id'] = response['id']

        if self.to_do == Actions.Delete:
            self.log('Delete Firewall rule of Azure Cache for Redis')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete()
            self.log('Firewall rule of Azure Cache for Redis deleted')

        return self.results

    def check_update(self, existing):
        """
        Return True if the requested IP range differs from the existing
        rule's range (i.e. an update call is needed).

        :param existing: dict produced by firewall_rule_to_dict()
        """
        if self.start_ip_address and self.start_ip_address != existing['start_ip_address']:
            self.log("start_ip_address diff: origin {0} / update {1}".format(existing['start_ip_address'], self.start_ip_address))
            return True
        if self.end_ip_address and self.end_ip_address != existing['end_ip_address']:
            self.log("end_ip_address diff: origin {0} / update {1}".format(existing['end_ip_address'], self.end_ip_address))
            return True
        return False

    def create_or_update(self):
        '''
        Creates Firewall rule of Azure Cache for Redis with the specified configuration.

        :return: deserialized Firewall rule of Azure Cache for Redis state dictionary
        '''
        self.log(
            "Creating Firewall rule of Azure Cache for Redis {0}".format(self.name))
        try:
            response = self._client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
                                                                    cache_name=self.cache_name,
                                                                    rule_name=self.name,
                                                                    start_ip=self.start_ip_address,
                                                                    end_ip=self.end_ip_address)
            # Long-running operations return a poller; wait for completion.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create/update Firewall rule of Azure Cache for Redis.')
            self.fail(
                "Error creating/updating Firewall rule of Azure Cache for Redis: {0}".format(str(exc)))
        return firewall_rule_to_dict(response)

    def delete(self):
        '''
        Deletes specified Firewall rule of Azure Cache for Redis in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Firewall rule of Azure Cache for Redis {0}".format(self.name))
        try:
            response = self._client.firewall_rules.delete(resource_group_name=self.resource_group,
                                                          rule_name=self.name,
                                                          cache_name=self.cache_name)
        except CloudError as e:
            self.log('Error attempting to delete the Firewall rule of Azure Cache for Redis.')
            self.fail(
                "Error deleting the Firewall rule of Azure Cache for Redis: {0}".format(str(e)))
        return True

    def get(self):
        '''
        Gets the properties of the specified Firewall rule of Azure Cache for Redis.

        :return: Azure Cache for Redis Firewall Rule instance state dictionary,
                 or False when the rule does not exist (callers test truthiness)
        '''
        self.log("Checking if the Firewall Rule {0} is present".format(self.name))

        response = None

        try:
            response = self._client.firewall_rules.get(resource_group_name=self.resource_group,
                                                       rule_name=self.name,
                                                       cache_name=self.cache_name)

            self.log("Response : {0}".format(response))
            self.log("Redis Firewall Rule : {0} found".format(response.name))
            return firewall_rule_to_dict(response)

        except CloudError as ex:
            # Not found (or other API error): treat as "rule absent".
            self.log("Didn't find Azure Redis Firewall rule {0} in resource group {1}".format(
                self.name, self.resource_group))

        return False
def main():
    """Main execution: constructing the module class runs the module."""
    AzureRMRedisCacheFirewallRule()
if __name__ == '__main__':
main()
| |
"""
Component that will handle the storing of details in xmpp for configuration.
"""
import logging
from sleekxmpp.plugins.base import base_plugin
from sleekxmpp.xmlstream import ElementBase, register_stanza_plugin
logger = logging.getLogger(__name__)
class ConfigurationStanza(ElementBase):
    """
    Stanza responsible for handling the configuration core.

    <configuration xmlns='rho:configuration'>
        <entry>
            <key>some_key</key>
            <value>some_value</value>
        </entry>
    </configuration>
    """
    name = 'configuration'
    namespace = 'rho:configuration'
    plugin_attrib = 'configuration'
    interfaces = {}
    sub_interfaces = {}

    def add_entry(self, key, value):
        """Append a key/value <entry> child stanza to this configuration."""
        entry = EntryStanza()
        entry['key'] = str(key)
        entry['value'] = str(value)
        self.append(entry)
class EntryStanza(ElementBase):
    """
    A single <entry> child of a <configuration> stanza, carrying one
    key/value pair in the 'rho:configuration' namespace.
    """
    name = 'entry'
    namespace = 'rho:configuration'
    plugin_attrib = 'entry'
    # Plural accessor: parent['entries'] yields all entry substanzas.
    plugin_multi_attrib = 'entries'
    # key and value are stored as sub-elements rather than attributes.
    interfaces = {'key', 'value'}
    sub_interfaces = interfaces
class BotConfiguration(base_plugin):
    """
    SleekXMPP plugin that persists the bot's configuration key/value pairs
    in a pubsub node on the bot's own JID and exposes them as a dictionary.
    """

    # Events fired on the xmpp object when configuration is fetched/updated.
    CONFIGURATION_RECEIVED_EVENT = 'rho::configuration_received'
    CONFIGURATION_UPDATED_EVENT = 'rho::configuration_updated'

    # Name of the pubsub node holding the configuration payload.
    _configuration_data_node = 'rho:configuration'

    name = 'rho_bot_configuration'
    dependencies = {'xep_0060', 'rho_bot_scheduler', }
    description = 'RHO: Configuration Plugin'

    def plugin_init(self):
        """
        Configure the plugin to handle the private storage of data.
        :return:
        """
        self._configuration = dict()
        register_stanza_plugin(ConfigurationStanza, EntryStanza, iterable=True)
        self.xmpp.add_event_handler("session_start", self._start)

    def _start(self, event):
        """
        When connected to the service, request the configuration details for the object.  When finished, notify all
        listeners that configuration details have been fetched from the server.
        :return:
        """
        promise = self.xmpp['rho_bot_scheduler'].promise()
        self.xmpp['xep_0060'].get_nodes(jid=self.xmpp.boundjid.bare,
                                        callback=self.xmpp['rho_bot_scheduler'].generate_callback_promise(promise))
        # Chain: _found_nodes rejects when the node is missing, in which case
        # _create_node (the error handler of the second then) creates it;
        # either way the configuration is then fetched and processed.
        promise = promise.then(self._found_nodes)
        promise = promise.then(None, self._create_node)
        promise = promise.then(self._fetch_configuration)
        promise.then(self._configuration_data_retrieved)

    def _found_nodes(self, stanza):
        """
        Check to see if the configuration node is defined or not.  If it's not defined, then create it.

        Resolves the returned promise when the node exists, rejects it
        otherwise.

        :param stanza: disco result listing the nodes on our own JID
        :return: promise
        """
        promise = self.xmpp['rho_bot_scheduler'].promise()
        logger.info('Found Nodes: %s' % stanza)
        found = False
        for item in stanza['disco_items']['items']:
            # NOTE(review): item appears to be a disco item tuple whose
            # second element is the node name — confirm against xep_0030.
            if item[1] == self._configuration_data_node:
                found = True
                break

        if found:
            promise.resolved(None)
        else:
            promise.rejected('Node not found')

        return promise

    def _fetch_configuration(self, ignored):
        """
        Request that the configuration be loaded.

        :param ignored: resolution value of the previous promise (unused)
        :return: promise resolved with the pubsub items result
        """
        promise = self.xmpp['rho_bot_scheduler'].promise()
        logger.info('Fetching Configuration: %s' % self._configuration_data_node)
        self.xmpp['xep_0060'].get_items(jid=self.xmpp.boundjid.bare, node=self._configuration_data_node,
                                        callback=self.xmpp['rho_bot_scheduler'].generate_callback_promise(promise))
        return promise

    def _create_node(self, ignored):
        """
        Create the configuration storage node, and then store the data.

        :param ignored: rejection reason from _found_nodes (unused)
        :return: promise resolved when the node has been created
        """
        promise = self.xmpp['rho_bot_scheduler'].promise()
        logger.info('Creating node: %s' % self._configuration_data_node)
        # whitelist access + persistence with a single retained item.
        configuration_form = self.xmpp['xep_0004'].make_form(ftype='submit', title='Node Configuration')
        configuration_form.add_field(var='pubsub#access_model', value='whitelist')
        configuration_form.add_field(var='pubsub#persist_items', value='1')
        configuration_form.add_field(var='pubsub#max_items', value='1')
        self.xmpp['xep_0060'].create_node(jid=self.xmpp.boundjid.bare,
                                          node=self._configuration_data_node,
                                          callback=self.xmpp['rho_bot_scheduler'].generate_callback_promise(promise),
                                          config=configuration_form)
        return promise

    def _configuration_data_retrieved(self, stanza):
        """
        Call back that is called when the data is retrieved.

        Translates the stanza into configuration details and then notifies listeners that the configuration has been
        retrieved.
        :return:
        """
        logger.debug('Received configuration data: %s' % stanza)
        configuration_node = None
        # Only the last item's payload is kept (the node retains one item).
        for item in stanza['pubsub']['items']['substanzas']:
            configuration_node = item.get_payload()

        if configuration_node:
            configuration = ConfigurationStanza(xml=configuration_node)
            if 'entries' in configuration.keys():
                for entry in configuration['entries']:
                    self._configuration[entry['key']] = entry['value']

        self.xmpp.event(self.CONFIGURATION_RECEIVED_EVENT)

    def store_data(self):
        """
        Store data into the pub subscribe values.
        :return:
        """
        configuration_stanza = ConfigurationStanza()
        # Sort keys so the stored payload is deterministic.
        for key in sorted(self._configuration.keys()):
            configuration_stanza.add_entry(key, self._configuration[key])
        self.xmpp['xep_0060'].publish(jid=self.xmpp.boundjid.bare, payload=configuration_stanza,
                                      node=self._configuration_data_node,
                                      block=False)
        self.xmpp.event(self.CONFIGURATION_UPDATED_EVENT)

    def get_configuration(self):
        """
        Return a configuration dictionary for this bot.
        :return:
        """
        return self._configuration

    def get_value(self, key, default=None, persist_if_missing=True):
        """
        Returns the value of the key, or the default value.  If the default value is returned, then the default value
        is persisted.
        :param key:
        :param default:
        :param persist_if_missing: should the value be persisted if it's not in the configuration value.
        :return:
        """
        if key in self._configuration:
            return self._configuration[key]
        elif default is not None and persist_if_missing:
            self._configuration[key] = default
            self.store_data()
        # default is returned even when it was not persisted (None default
        # or persist_if_missing=False).
        return default

    def merge_configuration(self, configuration_dictionary, persist=True):
        """
        Merge the configuration dictionary into the current configuration.
        :param configuration_dictionary:
        :param persist: should the configuration be persisted if missing.
        :return:
        """
        self._configuration.update(configuration_dictionary)
        if persist:
            self.store_data()
rho_bot_configuration = BotConfiguration
| |
#!/usr/bin/env python
"""
@file ion/core/security/authentication.py
@author Roger Unwin
@author Dorian Raymer
@brief routines for working with crypto (x509 certificates and private_keys)
"""
import binascii
import urllib
import os
import datetime
import hashlib
try:
from M2Crypto import EVP, X509
except ImportError:
pass
from pyon.core.bootstrap import CFG
from pyon.container.cc import Container
from pyon.util.log import log
#XXX @note What is this?
#sys.path.insert(0, "build/lib.linux-i686-2.4/")
#XXX @todo Fix: Should not need absolute paths.
BASEPATH = os.path.realpath(".")
CERTSTORE_PATH = BASEPATH + '/res/certstore/'
KEYSTORE_PATH = BASEPATH + '/res/keystore/'
CONTAINER_CERT_NAME = 'container.crt'
CONTAINER_KEY_NAME = 'container.key'
ORG_CERT_NAME = 'root.crt'
class Authentication(object):
    """
    Routines for working with crypto (x509 certificates and private_keys).

    On construction the container certificate/key and the org root
    certificate are loaded from the configured cert/key stores (or via the
    directory service), and the root certificate is added to the white list
    of trusted signing certificates.
    """

    def __init__(self):
        self.cont_cert = None
        self.cont_key = None
        self.root_cert = None
        # Root certificates that presented certificates may descend from.
        self.white_list = []

        # Look for certificates and keys in "the usual places"
        certstore_path = self.certstore = CFG.get_safe('authentication.certstore', CERTSTORE_PATH)
        log.debug("certstore_path: %s" % str(certstore_path))
        # FIX: this used to rebind self.certstore a second time, clobbering
        # the certstore path with the keystore path; keep them separate.
        keystore_path = self.keystore = CFG.get_safe('authentication.keystore', KEYSTORE_PATH)
        log.debug("keystore_path: %s" % str(keystore_path))

        if certstore_path and keystore_path:
            if certstore_path == 'directory':
                # FIX: the format string was missing its %s placeholder,
                # which made this debug call raise a TypeError at runtime.
                log.debug("Container.instance.directory: %s" % str(Container.instance.directory))
                Container.instance.directory.load_authentication()
            else:
                cont_cert_path = os.path.join(certstore_path, CONTAINER_CERT_NAME)
                log.debug("cont_cert_path: %s" % cont_cert_path)
                cont_key_path = os.path.join(keystore_path, CONTAINER_KEY_NAME)
                log.debug("cont_key_path: %s" % cont_key_path)
                root_cert_path = os.path.join(certstore_path, ORG_CERT_NAME)
                log.debug("root_cert_path: %s" % root_cert_path)

                # Only load when the full set of files is present.
                if os.path.exists(cont_cert_path) and os.path.exists(cont_key_path) and os.path.exists(root_cert_path):
                    with open(cont_cert_path, 'r') as f:
                        self.cont_cert = f.read()
                    log.debug("cont_cert: %s" % self.cont_cert)
                    self.cont_key = EVP.load_key(cont_key_path)
                    with open(root_cert_path, 'r') as f:
                        self.root_cert = f.read()
                    log.debug("root_cert: %s" % self.root_cert)
                    self.add_to_white_list(self.root_cert)

    def add_to_white_list(self, root_cert_string):
        """Register a root certificate as a trusted signer."""
        log.debug("Adding certificate <%s> to white list" % root_cert_string)
        self.white_list.append(root_cert_string)

    def get_container_cert(self):
        """Return the container certificate (PEM string) or None."""
        return self.cont_cert

    def authentication_enabled(self):
        """Authentication is considered enabled once a private key loaded."""
        if self.cont_key:
            return True
        else:
            return False

    def sign_message_hex(self, message, rsa_private_key=None):
        """
        @param message byte string
        return a hex encoded signature for a message
        """
        return binascii.hexlify(self.sign_message(message, rsa_private_key))

    def sign_message(self, message, rsa_private_key=None):
        """
        Take a message, and return a binary signature of it.

        Note: the signature is computed over the hex SHA1 digest of the
        message, not the raw message — verify_message() does the same.

        @param message byte string to sign
        @param rsa_private_key optional PEM key string; defaults to the
               container key
        """
        digest = hashlib.sha1(message).hexdigest()
        if rsa_private_key:
            pkey = EVP.load_key_string(rsa_private_key)
        else:
            pkey = self.cont_key
        pkey.sign_init()
        pkey.sign_update(digest)
        sig = pkey.sign_final()
        return sig

    def verify_message_hex(self, message, cert_string, signed_message_hex):
        """
        verify a hex encoded signature for a message
        """
        return self.verify_message(message, cert_string, binascii.unhexlify(signed_message_hex))

    def verify_message(self, message, cert_string, signed_message):
        """
        This verifies that the message and the signature are indeed signed by the certificate.

        @return tuple of (status, cause); status is 'Valid' on success
        """
        # Check validity of certificate
        status, cause = self.is_certificate_valid(cert_string)
        if status != "Valid":
            log.debug("Message <%s> signed with invalid certificate <%s>. Cause <%s>" % (str(message), cert_string, cause))
            return status, cause
        # Must mirror sign_message(): verify against the hex SHA1 digest.
        digest = hashlib.sha1(message).hexdigest()
        # Check validity of signature
        x509 = X509.load_cert_string(cert_string)
        pubkey = x509.get_pubkey()
        pubkey.verify_init()
        pubkey.verify_update(digest)
        outcome = pubkey.verify_final(signed_message)
        if outcome == 1:
            return 'Valid', 'OK'
        else:
            return 'Invalid', 'Signature failed verification'

    def decode_certificate_string(self, cert_string):
        """
        Return a Dict of all known attributes for the certificate (PEM string).
        """
        return self.decode_certificate(X509.load_cert_string(cert_string, format=1))

    def decode_certificate(self, x509):
        """
        Return a Dict of all known attributes for the certificate (X509 object).
        """
        attributes = {}

        attributes['subject_items'] = {}
        attributes['subject'] = str(x509.get_subject())
        for item in attributes['subject'].split('/'):
            try:
                key, value = item.split('=')
                attributes['subject_items'][key] = urllib.unquote(value)
            except ValueError:
                # Best effort: skip components that are not simple
                # 'key=value' pairs (e.g. the empty leading component).
                pass

        attributes['issuer_items'] = {}
        attributes['issuer'] = str(x509.get_issuer())
        for item in attributes['issuer'].split('/'):
            try:
                key, value = item.split('=')
                attributes['issuer_items'][key] = urllib.unquote(value)
            except ValueError:
                pass

        attributes['not_valid_before'] = str(x509.get_not_before())
        attributes['not_valid_after'] = str(x509.get_not_after())
        attributes['ext_count'] = str(x509.get_ext_count())
        attributes['fingerprint'] = str(x509.get_fingerprint())
        attributes['text'] = str(x509.as_text())
        attributes['serial_number'] = str(x509.get_serial_number())
        attributes['version'] = str(x509.get_version())

        return attributes

    def is_certificate_valid(self, cert_string):
        """
        This returns if the certificate is valid.

        @return tuple of (status, cause); status is 'Valid' or 'Invalid'
        """
        if not self.is_certificate_within_date_range(cert_string):
            return 'Invalid', 'Certificate is not within date range'
        if self.is_certificate_in_white_list(cert_string):
            return 'Valid', 'OK'
        else:
            return 'Invalid', ' Certificate does not derive from any known root certificates'

    def is_certificate_in_white_list(self, cert_string):
        """Return True if the cert was issued by any white-listed root."""
        for root_cert in self.white_list:
            if self.is_certificate_descended_from(cert_string, root_cert):
                return True
        return False

    def is_certificate_descended_from(self, cert_string, root_cert):
        """
        tests if the certificate was issued by the passed in certificate authority

        NOTE: this only compares the issuer DN to the root's subject DN; it
        does not cryptographically verify the issuer signature.
        """
        root_cert_attrs = self.decode_certificate_string(root_cert)
        root_subject = root_cert_attrs['subject']
        cert_attrs = self.decode_certificate_string(cert_string)
        cert_issuer = cert_attrs['issuer']
        if root_subject == cert_issuer:
            return True
        return False
        # store = X509.X509_Store()
        # store.add_x509(root_cert)
        # x509 = X509.load_cert_string(cert_string)
        # return X509.X509.verify(x509)

    def is_certificate_within_date_range(self, cert_string):
        """
        Test if the current date is covered by the certificates valid within date range.
        """
        cert = X509.load_cert_string(cert_string)
        nvb = datetime.datetime.strptime(str(cert.get_not_before()), "%b %d %H:%M:%S %Y %Z")
        nva = datetime.datetime.strptime(str(cert.get_not_after()), "%b %d %H:%M:%S %Y %Z")
        now = datetime.datetime.utcnow()
        if now < nvb:
            return False
        if now > nva:
            return False
        return True
| |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level presubmit script for V8.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
# File-path regexes (relative to the repo root) skipped by the checks in
# this script; [\\\/] matches either path separator.
_EXCLUDED_PATHS = (
    r"^test[\\\/].*",
    r"^testing[\\\/].*",
    r"^third_party[\\\/].*",
    r"^tools[\\\/].*",
)
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.+-unittest\.cc',
    # Has a method VisitForTest().
    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
    # Test extension.
    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
)
# Warning text emitted when production code appears to call test-only
# helpers (see _CheckNoProductionCodeUsingTestOnlyFunctions below).
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.')
def _V8PresubmitChecks(input_api, output_api):
  """Runs the V8 presubmit checks.

  Extends sys.path so the checker classes in tools/presubmit.py are
  importable, runs each checker against the local checkout, and turns
  every failing checker into a PresubmitError.
  """
  import sys
  sys.path.append(input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'tools'))
  from presubmit import CppLintProcessor
  from presubmit import SourceProcessor
  from presubmit import CheckRuntimeVsNativesNameClashes
  from presubmit import CheckExternalReferenceRegistration
  from presubmit import CheckAuthorizedAuthor

  # Each entry pairs a check callable with the error message reported
  # when it fails.
  checks = [
      (lambda: CppLintProcessor().Run(input_api.PresubmitLocalPath()),
       "C++ lint check failed"),
      (lambda: SourceProcessor().Run(input_api.PresubmitLocalPath()),
       "Copyright header, trailing whitespaces and two empty lines "
       "between declarations check failed"),
      (lambda: CheckRuntimeVsNativesNameClashes(
           input_api.PresubmitLocalPath()),
       "Runtime/natives name clash check failed"),
      (lambda: CheckExternalReferenceRegistration(
           input_api.PresubmitLocalPath()),
       "External references registration check failed"),
  ]
  results = []
  for check, message in checks:
    if not check():
      results.append(output_api.PresubmitError(message))
  # Author check produces its own result objects.
  results.extend(CheckAuthorizedAuthor(input_api, output_api))
  return results
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path
  # Collect the changed lines of every affected C++ file; checkdeps
  # scans them for newly-added #include directives.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])
  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
  # Split violations by severity: DISALLOW rules are hard errors,
  # everything else (temp-allowed "!" rules) only warns.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n    %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)
  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # We only scan .cc files, as the declaration of for-testing functions in
  # header files are hard to distinguish from calls to such functions without a
  # proper C++ parser.
  file_inclusion_pattern = r'.+\.cc'
  # Heuristic for test-only helpers: names in namespace test, or with a
  # ForTest(ing)/for_test(ing) suffix.
  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
  # A call site is the pattern immediately followed by '('.
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  # Matches that appear inside a // comment are ignored.
  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
  # Qualified member names and definitions (pattern followed by a '{'
  # body) are not calls and are excluded.
  exclusion_pattern = input_api.re.compile(
    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
      base_function_pattern, base_function_pattern))

  def FilterFile(affected_file):
    # Skip excluded directories, known test-only sources, and the
    # presubmit default black list.
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(file_inclusion_pattern, ),
      black_list=black_list)

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    # Only lines changed by this CL are inspected, not whole files.
    for line_number, line in f.ChangedContents():
      if (inclusion_pattern.search(line) and
          not comment_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
          '%s:%d\n    %s' % (local_path, line_number, line.strip()))

  if problems:
    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
  else:
    return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit.

  Runs the canned owners/format checks plus the project-specific
  checks defined above and returns the combined result list.
  """
  checks = [
      lambda: input_api.canned_checks.CheckOwners(
          input_api, output_api, source_file_filter=None),
      lambda: input_api.canned_checks.CheckPatchFormatted(
          input_api, output_api),
      lambda: _V8PresubmitChecks(input_api, output_api),
      lambda: _CheckUnwantedDependencies(input_api, output_api),
      lambda: _CheckNoProductionCodeUsingTestOnlyFunctions(
          input_api, output_api),
  ]
  results = []
  for check in checks:
    results.extend(check())
  return results
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if include/v8-version.h has been updated."""
src_version = 'include/v8-version.h'
FilterFile = lambda file: file.LocalPath() == src_version
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked when a change is uploaded for review."""
  return (_CommonChecks(input_api, output_api) +
          _CheckChangeLogFlag(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked when a change is being committed."""
  results = []
  results += _CommonChecks(input_api, output_api)
  results += _CheckChangeLogFlag(input_api, output_api)
  results += input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api)
  # The tree-status check may only be skipped for version bumps
  # (see _SkipTreeCheck).
  if not _SkipTreeCheck(input_api, output_api):
    results += input_api.canned_checks.CheckTreeIsOpen(
        input_api, output_api,
        json_url='http://v8-status.appspot.com/current?format=json')
  return results
def GetPreferredTryMasters(project, change):
  """Returns the default v8 try-bot configuration for the try server."""
  default_bots = (
      'v8_linux_rel',
      'v8_linux_dbg',
      'v8_linux_nodcheck_rel',
      'v8_linux_gcc_compile_rel',
      'v8_linux64_rel',
      'v8_linux64_asan_rel',
      'v8_win_rel',
      'v8_win_compile_dbg',
      'v8_win64_rel',
      'v8_mac_rel',
      'v8_linux_arm_rel',
      'v8_linux_arm64_rel',
      'v8_android_arm_compile_rel',
      'v8_linux_chromium_gn_rel',
  )
  # Every bot runs the default test set.
  return {
      'tryserver.v8': {bot: set(['defaulttests']) for bot in default_bots},
  }
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import skip_because
class SecurityGroupRulesTestJSON(base.BaseComputeTest):
    """Tests the compute security-group-rules API over the JSON interface.

    Covers positive creation/listing of rules and negative cases with
    invalid group ids, ip protocols, ports and port ranges.  Every
    created group/rule is removed via addCleanup.
    """

    # Serialization format used by the REST clients for this class.
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(SecurityGroupRulesTestJSON, cls).setUpClass()
        # All tests use the compute security-groups client.
        cls.client = cls.security_groups_client

    @attr(type='gate')
    def test_security_group_rules_create(self):
        # Positive test: Creation of Security Group rule
        # should be successful
        # Creating a Security Group to add rules to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = \
            self.client.create_security_group(s_name, s_description)
        securitygroup_id = securitygroup['id']
        self.addCleanup(self.client.delete_security_group, securitygroup_id)
        # Adding rules to the created Security Group
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        resp, rule = \
            self.client.create_security_group_rule(securitygroup_id,
                                                   ip_protocol,
                                                   from_port,
                                                   to_port)
        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
        self.assertEqual(200, resp.status)

    @attr(type='gate')
    def test_security_group_rules_create_with_optional_arguments(self):
        # Positive test: Creation of Security Group rule
        # with optional arguments
        # should be successful
        secgroup1 = None
        secgroup2 = None
        # Creating a Security Group to add rules to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = \
            self.client.create_security_group(s_name, s_description)
        secgroup1 = securitygroup['id']
        self.addCleanup(self.client.delete_security_group, secgroup1)
        # Creating a Security Group so as to assign group_id to the rule
        s_name2 = data_utils.rand_name('securitygroup-')
        s_description2 = data_utils.rand_name('description-')
        resp, securitygroup = \
            self.client.create_security_group(s_name2, s_description2)
        secgroup2 = securitygroup['id']
        self.addCleanup(self.client.delete_security_group, secgroup2)
        # Adding rules to the created Security Group with optional arguments
        parent_group_id = secgroup1
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.2.3.124/24'
        group_id = secgroup2
        resp, rule = \
            self.client.create_security_group_rule(parent_group_id,
                                                   ip_protocol,
                                                   from_port,
                                                   to_port,
                                                   cidr=cidr,
                                                   group_id=group_id)
        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
        self.assertEqual(200, resp.status)

    @skip_because(bug="1182384",
                  condition=config.TempestConfig().service_available.neutron)
    @attr(type=['negative', 'gate'])
    def test_security_group_rules_create_with_invalid_id(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid Parent group id
        # Adding rules to the invalid Security Group id
        parent_group_id = data_utils.rand_int_id(start=999)
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        self.assertRaises(exceptions.NotFound,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @attr(type=['negative', 'gate'])
    def test_security_group_rules_create_with_invalid_ip_protocol(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid ip_protocol
        # Creating a Security Group to add rule to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = self.client.create_security_group(s_name,
                                                                s_description)
        # Adding rules to the created Security Group
        parent_group_id = securitygroup['id']
        ip_protocol = data_utils.rand_name('999')
        from_port = 22
        to_port = 22
        self.addCleanup(self.client.delete_security_group, securitygroup['id'])
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @attr(type=['negative', 'gate'])
    def test_security_group_rules_create_with_invalid_from_port(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid from_port
        # Creating a Security Group to add rule to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = self.client.create_security_group(s_name,
                                                                s_description)
        # Adding rules to the created Security Group
        parent_group_id = securitygroup['id']
        ip_protocol = 'tcp'
        # BUG FIX: the previous range (start=999, end=65535) produced a
        # VALID port number, so the rule was not rejected for the reason
        # this test checks.  Ports above 65535 are out of range (matches
        # the invalid_to_port test below).
        from_port = data_utils.rand_int_id(start=65536)
        to_port = 22
        self.addCleanup(self.client.delete_security_group, securitygroup['id'])
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @attr(type=['negative', 'gate'])
    def test_security_group_rules_create_with_invalid_to_port(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid to_port
        # Creating a Security Group to add rule to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = self.client.create_security_group(s_name,
                                                                s_description)
        # Adding rules to the created Security Group
        parent_group_id = securitygroup['id']
        ip_protocol = 'tcp'
        from_port = 22
        to_port = data_utils.rand_int_id(start=65536)
        self.addCleanup(self.client.delete_security_group, securitygroup['id'])
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @attr(type=['negative', 'gate'])
    def test_security_group_rules_create_with_invalid_port_range(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid port range.
        # Creating a Security Group to add rule to it.
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = self.client.create_security_group(s_name,
                                                                s_description)
        # Adding a rule to the created Security Group
        secgroup_id = securitygroup['id']
        ip_protocol = 'tcp'
        # from_port > to_port is an invalid range.
        from_port = 22
        to_port = 21
        self.addCleanup(self.client.delete_security_group, securitygroup['id'])
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          secgroup_id, ip_protocol, from_port, to_port)

    @skip_because(bug="1182384",
                  condition=config.TempestConfig().service_available.neutron)
    @attr(type=['negative', 'gate'])
    def test_security_group_rules_delete_with_invalid_id(self):
        # Negative test: Deletion of Security Group rule should be FAIL
        # with invalid rule id
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_security_group_rule,
                          data_utils.rand_int_id(start=999))

    @attr(type='gate')
    def test_security_group_rules_list(self):
        # Positive test: Created Security Group rules should be
        # in the list of all rules
        # Creating a Security Group to add rules to it
        s_name = data_utils.rand_name('securitygroup-')
        s_description = data_utils.rand_name('description-')
        resp, securitygroup = \
            self.client.create_security_group(s_name, s_description)
        securitygroup_id = securitygroup['id']
        # Delete the Security Group at the end of this method
        self.addCleanup(self.client.delete_security_group, securitygroup_id)
        # Add a first rule to the created Security Group
        ip_protocol1 = 'tcp'
        from_port1 = 22
        to_port1 = 22
        resp, rule = \
            self.client.create_security_group_rule(securitygroup_id,
                                                   ip_protocol1,
                                                   from_port1, to_port1)
        rule1_id = rule['id']
        # Delete the Security Group rule1 at the end of this method
        self.addCleanup(self.client.delete_security_group_rule, rule1_id)
        # Add a second rule to the created Security Group
        ip_protocol2 = 'icmp'
        from_port2 = -1
        to_port2 = -1
        resp, rule = \
            self.client.create_security_group_rule(securitygroup_id,
                                                   ip_protocol2,
                                                   from_port2, to_port2)
        rule2_id = rule['id']
        # Delete the Security Group rule2 at the end of this method
        self.addCleanup(self.client.delete_security_group_rule, rule2_id)
        # Get rules of the created Security Group
        resp, rules = \
            self.client.list_security_group_rules(securitygroup_id)
        self.assertTrue(any([i for i in rules if i['id'] == rule1_id]))
        self.assertTrue(any([i for i in rules if i['id'] == rule2_id]))
class SecurityGroupRulesTestXML(SecurityGroupRulesTestJSON):
    # Re-run the entire JSON test suite over the XML interface.
    _interface = 'xml'
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import threading
import time
import Queue
import logging
_log = logging.getLogger('tashi.parallel')
def threaded(func):
    """Decorator: run *func* in a freshly started thread on each call.

    The wrapper starts the thread immediately and returns the
    threading.Thread object so callers can join() it if desired.
    """
    def wrapper(*args, **kwargs):
        worker = threading.Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
class ThreadPool(Queue.Queue):
    """A fixed-size pool of daemon worker threads fed from this queue.

    Work items are (func, args, kwargs) tuples; each worker loops
    forever pulling items and invoking them.
    """
    def __init__(self, size=8, maxsize=0):
        # size: number of worker threads; maxsize: queue bound
        # (0 means unbounded, per Queue.Queue semantics).
        Queue.Queue.__init__(self, maxsize)
        for i in range(size):
            name = "parallel.ThreadPool#%s" % (i)
            thread = threading.Thread(name=name, target=self._worker)
            # Daemon threads so an idle pool never blocks interpreter exit.
            thread.setDaemon(True)
            thread.start()
    def _worker(self):
        # Worker loop: run queued callables, logging (not raising)
        # errors so one failing job cannot kill the worker thread.
        while True:
            try:
                func, args, kwargs = self.get()
                func(*args, **kwargs)
            except Exception, e:
                _log.error(e)
                # FIXME: do something smarter here, backtrace, log,
                # allow user-defined error handling...
    def submit(self, func, *args, **kwargs):
        # Enqueue func(*args, **kwargs) for asynchronous execution.
        self.put((func, args, kwargs))
    def submitlist(self, func, args, kwargs):
        # Same as submit(), but args/kwargs are passed as explicit
        # collections instead of being unpacked from the call.
        self.put((func, args, kwargs))
class ThreadPoolClass:
    """Base/mixin class giving each instance its own ThreadPool as
    _threadpool_pool (the attribute name threadpoolmethod looks up)."""
    def __init__(self, size=8, maxsize=0):
        self._threadpool_pool = ThreadPool(size=size, maxsize=maxsize)
def threadpool(pool):
    """Decorator factory: calls to the decorated function are submitted
    to *pool* for asynchronous execution (fire-and-forget; the wrapper
    returns None)."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            pool.submit(func, *args, **kwargs)
        return wrapper
    return decorator
def threadpoolmethod(meth):
    """Decorator for methods: submit each call to the instance's
    _threadpool_pool, lazily creating a default ThreadPool on first use
    if the instance does not already have one."""
    def wrapper(*args, **kwargs):
        instance = args[0]
        try:
            pool = instance._threadpool_pool
        except AttributeError:
            pool = instance.__dict__.setdefault('_threadpool_pool', ThreadPool())
        # FIXME: how do we check parent class?
        # assert args[0].__class__ == ThreadPoolClass, "Thread pool method must be in a ThreadPoolClass"
        pool.submit(meth, *args, **kwargs)
    return wrapper
def synchronized(lock=None):
    """Decorator factory: serialize calls to the decorated function
    under *lock*; a fresh RLock is created when none is supplied, so
    all calls to that one function share it.

    NOTE(review): the manual acquire/release emulates try/finally, and
    the Python 2 're-raise by value' ('raise e') loses the original
    traceback; kept as-is to preserve behavior.
    """
    if lock==None:
        lock = threading.RLock()
    def dec(func):
        def fn(*args, **kwargs):
            lock.acquire()
            ex = None
            try:
                r = func(*args, **kwargs)
            except Exception, e:
                ex = e
            # Release before re-raising so the lock is never leaked.
            lock.release()
            if ex != None:
                raise e
            return r
        return fn
    return dec
def synchronizedmethod(func):
    """Decorator for methods: serialize calls through a per-instance
    RLock stored as _synchronized_lock.

    All methods of one instance decorated this way share the same lock,
    so they are mutually exclusive (see testSynchronizedMethod below).
    """
    def fn(*args, **kwargs):
        try:
            lock = args[0]._synchronized_lock
        except AttributeError:
            # First synchronized call on this instance: create the lock.
            lock = args[0].__dict__.setdefault('_synchronized_lock', threading.RLock())
        lock.acquire()
        ex = None
        try:
            res = func(*args, **kwargs)
        except Exception, e:
            ex = e
        # Release before re-raising so the lock is never leaked.
        lock.release()
        if ex != None:
            raise e
        return res
    return fn
##############################
# Test Code
##############################
import unittest
#import sys
#import time
class TestThreadPool(unittest.TestCase):
    """Timing-based tests for the decorators above.

    Each test launches 1-second sleeping jobs and asserts on elapsed
    wall-clock time: serialized work takes ~N seconds, parallel work
    ~1.  NOTE(review): inherently timing-sensitive; may flake on
    heavily loaded machines.
    """
    def setUp(self):
        # NOTE(review): errmargin appears unused; the assertions rely on
        # assertAlmostEqual's 'places' argument instead.
        self.errmargin = 0.5
    def testUnthreaded(self):
        # Baseline: four serial 1-second jobs take ~4 seconds.
        queue = Queue.Queue()
        def slowfunc(sleep=1):
            time.sleep(sleep)
            queue.put(None)
        tt = time.time()
        for _ in range(4):
            slowfunc()
        for _ in range(4):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 4, 1)
    def testThreaded(self):
        # @threaded jobs overlap: eight 1-second jobs finish in ~1 second.
        queue = Queue.Queue()
        @threaded
        def slowthreadfunc(sleep=1):
            time.sleep(sleep)
            queue.put(None)
        tt = time.time()
        for _ in range(8):
            slowthreadfunc()
        for _ in range(8):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 1, 1)
    def testThreadPool(self):
        # A 4-thread pool runs eight 1-second jobs in ~2 seconds.
        pool = ThreadPool(size=4)
        queue = Queue.Queue()
        @threadpool(pool)
        def slowpoolfunc(sleep=1):
            time.sleep(sleep)
            queue.put(None)
        tt = time.time()
        for _ in range(8):
            slowpoolfunc()
        for _ in range(8):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 2, 1)
    def testUnthreadedMethod(self):
        # Baseline for methods: four serial calls take ~4 seconds.
        queue = Queue.Queue()
        class slowclass:
            def __init__(self, sleep=1):
                self.sleep=sleep
            def beslow(self):
                time.sleep(self.sleep)
                queue.put(None)
        sc = slowclass()
        tt = time.time()
        for _ in range(4):
            sc.beslow()
        for _ in range(4):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 4, 1)
    def testThreadedMethod(self):
        # @threaded methods overlap: four calls finish in ~1 second.
        queue = Queue.Queue()
        class slowclass:
            def __init__(self, sleep=1):
                self.sleep=sleep
            @threaded
            def beslow(self):
                time.sleep(self.sleep)
                queue.put(None)
        sc = slowclass()
        tt = time.time()
        for _ in range(4):
            sc.beslow()
        for _ in range(4):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 1, 1)
    def testThreadPoolMethod(self):
        # @threadpoolmethod lazily creates the default 8-thread pool:
        # sixteen 1-second jobs finish in ~2 seconds.
        queue = Queue.Queue()
        class slowclass:
            def __init__(self, sleep=1):
                self.sleep=sleep
            @threadpoolmethod
            def beslow(self):
                time.sleep(self.sleep)
                queue.put(None)
        sc = slowclass()
        tt = time.time()
        for _ in range(16):
            sc.beslow()
        for _ in range(16):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 2, 1)
    def testSynchronized(self):
        # @synchronized serializes the threaded callers: ~4 seconds.
        queue = Queue.Queue()
        @synchronized()
        def addtoqueue():
            time.sleep(1)
            queue.put(None)
        @threaded
        def slowthreadfunc():
            addtoqueue()
        tt = time.time()
        for _ in range(4):
            slowthreadfunc()
        for _ in range(4):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 4, 1)
    def testSynchronizedMethod(self):
        # Both methods share one per-instance lock, so all eight calls
        # are fully serialized: ~8 seconds.
        queue = Queue.Queue()
        class addtoqueue:
            @synchronizedmethod
            def addtoqueue1(self):
                time.sleep(1)
                queue.put(None)
            @synchronizedmethod
            def addtoqueue2(self):
                time.sleep(1)
                queue.put(None)
        atc = addtoqueue()
        @threaded
        def slowthreadfunc1():
            atc.addtoqueue1()
        @threaded
        def slowthreadfunc2():
            atc.addtoqueue2()
        tt = time.time()
        for _ in range(4):
            slowthreadfunc1()
            slowthreadfunc2()
        for _ in range(8):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 8, 1)
    def testUnsynchronizedMethod(self):
        # Without locking, the same eight calls overlap: ~1 second.
        queue = Queue.Queue()
        class addtoqueue:
            def addtoqueue1(self):
                time.sleep(1)
                queue.put(None)
            def addtoqueue2(self):
                time.sleep(1)
                queue.put(None)
        atc = addtoqueue()
        @threaded
        def slowthreadfunc1():
            atc.addtoqueue1()
        @threaded
        def slowthreadfunc2():
            atc.addtoqueue2()
        tt = time.time()
        for _ in range(4):
            slowthreadfunc1()
            slowthreadfunc2()
        for _ in range(8):
            queue.get()
        tt = time.time() - tt
        self.assertAlmostEqual(tt, 1, 1)
if __name__=='__main__':
    # When executed directly, run the test suite above with INFO-level
    # logging sent to stdout.
    import sys
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s:\t %(message)s",
                        stream=sys.stdout)
    suite = unittest.TestLoader().loadTestsFromTestCase(TestThreadPool)
    unittest.TextTestRunner(verbosity=2).run(suite)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import PlanbcoinTestFramework
from test_framework.util import *
class ImportMultiTest (PlanbcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
    def setup_network(self):
        """Start the nodes (overrides the framework's default network setup)."""
        self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
# keyword definition
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Planbcoin Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.nodes = self.start_nodes(2, self.options.tmpdir)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
    # Entry point: instantiate the test and hand control to the framework.
    ImportMultiTest().main()
| |
"""
This module handles serialization of arbitrary python structural data,
intended primarily to be stored in the database. It also supports
storing Django model instances (which plain pickle cannot do).
This serialization is used internally by the server, notably for
storing data in Attributes and for piping data to process pools.
The purpose of dbserialize is to handle all forms of data. For
well-structured non-arbitrary exchange, such as communicating with a
rich web client, a simpler JSON serialization makes more sense.
This module also implements the SaverList, SaverDict and SaverSet
classes. These are iterables that track their position in a nested
structure and makes sure to send updates up to their root. This is
used by Attributes - without it, one would not be able to update mutables
in-situ, e.g obj.db.mynestedlist[3][5] = 3 would never be saved and
be out of sync with the database.
"""
from functools import update_wrapper
from collections import defaultdict, MutableSequence, MutableSet, MutableMapping
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from src.utils.utils import to_str, uses_database
from src.utils import logger
# Public API of this module.
__all__ = ("to_pickle", "from_pickle", "do_pickle", "do_unpickle")

# Protocol 2 is the highest pickle protocol supported by Python 2.x.
PICKLE_PROTOCOL = 2

# initialization and helpers

# Attribute access shortcuts that bypass any __getattribute__/__setattr__
# overloads on typeclassed objects.
_GA = object.__getattribute__
_SA = object.__setattr__
# Lazily populated by _init_globals() (model name <-> ContentType lookups).
_FROM_MODEL_MAP = None
_TO_MODEL_MAP = None
# Return the object's typeclass if it has (a truthy) one, else the object.
_TO_TYPECLASS = lambda o: hasattr(o, 'typeclass') and o.typeclass or o
# Detect the internal tuple form produced by _pack_dbobj.
_IS_PACKED_DBOBJ = lambda o: type(o) == tuple and len(o) == 4 and o[0] == '__packed_dbobj__'
# Creation-time fingerprint used to detect databases re-using object ids.
_TO_DATESTRING = lambda o: _GA(o, "db_date_created").strftime("%Y:%m:%d-%H:%M:%S:%f")
if uses_database("mysql"):
    from src.server.models import ServerConfig
    mysql_version = ServerConfig.objects.get_mysql_db_version()
    if mysql_version < '5.6.4':
        # mysql <5.6.4 don't support millisecond precision
        _TO_DATESTRING = lambda o: _GA(o, "db_date_created").strftime("%Y:%m:%d-%H:%M:%S:000000")
def _init_globals():
    """Lazily build the ContentType lookup maps (avoids circular imports)."""
    global _FROM_MODEL_MAP, _TO_MODEL_MAP
    if not _FROM_MODEL_MAP:
        # model name -> natural key; missing keys default to ""
        _FROM_MODEL_MAP = defaultdict(str)
        for content_type in ContentType.objects.all():
            _FROM_MODEL_MAP[content_type.model] = content_type.natural_key()
    if not _TO_MODEL_MAP:
        # natural key -> model class; missing keys default to ""
        _TO_MODEL_MAP = defaultdict(str)
        for content_type in ContentType.objects.all():
            _TO_MODEL_MAP[content_type.natural_key()] = content_type.model_class()
#
# SaverList, SaverDict, SaverSet - Attribute-specific helper classes and functions
#
def _save(method):
"method decorator that saves data to Attribute"
def save_wrapper(self, *args, **kwargs):
self.__doc__ = method.__doc__
ret = method(self, *args, **kwargs)
self._save_tree()
return ret
return update_wrapper(save_wrapper, method)
class _SaverMutable(object):
    """
    Parent class for properly handling of nested mutables in
    an Attribute. If not used something like
    obj.db.mylist[1][2] = "test" (allocation to a nested list)
    will not save the updated value to the database.
    """
    def __init__(self, *args, **kwargs):
        "store all properties for tracking the tree"
        # _parent: the Saver* container holding this one (None at the root)
        self._parent = kwargs.pop("parent", None)
        # _db_obj: object with a .value property to persist to (root only)
        self._db_obj = kwargs.pop("db_obj", None)
        # _data: the wrapped plain container; set by the subclass __init__
        self._data = None

    def _save_tree(self):
        "recursively traverse back up the tree, save when we reach the root"
        if self._parent:
            self._parent._save_tree()
        elif self._db_obj:
            # assigning .value is what actually persists the structure
            self._db_obj.value = self
        else:
            logger.log_errmsg("_SaverMutable %s has no root Attribute to save to." % self)

    def _convert_mutables(self, data):
        "converts mutables to Saver* variants and assigns .parent property"
        def process_tree(item, parent):
            "recursively populate the tree, storing parents"
            dtype = type(item)
            # NOTE: Python 2 builtins (basestring, long) - immutables pass
            # through unchanged, including tuples.
            if dtype in (basestring, int, long, float, bool, tuple):
                return item
            elif dtype == list:
                dat = _SaverList(parent=parent)
                dat._data.extend(process_tree(val, dat) for val in item)
                return dat
            elif dtype == dict:
                dat = _SaverDict(parent=parent)
                dat._data.update((key, process_tree(val, dat)) for key, val in item.items())
                return dat
            elif dtype == set:
                dat = _SaverSet(parent=parent)
                dat._data.update(process_tree(val, dat) for val in item)
                return dat
            return item
        return process_tree(data, self)

    def __repr__(self):
        return self._data.__repr__()

    def __len__(self):
        return self._data.__len__()

    def __iter__(self):
        return self._data.__iter__()

    def __getitem__(self, key):
        return self._data.__getitem__(key)

    # mutating dunders are wrapped so every change is saved to the root
    @_save
    def __setitem__(self, key, value):
        self._data.__setitem__(key, self._convert_mutables(value))

    @_save
    def __delitem__(self, key):
        self._data.__delitem__(key)
class _SaverList(_SaverMutable, MutableSequence):
    """
    List variant of _SaverMutable: every mutation writes the whole tree
    back to the owning Attribute.
    """
    def __init__(self, *args, **kwargs):
        super(_SaverList, self).__init__(*args, **kwargs)
        self._data = list(*args)

    @_save
    def __add__(self, other):
        # call list.__add__ directly (mirrors the plain-list semantics,
        # including its behavior for non-list operands)
        self._data = self._data.__add__(other)
        return self._data

    @_save
    def insert(self, index, value):
        converted = self._convert_mutables(value)
        self._data.insert(index, converted)
class _SaverDict(_SaverMutable, MutableMapping):
    """
    Dict variant of _SaverMutable: key assignment/deletion (inherited from
    the parent's wrapped dunders) writes through to the Attribute.
    """
    def __init__(self, *args, **kwargs):
        super(_SaverDict, self).__init__(*args, **kwargs)
        self._data = dict(*args)
class _SaverSet(_SaverMutable, MutableSet):
    """
    Set variant of _SaverMutable: add/discard persist to the Attribute.
    """
    def __init__(self, *args, **kwargs):
        super(_SaverSet, self).__init__(*args, **kwargs)
        self._data = set(*args)

    def __contains__(self, value):
        return value in self._data

    @_save
    def add(self, value):
        converted = self._convert_mutables(value)
        self._data.add(converted)

    @_save
    def discard(self, value):
        self._data.discard(value)
#
# serialization helpers
#
def _pack_dbobj(item):
    """
    Check and convert django database objects to an internal representation.

    Returns the original input unchanged, or the tuple
    ("__packed_dbobj__", key, creation_time, id) for database objects.
    """
    _init_globals()
    # unwrap a typeclass to its underlying db model, if present (and truthy)
    if hasattr(item, 'dbobj') and item.dbobj:
        obj = item.dbobj
    else:
        obj = item
    # only objects exposing both an id and a creation date qualify; the
    # lookup key is False otherwise, which the defaultdict maps to ""
    if hasattr(obj, "id") and hasattr(obj, "db_date_created") and hasattr(obj, '__class__'):
        natural_key = _FROM_MODEL_MAP[obj.__class__.__name__.lower()]
    else:
        natural_key = _FROM_MODEL_MAP[False]
    if natural_key:
        # build the internal representation tuple
        return ('__packed_dbobj__', natural_key, _TO_DATESTRING(obj), _GA(obj, "id"))
    return item
def _unpack_dbobj(item):
    """
    Convert an internal ("__packed_dbobj__", key, date, id) tuple back to a
    Django model (or its typeclass, if it has one).

    The caller must already have verified that ``item`` is a packed dbobj.
    Returns None when the object no longer exists in the database or when
    its creation date no longer matches (id re-use detection).
    """
    _init_globals()
    try:
        obj = item[3] and _TO_TYPECLASS(_TO_MODEL_MAP[item[1]].objects.get(id=item[3]))
    except ObjectDoesNotExist:
        return None
    # sanity-check the creation date: some databases 're-use' ids, in which
    # case the stored fingerprint will not match the fetched object
    if _TO_DATESTRING(obj.dbobj) == item[2] and obj:
        return obj
    return None
#
# Access methods
#
def to_pickle(data):
    """
    This prepares data on arbitrary form to be pickled. It handles any nested
    structure and returns data on a form that is safe to pickle (including
    having converted any database models to their internal representation).
    We also convert any Saver*-type objects back to their normal
    representations, they are not pickle-safe.

    Fix: the iterable-conservation branch tested ``hasattr(item, '__item__')``
    - a typo for ``'__iter__'`` (compare the mirror branch in from_pickle) -
    so custom iterables were never preserved and fell through to
    _pack_dbobj instead.
    """
    def process_item(item):
        "Recursive processor and identification of data"
        dtype = type(item)
        # Python 2 builtins (basestring/long): scalars pass through as-is
        if dtype in (basestring, int, long, float, bool):
            return item
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype in (list, _SaverList):
            return [process_item(val) for val in item]
        elif dtype in (dict, _SaverDict):
            return dict((key, process_item(val)) for key, val in item.items())
        elif dtype in (set, _SaverSet):
            return set(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            # we try to conserve the iterable class, if not convert to list
            try:
                return item.__class__([process_item(val) for val in item])
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        # anything else may be a database object - pack it if so
        return _pack_dbobj(item)
    return process_item(data)
@transaction.autocommit
def from_pickle(data, db_obj=None):
    """
    This should be fed a just de-pickled data object. It will be converted back
    to a form that may contain database objects again. Note that if a database
    object was removed (or changed in-place) in the database, None will be returned.

    db_obj - this is the model instance (normally an Attribute) that Saver*-type
             iterables will save to when they update. It must have a 'value'
             property that saves assigned data to the database.

    If db_obj is given, this function will convert lists, dicts and sets to their
    _SaverList, _SaverDict and _SaverSet counterparts.

    Fix: the tuple branch of process_tree called ``process_tree(val)`` without
    the required ``parent`` argument, raising TypeError for any tuple nested
    inside a tracked structure; it now passes ``parent`` through.
    """
    def process_item(item):
        "Recursive processor and identification of data"
        dtype = type(item)
        # Python 2 builtins (basestring/long): scalars pass through as-is
        if dtype in (basestring, int, long, float, bool):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return _unpack_dbobj(item)
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype == dict:
            return dict((key, process_item(val)) for key, val in item.items())
        elif dtype == set:
            return set(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if it accepts an iterator
                return item.__class__(process_item(val) for val in item)
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        return item

    def process_tree(item, parent):
        "Recursive processor, building a parent-tree from iterable data"
        dtype = type(item)
        if dtype in (basestring, int, long, float, bool):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return _unpack_dbobj(item)
        elif dtype == tuple:
            # tuples are immutable: contents are processed but the tuple
            # itself needs no Saver* wrapper
            return tuple(process_tree(val, parent) for val in item)
        elif dtype == list:
            dat = _SaverList(parent=parent)
            dat._data.extend(process_tree(val, dat) for val in item)
            return dat
        elif dtype == dict:
            dat = _SaverDict(parent=parent)
            dat._data.update(dict((key, process_tree(val, dat)) for key, val in item.items()))
            return dat
        elif dtype == set:
            dat = _SaverSet(parent=parent)
            dat._data.update(set(process_tree(val, dat) for val in item))
            return dat
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if it accepts an iterator
                return item.__class__(process_tree(val, parent) for val in item)
            except (AttributeError, TypeError):
                dat = _SaverList(parent=parent)
                dat._data.extend(process_tree(val, dat) for val in item)
                return dat
        return item

    if db_obj:
        # convert lists, dicts and sets to their Saved* counterparts. It
        # is only relevant if the "root" is an iterable of the right type.
        dtype = type(data)
        if dtype == list:
            dat = _SaverList(db_obj=db_obj)
            dat._data.extend(process_tree(val, parent=dat) for val in data)
            return dat
        elif dtype == dict:
            dat = _SaverDict(db_obj=db_obj)
            dat._data.update((key, process_tree(val, parent=dat)) for key, val in data.items())
            return dat
        elif dtype == set:
            dat = _SaverSet(db_obj=db_obj)
            dat._data.update(process_tree(val, parent=dat) for val in data)
            return dat
    return process_item(data)
def do_pickle(data):
    """Pickle ``data`` and return the result as a string."""
    pickled = dumps(data, protocol=PICKLE_PROTOCOL)
    return to_str(pickled)
def do_unpickle(data):
    """Reverse do_pickle: load an object from a pickled string."""
    pickled = to_str(data)
    return loads(pickled)
| |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that attempts to push to a special git repository to verify that git
credentials are configured correctly. It also verifies that gclient solution is
configured to use git checkout.
It will be added as gclient hook shortly before Chromium switches to git and
removed after the switch.
When running as hook in *.corp.google.com network it will also report status
of the push attempt to the server (on appengine), so that chrome-infra team can
collect information about misconfigured Git accounts.
"""
import contextlib
import datetime
import errno
import getpass
import json
import logging
import netrc
import optparse
import os
import pprint
import shutil
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import urllib2
import urlparse
# Absolute path to src/ directory.
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Absolute path to a file with gclient solutions.
GCLIENT_CONFIG = os.path.join(os.path.dirname(REPO_ROOT), '.gclient')

# Incremented whenever some changes to script logic are made. Change in version
# will cause the check to be rerun on next gclient runhooks invocation.
CHECKER_VERSION = 1

# Do not attempt to upload a report after this date.
UPLOAD_DISABLE_TS = datetime.datetime(2014, 10, 1)

# URL to POST json with results to.
MOTHERSHIP_URL = (
    'https://chromium-git-access.appspot.com/'
    'git_access/api/v1/reports/access_check')

# Repository to push test commits to.
TEST_REPO_URL = 'https://chromium.googlesource.com/a/playground/access_test'

# Git-compatible gclient solution.
GOOD_GCLIENT_SOLUTION = {
    'name': 'src',
    'deps_file': 'DEPS',
    'managed': False,
    'url': 'https://chromium.googlesource.com/chromium/src.git',
}

# Possible chunks of git push response in case .netrc is misconfigured.
BAD_ACL_ERRORS = (
    '(prohibited by Gerrit)',
    'does not match your user account',
    'Git repository not found',
    'Invalid user name or password',
    'Please make sure you have the correct access rights',
)

# Git executable to call.
GIT_EXE = 'git.bat' if sys.platform == 'win32' else 'git'
def is_on_bot():
    """Returns True when executing under buildbot (CHROME_HEADLESS=1)."""
    headless = os.environ.get('CHROME_HEADLESS')
    return headless == '1'
def is_in_google_corp():
    """True when running in google corp network."""
    try:
        fqdn = socket.getfqdn()
    except socket.error:
        # be conservative if the hostname can't even be resolved
        logging.exception('Failed to get FQDN')
        return False
    return fqdn.endswith('.corp.google.com')
def is_using_git():
    """True if git checkout is used."""
    git_objects_dir = os.path.join(REPO_ROOT, '.git', 'objects')
    return os.path.exists(git_objects_dir)
def is_using_svn():
    """True if svn checkout is used."""
    svn_dir = os.path.join(REPO_ROOT, '.svn')
    return os.path.exists(svn_dir)
def read_git_config(prop):
    """Reads git config property of src.git repo.

    Returns empty string in case of errors.
    """
    try:
        process = subprocess.Popen(
            [GIT_EXE, 'config', prop], stdout=subprocess.PIPE, cwd=REPO_ROOT)
        stdout, _ = process.communicate()
        return stdout.strip().decode('utf-8')
    except OSError as exc:
        # ENOENT just means git isn't installed; anything else is unexpected
        if exc.errno != errno.ENOENT:
            logging.exception('Unexpected error when calling git')
        return ''
def read_netrc_user(netrc_obj, host):
    """Reads 'user' field of a host entry in netrc.

    Returns empty string if netrc is missing, or host is not there.
    """
    entry = netrc_obj.authenticators(host) if netrc_obj else None
    return entry[0] if entry else ''
def get_git_version():
    """Returns the `git --version` output, or '' if git is not available."""
    try:
        process = subprocess.Popen([GIT_EXE, '--version'], stdout=subprocess.PIPE)
        stdout, _ = process.communicate()
        if process.returncode == 0:
            return stdout.strip()
        return ''
    except OSError as exc:
        # ENOENT just means git isn't installed; anything else is unexpected
        if exc.errno != errno.ENOENT:
            logging.exception('Unexpected error when calling git')
        return ''
def read_gclient_solution():
  """Read information about 'src' gclient solution from .gclient file.

  Returns tuple:
    (url, deps_file, managed)
  or
    (None, None, None) if no such solution.
  """
  try:
    env = {}
    # .gclient is a Python source file; evaluate it (Python 2 execfile) and
    # pick up the 'solutions' list it defines.
    execfile(GCLIENT_CONFIG, env, env)
    for sol in (env.get('solutions') or []):
      if sol.get('name') == 'src':
        return sol.get('url'), sol.get('deps_file'), sol.get('managed')
    return None, None, None
  except Exception:
    # Best effort: a malformed .gclient must not break the caller.
    logging.exception('Failed to read .gclient solution')
    return None, None, None
def read_git_insteadof(host):
  """Reads insteadOf git config entries mentioning `host`.

  Returns the matching (lowercased) config lines joined by newlines,
  or '' on error.
  """
  try:
    child = subprocess.Popen(
        [GIT_EXE, 'config', '-l'], stdout=subprocess.PIPE)
    out, _ = child.communicate()
  except OSError as exc:
    if exc.errno != errno.ENOENT:
      logging.exception('Unexpected error when calling git')
    return ''
  matching = []
  for raw_line in out.strip().split('\n'):
    lowered = raw_line.lower()
    if 'insteadof=' in lowered and host in lowered:
      matching.append(lowered)
  return '\n'.join(matching)
def scan_configuration():
  """Scans local environment for git related configuration values.

  Returns a flat dict (the report payload later uploaded to the server);
  every value is collected best-effort.
  """
  # Git checkout?
  is_git = is_using_git()
  # On Windows HOME should be set.
  if 'HOME' in os.environ:
    # Windows convention is '_netrc', POSIX is '.netrc'.
    netrc_path = os.path.join(
        os.environ['HOME'],
        '_netrc' if sys.platform.startswith('win') else '.netrc')
  else:
    netrc_path = None
  # Netrc exists?
  is_using_netrc = netrc_path and os.path.exists(netrc_path)
  # Read it.
  netrc_obj = None
  if is_using_netrc:
    try:
      netrc_obj = netrc.netrc(netrc_path)
    except Exception:
      # A malformed netrc should not abort the scan; report the rest.
      logging.exception('Failed to read netrc from %s', netrc_path)
      netrc_obj = None
  # Read gclient 'src' solution.
  gclient_url, gclient_deps, gclient_managed = read_gclient_solution()
  return {
    'checker_version': CHECKER_VERSION,
    'is_git': is_git,
    'is_home_set': 'HOME' in os.environ,
    'is_using_netrc': is_using_netrc,
    # File mode matters: git complains about world-readable netrc files.
    'netrc_file_mode': os.stat(netrc_path).st_mode if is_using_netrc else 0,
    'git_version': get_git_version(),
    'platform': sys.platform,
    'username': getpass.getuser(),
    'git_user_email': read_git_config('user.email') if is_git else '',
    'git_user_name': read_git_config('user.name') if is_git else '',
    'git_insteadof': read_git_insteadof('chromium.googlesource.com'),
    'chromium_netrc_email':
        read_netrc_user(netrc_obj, 'chromium.googlesource.com'),
    'chrome_internal_netrc_email':
        read_netrc_user(netrc_obj, 'chrome-internal.googlesource.com'),
    'gclient_deps': gclient_deps,
    'gclient_managed': gclient_managed,
    'gclient_url': gclient_url,
  }
def last_configuration_path():
  """Returns the path used to store the last checked configuration."""
  filename = 'check_git_push_access_conf.json'
  if is_using_git():
    return os.path.join(REPO_ROOT, '.git', filename)
  if is_using_svn():
    return os.path.join(REPO_ROOT, '.svn', filename)
  # Neither VCS metadata dir exists: keep the marker next to the checkout.
  return os.path.join(REPO_ROOT, '.' + filename)
def read_last_configuration():
  """Reads the last checked configuration, or None if absent/corrupted."""
  path = last_configuration_path()
  try:
    with open(path, 'r') as f:
      return json.load(f)
  except (IOError, ValueError):
    # Missing or unparseable file simply means "no previous check".
    return None
def write_last_configuration(conf):
  """Writes the last checked configuration to a file.

  Best effort: failures are logged and swallowed so the hook never breaks.
  """
  # BUG FIX: the except clause below used to reference an undefined name
  # 'path' (the path only ever existed inside the open() call), raising
  # NameError whenever the write actually failed. Bind it first.
  path = last_configuration_path()
  try:
    with open(path, 'w') as f:
      json.dump(conf, f, indent=2, sort_keys=True)
  except IOError:
    logging.exception('Failed to write JSON to %s', path)
@contextlib.contextmanager
def temp_directory():
  """Context manager yielding a fresh temp directory, removed on exit.

  Removal failures are logged, not raised.
  """
  tmp_dir = tempfile.mkdtemp()
  try:
    yield tmp_dir
  finally:
    try:
      shutil.rmtree(tmp_dir)
    except (OSError, IOError):
      logging.exception('Failed to remove temp directory %s', tmp_dir)
class Runner(object):
  """Runs a bunch of commands in some directory, collecting their logs.

  Attributes:
    cwd: directory commands run in.
    verbose: when True, every log line is also emitted via logging.warning.
    log: accumulated log lines (command echoes and outputs).
  """

  def __init__(self, cwd, verbose):
    self.cwd = cwd
    self.verbose = verbose
    self.log = []

  def run(self, cmd):
    """Runs `cmd` (argv list), logs its output, returns its exit code.

    A failure to even launch the process is logged and reported as -1.
    """
    self.append_to_log('> ' + ' '.join(cmd))
    exit_code = -1
    try:
      child = subprocess.Popen(
          cmd,
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT,
          cwd=self.cwd)
      output, _ = child.communicate()
      output = output.strip()
      exit_code = child.returncode
    except OSError as exc:
      output = str(exc)
    if exit_code:
      output += '\n(exit code: %d)' % exit_code
    self.append_to_log(output)
    return exit_code

  def append_to_log(self, text):
    """Stores a non-empty log line, echoing it when verbose."""
    if not text:
      return
    self.log.append(text)
    if self.verbose:
      logging.warning(text)
def check_git_config(conf, report_url, verbose):
  """Attempts to push to a git repository, reports results to a server.

  Returns True if the check finished without incidents (push itself may
  have failed) and should NOT be retried on next invocation of the hook.
  """
  # Don't even try to push if netrc is not configured.
  if not conf['chromium_netrc_email']:
    return upload_report(
        conf,
        report_url,
        verbose,
        push_works=False,
        push_log='',
        push_duration_ms=0)

  # Ref to push to, each user has its own ref.
  ref = 'refs/push-test/%s' % conf['chromium_netrc_email']

  push_works = False
  flake = False
  # BUG FIX: 'runner' used to be assigned only inside the try block; if
  # temp_directory() or Runner() raised before the assignment, the
  # 'runner.log' reference after the except clause crashed with NameError.
  # Pre-initialize and guard the access.
  runner = None
  started = time.time()
  try:
    logging.warning('Checking push access to the git repository...')
    with temp_directory() as tmp:
      # Prepare a simple commit on a new timeline.
      runner = Runner(tmp, verbose)
      runner.run([GIT_EXE, 'init', '.'])
      if conf['git_user_name']:
        runner.run([GIT_EXE, 'config', 'user.name', conf['git_user_name']])
      if conf['git_user_email']:
        runner.run([GIT_EXE, 'config', 'user.email', conf['git_user_email']])
      with open(os.path.join(tmp, 'timestamp'), 'w') as f:
        f.write(str(int(time.time() * 1000)))
      runner.run([GIT_EXE, 'add', 'timestamp'])
      runner.run([GIT_EXE, 'commit', '-m', 'Push test.'])
      # Try to push multiple times if it fails due to issues other than ACLs.
      attempt = 0
      while attempt < 5:
        attempt += 1
        logging.info('Pushing to %s %s', TEST_REPO_URL, ref)
        ret = runner.run(
            [GIT_EXE, 'push', TEST_REPO_URL, 'HEAD:%s' % ref, '-f'])
        if not ret:
          push_works = True
          break
        # An ACL error is final: retrying will not change the answer.
        if any(x in runner.log[-1] for x in BAD_ACL_ERRORS):
          push_works = False
          break
  except Exception:
    logging.exception('Unexpected exception when pushing')
    flake = True

  if push_works:
    logging.warning('Git push works!')
  else:
    logging.warning(
        'Git push doesn\'t work, which is fine if you are not a committer.')

  uploaded = upload_report(
      conf,
      report_url,
      verbose,
      push_works=push_works,
      push_log='\n'.join(runner.log) if runner else '',
      push_duration_ms=int((time.time() - started) * 1000))
  return uploaded and not flake
def check_gclient_config(conf):
  """Shows warning if gclient solution is not properly configured for git."""
  # Ignore configs that do not have 'src' solution at all.
  if not conf['gclient_url']:
    return
  # Reconstruct the user's effective 'src' solution, substituting the same
  # defaults GOOD_GCLIENT_SOLUTION uses so comparison is apples-to-apples.
  current = {
    'name': 'src',
    'deps_file': conf['gclient_deps'] or 'DEPS',
    'managed': conf['gclient_managed'] or False,
    'url': conf['gclient_url'],
  }
  # After depot_tools r291592 both DEPS and .DEPS.git are valid.
  good = GOOD_GCLIENT_SOLUTION.copy()
  good['deps_file'] = current['deps_file']
  if current == good:
    return
  # Show big warning if url or deps_file is wrong.
  if current['url'] != good['url'] or current['deps_file'] != good['deps_file']:
    print '-' * 80
    print 'Your gclient solution is not set to use supported git workflow!'
    print
    print 'Your \'src\' solution (in %s):' % GCLIENT_CONFIG
    print pprint.pformat(current, indent=2)
    print
    print 'Correct \'src\' solution to use git:'
    print pprint.pformat(good, indent=2)
    print
    print 'Please update your .gclient file ASAP.'
    print '-' * 80
  # Show smaller (additional) warning about managed workflow.
  if current['managed']:
    print '-' * 80
    print (
        'You are using managed gclient mode with git, which was deprecated '
        'on 8/22/13:')
    print (
        'https://groups.google.com/a/chromium.org/'
        'forum/#!topic/chromium-dev/n9N5N3JL2_U')
    print
    print (
        'It is strongly advised to switch to unmanaged mode. For more '
        'information about managed mode and reasons for its deprecation see:')
    print 'http://www.chromium.org/developers/how-tos/get-the-code/gclient-managed-mode'
    print
    print (
        'There\'s also a large suite of tools to assist managing git '
        'checkouts.\nSee \'man depot_tools\' (or read '
        'depot_tools/man/html/depot_tools.html).')
    print '-' * 80
def upload_report(
    conf, report_url, verbose, push_works, push_log, push_duration_ms):
  """Posts report to the server, returns True if server accepted it.

  Uploads the report only if script is running in Google corp network. Otherwise
  just prints the report.
  """
  report = conf.copy()
  report.update(
      push_works=push_works,
      push_log=push_log,
      push_duration_ms=push_duration_ms)
  as_bytes = json.dumps({'access_check': report}, indent=2, sort_keys=True)
  if verbose:
    print 'Status of git push attempt:'
    print as_bytes
  # Do not upload it outside of corp or if server side is already disabled.
  # Returning True here makes the caller treat the check as done and stop
  # retrying, even though nothing was actually uploaded.
  if not is_in_google_corp() or datetime.datetime.now() > UPLOAD_DISABLE_TS:
    if verbose:
      print (
          'You can send the above report to chrome-git-migration@google.com '
          'if you need help to set up you committer git account.')
    return True
  req = urllib2.Request(
      url=report_url,
      data=as_bytes,
      headers={'Content-Type': 'application/json; charset=utf-8'})
  # Retry up to 10 times on transient network/SSL errors.
  attempt = 0
  success = False
  while not success and attempt < 10:
    attempt += 1
    try:
      logging.warning(
          'Attempting to upload the report to %s...',
          urlparse.urlparse(report_url).netloc)
      resp = urllib2.urlopen(req, timeout=5)
      report_id = None
      try:
        report_id = json.load(resp)['report_id']
      except (ValueError, TypeError, KeyError):
        # The response body is informational only; ignore parse problems.
        pass
      logging.warning('Report uploaded: %s', report_id)
      success = True
    except (urllib2.URLError, socket.error, ssl.SSLError) as exc:
      logging.warning('Failed to upload the report: %s', exc)
  return success
def main(args):
  """Entry point. Returns the process exit code (always 0 as a hook)."""
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '--running-as-hook',
      action='store_true',
      help='Set when invoked from gclient hook')
  parser.add_option(
      '--report-url',
      default=MOTHERSHIP_URL,
      help='URL to submit the report to')
  parser.add_option(
      '--verbose',
      action='store_true',
      help='More logging')
  # NOTE(review): parse_args() reads sys.argv directly; the 'args' parameter
  # of main() is effectively ignored.
  options, args = parser.parse_args()
  if args:
    parser.error('Unknown argument %s' % args)
  logging.basicConfig(
      format='%(message)s',
      level=logging.INFO if options.verbose else logging.WARN)
  # When invoked not as a hook, always run the check.
  if not options.running_as_hook:
    config = scan_configuration()
    check_gclient_config(config)
    check_git_config(config, options.report_url, True)
    return 0
  # Always do nothing on bots.
  if is_on_bot():
    return 0
  # Read current config, verify gclient solution looks correct.
  config = scan_configuration()
  check_gclient_config(config)
  # Do not attempt to push from non-google owned machines.
  if not is_in_google_corp():
    logging.info('Skipping git push check: non *.corp.google.com machine.')
    return 0
  # Skip git push check if current configuration was already checked.
  if config == read_last_configuration():
    logging.info('Check already performed, skipping.')
    return 0
  # Run the check. Mark configuration as checked only on success. Ignore any
  # exceptions or errors. This check must not break gclient runhooks.
  try:
    ok = check_git_config(config, options.report_url, False)
    if ok:
      write_last_configuration(config)
    else:
      logging.warning('Check failed and will be retried on the next run')
  except Exception:
    logging.exception('Unexpected exception when performing git access check')
  return 0
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""
Minimalist asynchronous network library just to fit Anaconda's needs and
replace the horrible asyncore/asynchat
Example of usage:
import ioloop
class TestClient(ioloop.EventHandler):
'''Client for test
'''
def __init__(self, host, port):
ioloop.EventHandler.__init__(self, (host, port))
self.message = []
def ready_to_write(self):
return True if self.outbuffer else False
def handle_read(self, data):
self.message.append(data)
def process_message(self):
print(b''.join(self.message))
self.message = []
"""
import os
import sys
import time
import errno
import socket
import select
import logging
import traceback
import threading
from .typing import List, Tuple, Any # noqa
NOT_TERMINATE = True
class IOHandlers(object):
    """Registry of EventHandler objects.

    Borg pattern: every instance shares the same __dict__, so any
    IOHandlers() call sees the same handler pool and lock.
    """

    _shared_state = {}  # type: Dict[Any, Any]

    def __init__(self) -> None:
        self.__dict__ = IOHandlers._shared_state
        if getattr(self, 'instanced', None) is True:
            return
        # First instantiation: set up the shared registry.
        self._handler_pool = {}  # type: Dict[int, EventHandler]
        self._lock = threading.Lock()
        self.instanced = True  # type: bool

    def ready_to_read(self) -> List['EventHandler']:
        """Return back all the handlers that are ready to read
        """
        handlers = self._handler_pool.values()
        return [handler for handler in handlers if handler.ready_to_read()]

    def ready_to_write(self):
        """Return back all the handlers that are ready to write
        """
        handlers = self._handler_pool.values()
        return [handler for handler in handlers if handler.ready_to_write()]

    def register(self, handler):
        """Register a new handler, keyed by its file descriptor."""
        logging.info(
            'Registering handler with address {}'.format(handler.address))
        with self._lock:
            # setdefault: never overwrite an already-registered descriptor.
            self._handler_pool.setdefault(handler.fileno(), handler)

    def unregister(self, handler):
        """Unregister the given handler (no-op when it is not registered)."""
        with self._lock:
            self._handler_pool.pop(handler.fileno(), None)
class EventHandler(object):
    """Event handler class

    Wraps a non-blocking TCP socket with an input and an output byte
    buffer. Incoming data is split into messages on a CRLF terminator;
    subclasses implement `handle_read` and `process_message`.
    """
    def __init__(self, address: Tuple[str, int], sock: socket.socket=None) -> None:  # noqa
        self._write_lock = threading.RLock()
        self._read_lock = threading.RLock()
        self.address = address
        self.outbuffer = b''  # bytes queued for sending
        self.inbuffer = b''   # bytes received but not yet delivered
        self.sock = sock
        if sock is None:
            # No socket supplied: open a fresh TCP connection to `address`.
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect(address)
            self.connected = True
        # Non-blocking mode: send/recv may raise EAGAIN/EWOULDBLOCK.
        self.sock.setblocking(False)
        IOHandlers().register(self)
    def __del__(self) -> None:
        # Best-effort unregistration when the handler is garbage collected.
        if self in IOHandlers()._handler_pool.values():
            IOHandlers().unregister(self)
    def fileno(self) -> int:
        """Return the associated file descriptor
        """
        return self.sock.fileno()
    def send(self) -> int:
        """Send outgoing data

        Drains the whole outbuffer, sleeping briefly on EAGAIN. Returns 0
        when the connection had to be closed due to an error.
        NOTE(review): on the normal drain path the method falls off the end
        and returns None, despite the `-> int` annotation; callers appear
        to ignore the return value.
        """
        with self._write_lock:
            while len(self.outbuffer) > 0:
                try:
                    sent = self.sock.send(self.outbuffer)
                    self.outbuffer = self.outbuffer[sent:]
                except socket.error as error:
                    if error.args[0] == errno.EAGAIN:
                        # Kernel buffer full; back off briefly and retry.
                        time.sleep(0.1)
                    elif error.args[0] in (
                        errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN,
                        errno.ECONNABORTED, errno.EPIPE
                    ):
                        # Peer is gone; give up on this connection.
                        self.close()
                        return 0
                    elif os.name == 'posix':
                        # Windows doesn't seems to have EBADFD
                        if sys.platform == 'darwin':
                            # OS X uses EBADF as EBADFD. why? no idea asks Tim
                            if error.args[0] == errno.EBADF:
                                self.close()
                                return 0
                        else:
                            if error.args[0] == errno.EBADFD:
                                self.close()
                                return 0
                        raise
                    else:
                        raise
    def recv(self) -> None:
        """Receive some data

        Reads up to 4096 bytes, appends them to the input buffer and
        delivers every complete CRLF-terminated message via handle_read()
        followed by process_message(). A trailing partial terminator is
        kept in the buffer for the next read.
        """
        try:
            data = self.sock.recv(4096)
        except socket.error as error:
            if error.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                # Nothing to read right now.
                return None
            elif error.args[0] == errno.ECONNRESET:
                self.close()
                return None
            else:
                raise
        if not data:
            # Empty read means the peer closed the connection.
            self.close()
            return None
        self.inbuffer += data
        while self.inbuffer:
            match = b'\r\n'
            index = self.inbuffer.find(match)
            if index != -1:
                # Complete message: deliver it and consume it plus its
                # terminator from the buffer.
                if index > 0:
                    self.handle_read(self.inbuffer[:index])
                self.inbuffer = self.inbuffer[index+len(match):]
                self.process_message()
            else:
                # No full terminator. If the buffer ends with a prefix of
                # the terminator (a lone CR), keep that prefix for the next
                # read and deliver the rest now.
                index = len(match) - 1
                while index and not self.inbuffer.endswith(match[:index]):
                    index -= 1
                if index:
                    if index != len(self.inbuffer):
                        self.handle_read(self.inbuffer[:-index])
                    self.inbuffer = self.inbuffer[-index:]
                    break
                else:
                    # No partial terminator either: deliver everything.
                    self.handle_read(self.inbuffer)
                    self.inbuffer = b''
    def push(self, data: bytes) -> None:
        """Push some bytes into the write buffer
        """
        self.outbuffer += data
    def handle_read(self, data: bytes) -> None:
        """Handle data readign from select

        Subclasses must override; receives one chunk of message payload.
        """
        raise RuntimeError('You have to implement this method')
    def process_message(self) -> None:
        """Process the full message

        Subclasses must override; called after each complete message.
        """
        raise RuntimeError('You have to implement this method')
    def ready_to_read(self) -> bool:
        """This handler is ready to read
        """
        return True
    def ready_to_write(self) -> bool:
        """This handler is ready to write
        """
        return True
    def close(self) -> None:
        """Close the socket and unregister the handler
        """
        if self in IOHandlers()._handler_pool.values():
            IOHandlers().unregister(self)
        self.sock.close()
        self.connected = False
def poll() -> None:
    """Run a single non-blocking select() pass over registered handlers.

    Handlers reported readable/writable get their recv()/send() invoked.
    EINTR aborts the pass silently; other select errors propagate.
    """
    readable = []  # type: List[bytes]
    writable = []  # type: List[bytes]
    registry = IOHandlers()
    try:
        # On non-POSIX platforms select() rejects empty fd lists, so only
        # call it there when at least one handler is registered.
        if os.name == 'posix' or registry._handler_pool:
            readable, writable, _ = select.select(
                registry.ready_to_read(), registry.ready_to_write(), [], 0)
    except select.error:
        err = sys.exc_info()[1]
        if err.args[0] == errno.EINTR:
            return
        raise
    for handler in readable:
        if handler is not None and handler.ready_to_read() is True:
            handler.recv()
    for handler in writable:
        if handler is not None and handler.ready_to_write() is True:
            handler.send()
def loop() -> None:
    """Main event loop

    Spawns a thread that calls poll() until terminate() flips
    NOT_TERMINATE, then closes every registered handler.
    """
    def restart_poll(error: Exception) -> None:
        # Unexpected poll() failure: log it with its traceback, then close
        # and drop every handler so the next iteration starts clean.
        logging.error(
            'Unhandled exception in poll, restarting the poll request')
        logging.error(error)
        for traceback_line in traceback.format_exc().splitlines():
            logging.error(traceback_line)
        with IOHandlers()._lock:
            for handler in IOHandlers()._handler_pool.values():
                handler.close()
            IOHandlers()._handler_pool = {}

    def inner_loop() -> None:
        while NOT_TERMINATE:
            try:
                poll()
                time.sleep(0.01)
            except OSError as error:
                # BUG FIX: this compared against `os.errno.WSAENOTSOCK`, but
                # `os.errno` was never a documented attribute and is gone in
                # modern Python; use the imported `errno` module directly.
                # The `os.name != 'posix'` guard short-circuits on POSIX,
                # where errno has no WSAENOTSOCK constant.
                if os.name != 'posix' and error.errno == errno.WSAENOTSOCK:
                    msg = (
                        'Unfortunately, the Windows socket is in inconsistent'
                        ' state, restart your sublime text 3. If the problem '
                        'persist, fill an issue report on:'
                        ' https://github.com/DamnWidget/anaconda/issues'
                    )
                    logging.error(msg)
                    import sublime
                    sublime.error_message(msg)
                    terminate()
                else:
                    restart_poll(error)
            except Exception as error:
                restart_poll(error)
        # cleanup
        for handler in IOHandlers()._handler_pool.values():
            handler.close()

    threading.Thread(target=inner_loop).start()
def terminate() -> None:
    """Ask the event loop to stop by clearing the module-level run flag."""
    global NOT_TERMINATE
    NOT_TERMINATE = False
def restart() -> None:
    """Stop any running loop, then start a fresh one."""
    global NOT_TERMINATE
    if NOT_TERMINATE is True:
        # A loop is (presumably) running: signal it to stop first.
        NOT_TERMINATE = False
        terminate()
    NOT_TERMINATE = True
    loop()
| |
import abc
import copy
import logging
import typing as tp
import math
import re
from enum import Enum
import attr
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
RequirementSubType = tp.TypeVar("RequirementSubType", bound='Requirement')
@attr.s
class Requirement:
    """
    Base class for requirements.

    Subclasses provide script/CLI renderings of one scheduler option and a
    way to merge two requirements of the same concrete type.
    """
    @abc.abstractmethod
    def to_option(self) -> str:
        """
        Converts Requirement to a script options.
        """
    @abc.abstractmethod
    def to_cli_option(self) -> str:
        """
        Converts Requirement to a command line options.
        """
    @classmethod
    @abc.abstractmethod
    def merge_requirements(
            cls: tp.Type[RequirementSubType], lhs_option: RequirementSubType,
            rhs_option: RequirementSubType) -> RequirementSubType:
        """
        Merge the requirements of the same type together.
        """
        # NOTE(review): despite @abc.abstractmethod this carries a default
        # body that re-dispatches on the runtime type of lhs_option;
        # concrete subclasses override it with an actual merge.
        return type(lhs_option).merge_requirements(lhs_option, rhs_option)
################################################################################
# Slurm Requirements #
################################################################################
class SlurmRequirement(Requirement):
    """
    Base class for slurm requirements.

    Maps the generic Requirement interface onto slurm-specific formatting:
    subclasses only need to implement `to_slurm_cli_opt`.
    """

    def to_option(self) -> str:
        """Script-option rendering; delegates to the slurm formatter."""
        return self.to_slurm_opt()

    def to_cli_option(self) -> str:
        """Command-line rendering; delegates to the slurm formatter."""
        return self.to_slurm_cli_opt()

    def to_slurm_opt(self) -> str:
        """
        Convert slurm option into a script usable option string, i.e., bash
        #SBATCH option line.
        """
        return "#SBATCH " + self.to_slurm_cli_opt()

    @abc.abstractmethod
    def to_slurm_cli_opt(self) -> str:
        """
        Convert slurm option to command line string.
        """
@attr.s
class SlurmCoresPerSocket(SlurmRequirement):
    """
    Restrict node selection to nodes with at least the specified number of
    cores per socket. See additional information under -B option in the slurm
    documentation. Only works when task/affinity plugin is enabled.
    """
    # Minimum number of cores per socket a node must provide.
    cores: int = attr.ib()

    def to_slurm_cli_opt(self) -> str:
        return "--cores-per-socket={}".format(self.cores)

    @classmethod
    def merge_requirements(
            cls, lhs_option: 'SlurmCoresPerSocket',
            rhs_option: 'SlurmCoresPerSocket') -> 'SlurmCoresPerSocket':
        """
        Merge two core requirements by keeping the stricter (larger) one.
        """
        stricter = max(lhs_option.cores, rhs_option.cores)
        return SlurmCoresPerSocket(stricter)
class SlurmExclusive(SlurmRequirement):
    """
    The job allocation can not share nodes with other running jobs.
    """

    def to_slurm_cli_opt(self) -> str:
        return "--exclusive"

    def __str__(self) -> str:
        return "Run Exclusive"

    def __repr__(self) -> str:
        return "Exclusive"

    @classmethod
    def merge_requirements(cls, lhs_option: 'SlurmExclusive',
                           rhs_option: 'SlurmExclusive') -> 'SlurmExclusive':
        """
        Two exclusivity requirements collapse into a single fresh one.
        """
        return SlurmExclusive()
@attr.s
class SlurmNiceness(SlurmRequirement):
    """
    Run the job with an adjusted scheduling priority within Slurm. With no
    adjustment value the scheduling priority is decreased by 100. A negative
    nice value increases the priority, otherwise decreases it. The adjustment
    range is +/- 2147483645. Only privileged users can specify a negative
    adjustment.
    """
    # Niceness adjustment passed straight through to slurm's --nice flag.
    niceness: int = attr.ib()

    def to_slurm_cli_opt(self) -> str:
        return f"--nice={self.niceness}"

    @classmethod
    def merge_requirements(cls, lhs_option: 'SlurmNiceness',
                           rhs_option: 'SlurmNiceness') -> 'SlurmNiceness':
        """
        Merge the requirements of the same type together.

        Conflicting values resolve to the smaller (higher-priority) one.
        """
        if lhs_option.niceness != rhs_option.niceness:
            # BUG FIX: log message typo "specifcied" -> "specified".
            LOG.info("Multiple different slurm niceness values specified, "
                     "choosing the smaller value.")
        return SlurmNiceness(min(lhs_option.niceness, rhs_option.niceness))
@attr.s
class SlurmHint(SlurmRequirement):
    """
    Bind tasks according to application hints.
        * compute_bound
            Select settings for compute bound applications: use all cores in
            each socket, one thread per core.
        * memory_bound
            Select settings for memory bound applications: use only one core
            in each socket, one thread per core.
        * [no]multithread
            [don't] use extra threads with in-core multi-threading which can
            benefit communication intensive applications. Only supported with
            the task/affinity plugin.
    """
    class SlurmHints(Enum):
        # Raw flag values accepted by slurm's --hint option; __str__ yields
        # the flag text directly so the set can be join()ed.
        compute_bound = "compute_bound"
        memory_bound = "memory_bound"
        multithread = "multithread"
        nomultithread = "nomultithread"
        def __str__(self) -> str:
            return str(self.value)
    # Set of hints rendered as one comma-separated --hint option.
    hints: tp.Set[SlurmHints] = attr.ib()
    def to_slurm_cli_opt(self) -> str:
        return f"--hint={','.join(map(str, self.hints))}"
    def __str__(self) -> str:
        return f"Hints: {','.join(map(str, self.hints))}"
    def __repr__(self) -> str:
        return f"Hint ({str(self)})"
    @classmethod
    def merge_requirements(cls, lhs_option: 'SlurmHint',
                           rhs_option: 'SlurmHint') -> 'SlurmHint':
        """
        Merge the requirements of the same type together.

        Raises:
            ValueError: if the union contains mutually exclusive hints.
        """
        combined_hints = set()
        combined_hints |= lhs_option.hints | rhs_option.hints
        if not cls.__hints_not_mutually_exclusive(combined_hints):
            raise ValueError(
                "Two mutally exclusive hints for slurm have be specified.")
        return SlurmHint(combined_hints)
    @staticmethod
    def __hints_not_mutually_exclusive(hints: tp.Set[SlurmHints]) -> bool:
        """
        Checks that a list of `SlurmHints` does not include mutally exclusive
        hints.

        Returns:
            True, if no mutally exclusive hints are in the list
        """
        # compute_bound vs memory_bound and multithread vs nomultithread are
        # the two exclusive pairs.
        if (SlurmHint.SlurmHints.compute_bound in hints and
                SlurmHint.SlurmHints.memory_bound in hints):
            return False
        if (SlurmHint.SlurmHints.nomultithread in hints and
                SlurmHint.SlurmHints.multithread in hints):
            return False
        return True
def _convert_to_time_tuple(time_specifier: str) -> tp.Tuple[int, int, int, int]:
"""
Convert slurm time specifier to tuple.
Returns:
time tuple with (days, hours, minutes, seconds)
Examples:
>>> _convert_to_time_tuple("4")
(0, 0, 4, 0)
>>> _convert_to_time_tuple("4:2")
(0, 0, 4, 2)
>>> _convert_to_time_tuple("8:4:2")
(0, 8, 4, 2)
>>> _convert_to_time_tuple("16-8")
(16, 8, 0, 0)
>>> _convert_to_time_tuple("16-8:4")
(16, 8, 4, 0)
>>> _convert_to_time_tuple("16-8:4:2")
(16, 8, 4, 2)
"""
days = 0
hours = 0
minutes = 0
seconds = 0
if time_specifier.count('-'):
with_days = True
days = int(time_specifier.split('-')[0])
time_specifier = time_specifier.split('-')[1]
else:
with_days = False
num_colon = time_specifier.count(':')
if num_colon == 0:
if with_days:
hours = int(time_specifier)
else:
minutes = int(time_specifier)
elif num_colon == 1:
if with_days:
hours = int(time_specifier.split(':')[0])
minutes = int(time_specifier.split(':')[1])
else:
minutes = int(time_specifier.split(':')[0])
seconds = int(time_specifier.split(':')[1])
elif num_colon == 2:
hours = int(time_specifier.split(':')[0])
minutes = int(time_specifier.split(':')[1])
seconds = int(time_specifier.split(':')[2])
return (days, hours, minutes, seconds)
@attr.s
class SlurmTime(SlurmRequirement):
    """
    Set a limit on the total run time of the job allocation.
    A time limit of zero requests that no time limit be imposed. Acceptable
    time formats include "minutes", "minutes:seconds", "hours:minutes:seconds",
    "days-hours", "days-hours:minutes" and "days-hours:minutes:seconds".
    """
    # Normalized to (days, hours, minutes, seconds) by the converter.
    timelimit: tp.Tuple[int, int, int,
                        int] = attr.ib(converter=_convert_to_time_tuple)
    def to_slurm_time_format(self) -> str:
        """
        Converts Time option into slurm compatible time format.
        """
        days = self.timelimit[0]
        hours = self.timelimit[1]
        minutes = self.timelimit[2]
        seconds = self.timelimit[3]
        tmp_str = ""
        if days > 0:
            # "days-hh[:mm[:ss]]": lower fields only when non-zero.
            tmp_str += f"{days}-{hours:02d}"
            if minutes > 0 or seconds > 0:
                tmp_str += f":{minutes:02d}"
            if seconds > 0:
                tmp_str += f":{seconds:02d}"
        else:
            if hours > 0:
                # "hh:mm:ss"
                tmp_str += f"{hours}"
                tmp_str += f":{minutes:02d}"
                tmp_str += f":{seconds:02d}"
            else:
                # "mm[:ss]"
                tmp_str += f"{minutes}"
                if seconds > 0:
                    tmp_str += f":{seconds:02d}"
        return tmp_str
    def to_slurm_cli_opt(self) -> str:
        return f"--time={self.to_slurm_time_format()}"
    @classmethod
    def merge_requirements(cls, lhs_option: 'SlurmTime',
                           rhs_option: 'SlurmTime') -> 'SlurmTime':
        """
        Merge the requirements of the same type together.

        Keeps the shorter (stricter) of the two limits.
        """
        if lhs_option < rhs_option:
            return copy.deepcopy(lhs_option)
        return copy.deepcopy(rhs_option)
def _get_byte_size_factor(byte_suffix: str) -> int:
"""
Returns the factor for a specific bytesize.
"""
byte_suffix = byte_suffix.lower()
if byte_suffix == "b":
return 1
if byte_suffix in ("k", "kb"):
return 1024
if byte_suffix in ("m", "mb"):
return 1024 * 1024
if byte_suffix in ("g", "gb"):
return 1024 * 1024 * 1024
if byte_suffix in ("t", "tb"):
return 1024 * 1024 * 1024 * 1024
raise ValueError("Unsupported byte suffix")
_BYTE_RGX = re.compile(r"(?P<size>\d*)(?P<byte_suffix>.*)")
def _to_bytes(byte_str: str) -> int:
"""
>>> _to_bytes("4B")
4
>>> _to_bytes("4MB")
4194304
>>> _to_bytes("10G")
10737418240
"""
match = _BYTE_RGX.search(byte_str)
if match:
size = int(match.group("size"))
byte_suffix = match.group("byte_suffix")
return size * _get_byte_size_factor(byte_suffix)
raise ValueError("Passed byte size was wrongly formatted")
def _to_biggests_byte_size(num_bytes: int) -> tp.Tuple[int, str]:
"""
>>> _to_biggests_byte_size(4)
(4, 'B')
>>> _to_biggests_byte_size(4194304)
(4, 'M')
>>> _to_biggests_byte_size(4194305)
(5, 'M')
>>> _to_biggests_byte_size(10737418240)
(10, 'G')
>>> _to_biggests_byte_size(1099511627776)
(1, 'T')
"""
if num_bytes >= _get_byte_size_factor("TB"):
return (math.ceil(num_bytes / _get_byte_size_factor("TB")), "T")
if num_bytes >= _get_byte_size_factor("GB"):
return (math.ceil(num_bytes / _get_byte_size_factor("GB")), "G")
if num_bytes >= _get_byte_size_factor("MB"):
return (math.ceil(num_bytes / _get_byte_size_factor("MB")), "M")
if num_bytes >= _get_byte_size_factor("KB"):
return (math.ceil(num_bytes / _get_byte_size_factor("KB")), "K")
return (num_bytes, "B")
@attr.s
class SlurmMem(SlurmRequirement):
    """
    Set memory requirements that specify the maximal amount of memory needed.
    Specify the real memory required per node. Different units can be specified
    using the suffix [K|M|G|T].
    """
    # Required memory in bytes; the converter parses strings like "16G".
    mem_req: int = attr.ib(converter=_to_bytes)

    def to_slurm_cli_opt(self) -> str:
        size, unit = _to_biggests_byte_size(self.mem_req)
        return f"--mem={size}{unit}"

    @classmethod
    def merge_requirements(cls, lhs_option: 'SlurmMem',
                           rhs_option: 'SlurmMem') -> 'SlurmMem':
        """
        Merge two memory requirements by keeping the larger one.
        """
        return copy.deepcopy(max(lhs_option, rhs_option))
def merge_slurm_options(list_1: tp.List[Requirement],
                        list_2: tp.List[Requirement]) -> tp.List[Requirement]:
    """
    Merge two lists of slurm options into one, combining options of the
    same concrete type via their merge_requirements hook.
    """
    merged: tp.Dict[tp.Type[Requirement], Requirement] = {}
    for option in list_1 + list_2:
        option_type = type(option)
        existing = merged.get(option_type)
        if existing is None:
            merged[option_type] = option
        else:
            merged[option_type] = existing.merge_requirements(existing, option)
    return list(merged.values())
def get_slurm_options_from_config() -> tp.List[Requirement]:
    """
    Generates a list of `SlurmOptions` which are specified in the BenchBuild
    config.
    """
    configured: tp.List[Requirement] = []
    if CFG['slurm']['exclusive']:
        configured.append(SlurmExclusive())
    if not CFG['slurm']['multithread']:
        # Multithreading disabled in config -> ask slurm not to use it.
        configured.append(SlurmHint({SlurmHint.SlurmHints.nomultithread}))
    configured.append(SlurmTime(str(CFG['slurm']['timelimit'])))
    configured.append(SlurmNiceness(int(CFG['slurm']['nice'])))
    return configured
| |
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common testing functions."""
import os
import socket
import sys
import threading
from absl import flags
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_hub import resolver
def _do_redirect(handler, location):
handler.send_response(301)
handler.send_header("Location", location)
handler.end_headers()
def _do_documentation(handler):
handler.send_response(200)
handler.end_headers()
handler.wfile.write(b"Here is some documentation.")
def start_smart_module_server(download_url):
  """Serve documentation and module requests at the same URL.

  Returns the port of a daemon-threaded HTTP server that redirects
  "?tf-hub-format=compressed" requests to `download_url` and serves a
  documentation stub for everything else.
  """
  # pylint:disable=g-import-not-at-top
  if sys.version_info[0] == 2:
    # Python 2 module spellings.
    import BaseHTTPServer
    import SimpleHTTPServer
    import urlparse
    class HTTPServerV6(BaseHTTPServer.HTTPServer):
      # IPv6 listener (also reachable via IPv4-mapped addresses).
      address_family = socket.AF_INET6
    class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
      def do_GET(self):
        # The tf-hub-format query parameter selects archive vs. docs.
        parsed_url = urlparse.urlparse(self.path)
        qs = urlparse.parse_qs(parsed_url.query)
        if qs["tf-hub-format"][0] == "compressed":
          _do_redirect(self, download_url)
        else:
          _do_documentation(self)
    server = HTTPServerV6(("", 0), RequestHandler)
    server_port = server.server_port
  else:
    import http.server
    import socketserver
    import urllib
    class TCPServerV6(socketserver.TCPServer):
      address_family = socket.AF_INET6
    class RequestHandler(http.server.SimpleHTTPRequestHandler):
      def do_GET(self):
        parsed_url = urllib.parse.urlparse(self.path)
        qs = urllib.parse.parse_qs(parsed_url.query)
        if qs["tf-hub-format"][0] == "compressed":
          _do_redirect(self, download_url)
        else:
          _do_documentation(self)
    server = TCPServerV6(("", 0), RequestHandler)
    # IPv6 server_address is (host, port, flowinfo, scopeid).
    _, server_port, _, _ = server.server_address
  # pylint:disable=g-import-not-at-top
  thread = threading.Thread(target=server.serve_forever)
  # Daemon thread: the server dies together with the test process.
  thread.daemon = True
  thread.start()
  return server_port
def start_http_server(redirect=None):
  """Returns the port of the newly started HTTP server.

  Serves files from the current directory, or — when `redirect` is given —
  answers every GET with a 301 to that URL. Runs on a daemon thread.
  """
  # Start HTTP server to serve TAR files.
  # pylint:disable=g-import-not-at-top
  if sys.version_info[0] == 2:
    # Python 2 module spellings.
    import BaseHTTPServer
    import SimpleHTTPServer
    class HTTPServerV6(BaseHTTPServer.HTTPServer):
      # IPv6 listener (also reachable via IPv4-mapped addresses).
      address_family = socket.AF_INET6
    class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
      def do_GET(self):
        _do_redirect(self, redirect)
    server = HTTPServerV6(("", 0), RedirectHandler if redirect else
                          SimpleHTTPServer.SimpleHTTPRequestHandler)
    server_port = server.server_port
  else:
    import http.server
    import socketserver
    class TCPServerV6(socketserver.TCPServer):
      address_family = socket.AF_INET6
    class RedirectHandler(http.server.SimpleHTTPRequestHandler):
      def do_GET(self):
        _do_redirect(self, redirect)
    server = TCPServerV6(("", 0), RedirectHandler if redirect else
                         http.server.SimpleHTTPRequestHandler)
    # IPv6 server_address is (host, port, flowinfo, scopeid).
    _, server_port, _, _ = server.server_address
  # pylint:disable=g-import-not-at-top
  thread = threading.Thread(target=server.serve_forever)
  # Daemon thread: the server dies together with the test process.
  thread.daemon = True
  thread.start()
  return server_port
def test_srcdir():
  """Return the directory in which to look for test data files."""
  # Prefer the absl flag when it is defined, then the environment variable.
  if "test_srcdir" in flags.FLAGS:
    return flags.FLAGS["test_srcdir"].value
  if "TEST_SRCDIR" in os.environ:
    return os.environ["TEST_SRCDIR"]
  raise RuntimeError("Missing TEST_SRCDIR environment.")
def get_test_data_path(file_or_dirname):
  """Walk the test source tree and return the first path matching the name."""
  for directory, subdirs, files in tf.io.gfile.walk(test_srcdir()):
    matches = [entry for entry in subdirs + files
               if entry.endswith(file_or_dirname)]
    if matches:
      return os.path.join(directory, matches[0])
  raise ValueError("No %s in test directory" % file_or_dirname)
def export_module(module_export_path):
  """Create and export a simple module to the specified path.

  Args:
    module_export_path: directory to export the TF1 hub.Module into.
  """

  def _stateless_module_fn():
    """Simple module that squares an input."""
    x = tf.compat.v1.placeholder(tf.int64)
    y = x * x
    hub.add_signature(inputs=x, outputs=y)

  spec = hub.create_module_spec(_stateless_module_fn)
  m = hub.Module(spec, name="test_module")
  with tf.compat.v1.Session() as sess:
    # Variables must be initialized before the module can be exported.
    sess.run(tf.compat.v1.global_variables_initializer())
    m.export(module_export_path, sess)
class EnvVariableContextManager(object):
  """Set an environment variable for the context and unset it afterwards.

  If the variable already existed, its previous value is restored on exit
  instead of being dropped. Exceptions raised inside the context propagate:
  the previous implementation returned True from __exit__, which silently
  swallowed every exception — including test assertion failures.
  """

  def __init__(self, key, value):
    self.key = key
    self.value = value
    # Previous value of `key`, captured on __enter__ (None if unset).
    self._previous = None

  def __enter__(self):
    self._previous = os.environ.get(self.key)
    os.environ[self.key] = self.value
    return self

  def __exit__(self, exc_type, exc_value, exc_traceback):
    if self._previous is None:
      del os.environ[self.key]
    else:
      os.environ[self.key] = self._previous
    return False  # do not suppress exceptions from the managed block
class CompressedLoadFormatContext(EnvVariableContextManager):
  """Set the load format to COMPRESSED during the execution of the context."""

  def __init__(self):
    # Uses the resolver's private env-var name and enum value directly.
    super().__init__(resolver._TFHUB_MODEL_LOAD_FORMAT,
                     resolver.ModelLoadFormat.COMPRESSED.value)
class UncompressedLoadFormatContext(EnvVariableContextManager):
  """Set the load format to UNCOMPRESSED during the execution of the context."""

  def __init__(self):
    # Uses the resolver's private env-var name and enum value directly.
    super().__init__(resolver._TFHUB_MODEL_LOAD_FORMAT,
                     resolver.ModelLoadFormat.UNCOMPRESSED.value)
class AutoLoadFormatContext(EnvVariableContextManager):
  """Set the load format to AUTO during the execution of the context."""

  def __init__(self):
    # Uses the resolver's private env-var name and enum value directly.
    super().__init__(resolver._TFHUB_MODEL_LOAD_FORMAT,
                     resolver.ModelLoadFormat.AUTO.value)
| |
import time
import json
import shlex
import base64
import hashlib
import getpass
import logging
from pathlib import PurePosixPath
from xml.etree import ElementTree
from collections import namedtuple
import cryptography
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from .contrib import open_remote_sqlite_database, escape_sql_string
from .otp import TOTPAccount, HOTPAccount, SteamAccount, lenient_base32_decode
LOGGER = logging.getLogger(__name__)

# A registered extractor: `name` is the human-readable app name,
# `simple_name` a normalized identifier derived from it, and `extractor`
# a callable taking an adb wrapper and yielding account objects.
SupportedApp = namedtuple('SupportedApp', ['name', 'simple_name', 'extractor'])

# Populated by the @supported_app decorator below.
SUPPORTED_APPS = []
def supported_app(name):
    '''
    Simple decorator to populate the SUPPORTED_APPS list
    '''
    # Normalize the display name into an identifier-style simple name.
    normalized = name.lower().replace('+', '_plus').replace(' ', '_').replace('.', '')

    def register(extractor):
        SUPPORTED_APPS.append(SupportedApp(name, normalized, extractor))
        return extractor

    return register
@supported_app('Authy')
def read_authy_accounts(adb):
    # Authy splits tokens across two preference files; read whichever exist.
    for pref_file in ['com.authy.storage.tokens.authenticator.xml', 'com.authy.storage.tokens.authy.xml']:
        try:
            handle = adb.read_file(adb.data_root/'com.authy.authy/shared_prefs'/pref_file)
        except FileNotFoundError:
            continue
        for account in json.loads(ElementTree.parse(handle).find('string').text):
            if 'decryptedSecret' not in account:
                # No decrypted secret: fall back to the hex seed, 10s period.
                period = 10
                secret = bytes.fromhex(account['secretSeed'])
            else:
                period = 30
                dec_secret = account['decryptedSecret']
                # Authy strips all digits that aren't Base32
                fixed_secret = ''.join(c for c in dec_secret if c.upper() in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
                if dec_secret.upper() != fixed_secret.upper():
                    LOGGER.warning("Transformed Authy secret %s into %s", dec_secret, fixed_secret)
                secret = lenient_base32_decode(fixed_secret.upper())
            # Authy stores its secrets in the same format as they're provided so we have to guess their type
            yield TOTPAccount(account['name'], secret=secret, digits=account['digits'], period=period)
def _read_freeotp_accounts(adb, *, package_name):
try:
f = adb.read_file(adb.data_root/package_name/'shared_prefs/tokens.xml')
except FileNotFoundError:
return
for string in ElementTree.parse(f).findall('string'):
account = json.loads(string.text)
# <string name="tokenOrder"> doesn't contain an account
if 'secret' not in account:
continue
secret = bytes([b & 0xFF for b in account['secret']])
issuer = account.get('issuerAlt') or account['issuerExt'] or None
name = account['label']
if account['type'] == 'TOTP':
yield TOTPAccount(name, secret, issuer=issuer, digits=account['digits'], period=account['period'], algorithm=account['algo'])
elif account['type'] == 'HOTP':
yield HOTPAccount(name, secret, issuer=issuer, digits=account['digits'], counter=account['counter'], algorithm=account['algo'])
else:
LOGGER.warning('Unknown FreeOTP account type: %s', account['type'])
@supported_app('FreeOTP')
def read_freeotp_accounts(adb):
    # FreeOTP and FreeOTP+ share the same on-device storage format.
    return _read_freeotp_accounts(adb, package_name='org.fedorahosted.freeotp')
@supported_app('FreeOTP+')
def read_freeotp_plus_accounts(adb):
    # Same storage format as FreeOTP, different package name.
    return _read_freeotp_accounts(adb, package_name='org.liberty.android.freeotpplus')
@supported_app('Duo')
def read_duo_accounts(adb):
    try:
        handle = adb.read_file(adb.data_root/'com.duosecurity.duomobile/files/duokit/accounts.json')
    except FileNotFoundError:
        return
    for account in json.load(handle):
        generator = account['otpGenerator']
        # Secrets may be stored as either Base32 or Base64; try both.
        try:
            secret = base64.b32decode(generator['otpSecret'])
        except ValueError:
            secret = base64.b64decode(generator['otpSecret'])
        # A stored counter marks the account as HOTP rather than TOTP.
        if 'counter' in generator:
            yield HOTPAccount(account['name'], secret, counter=generator['counter'])
        else:
            yield TOTPAccount(account['name'], secret)
@supported_app('Google Authenticator')
def read_google_authenticator_accounts(adb):
    try:
        with open_remote_sqlite_database(adb, adb.data_root/'com.google.android.apps.authenticator2/databases/databases') as connection:
            cursor = connection.cursor()
            cursor.execute('SELECT * FROM accounts;')
            for raw_row in cursor.fetchall():
                record = dict(raw_row)
                email = record.get('email')
                issuer = record.get('issuer')
                name = record.get('name') or record.get('original_name')
                # Fall back to the email as the name; otherwise use it as the
                # issuer when no issuer is set.
                if not name:
                    name = email
                elif not issuer:
                    issuer = email
                # Google Authenticator's Base-32 decoder is case-insensitive
                secret = lenient_base32_decode(record['secret'].upper())
                if record['type'] == 0:
                    yield TOTPAccount(name, secret, issuer=issuer)
                elif record['type'] == 1:
                    yield HOTPAccount(name, secret, issuer=issuer, counter=record['counter'])
                else:
                    LOGGER.warning('Unknown Google Authenticator account type: %s', record['type'])
    except FileNotFoundError:
        return
@supported_app('Microsoft Authenticator')
def read_microsoft_authenticator_accounts(adb):
    try:
        with open_remote_sqlite_database(adb, adb.data_root/'com.azure.authenticator/databases/PhoneFactor') as connection:
            cursor = connection.cursor()
            cursor.execute('SELECT * FROM accounts;')
            for row in cursor.fetchall():
                key = base64.b64decode(row['oath_secret_key'])
                kind = row['account_type']
                # Types 0 and 2 are both TOTP, differing only in digit count.
                digits = {0: 6, 2: 8}.get(kind)
                if digits is None:
                    LOGGER.warning('Unknown Microsoft account type: %r', kind)
                    continue
                yield TOTPAccount(name=row['username'], issuer=row['name'], secret=key, digits=digits)
    except FileNotFoundError:
        return
@supported_app('AndOTP')
def read_andotp_accounts(adb):
    """Trigger an encrypted AndOTP backup via broadcast, then decrypt it."""
    # Parse the preferences file to determine what kind of backups we can have AndOTP generate and where they will reside
    try:
        f = adb.read_file(adb.data_root/'org.shadowice.flocke.andotp/shared_prefs/org.shadowice.flocke.andotp_preferences.xml')
    except FileNotFoundError:
        return
    preferences = ElementTree.parse(f)
    try:
        backup_path = PurePosixPath(preferences.find('.//string[@name="pref_backup_directory"]').text)
    except AttributeError:
        # No explicit preference stored: fall back to the default location.
        backup_path = PurePosixPath('$EXTERNAL_STORAGE/andOTP')
    try:
        allowed_backup_broadcasts = [s.text for s in preferences.findall('.//set[@name="pref_backup_broadcasts"]/string')]
    except AttributeError:
        allowed_backup_broadcasts = []
    # Snapshot existing backups (path -> hash) so new ones can be detected.
    try:
        initial_backup_files = {f: adb.hash_file(f) for f in adb.list_dir(backup_path)}
    except FileNotFoundError:
        initial_backup_files = {}
    LOGGER.info('Sending AndOTP a broadcast to create a backup. This may take a few seconds...')
    if 'encrypted' in allowed_backup_broadcasts:
        adb.run('am broadcast -a org.shadowice.flocke.andotp.broadcast.ENCRYPTED_BACKUP org.shadowice.flocke.andotp', prefix=b'am: ')
    elif 'plain' in allowed_backup_broadcasts:
        LOGGER.error('Plaintext AndOTP backups are not supported. Please enable encrypted backups instead.')
        return
    else:
        LOGGER.error('No AndOTP backup broadcasts are setup. Enable encrypted backups in the app settings, under "Backup Broadcasts".')
        return
    backup_data = None
    backup_file = None
    # Find all newly-created backup files
    for i in range(10):
        try:
            LOGGER.info('Waiting for AndOTP to generate the backup file (attempt %d)', i + 1)
            time.sleep(1)
            # A file is "new" if it did not exist before or its hash changed.
            new_backups = [f for f in adb.list_dir(backup_path) if initial_backup_files.get(f) != adb.hash_file(f)]
            if not new_backups:
                continue
            LOGGER.debug('Found AndOTP backup files: %s', new_backups)
            backup_file = new_backups[0]
            backup_data = adb.read_file(backup_file)
            break
        except FileNotFoundError:
            continue
    else:
        # for/else: ten attempts finished without a break -> no backup found.
        LOGGER.error('Could not find the AndOTP backup file. Do you have a backup password set?')
        return
    while True:
        backup_password = getpass.getpass('Enter the AndOTP backup password: ')
        if not backup_password:
            LOGGER.warning('Aborting AndOTP export because user did not enter a password!')
            return
        success = False
        # Try interpreting the data as both the old and new formats
        for new_format in (False, True):
            backup_data.seek(0)
            if new_format:
                # New format: key derived with PBKDF2 using a per-backup
                # iteration count and salt stored at the start of the file.
                num_iterations = int.from_bytes(backup_data.read(4), 'big')
                salt = backup_data.read(12)
                key = hashlib.pbkdf2_hmac(
                    hash_name='sha1',
                    password=backup_password.encode('utf-8'),
                    salt=salt,
                    iterations=num_iterations,
                    dklen=32
                )
            else:
                # Old format: the key is a plain SHA-256 of the password.
                key = hashlib.sha256(backup_password.encode('utf-8')).digest()
            # The encrypted data at the end is the same for both formats
            nonce = backup_data.read(12)
            ciphertext_and_tag = backup_data.read()
            try:
                accounts_json = AESGCM(key).decrypt(nonce, ciphertext_and_tag, associated_data=None)
                success = True
                break
            except cryptography.exceptions.InvalidTag:
                if new_format:
                    # At this point we've tried both formats so the password is wrong
                    LOGGER.error('Could not decrypt the AndOTP backup. Is your password correct?')
                continue
        if success:
            break
    LOGGER.info('Deleting generated backup file: %s', backup_file)
    adb.run(f'rm {shlex.quote(str(backup_file))}', prefix=b'rm: ', root=True)
    for account in json.loads(accounts_json):
        secret = base64.b32decode(account['secret'])
        if account['type'] == 'TOTP':
            yield TOTPAccount(account['label'], secret, digits=account['digits'], period=account['period'], algorithm=account['algorithm'])
        elif account['type'] == 'HOTP':
            yield HOTPAccount(account['label'], secret, digits=account['digits'], counter=account['counter'], algorithm=account['algorithm'])
        elif account['type'] == 'STEAM':
            yield SteamAccount(account['label'], secret)
        else:
            LOGGER.warning('Unknown AndOTP account type: %s', account['type'])
@supported_app('Steam Authenticator')
def read_steam_authenticator_accounts(adb):
    try:
        account_paths = adb.list_dir(adb.data_root/'com.valvesoftware.android.steam.community/files')
    except FileNotFoundError:
        return
    # One JSON file per logged-in Steam account.
    for path in account_paths:
        data = json.load(adb.read_file(path))
        yield SteamAccount(data['account_name'], base64.b64decode(data['shared_secret']))
@supported_app('Battle.net Authenticator')
def read_battle_net_authenticator_accounts(adb):
    try:
        handle = adb.read_file(adb.data_root/'com.blizzard.bma/shared_prefs/com.blizzard.bma.AUTH_STORE.xml')
    except FileNotFoundError:
        return
    encoded_hash = ElementTree.parse(handle).find('.//string[@name="com.blizzard.bma.AUTH_STORE.HASH"]').text
    # Fixed XOR mask used to obfuscate the stored hash.
    key = bytes.fromhex('398e27fc50276a656065b0e525f4c06c04c61075286b8e7aeda59da9813b5dd6c80d2fb38068773fa59ba47c17ca6c6479015c1d5b8b8f6b9a')
    xored = bytes(a ^ b for a, b in zip(bytes.fromhex(encoded_hash), key))
    decoded_hash = xored.decode('ascii')
    # First 40 hex characters hold the secret; the remainder is the serial.
    secret = bytes.fromhex(decoded_hash[:40])
    serial = decoded_hash[40:]
    yield TOTPAccount(f"Battle.net {serial}", issuer="Battle.net", secret=secret, digits=8, period=30)
@supported_app('Aegis')
def read_aegis_accounts(adb):
    try:
        handle = adb.read_file(adb.data_root/'com.beemdevelopment.aegis/files/aegis.json')
    except FileNotFoundError:
        return
    db = json.load(handle)['db']
    # Encrypted vaults store the db as a string rather than an object.
    if isinstance(db, str):
        LOGGER.error('Aegis DB is encrypted. Decryption is currently not supported.')
        return
    if db['version'] != 1:
        LOGGER.error('Invalid Aegis DB version: %d. Only 1 is supported.', db['version'])
        return
    for entry in db['entries']:
        info = entry['info']
        kind = entry['type']
        secret = lenient_base32_decode(info['secret'])
        if kind == 'totp':
            yield TOTPAccount(entry['name'], issuer=entry['issuer'], secret=secret, algorithm=info['algo'], digits=info['digits'], period=info['period'])
        elif kind == 'hotp':
            yield HOTPAccount(entry['name'], issuer=entry['issuer'], secret=secret, algorithm=info['algo'], digits=info['digits'], counter=info['counter'])
        elif kind == 'steam':
            yield SteamAccount(entry['name'], issuer=entry['issuer'], secret=secret)
        else:
            LOGGER.warning('Unknown Aegis account type: %s', kind)
@supported_app('Authenticator Plus')
def read_authenticator_plus_accounts(adb):
    """Decrypt the Authenticator Plus SQLCipher database and yield accounts."""
    # Probe for the database first so we can bail out early when absent.
    try:
        f = adb.read_file(adb.data_root/'com.mufri.authenticatorplus/databases/databases')
    except FileNotFoundError:
        return
    try:
        from pysqlcipher3 import dbapi2 as sqlcipher_sqlite
    except ImportError:
        LOGGER.error("Decrypting Authenticator Plus databases requires the `pysqlcipher3` Python package")
        return
    with open_remote_sqlite_database(
        adb, adb.data_root/'com.mufri.authenticatorplus/databases/databases',
        sqlite3=sqlcipher_sqlite
    ) as connection:
        cursor = connection.cursor()
        master_password = getpass.getpass('Enter the Authenticator Plus master password: ')
        # XXX: This is intentional. You can't parameterize PRAGMA queries.
        cursor.execute(f"PRAGMA key = {escape_sql_string(master_password)};")
        # Cipher settings required to open the app's database; must be set
        # before the first query touches the encrypted pages.
        cursor.execute("PRAGMA cipher_page_size = 1024;")
        cursor.execute("PRAGMA kdf_iter = 64000;")
        cursor.execute("PRAGMA cipher_hmac_algorithm = HMAC_SHA1;")
        cursor.execute("PRAGMA cipher_kdf_algorithm = PBKDF2_HMAC_SHA1;")
        cursor.execute('SELECT * FROM accounts;')
        for row in cursor.fetchall():
            secret = lenient_base32_decode(row['secret'])
            # type 0 -> TOTP, type 1 -> HOTP
            if row['type'] == 0:
                yield TOTPAccount(row['email'], issuer=row['issuer'], secret=secret)
            elif row['type'] == 1:
                yield HOTPAccount(row['email'], issuer=row['issuer'], secret=secret, counter=row['counter'])
            else:
                LOGGER.warning("Unknown account type %s: %s", row['type'], dict(row))
def read_accounts(adb, apps):
    '''
    Extracts accounts from multiple apps, removing duplicates.

    For HOTP accounts that are identical except for their counter, only the
    one with the largest counter is kept.
    '''
    accounts = set()
    for app in apps:
        LOGGER.info('Reading %s accounts', app.name)
        new = list(app.extractor(adb))
        old_count = len(accounts)
        for account in new:
            LOGGER.debug('Found an account %s', account)
            # Only HOTP accounts need special treatment
            if not isinstance(account, HOTPAccount):
                accounts.add(account)
                continue
            # Bug fix: the generator previously yielded `account` instead of
            # `other`, so `duplicate` was always the new account itself.
            try:
                duplicate = next(other for other in accounts if account.counterless_eq(other) and account != other)
            except StopIteration:
                accounts.add(account)
                continue
            LOGGER.warning('Identical HOTP accounts exist with different counters: %s != %s', account, duplicate)
            LOGGER.warning('Picking the one with the largest counter.')
            if duplicate.counter < account.counter:
                accounts.remove(duplicate)
                # Bug fix: was `account.add(account)`, which raised
                # AttributeError instead of inserting into the result set.
                accounts.add(account)
        LOGGER.info('Found %d accounts (%d new)', len(new), len(accounts) - old_count)
    return accounts
| |
"""Utilities for manipulating variant files in standard VCF format.
"""
from collections import namedtuple, defaultdict
import copy
import os
import pprint
import shutil
import subprocess
import toolz as tz
import six
from six.moves import zip
from bcbio import broad, utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.split import parallel_split_combine
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, tools
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
# ## Tumor/normal paired cancer analyses

# Describes one tumor/normal pairing: aligned BAMs and sample names for both
# sides, an optional panel-of-normals VCF, and the original config/data dicts.
PairedData = namedtuple("PairedData", ["tumor_bam", "tumor_name",
                                       "normal_bam", "normal_name", "normal_panel",
                                       "tumor_config", "tumor_data", "normal_data"])
def is_paired_analysis(align_bams, items):
    """Determine if BAMs are from a tumor/normal paired analysis.
    """
    paired = get_paired_bams(align_bams, items)
    return paired is not None
def somatic_batches(items):
    """Group items into somatic calling batches (tumor-only or tumor/normal).

    Returns batches, where a data item may be in pairs, and somatic and non_somatic
    (which are the original list of items).
    """
    somatic, non_somatic = [], []
    data_by_batches = defaultdict(list)
    for data in items:
        if not get_paired_phenotype(data):
            non_somatic.append(data)
            continue
        somatic.append(data)
        # Items without batch assignments are somatic but unbatched.
        for batch in (dd.get_batches(data) or []):
            data_by_batches[batch].append(data)
    return data_by_batches.values(), somatic, non_somatic
def get_paired(items):
    # Convenience wrapper: extract aligned BAMs from the items and pair them.
    return get_paired_bams([dd.get_align_bam(d) for d in items], items)
def get_paired_bams(align_bams, items):
    """Split aligned bams into tumor / normal pairs if this is a paired analysis.

    Allows cases with only tumor BAMs to handle callers that can work without
    normal BAMs or with normal VCF panels.

    Args:
        align_bams: aligned BAM files, ordered to match `items`.
        items: sample data dictionaries with metadata phenotypes.

    Returns:
        PairedData when a tumor sample is present, otherwise None.
    """
    # Consistency fix: include tumor_data in the initialization; it was the
    # only PairedData field relying on the tumor branch to define it.
    tumor_bam, tumor_name, normal_bam, normal_name, normal_panel, tumor_config, tumor_data, normal_data = (None,) * 8
    for bamfile, item in zip(align_bams, items):
        phenotype = get_paired_phenotype(item)
        if phenotype == "normal":
            normal_bam = bamfile
            normal_name = dd.get_sample_name(item)
            normal_data = item
        elif phenotype == "tumor":
            tumor_bam = bamfile
            tumor_name = dd.get_sample_name(item)
            tumor_data = item
            tumor_config = item["config"]
            normal_panel = dd.get_background_variant(item)
    if tumor_bam or tumor_name:
        return PairedData(tumor_bam, tumor_name, normal_bam,
                          normal_name, normal_panel, tumor_config,
                          tumor_data, normal_data)
def get_somatic_variantcallers(items):
    """Retrieve all variant callers for somatic calling, handling somatic/germline.

    Returns:
        set of caller names aggregated across all input items.
    """
    out = []
    for data in items:
        vcs = dd.get_variantcaller(data)
        # somatic/germline split configurations store callers in a dict
        if isinstance(vcs, dict) and "somatic" in vcs:
            vcs = vcs["somatic"]
        if not isinstance(vcs, (list, tuple)):
            vcs = [vcs]
        out += vcs
    # Bug fix: previously returned set(vcs), which reflected only the last
    # item's callers and raised UnboundLocalError on empty input.
    return set(out)
def check_paired_problems(items):
    """Check for incorrectly paired tumor/normal samples in a batch.

    Raises:
        ValueError: for multiple tumor samples in one batch, a normal sample
            without a tumor, or tumor-only input to callers that require a
            normal sample or panel of normals.
    """
    # ensure we're in a paired batch
    if not get_paired(items):
        return
    num_tumor = len([x for x in items if dd.get_phenotype(x).lower() == "tumor"])
    if num_tumor > 1:
        raise ValueError("Unsupported configuration: found multiple tumor samples in batch %s: %s" %
                         (tz.get_in(["metadata", "batch"], items[0]),
                          [dd.get_sample_name(data) for data in items]))
    elif num_tumor == 0 and any(dd.get_phenotype(data).lower() == "normal" for data in items):
        raise ValueError("Found normal sample without tumor in batch %s: %s" %
                         (tz.get_in(["metadata", "batch"], items[0]),
                          [dd.get_sample_name(data) for data in items]))
    else:
        # Tumor-only batch: some somatic callers cannot run without a normal
        # sample or a panel of normals.
        vcs = get_somatic_variantcallers(items)
        if "mutect" in vcs or "mutect2" in vcs or "strelka2" in vcs:
            paired = get_paired(items)
            if not (paired.normal_data or paired.normal_panel):
                raise ValueError("MuTect, MuTect2 and Strelka2 somatic calling requires normal sample or panel: %s" %
                                 [dd.get_sample_name(data) for data in items])
def get_paired_phenotype(data):
    """Retrieve the phenotype for a paired tumor/normal analysis.

    Returns "tumor" or "normal" from the item metadata, or None when the
    phenotype is missing or unrecognized.
    """
    phenotype = tz.get_in(["metadata", "phenotype"], data)
    if phenotype in ("tumor", "normal"):
        return phenotype
    return None
# ## General utilities

def fix_ambiguous_cl(column=4):
    """awk command to replace non-N ambiguous REF bases with N.

    Some callers include these if present in the reference genome but GATK does
    not like them.
    """
    awk_template = r"""awk -F$'\t' -v OFS='\t' '{if ($0 !~ /^#/) gsub(/[KMRYSWBVHDXkmryswbvhdx]/, "N", $%s) } {print}'"""
    return awk_template % column
def remove_dup_cl():
    """awk command line to remove duplicate alleles where the ref and alt are the same.
    """
    # Skip non-header lines whose REF ($4) equals ALT ($5); print the rest.
    return (r""" awk -F$'\t' -v OFS='\t' """
            r"""'$1!~/^#/ && $4 == $5 {next} {print}'""")
def get_indelcaller(d_or_c):
    """Retrieve string for indelcaller to use, or empty string if not specified.

    Accepts either a full data dictionary (with a nested "config") or a bare
    config dictionary with a top-level "algorithm" section.

    Returns:
        The configured indelcaller name, or "" when unset. List/tuple values
        collapse to their first element.
    """
    # Bug fix: the previous conditional evaluated to d_or_c on BOTH branches,
    # so data dictionaries were never unwrapped to their nested "config".
    config = d_or_c["config"] if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c
    indelcaller = config["algorithm"].get("indelcaller", "")
    if not indelcaller:
        # Normalize falsy values (None, empty list) to the empty string.
        indelcaller = ""
    if isinstance(indelcaller, (list, tuple)):
        indelcaller = indelcaller[0] if (len(indelcaller) > 0) else ""
    return indelcaller
def write_empty_vcf(out_file, config=None, samples=None):
    """Write a header-only VCF, bgzipping and indexing when .vcf.gz is requested."""
    needs_bgzip = out_file.endswith(".vcf.gz")
    if needs_bgzip:
        # Write plain text first; compression happens afterwards.
        out_file = out_file.replace(".vcf.gz", ".vcf")
    sample_cols = ("\tFORMAT\t" + "\t".join(samples)) if samples else ""
    with open(out_file, "w") as out_handle:
        out_handle.write("##fileformat=VCFv4.1\n"
                         "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO%s\n" % (sample_cols))
    if not needs_bgzip:
        return out_file
    return bgzip_and_index(out_file, config or {})
def to_standardonly(in_file, ref_file, data):
    """Subset a VCF input file to standard chromosomes (1-22,X,Y,MT).

    Returns the bgzipped/indexed subset VCF, or the original input when no
    subset output was produced.
    """
    from bcbio.heterogeneity import chromhacks
    out_file = "%s-stdchrs.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_exists(out_file):
        stds = []
        # Keep only primary (non-alt) contigs from the reference.
        for c in ref.file_contigs(ref_file):
            if chromhacks.is_nonalt(c.name):
                stds.append(c.name)
        if stds:
            with file_transaction(data, out_file) as tx_out_file:
                stds = ",".join(stds)
                # bcftools region subsetting needs a bgzipped, indexed input.
                in_file = bgzip_and_index(in_file, data["config"])
                cmd = "bcftools view -o {tx_out_file} -O z {in_file} {stds}"
                do.run(cmd.format(**locals()), "Subset to standard chromosomes")
    return bgzip_and_index(out_file, data["config"]) if utils.file_exists(out_file) else in_file
def split_snps_indels(orig_file, ref_file, config):
    """Split a variant call file into SNPs and INDELs for processing.

    Returns:
        (snp_file, indel_file) paths derived from the original file name.
    """
    base, ext = utils.splitext_plus(orig_file)
    snp_file = "{base}-snp{ext}".format(base=base, ext=ext)
    indel_file = "{base}-indel{ext}".format(base=base, ext=ext)
    for out_file, select_arg in [(snp_file, "--types snps"),
                                 (indel_file, "--exclude-types snps")]:
        if not utils.file_exists(out_file):
            with file_transaction(config, out_file) as tx_out_file:
                bcftools = config_utils.get_program("bcftools", config)
                # "z" = compressed output for .gz targets, "v" = plain VCF.
                output_type = "z" if out_file.endswith(".gz") else "v"
                cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}"
                do.run(cmd.format(**locals()), "Subset to SNPs and indels")
            if out_file.endswith(".gz"):
                bgzip_and_index(out_file, config)
    return snp_file, indel_file
def get_normal_sample(in_file):
    """Retrieve the normal sample name from a tumor/normal VCF's PEDIGREE header.

    Returns None when no PEDIGREE header line is present.
    """
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            if not line.startswith("##PEDIGREE"):
                continue
            # e.g. ##PEDIGREE=<Derived=tumor,Original=normal> -> "normal"
            return line.strip().split("Original=")[1][:-1]
def get_samples(in_file):
    """Retrieve samples present in a VCF file
    """
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            if line.startswith("#CHROM"):
                # Sample columns follow the nine fixed VCF columns.
                return line.strip().split("\t")[9:]
    raise ValueError("Did not find sample header in VCF file %s" % in_file)
def _get_exclude_samples(in_file, to_exclude):
    """Identify samples in the exclusion list which are actually in the VCF.
    """
    exclusions = set(to_exclude)
    include, exclude = [], []
    for sample in get_samples(in_file):
        target = exclude if sample in exclusions else include
        target.append(sample)
    return include, exclude
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None):
    """Exclude specific samples from an input VCF file.

    Returns the original input when no listed samples are present, otherwise
    a VCF restricted to the remaining samples.
    """
    include, exclude = _get_exclude_samples(in_file, to_exclude)
    # can use the input sample, all exclusions already gone
    if len(exclude) == 0:
        out_file = in_file
    elif not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", config)
            output_type = "z" if out_file.endswith(".gz") else "v"
            include_str = ",".join(include)
            filter_str = "-f %s" % filters if filters is not None else ""  # filters could be e.g. 'PASS,.'
            cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude))
    return out_file
def select_sample(in_file, sample, out_file, config, filters=None):
    """Select a single sample from the supplied multisample VCF file.
    """
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            # Single-sample inputs can simply be copied through.
            if len(get_samples(in_file)) == 1:
                shutil.copy(in_file, tx_out_file)
            else:
                if in_file.endswith(".gz"):
                    # bcftools -s requires a bgzipped, indexed input.
                    bgzip_and_index(in_file, config)
                bcftools = config_utils.get_program("bcftools", config)
                output_type = "z" if out_file.endswith(".gz") else "v"
                filter_str = "-f %s" % filters if filters is not None else ""  # filters could be e.g. 'PASS,.'
                cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}"
                do.run(cmd.format(**locals()), "Select sample: %s" % sample)
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
def vcf_has_variants(in_file):
    """Check whether a VCF file exists and contains at least one record."""
    if not os.path.exists(in_file):
        return False
    with utils.open_gzipsafe(in_file) as in_handle:
        # Any non-blank, non-header line counts as a variant record.
        return any(line.strip() and not line.startswith("#") for line in in_handle)
def vcf_has_nonfiltered_variants(in_file):
    """Check for at least one record with a FILTER value of PASS or '.'."""
    if not os.path.exists(in_file):
        return False
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            if not line.strip() or line.startswith("#"):
                continue
            # FILTER is the seventh tab-separated column.
            if line.split("\t")[6] in ("PASS", "."):
                return True
    return False
# ## Merging of variant files

def merge_variant_files(orig_files, out_file, ref_file, config, region=None):
    """Combine multiple VCF files with different samples into a single output file.

    Uses bcftools merge on bgzipped input files, handling both tricky merge and
    concatenation of files. Does not correctly handle files with the same
    sample (use combine_variant_files instead).
    """
    in_pipeline = False
    # Pipeline-style inputs arrive as a dict keyed by config["file_key"].
    if isinstance(orig_files, dict):
        file_key = config["file_key"]
        in_pipeline = True
        orig_files = orig_files[file_key]
    out_file = _do_merge(orig_files, out_file, config, region)
    if in_pipeline:
        # Re-wrap in the pipeline dict structure expected downstream.
        return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
    else:
        return out_file
def _do_merge(orig_files, out_file, config, region):
    """Do the actual work of merging with bcftools merge.
    """
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            _check_samples_nodups(orig_files)
            # bgzip/index inputs in parallel; bcftools merge needs indexed files.
            prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
            input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0]
            with open(input_vcf_file, "w") as out_handle:
                for fname in prep_files:
                    out_handle.write(fname + "\n")
            bcftools = config_utils.get_program("bcftools", config)
            output_type = "z" if out_file.endswith(".gz") else "v"
            region_str = "-r {}".format(region) if region else ""
            cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}"
            do.run(cmd.format(**locals()), "Merge variants")
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
def _check_samples_nodups(fnames):
"""Ensure a set of input VCFs do not have duplicate samples.
"""
counts = defaultdict(int)
for f in fnames:
for s in get_samples(f):
counts[s] += 1
duplicates = [s for s, c in counts.items() if c > 1]
if duplicates:
raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames))
def _sort_by_region(fnames, regions, ref_file, config):
    """Sort a set of regionally split files by region for ordered output.

    Returns:
        [(contig, fname)] ordered by reference contig order and start/end
        coordinates, with duplicate file names collapsed to their first region.
    """
    contig_order = {}
    for i, sq in enumerate(ref.file_contigs(ref_file, config)):
        contig_order[sq.name] = i
    sitems = []
    assert len(regions) == len(fnames), (regions, fnames)
    added_fnames = set([])
    for region, fname in zip(regions, fnames):
        if fname not in added_fnames:
            # Regions arrive as (contig, start, end) tuples,
            # "contig:start-end" strings, or bare contig names.
            if isinstance(region, (list, tuple)):
                c, s, e = region
            elif isinstance(region, six.string_types) and region.find(":") >= 0:
                c, coords = region.split(":")
                s, e = [int(x) for x in coords.split("-")]
            else:
                c = region
                s, e = 0, 0
            sitems.append(((contig_order[c], s, e), c, fname))
            added_fnames.add(fname)
    sitems.sort()
    return [(x[1], x[2]) for x in sitems]
def concat_variant_files(orig_files, out_file, regions, ref_file, config):
    """Concatenate multiple variant files from regions into a single output file.

    Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails.
    These both only combine samples and avoid parsing, allowing scaling to large
    file sizes.
    """
    if not utils.file_exists(out_file):
        input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config)
        try:
            out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config)
        except subprocess.CalledProcessError as msg:
            # Known GATK header/ordering failures: retry with bcftools.
            if ("We require all VCFs to have complete VCF headers" in str(msg)
                    or "Features added out of order" in str(msg)
                    or "The reference allele cannot be missing" in str(msg)):
                out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True)
            else:
                # Unexpected failure: dump the inputs for debugging, re-raise.
                print("## Original contigs")
                pprint.pprint(zip(regions, orig_files))
                print("## Ordered file list")
                with open(input_file_list) as in_handle:
                    print(in_handle.read())
                raise
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
def _run_concat_variant_files_gatk4(input_file_list, out_file, config):
    """Use GATK4 GatherVcfs for concatenation of scattered VCFs.
    """
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file]
            # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling
            config = utils.deepish_copy(config)
            if "gatk4" in dd.get_tools_off({"config": config}):
                config["algorithm"]["tools_off"].remove("gatk4")
            # Allow specification of verbosity in the unique style this tool uses
            resources = config_utils.get_resources("gatk", config)
            opts = [str(x) for x in resources.get("options", [])]
            if "--verbosity" in opts:
                params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]]
            broad_runner = broad.runner_from_config(config)
            broad_runner.run_gatk(params)
    return out_file
def _get_file_list(orig_files, out_file, regions, ref_file, config):
    """Create file with region sorted list of non-empty VCFs for concatenating.

    Returns:
        Path to a text file listing the bgzipped inputs, one per line.
    """
    sorted_files = _sort_by_region(orig_files, regions, ref_file, config)
    # Prefer inputs that contain variants; an all-empty input set still gets
    # merged so downstream steps receive a valid (empty) VCF.
    exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)]
    if len(exist_files) == 0:  # no non-empty inputs, merge the empty ones
        exist_files = [x for c, x in sorted_files if os.path.exists(x)]
    elif len(exist_files) > 1:
        exist_files = _fix_gatk_header(exist_files, out_file, config)
    else:
        exist_files = [x for c, x in exist_files]
    ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
    input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0]
    with open(input_file_list, "w") as out_handle:
        for fname in ready_files:
            out_handle.write(fname + "\n")
    return input_file_list
def _fix_gatk_header(exist_files, out_file, config):
    """Ensure consistent headers for VCF concatenation.

    Fixes problems for genomes that start with chrM by reheadering the first file.
    These files do haploid variant calling which lack the PID phasing key/value
    pair in FORMAT, so initial chrM samples cause errors during concatenation
    due to the lack of header merging. This fixes this by updating the first header.
    """
    from bcbio.variation import ploidy
    c, base_file = exist_files[0]
    replace_file = base_file
    items = [{"config": config}]
    # If the first file covers a haploid region, find a later diploid file
    # whose richer header can replace it.
    if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1:
        for c, x in exist_files[1:]:
            if ploidy.get_ploidy(items, (c, 1, 2)) > 1:
                replace_file = x
                break
    base_fix_file = os.path.join(os.path.dirname(out_file),
                                 "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file)))
    with file_transaction(config, base_fix_file) as tx_out_file:
        # Extract the replacement header, then apply it to the first file.
        header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0]
        do.run("zgrep ^# %s > %s"
               % (replace_file, header_file), "Prepare header file for merging")
        resources = config_utils.get_resources("picard", config)
        ropts = []
        if "options" in resources:
            ropts += [str(x) for x in resources.get("options", [])]
        do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" %
               (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)),
               "Reheader initial VCF file in merge")
    bgzip_and_index(base_fix_file, config)
    return [base_fix_file] + [x for (c, x) in exist_files[1:]]
def concat_variant_files_bcftools(orig_files, out_file, config):
    """Concatenate VCF inputs with bcftools, preparing a file list of bgzipped inputs."""
    if utils.file_exists(out_file):
        return bgzip_and_index(out_file, config)
    present = [fname for fname in orig_files if os.path.exists(fname)]
    prepped = run_multicore(p_bgzip_and_index, [[fname, config] for fname in present], config)
    file_list = "%s-files.list" % utils.splitext_plus(out_file)[0]
    with open(file_list, "w") as out_handle:
        for fname in prepped:
            out_handle.write(fname + "\n")
    return _run_concat_variant_files_bcftools(file_list, out_file, config)
def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False):
    """Concatenate variant files using bcftools concat, potentially using the fast naive option.

    naive skips record checks for speed; otherwise overlapping records are allowed.
    """
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", config)
            # Compressed output when the target ends in .gz, plain VCF otherwise.
            output_type = "z" if out_file.endswith(".gz") else "v"
            args = "--naive" if naive else "--allow-overlaps"
            cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}"
            do.run(cmd.format(**locals()), "bcftools concat variants")
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
def combine_variant_files(orig_files, out_file, ref_file, config,
                          quiet_out=True, region=None):
    """Combine VCF files from the same sample into a single output file.
    Handles cases where we split files into SNPs/Indels for processing then
    need to merge back into a final file.

    :param orig_files: list of VCF paths, or a pipeline dict keyed by config["file_key"]
    :param out_file: merged output path
    :param ref_file: genome reference (used to find the Picard sequence dictionary)
    :param config: bcbio configuration dictionary
    :param quiet_out: unused here — NOTE(review): kept for caller compatibility; confirm
    :param region: passed through to pipeline output records only
    :returns: out_file path, or a one-element pipeline record list when input was a dict
    """
    in_pipeline = False
    if isinstance(orig_files, dict):
        file_key = config["file_key"]
        in_pipeline = True
        orig_files = orig_files[file_key]
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            # Only merge inputs that still exist on disk; bgzip/index them first.
            exist_files = [x for x in orig_files if os.path.exists(x)]
            ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
            dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0]
            cores = dd.get_num_cores({"config": config})
            # Scale Picard memory up when multiple cores are available.
            memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
            cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \
                  ["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \
                  ["I=%s" % f for f in ready_files]
            cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd))
            do.run(cmd, "Combine variant files")
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    if in_pipeline:
        return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
    else:
        return out_file
def sort_by_ref(vcf_file, data):
    """Sort a VCF file by genome reference and position, adding contig information.

    Writes a bgzipped, indexed "<base>-prep.vcf.gz" next to the input; the
    original ##contig lines are replaced by ones generated from the reference.
    :returns: path to the bgzipped, tabix-indexed sorted output
    """
    out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0]
    if not utils.file_uptodate(out_file, vcf_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Build replacement ##contig header lines from the reference index.
            header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(header_file, "w") as out_handle:
                for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
                    out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
            cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat"
            # Strip existing contig lines, annotate with the fresh header, then sort.
            cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | "
                   "vt sort -m full -o {tx_out_file} -")
            # vt sort writes temporary files in the current directory.
            with utils.chdir(os.path.dirname(tx_out_file)):
                do.run(cmd.format(**locals()), "Sort VCF by reference")
    return bgzip_and_index(out_file, data["config"])
def add_contig_to_header_cl(ref_file, out_file):
    """Add update ##contig lines to VCF header, required for bcftools/GATK compatibility.

    Writes a header file with contig definitions from the reference and returns
    a shell pipeline fragment that strips old contig lines and annotates with it.
    """
    header_file = "%s-contig_header.txt" % utils.splitext_plus(out_file)[0]
    contig_lines = ["##contig=<ID=%s,length=%s>\n" % (region.name, region.size)
                    for region in ref.file_contigs(ref_file, {})]
    with open(header_file, "w") as out_handle:
        out_handle.writelines(contig_lines)
    return ("grep -v ^##contig | bcftools annotate -h %s" % header_file)
def add_contig_to_header(line, ref_file):
    """Streaming target to add contigs to a VCF file header.

    Lines other than the ##fileformat header pass through unchanged; the
    ##fileformat line gets ##contig definitions appended after it.
    """
    if not line.startswith("##fileformat=VCF"):
        return line
    out = [line]
    out.extend("##contig=<ID=%s,length=%s>" % (region.name, region.size)
               for region in ref.file_contigs(ref_file))
    return "\n".join(out)
# ## Parallel VCF file combining
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel):
    """Combine variants in parallel by chromosome, concatenating final outputs.

    Splits work per reference contig, merges each region with
    "merge_variant_files" and concatenates results via "concat_variant_files".
    :returns: out_file path
    """
    file_key = "vcf_files"
    def split_by_region(data):
        # Build one (output, ref, config, region) task per reference contig.
        base, ext = utils.splitext_plus(os.path.basename(out_file))
        args = []
        for region in [x.name for x in ref.file_contigs(ref_file, config)]:
            region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base,
                                      "%s-%s%s" % (base, region, ext))
            utils.safe_makedir(os.path.dirname(region_out))
            args.append((region_out, ref_file, config, region))
        return out_file, args
    # Copy config so the pipeline-only "file_key" entry does not leak to callers.
    config = copy.deepcopy(config)
    config["file_key"] = file_key
    prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
    items = [[{file_key: prep_files}]]
    parallel_split_combine(items, split_by_region, run_parallel,
                           "merge_variant_files", "concat_variant_files",
                           file_key, ["region", "sam_ref", "config"], split_outfile_i=0)
    return out_file
# ## VCF preparation
def move_vcf(orig_file, new_file):
    """Move a VCF file together with any accompanying index files (.idx, .tbi)."""
    for ext in ("", ".idx", ".tbi"):
        src = orig_file + ext
        if os.path.exists(src):
            shutil.move(src, new_file + ext)
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None):
    """bgzip and tabix index an input file, handling VCF and BED.

    :param in_file: input file; may already be bgzipped (".gz")
    :param config: bcbio configuration dictionary (defaults to empty)
    :param remove_orig: delete the uncompressed input after compression
    :param prep_cmd: optional shell filter inserted between cat and bgzip
    :param tabix_args: extra arguments passed through to tabix
    :param out_dir: optional alternative output directory (disables remove_orig)
    :returns: path to the bgzipped, tabix-indexed output
    """
    if config is None:
        config = {}
    out_file = in_file if in_file.endswith(".gz") else in_file + ".gz"
    if out_dir:
        # Writing elsewhere: keep the original input in place.
        remove_orig = False
        out_file = os.path.join(out_dir, os.path.basename(out_file))
    if (not utils.file_exists(out_file) or not os.path.lexists(out_file)
          or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))):
        assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file
        assert os.path.exists(in_file), "Input file %s not found" % in_file
        if not utils.file_uptodate(out_file, in_file):
            with file_transaction(config, out_file) as tx_out_file:
                bgzip = tools.get_bgzip_cmd(config)
                cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
                if prep_cmd:
                    prep_cmd = "| %s " % prep_cmd
                cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}"
                try:
                    do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file))
                except subprocess.CalledProcessError:
                    # Race conditions: ignore errors where file has been deleted by another
                    if os.path.exists(in_file) and not os.path.exists(out_file):
                        raise
            if remove_orig:
                try:
                    os.remove(in_file)
                except OSError:  # Handle cases where run in parallel and file has been deleted
                    pass
    tabix_index(out_file, config, tabix_args=tabix_args)
    return out_file
@utils.map_wrap
@zeromq_aware_logging
def p_bgzip_and_index(in_file, config):
    """Parallel-aware bgzip and indexing

    Wrapped for multicore dispatch; returns the output path in a one-element
    list as required by the map_wrap convention.
    """
    return [bgzip_and_index(in_file, config)]
def _guess_preset(f):
if f.lower().endswith(".vcf.gz"):
return "vcf"
elif f.lower().endswith(".bed.gz"):
return "bed"
elif f.lower().endswith(".gff.gz"):
return "gff"
else:
raise ValueError("Unexpected tabix input: %s" % f)
def tabix_index(in_file, config, preset=None, tabix_args=None):
    """Index a file using tabix.

    :param in_file: bgzipped input file
    :param config: bcbio configuration dictionary
    :param preset: tabix preset (vcf/bed/gff); guessed from the extension if None
    :param tabix_args: raw arguments overriding the preset-based invocation
    :returns: path to the .tbi index
    """
    in_file = os.path.abspath(in_file)
    out_file = in_file + ".tbi"
    if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file):
        # Remove old index files to prevent linking into tx directory
        utils.remove_safe(out_file)
        with file_transaction(config, out_file) as tx_out_file:
            tabix = tools.get_tabix_cmd(config)
            # tabix writes the index next to its input, so symlink the data
            # file into the transaction directory and index the link.
            tx_in_file = os.path.splitext(tx_out_file)[0]
            utils.symlink_plus(in_file, tx_in_file)
            if tabix_args:
                cmd = "{tabix} -f {tabix_args} {tx_in_file}"
            else:
                preset = _guess_preset(in_file) if preset is None else preset
                cmd = "{tabix} -f -p {preset} {tx_in_file}"
            do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file))
    return out_file
def is_gvcf_file(in_file):
    """Check if an input file is raw gVCF

    Inspects up to ~100 non-meta lines for caller-specific gVCF markers.
    Returns True when a marker is found; otherwise falls off the end and
    implicitly returns None (falsy).
    """
    to_check = 100
    n = 0
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            if not line.startswith("##"):
                if n > to_check:
                    break
                n += 1
                parts = line.split("\t")
                # GATK
                if parts[4] == "<NON_REF>":
                    return True
                # strelka2
                if parts[4] == "." and parts[7].startswith("BLOCKAVG"):
                    return True
                # freebayes
                if parts[4] == "<*>":
                    return True
                # platypus
                if parts[4] == "N" and parts[6] == "REFCALL":
                    return True
def cyvcf_add_filter(rec, name):
    """Append ``name`` to a cyvcf2 record's FILTER field if not already present."""
    existing = rec.FILTER.split(";") if rec.FILTER else []
    if name not in existing:
        existing.append(name)
    rec.FILTER = existing
    return rec
def cyvcf_remove_filter(rec, name):
    """Drop ``name`` from a cyvcf2 record's FILTER field, defaulting to PASS."""
    current = rec.FILTER.split(";") if rec.FILTER else []
    remaining = [f for f in current if str(f) != name]
    rec.FILTER = remaining if remaining else ["PASS"]
    return rec
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Experimental support for defining XLA shardings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.compiler.xla.python_api import xla_shape
from tensorflow.core.framework import attr_value_pb2
class Sharding(object):
  """A class to support adding sharding attributes to Ops.

  Use the factory constructors and then call apply_to_tensor:
    Sharding.replicate().apply_to_tensor(tensor)
  """

  def __init__(self, proto=None):
    """Do not use this constructor; use the factory functions below."""
    self._proto = proto

  @classmethod
  def replicate(cls):
    """Returns a replicated sharding attribute.

    This causes an op to be computed in its entirety independently on all
    cores in the XLA device.
    """
    return Sharding(
        proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))

  @classmethod
  def assign_device(cls, core):
    """Returns an AssignDevice sharding attribute.

    This causes an op to be computed in its entirety only on one core in
    the XLA device.

    Args:
      core: The core to assign this Op to.
    """
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.MAXIMAL,
            tile_assignment_dimensions=[1],
            tile_assignment_devices=[core]))

  @classmethod
  def tile(cls, tile_shape, tile_assignment):
    """Returns a Tiled sharding attribute.

    This causes an op to be partially computed on multiple cores in the
    XLA device.

    Args:
      tile_shape: A xla_shape.Shape describing the tile shape that each core
        will compute.
        The tile shape does not need to be divisible by the tile assignment.
      tile_assignment: An np.ndarray describing the topology of the tiling and
        which device will compute which part of the topology.

    Raises:
      TypeError: tile_assignment was not of np.array type or tile_shape was
        not of xla_shape.Shape type.

    TODO(jmolloy): This concept is nefarious and is not
    something we really want to expose to users (especially as the
    contract for tile_assignment is very strict).
    """
    if not isinstance(tile_assignment, np.ndarray):
      raise TypeError('Tile assignment must be of type np.ndarray')
    if not isinstance(tile_shape, xla_shape.Shape):
      raise TypeError('Tile shape must be of type xla_shape.Shape')
    dims = list(tile_assignment.shape)
    # Row-major flattening matches the dimension ordering in the proto.
    flattened_devices = tile_assignment.reshape(-1, order='C')
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_shape=tile_shape.message,
            tile_assignment_dimensions=dims,
            tile_assignment_devices=list(flattened_devices)))

  @classmethod
  def split(cls, tensor, split_dimension, num_devices):
    """Returns a Sharding that splits a tensor across a dimension.

    This creates a Tiled attribute, similar to tile(), but easier to use for the
    common case of tiling a tensor N ways in one dimension.

    Args:
      tensor: A tf.Tensor to split.
      split_dimension: The dimension number to split.
      num_devices: The number of cores to split `tensor` over.

    Raises:
      ValueError: The tensor to split was smaller in the split dimension than
        the number of devices to split over.
    """
    tensor.shape.assert_is_fully_defined()
    shape = tensor.shape.as_list()
    if shape[split_dimension] < num_devices:
      # Bug fix: the message was previously passed logging-style (format
      # string plus separate args), so it was never interpolated into the
      # exception text. Interpolate explicitly.
      raise ValueError(
          'Split dimension was smaller than the required number '
          'of splits: shape=%r, dimension=%r, num_devices=%r' %
          (shape, split_dimension, num_devices))
    tile_shape = shape
    # Each tile covers a ceil(size / num_devices) slice of the split dimension.
    tile_shape[split_dimension] = int(
        math.ceil(tile_shape[split_dimension] / num_devices))
    tile_shape_proto = xla_data_pb2.Shape(
        element_type=xla_data_pb2.F32, dimensions=tile_shape)
    tile_assignment_dims = [1] * len(shape)
    tile_assignment_dims[split_dimension] = num_devices
    return Sharding(
        proto=xla_data_pb2.OpSharding(
            type=xla_data_pb2.OpSharding.OTHER,
            tile_shape=tile_shape_proto,
            tile_assignment_dimensions=tile_assignment_dims,
            tile_assignment_devices=range(num_devices)))

  def apply_to_tensor(self, tensor):
    """Applies this Sharding attribute to `tensor`."""
    if len(tensor.op.outputs) > 1:
      proto = self._get_or_create_tuple_proto(tensor.op)
      # We can't mutate an element of old_proto.tuple_shardings, so create
      # a new proto.
      tuple_shardings = list(proto.tuple_shardings)
      tuple_shardings[tensor.value_index] = self._proto
      proto = xla_data_pb2.OpSharding(
          type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)
    else:
      proto = self._proto
    attr_value = attr_value_pb2.AttrValue(s=proto.SerializeToString())
    # TODO(jmolloy): This need to be seriously revisited before declaring this
    # API available for public use.
    # pylint: disable=protected-access
    tensor.op._set_attr('_XlaSharding', attr_value)

  @property
  def proto(self):
    """Return the sharding protobuf of type xla_data_pb2.OpSharding."""
    return self._proto

  def _get_or_create_tuple_proto(self, op):
    # Reuse an existing tuple sharding attribute when the op has one;
    # otherwise start from all-replicated.
    try:
      attr = op.get_attr('_XlaSharding')
      proto = xla_data_pb2.OpSharding()
      proto.ParseFromString(attr)
      return proto
    except ValueError:
      return self._create_tuple_proto(op)

  def _create_tuple_proto(self, op):
    # One REPLICATED entry per op output.
    shardings = [
        xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED)
        for _ in op.outputs
    ]
    return xla_data_pb2.OpSharding(
        type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=shardings)
# Helpers for the above factory functions that allow easy application of
# shardings, for example:
# tensor = xla_sharding.replicate(tensor)
def replicate(tensor):
  """Shard `tensor` by replicating it across all cores; returns `tensor`."""
  sharding = Sharding.replicate()
  sharding.apply_to_tensor(tensor)
  return tensor
def assign_device(tensor, device):
  """Pin `tensor`'s op to a single core via a MAXIMAL sharding; returns `tensor`."""
  sharding = Sharding.assign_device(device)
  sharding.apply_to_tensor(tensor)
  return tensor
def tile(tensor, tile_shape, tile_assignment):
  """Apply a tiled sharding to `tensor`'s op; returns `tensor`."""
  sharding = Sharding.tile(tile_shape, tile_assignment)
  sharding.apply_to_tensor(tensor)
  return tensor
def split(tensor, split_dimension, num_devices):
  """Split `tensor` over `num_devices` cores along one dimension; returns `tensor`."""
  sharding = Sharding.split(tensor, split_dimension, num_devices)
  sharding.apply_to_tensor(tensor)
  return tensor
| |
from __future__ import print_function
from subprocess import Popen, PIPE
import subprocess
import os
import re
import glob
import shutil
from Bio import Entrez
#from Bio import SeqIO
import pandas as pd
from pyfaidx import Fasta
from more_itertools import unique_everseen
from subprocess import check_output
from http.client import HTTPException
import time
# NCBI requires an email address for Entrez queries.
# NOTE(review): placeholder value — set a real address before running.
Entrez.email = "Your email id"
# Species restriction applied to every gene search.
species_name = "mus musculus"
#variables
# Accession numbers already downloaded, to avoid duplicate fetches.
all_accession = []
#opening files to write
all_geneids_file = open('all_gene_ids.txt', 'w')
all_genename_geneid = open('all_genename_geneid.txt', 'w')
gene_information_file = open('gene_information_file.txt','w')
gene_information_file.write('Genename,Chromosome,nc_accession,Start_End,GeneID'+"\n")
fuzz_run_output_file = open('fuzz_run_output_file.txt', 'w')
# fuzznuc motif patterns for OCT4 binding-site variants (names mapped in
# fetch_name_by_pattern below).
patterns = ['CTTTGTTAT[GT][TC][TA][ATC]AT','CATTGTGAT[GT][TC][TA][ATC]AT','CAGGACGAT[GT][TC][TA][ATC]AT','CAGGGTGAT[GT][TC][TA][ATC]AT',
            'TTTTGTAAT[GT][TC][TA][ATC]AT','GATTGTCAT[GT][TC][TA][ATC]AT','CATTACGAT[GT][TC][TA][ATC]AT','ATTTGTAAT[GT][TC][TA][ATC]AT']
# Given gene name (eg; ADAM28) and species name (mus musculus) it fetches their gene ids
def fetch_gene_ids(gene_name, species_name):
    """Return the first Entrez gene id matching gene_name in species_name.

    Returns the id as a string, or an empty string when nothing matched.
    """
    print("fetching the gene name:", gene_name)
    query = gene_name + "[Gene] AND " + species_name
    handle = Entrez.esearch(db="gene", term=query)
    record = Entrez.read(handle)
    matched_ids = record['IdList']
    return ', '.join(matched_ids[0:1])
def extract_gene_information(gene_id):
    """Fetch and return the Entrez gene record (text) for the given gene id."""
    print("Extracting the fasta sequence for the gene id:", gene_id)
    handle = Entrez.efetch(db="gene", id=gene_id, rettype="fasta", retmode="text")
    return handle.read()
def format_gene_record(gene_information):
    """Parse the Annotation line of an Entrez gene record.

    Returns (nc_accession, chromosome, start_end) where start_end is a
    "start..end" string, or None when no Annotation line could be parsed.
    """
    for line in gene_information.split('\n'):
        try:
            if line.startswith('Annotation:'):
                # Bug fix: the original pattern '\Chromosome...' used the
                # invalid escape '\C', which raises re.error on Python 3.7+
                # (so parsing always failed). Use a raw string instead.
                match = re.search(r'Chromosome\s\w(.*)', line)
                annotation = match.group(0)
                annotation = annotation.replace(', complement', '')
                annotation = annotation.replace('(', '')
                annotation = annotation.replace(')', '')
                fields = annotation.split()
                chromosome = fields[1]
                nc_accession = fields[2]
                start_end = fields[3]
                return nc_accession, chromosome, start_end
        except Exception as e:
            # Keep scanning other lines if one Annotation line is malformed.
            print(e)
def check_length_gene_record(nc_accession, chromosome, start_end):
    """Return True when all gene record fields are present and usable.

    Bug fix: the original compared each field against the string 'None' and
    joined the checks with ``and``, so a record was only rejected when every
    field was literally 'None'. Any missing field (None or 'None') now makes
    the record invalid.
    """
    return not any(value in (None, 'None')
                   for value in (nc_accession, chromosome, start_end))
def check_unique(nc_accession):
    """Track nc_accession in the global list; True when new, False on repeat."""
    if nc_accession in all_accession:
        print("This NC accession number downloaded already")
        return False
    all_accession.append(nc_accession)
    print("This NC accession number is new")
    return True
def extract_fasta_sequence(nc_accession):
    """Download the FASTA record for an accession, retrying on HTTP errors."""
    while True:
        try:
            print("Extracting the fasta sequence for the nc_accession:", nc_accession)
            handle = Entrez.efetch(db="nucleotide", id=nc_accession, rettype="fasta", retmode="text")
            return handle.read()
        except HTTPException:
            # Transient NCBI failure: loop around and retry the fetch.
            print("Trying again ...")
def save_nc_accession(nc_accession, record):
    """Write the downloaded record to '<accession>.txt' in the working directory."""
    out_path = nc_accession.rstrip() + ".txt"
    with open(out_path, 'w') as out_handle:
        out_handle.write("%s" % record)
def split_fasta_file(gene, geneid, nc_accession, chromosome, start_end):
    """Extract a padded gene region (50kb upstream, 10kb downstream) to a FASTA file.

    Writes '<start>_<end>.fasta' with a pipe-delimited identifier line built
    from the gene metadata. Returns True on success, False on failure.
    """
    filename = None
    try:
        start, end = start_end.split("..")
        filename = start + "_" + end + ".fasta"
        start = int(start)
        end = int(end)
        orig_start = start
        # Pad the window around the gene coordinates.
        start = start - 50000
        end = end + 10000
        if start < 0:
            # Can't pad upstream past the beginning of the contig.
            start = orig_start
        genes = Fasta(nc_accession + ".txt")
        title = list(genes.keys())[0]
        region = genes[title][start:end]
        save_title = ">" + gene + "|" + geneid + "|" + nc_accession + "|" + chromosome + "|" \
            + str(region.start) + "|" + str(region.end) + "\n"
        # Bug fix: file handle was previously never closed.
        with open(filename, 'w') as writefile:
            writefile.write(save_title)
            writefile.write(str(region))
        return True
    except Exception as e:
        # Bug fix: the original bare 'except' re-opened 'filename', which could
        # itself raise NameError/IOError when the failure happened before the
        # filename was assigned or the file was written.
        print("There is some problem in this file", filename)
        print("Error was:", e)
        return False
def fetch_name_by_pattern(pattern):
    """Map a fuzznuc motif pattern to its OCT4 site name (KeyError if unknown)."""
    motif_names = {
        'CTTTGTTAT[GT][TC][TA][ATC]AT': 'FGF4_OCT4',
        'CATTGTGAT[GT][TC][TA][ATC]AT': 'SOX2_OCT4',
        'CAGGACGAT[GT][TC][TA][ATC]AT': 'MUTANT_GGAC_OCT4',
        'CAGGGTGAT[GT][TC][TA][ATC]AT': 'MUTANT_GG_OCT4',
        'TTTTGTAAT[GT][TC][TA][ATC]AT': 'MUTANT_TT_OCT4',
        'GATTGTCAT[GT][TC][TA][ATC]AT': 'MUTANT_GA_OCT4',
        'CATTACGAT[GT][TC][TA][ATC]AT': 'MUTANT_AC_OCT4',
        'ATTTGTAAT[GT][TC][TA][ATC]AT': 'DPPA4_OCT4',
    }
    return motif_names[pattern]
def run_fuzz(filename, pattern):
    """Run EMBOSS fuzznuc on '<filename>.fasta' for a motif; returns the report name."""
    fasta_filename = filename + ".fasta"
    motif = fetch_name_by_pattern(str(pattern))
    out_name = motif + '_' + filename + '_fuzz_output' + '.txt'
    cmd = ['fuzznuc', '-sequence', fasta_filename, '-pattern', pattern, '-outfile', out_name]
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    proc.communicate()
    return out_name
def filename_with_hits(filename):
    """True when a fuzznuc report contains both '# Sequence' and '# HitCount' markers."""
    with open(filename, 'r') as report:
        contents = report.read()
    has_sequence = re.search(r"\# Sequence", contents)
    has_hitcount = re.search(r"\# HitCount", contents)
    return bool(has_sequence and has_hitcount)
def format_fuzz_results(filename):
    """Parse a fuzznuc report for sequence coordinates and the first hit.

    Returns (start_sequence, end_sequence, hitcount, start, end, pattern1,
    pattern2) as strings. Raises (NameError) when the report lacks the
    expected '# Sequence' / '# HitCount' / hit-table sections.
    """
    # Bug fix: the file handle was never closed; use a context manager.
    with open(filename, 'r') as f:
        lines = f.readlines()
    for index, line in enumerate(lines):
        line = line.rstrip('\r\n')
        if line.startswith('# Sequence'):
            # "from: N to: M" span of the scanned sequence.
            span = re.search(r'\sfrom\:\s\d\s+to\:\s\d+', line).group(0)
            numbers = re.findall(r'\d+', span)
            start_sequence = numbers[0]
            end_sequence = numbers[1]
        elif line.startswith('# HitCount'):
            hitcount = re.search(r'\d+', line).group(0)
        elif 'Start' in line:
            # Bug fix: removed the original 'line = line[index]', which
            # indexed a single character (dead value, and an IndexError for
            # short header lines). The first data row follows the header.
            fields = lines[index + 1].split()
            fields[3] = fields[3].replace('pattern:', '')
            start = fields[0]
            end = fields[1]
            pattern1 = fields[3]
            pattern2 = fields[5]
    return start_sequence, end_sequence, hitcount, start, end, pattern1, pattern2
# Main driver: for every gene name in gene.txt, resolve its Entrez id,
# download the surrounding genomic region, scan it with fuzznuc for each
# OCT4 motif pattern, and record hits to gene_information_file.
with open("gene.txt") as genes:
    for gene in genes:
        gene = gene.strip()
        geneid = fetch_gene_ids(gene, species_name)
        if geneid is not None:
            gene_information = extract_gene_information(geneid)
            # NOTE(review): the record is parsed twice here; the first call's
            # result could be reused instead of calling format_gene_record again.
            if_data = format_gene_record(gene_information)
            if if_data is not None:
                nc_accession, chromosome, start_end = format_gene_record(gene_information)
                check_any_empty_values = check_length_gene_record(nc_accession,chromosome,start_end)
                if check_any_empty_values:
                    # Skip accessions whose sequence was already downloaded.
                    check_repeat = check_unique(nc_accession)
                    if check_repeat:
                        record = extract_fasta_sequence(nc_accession)
                        if record:
                            save_nc_accession(nc_accession, record)
                            # Extract the padded gene region into its own FASTA file.
                            data = split_fasta_file(gene, geneid, nc_accession, chromosome, start_end)
                            if data:
                                for pattern in patterns:
                                    filename = start_end.replace('..', '_')
                                    print ("Running fuzz now for the pattern", pattern)
                                    out_name = run_fuzz(filename, pattern)
                                    if filename_with_hits(out_name):
                                        print ("This is having some results",out_name)
                                        start_sequence, end_sequence, hitcount, start, end, pattern1, pattern2 = format_fuzz_results(
                                            out_name)
                                        pattern1 = fetch_name_by_pattern(pattern1)
                                        print (gene, geneid, nc_accession, chromosome, start_end, \
                                               hitcount, start, end, pattern1, pattern2 + "\n")
                                        gene_information_file.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}".format \
                                            (gene, geneid, nc_accession, chromosome, start_end, \
                                             hitcount, start, end, pattern1, pattern2 + "\n"))
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes functions with multiple returns to use just one."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
BODY_DEFINITELY_RETURNS = 'BODY_DEFINITELY_RETURNS'
ORELSE_DEFINITELY_RETURNS = 'ORELSE_DEFINITELY_RETURNS'
STMT_DEFINITELY_RETURNS = 'STMT_DEFINITELY_RETURNS'
class _RewriteBlock(object):
def __init__(self):
self.definitely_returns = False
class ConditionalReturnRewriter(converter.Base):
  """Rewrites a pattern where it's unobvious that all paths return a value.

  This rewrite allows avoiding intermediate None return values.

  The following pattern:

      if cond:
        <block 1>
        return
      else:
        <block 2>
      <block 3>

  is converted to:

      if cond:
        <block 1>
        return
      else:
        <block 2>
        <block 3>

  and vice-versa (if the else returns, subsequent statements are moved under the
  if branch).
  """

  def visit_Return(self, node):
    # A return makes the innermost tracked block definitely-returning.
    self.state[_RewriteBlock].definitely_returns = True
    return node

  def _postprocess_statement(self, node):
    """After-visit hook: propagates return info and collapses if/return patterns."""
    # If the node definitely returns (e.g. it's a with statement with a
    # return statement in it), then the current block also definitely returns.
    if anno.getanno(node, STMT_DEFINITELY_RETURNS, default=False):
      self.state[_RewriteBlock].definitely_returns = True
    # The special case: collapse a typical conditional return pattern into
    # a single conditional with possibly returns on both branches. This
    # reduces the use of None return values, which don't work with TF
    # conditionals.
    if (isinstance(node, gast.If)
        and anno.getanno(node, BODY_DEFINITELY_RETURNS, default=False)):
      return node, node.orelse
    elif (isinstance(node, gast.If)
          and anno.getanno(node, ORELSE_DEFINITELY_RETURNS, default=False)):
      return node, node.body
    return node, None

  def _visit_statement_block(self, node, nodes):
    """Visits a statement list under a fresh block scope.

    Returns (new_nodes, block_definitely_returns).
    """
    self.state[_RewriteBlock].enter()
    new_nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    block_definitely_returns = self.state[_RewriteBlock].definitely_returns
    self.state[_RewriteBlock].exit()
    return new_nodes, block_definitely_returns

  def visit_While(self, node):
    # Loop bodies may not execute, so their return status is discarded.
    node.test = self.visit(node.test)
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    return node

  def visit_For(self, node):
    # Loop bodies may not execute, so their return status is discarded.
    node.iter = self.visit(node.iter)
    node.target = self.visit(node.target)
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    return node

  def visit_With(self, node):
    # A with statement returns iff its body definitely returns.
    node.items = self.visit_block(node.items)
    node.body, definitely_returns = self._visit_statement_block(node, node.body)
    if definitely_returns:
      anno.setanno(node, STMT_DEFINITELY_RETURNS, True)
    return node

  def visit_Try(self, node):
    # We could decide whether a 'try' DEFINITELY_RETURNS based on its components
    # It is not clear whether we want to do anything with this given
    # a 'try' is likely to throw an exception in some circumstances.
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    node.finalbody, _ = self._visit_statement_block(node, node.finalbody)
    node.handlers = self.visit_block(node.handlers)
    return node

  def visit_ExceptHandler(self, node):
    # To determine whether `try` DEFINITELY_RETURNS we need to revisit this.
    node.body, _ = self._visit_statement_block(node, node.body)
    return node

  def visit_If(self, node):
    # Annotate each branch's return status; if both return, so does the
    # enclosing block.
    node.test = self.visit(node.test)
    node.body, body_definitely_returns = self._visit_statement_block(
        node, node.body)
    if body_definitely_returns:
      anno.setanno(node, BODY_DEFINITELY_RETURNS, True)
    node.orelse, orelse_definitely_returns = self._visit_statement_block(
        node, node.orelse)
    if orelse_definitely_returns:
      anno.setanno(node, ORELSE_DEFINITELY_RETURNS, True)
    if body_definitely_returns and orelse_definitely_returns:
      self.state[_RewriteBlock].definitely_returns = True
    return node

  def visit_FunctionDef(self, node):
    # Entry point per function: rewrite the whole body.
    node.args = self.visit(node.args)
    node.body, _ = self._visit_statement_block(node, node.body)
    return node
class _Block(object):
def __init__(self):
self.is_function = False
self.return_used = False
self.create_guard_next = False
self.create_guard_now = False
def __repr__(self):
return 'used: {}'.format(
self.return_used)
class _Function(object):
def __init__(self):
self.do_return_var_name = None
self.retval_var_name = None
def __repr__(self):
return 'return control: {}, return value: {}'.format(
self.do_return_var_name, self.retval_var_name)
class ReturnStatementsTransformer(converter.Base):
"""Lowers return statements into variables and conditionals.
Specifically, the following pattern:
<block 1>
return val
<block 2>
is converted to:
do_return = False
retval = None
<block 1>
do_return = True
retval = val
if not do_return:
<block 2>
return retval
The conversion adjusts loops as well:
<block 1>
while cond:
<block 2>
return retval
is converted to:
<block 1>
while not do_return and cond:
<block 2>
do_return = True
retval = val
"""
  def __init__(self, ctx, allow_missing_return):
    # allow_missing_return: when True, functions where some paths lack a
    # return statement are tolerated rather than treated as errors.
    super(ReturnStatementsTransformer, self).__init__(ctx)
    self.allow_missing_return = allow_missing_return
  def visit_Return(self, node):
    """Lowers `return` into flag/value assignments guarded against exceptions."""
    # Mark every enclosing block up to the function root as containing a
    # return, and arm guard creation for statements that follow.
    for block in reversed(self.state[_Block].stack):
      block.return_used = True
      block.create_guard_next = True
      if block.is_function:
        break
    retval = node.value if node.value else parser.parse_expression('None')
    # Note: If `return <expr> raises, then the return is aborted.
    # The try-catch below ensures the variables remain consistent in that case.
    template = """
      try:
        do_return_var_name = True
        retval_var_name = retval
      except:
        do_return_var_name = False
        raise
    """
    node = templates.replace(
        template,
        do_return_var_name=self.state[_Function].do_return_var_name,
        retval_var_name=self.state[_Function].retval_var_name,
        retval=retval)
    return node
  def _postprocess_statement(self, node):
    """After-visit hook: wraps statements that follow a return in a guard.

    Returns (possibly-wrapped node, block to continue appending into or None).
    """
    if not self.state[_Block].return_used:
      return node, None
    state = self.state[_Block]
    if state.create_guard_now:
      # This statement follows a lowered return; only execute it when the
      # return flag has not been set.
      template = """
        if not do_return_var_name:
          original_node
      """
      cond, = templates.replace(
          template,
          do_return_var_name=self.state[_Function].do_return_var_name,
          original_node=node)
      node, block = cond, cond.body
    else:
      node, block = node, None
    # A guard armed by this statement takes effect starting from the next one.
    state.create_guard_now = state.create_guard_next
    state.create_guard_next = False
    return node, block
  def _visit_statement_block(self, node, nodes):
    """Visits a statement list under a fresh _Block scope."""
    self.state[_Block].enter()
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    self.state[_Block].exit()
    return nodes
  def visit_While(self, node):
    """Visits a while loop, stopping iteration once a return has executed."""
    node.test = self.visit(node.test)
    # Add the check for return to the loop condition.
    node.body = self._visit_statement_block(node, node.body)
    if self.state[_Block].return_used:
      # The loop exits as soon as the return control variable becomes True.
      node.test = templates.replace_as_expression(
          'not control_var and test',
          test=node.test,
          control_var=self.state[_Function].do_return_var_name)
    node.orelse = self._visit_statement_block(node, node.orelse)
    return node
  def visit_For(self, node):
    """Transforms a for loop so a return inside it stops iteration."""
    node.iter = self.visit(node.iter)
    node.target = self.visit(node.target)

    # Add the check for return to the loop condition.
    node.body = self._visit_statement_block(node, node.body)
    if self.state[_Block].return_used:
      # For loops have no test expression; the early-exit condition is
      # communicated to the loop converter via the EXTRA_LOOP_TEST
      # annotation, combined with any pre-existing extra test.
      extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
      if extra_test is not None:
        extra_test = templates.replace_as_expression(
            'not control_var and extra_test',
            extra_test=extra_test,
            control_var=self.state[_Function].do_return_var_name)
      else:
        extra_test = templates.replace_as_expression(
            'not control_var',
            control_var=self.state[_Function].do_return_var_name)
      anno.setanno(node, anno.Basic.EXTRA_LOOP_TEST, extra_test)

    node.orelse = self._visit_statement_block(node, node.orelse)
    return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_statement_block(node, node.body)
return node
def visit_Try(self, node):
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
node.finalbody = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
node.body = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
  def visit_FunctionDef(self, node):
    """Lowers all returns in a function to a single return at the end."""
    with self.state[_Function] as fn:
      with self.state[_Block] as block:
        block.is_function = True

        scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
        # Fresh symbols guaranteed not to collide with names the function
        # already references.
        do_return_var_name = self.ctx.namer.new_symbol('do_return',
                                                       scope.referenced)
        retval_var_name = self.ctx.namer.new_symbol('retval_', scope.referenced)
        fn.do_return_var_name = do_return_var_name
        fn.retval_var_name = retval_var_name

        node.body = self._visit_statement_block(node, node.body)

        if block.return_used:

          if self.allow_missing_return:
            # The function would have a single `with` node that wraps the
            # entire body. If the function had a docstring, the body has two
            # nodes, with the `with` as the second node.
            wrapper_node = node.body[-1]
            assert isinstance(wrapper_node, gast.With), (
                'This transformer requires the functions converter.')

            # Initialize the flag/value pair up front so a function that
            # falls off the end returns an UndefinedReturnValue marker.
            template = """
              do_return_var_name = False
              retval_var_name = ag__.UndefinedReturnValue()
              body
              return function_context.ret(retval_var_name, do_return_var_name)
            """

            wrapper_node.body = templates.replace(
                template,
                body=wrapper_node.body,
                do_return_var_name=do_return_var_name,
                function_context=anno.getanno(node, 'function_context_name'),
                retval_var_name=retval_var_name)
          else:
            # Missing returns are not allowed: simply return the collected
            # value at the end.
            template = """
              body
              return retval_var_name
            """
            node.body = templates.replace(
                template,
                body=node.body,
                do_return_var_name=do_return_var_name,
                retval_var_name=retval_var_name)
    return node
def transform(node, ctx, default_to_null_return=True):
  """Ensure a function has only a single return, at the end.

  Args:
    node: AST to transform.
    ctx: transformer context.
    default_to_null_return: bool, whether functions may finish without an
      explicit return (a null return value is then supplied).

  Returns:
    The transformed AST.
  """
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)

  # Note: Technically, these two could be merged into a single walk, but
  # keeping them separate helps with readability.
  node = ConditionalReturnRewriter(ctx).visit(node)

  # Re-run the static analyses: the rewrite above restructures the tree.
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  rewriter = ReturnStatementsTransformer(
      ctx, allow_missing_return=default_to_null_return)
  return rewriter.visit(node)
| |
"""
Periodically update bundled versions.
"""
from __future__ import absolute_import, unicode_literals
import json
import logging
import os
import ssl
import subprocess
import sys
from datetime import datetime, timedelta
from itertools import groupby
from shutil import copy2
from textwrap import dedent
from threading import Thread
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from virtualenv.app_data import AppDataDiskFolder
from virtualenv.info import PY2
from virtualenv.util.path import Path
from virtualenv.util.subprocess import CREATE_NO_WINDOW, Popen
from ..wheels.embed import BUNDLE_SUPPORT
from ..wheels.util import Wheel
if PY2:
    # on Python 2 datetime.strptime throws the error below if the import did not trigger on main thread:
    #   "Failed to import _strptime because the import lock is held by" another thread
    try:
        import _strptime  # noqa
    except ImportError:  # pragma: no cov
        pass  # pragma: no cov

GRACE_PERIOD_CI = timedelta(hours=1)  # prevent version switch in the middle of a CI run
GRACE_PERIOD_MINOR = timedelta(days=28)  # hold back freshly released versions (see NewVersion.use)
UPDATE_PERIOD = timedelta(days=14)  # how often a completed periodic update may be repeated
UPDATE_ABORTED_DELAY = timedelta(hours=1)  # retry delay after an update run that never completed
def periodic_update(distribution, of_version, for_py_version, wheel, search_dirs, app_data, do_periodic_update, env):
    """Return the wheel to seed with, preferring periodically updated ones.

    When of_version is None the newest usable patch of each minor release is
    considered; otherwise the exact requested version is looked up in the
    update log. Falls back to the given wheel when nothing better is found.
    """
    if do_periodic_update:
        handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data, env)

    now = datetime.now()

    def _load(entry):
        result = Wheel(app_data.house / entry.filename)
        logging.debug("using %supdated wheel %s", "periodically " if result else "", result)
        return result

    u_log = UpdateLog.from_app_data(app_data, distribution, for_py_version)
    if of_version is not None:
        # an exact version was requested - serve it from the log if known
        for entry in u_log.versions:
            if entry.wheel.version == of_version:
                return _load(entry)
    else:
        by_minor = groupby(u_log.versions, key=lambda v: v.wheel.version_tuple[0:2])
        for _, patches in by_minor:
            # use only latest patch version per minor, earlier assumed to be buggy
            candidates = list(patches)
            skip_minor_grace = any(c for c in candidates if c.use(now))
            for candidate in candidates:
                if wheel is not None and Path(candidate.filename).name == wheel.name:
                    return wheel
                if candidate.use(now, skip_minor_grace):
                    return _load(candidate)
    return wheel
def handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data, env):
    """Kick off a background update when the update log says one is due."""
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    if not u_log.needs_update:
        return
    # record the attempt before spawning, so a concurrent run does not
    # trigger a second update
    u_log.periodic = True
    u_log.started = datetime.now()
    embed_update_log.write(u_log.to_dict())
    trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, periodic=True, env=env)
def add_wheel_to_update_log(wheel, for_py_version, app_data):
    """Record a freshly acquired wheel in the per-distribution update log."""
    embed_update_log = app_data.embed_update_log(wheel.distribution, for_py_version)
    logging.debug("adding %s information to %s", wheel.name, embed_update_log.file)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    already_known = any(version.filename == wheel.name for version in u_log.versions)
    if already_known:
        logging.warning("%s already present in %s", wheel.name, embed_update_log.file)
        return
    # we don't need a release date for sources other than "periodic"
    entry = NewVersion(wheel.name, datetime.now(), None, "download")
    u_log.versions.append(entry)  # always write at the end for proper updates
    embed_update_log.write(u_log.to_dict())
DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"  # timestamp serialization format for the update log


def dump_datetime(value):
    """Serialize a datetime (or None) to its update-log string form."""
    if value is None:
        return None
    return value.strftime(DATETIME_FMT)


def load_datetime(value):
    """Parse an update-log string (or None) back into a datetime."""
    if value is None:
        return None
    return datetime.strptime(value, DATETIME_FMT)
class NewVersion(object):
    """A single wheel version discovered for a distribution.

    ``source`` is one of "manual", "periodic" or "download" and determines
    the grace-period rules applied by :meth:`use`.
    """

    def __init__(self, filename, found_date, release_date, source):
        self.filename = filename  # wheel file name, e.g. pip-21.0-py3-none-any.whl
        self.found_date = found_date  # when this version was first seen locally
        self.release_date = release_date  # upstream release date, may be None
        self.source = source  # how the version was obtained

    @classmethod
    def from_dict(cls, dictionary):
        """Rebuild an instance from its to_dict() representation."""
        return cls(
            filename=dictionary["filename"],
            found_date=load_datetime(dictionary["found_date"]),
            release_date=load_datetime(dictionary["release_date"]),
            source=dictionary["source"],
        )

    def to_dict(self):
        """Serialize into a JSON-friendly dict (inverse of from_dict)."""
        return {
            "filename": self.filename,
            "release_date": dump_datetime(self.release_date),
            "found_date": dump_datetime(self.found_date),
            "source": self.source,
        }

    def use(self, now, ignore_grace_period_minor=False, ignore_grace_period_ci=False):
        """Return True if this version may be adopted at time ``now``.

        Manual versions are always usable. Periodic versions must be past
        the CI grace period and, unless ``ignore_grace_period_minor``, past
        the minor-release grace period as well. Other sources are never
        adopted via this check.
        """
        if self.source == "manual":
            return True
        elif self.source == "periodic":
            if self.found_date < now - GRACE_PERIOD_CI or ignore_grace_period_ci:
                if not ignore_grace_period_minor:
                    compare_from = self.release_date or self.found_date
                    return now - compare_from >= GRACE_PERIOD_MINOR
                return True
        return False

    def __repr__(self):
        # bugfix: the format string previously read "(filename={}),"
        # with an unbalanced parenthesis, producing a malformed repr
        return "{}(filename={}, found_date={}, release_date={}, source={})".format(
            self.__class__.__name__,
            self.filename,
            self.found_date,
            self.release_date,
            self.source,
        )

    def __eq__(self, other):
        return type(self) == type(other) and all(
            getattr(self, k) == getattr(other, k) for k in ["filename", "release_date", "found_date", "source"]
        )

    def __ne__(self, other):  # needed for Python 2 equality semantics
        return not (self == other)

    @property
    def wheel(self):
        return Wheel(Path(self.filename))
class UpdateLog(object):
    """Persisted record of discovered wheel versions and update runs."""

    def __init__(self, started, completed, versions, periodic):
        self.started = started  # when the last update run began, may be None
        self.completed = completed  # when the last update run finished, may be None
        self.versions = versions  # list of NewVersion entries
        self.periodic = periodic  # True when the last run was a background one

    @classmethod
    def from_dict(cls, dictionary):
        """Rebuild from the serialized form; tolerates a missing payload."""
        data = {} if dictionary is None else dictionary
        entries = [NewVersion.from_dict(v) for v in data.get("versions", [])]
        return cls(
            load_datetime(data.get("started")),
            load_datetime(data.get("completed")),
            entries,
            data.get("periodic"),
        )

    @classmethod
    def from_app_data(cls, app_data, distribution, for_py_version):
        """Load the update log stored in the application data folder."""
        stored = app_data.embed_update_log(distribution, for_py_version).read()
        return cls.from_dict(stored)

    def to_dict(self):
        """Serialize into a JSON-friendly dict."""
        return {
            "started": dump_datetime(self.started),
            "completed": dump_datetime(self.completed),
            "periodic": self.periodic,
            "versions": [entry.to_dict() for entry in self.versions],
        }

    @property
    def needs_update(self):
        """True when enough time passed for a new periodic update run."""
        now = datetime.now()
        recently_completed = (
            self.completed is not None and now - self.completed <= UPDATE_PERIOD
        )
        if recently_completed:
            return False
        return self._check_start(now)

    def _check_start(self, now):
        # a run that started but never completed is retried after a delay
        return self.started is None or now - self.started > UPDATE_ABORTED_DELAY
def trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, env, periodic):
    """Spawn a detached child process that runs do_update for the distribution.

    The child re-invokes the current interpreter with a small bootstrap
    script. Its output is suppressed unless the
    _VIRTUALENV_PERIODIC_UPDATE_INLINE environment variable is "1", in
    which case the parent also waits for the child to finish.
    """
    wheel_path = None if wheel is None else str(wheel.path)
    cmd = [
        sys.executable,
        "-c",
        dedent(
            """
        from virtualenv.report import setup_report, MAX_LEVEL
        from virtualenv.seed.wheels.periodic_update import do_update
        setup_report(MAX_LEVEL, show_pid=True)
        do_update({!r}, {!r}, {!r}, {!r}, {!r}, {!r})
        """,
        )
        .strip()
        .format(distribution, for_py_version, wheel_path, str(app_data), [str(p) for p in search_dirs], periodic),
    ]
    debug = env.get(str("_VIRTUALENV_PERIODIC_UPDATE_INLINE")) == str("1")
    pipe = None if debug else subprocess.PIPE
    kwargs = {"stdout": pipe, "stderr": pipe}
    if not debug and sys.platform == "win32":
        # avoid flashing a console window for the background child
        kwargs["creationflags"] = CREATE_NO_WINDOW
    process = Popen(cmd, **kwargs)
    logging.info(
        "triggered periodic upgrade of %s%s (for python %s) via background process having PID %d",
        distribution,
        "" if wheel is None else "=={}".format(wheel.version),
        for_py_version,
        process.pid,
    )
    if debug:
        # in debug mode wait for the child; otherwise it is deliberately
        # left running as a background process
        process.communicate()
def do_update(distribution, for_py_version, embed_filename, app_data, search_dirs, periodic):
    """Run the update for a single distribution and report the outcome.

    Returns the list of newly found versions, or None when the run failed.
    """
    found = None
    try:
        found = _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs)
    finally:
        # log even when _run_do_update raised, so aborted runs stay visible
        logging.debug("done %s %s with %s", distribution, for_py_version, found)
    return found
def _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs):
    """Download newer wheels for the distribution and rewrite the update log.

    Returns:
        The list of NewVersion entries discovered during this run.
    """
    from virtualenv.seed.wheels import acquire

    wheel_filename = None if embed_filename is None else Path(embed_filename)
    embed_version = None if wheel_filename is None else Wheel(wheel_filename).version_tuple
    # accept either paths/strings or already constructed objects
    app_data = AppDataDiskFolder(app_data) if isinstance(app_data, str) else app_data
    search_dirs = [Path(p) if isinstance(p, str) else p for p in search_dirs]
    wheelhouse = app_data.house
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    now = datetime.now()

    # split previously recorded versions by how they were obtained
    update_versions, other_versions = [], []
    for version in u_log.versions:
        if version.source in {"periodic", "manual"}:
            update_versions.append(version)
        else:
            other_versions.append(version)

    if periodic:
        source = "periodic"
    else:
        source = "manual"
        # mark the most recent one as source "manual"
        if update_versions:
            update_versions[0].source = source

    if wheel_filename is not None:
        dest = wheelhouse / wheel_filename.name
        if not dest.exists():
            copy2(str(wheel_filename), str(wheelhouse))
    # download progressively older releases until one is immediately usable
    # (CI grace period ignored), or until nothing new / already-known is found
    last, last_version, versions, filenames = None, None, [], set()
    while last is None or not last.use(now, ignore_grace_period_ci=True):
        download_time = datetime.now()
        dest = acquire.download_wheel(
            distribution=distribution,
            version_spec=None if last_version is None else "<{}".format(last_version),
            for_py_version=for_py_version,
            search_dirs=search_dirs,
            app_data=app_data,
            to_folder=wheelhouse,
            env=os.environ,
        )
        if dest is None or (update_versions and update_versions[0].filename == dest.name):
            break
        release_date = release_date_for_wheel_path(dest.path)
        last = NewVersion(filename=dest.path.name, release_date=release_date, found_date=download_time, source=source)
        logging.info("detected %s in %s", last, datetime.now() - download_time)
        versions.append(last)
        filenames.add(last.filename)
        last_wheel = last.wheel
        last_version = last_wheel.version
        if embed_version is not None:
            if embed_version >= last_wheel.version_tuple:  # stop download if we reach the embed version
                break
    u_log.periodic = periodic
    if not u_log.periodic:
        u_log.started = now
    # update other_versions by removing version we just found
    other_versions = [version for version in other_versions if version.filename not in filenames]
    u_log.versions = versions + update_versions + other_versions
    u_log.completed = datetime.now()
    embed_update_log.write(u_log.to_dict())
    return versions
def release_date_for_wheel_path(dest):
    """Best-effort lookup of the upstream release date for a wheel file."""
    wheel = Wheel(dest)
    # the most accurate is to ask PyPi - e.g. https://pypi.org/pypi/pip/json,
    # see https://warehouse.pypa.io/api-reference/json/ for more details
    content = _pypi_get_distribution_info_cached(wheel.distribution)
    if content is None:
        return None
    try:
        upload_time = content["releases"][wheel.version][0]["upload_time"]
        return datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S")
    except Exception as exception:
        logging.error("could not load release date %s because %r", content, exception)
    return None
def _request_context():
yield None
# fallback to non verified HTTPS (the information we request is not sensitive, so fallback)
yield ssl._create_unverified_context() # noqa
_PYPI_CACHE = {}
def _pypi_get_distribution_info_cached(distribution):
if distribution not in _PYPI_CACHE:
_PYPI_CACHE[distribution] = _pypi_get_distribution_info(distribution)
return _PYPI_CACHE[distribution]
def _pypi_get_distribution_info(distribution):
content, url = None, "https://pypi.org/pypi/{}/json".format(distribution)
try:
for context in _request_context():
try:
with urlopen(url, context=context) as file_handler:
content = json.load(file_handler)
break
except URLError as exception:
logging.error("failed to access %s because %r", url, exception)
except Exception as exception:
logging.error("failed to access %s because %r", url, exception)
return content
def manual_upgrade(app_data, env):
    """Upgrade every bundled wheel for every supported python, in parallel."""
    workers = []
    # load extra search dir for the given for_py
    for for_py_version, distribution_to_package in BUNDLE_SUPPORT.items():
        for distribution in distribution_to_package.keys():
            worker = Thread(target=_run_manual_upgrade, args=(app_data, distribution, for_py_version, env))
            worker.start()
            workers.append(worker)
    for worker in workers:
        worker.join()
def _run_manual_upgrade(app_data, distribution, for_py_version, env):
    """Upgrade one distribution for one python version, logging progress."""
    start = datetime.now()
    from .bundle import from_bundle

    # determine the wheel currently in use as the baseline for the update
    current = from_bundle(
        distribution=distribution,
        version=None,
        for_py_version=for_py_version,
        search_dirs=[],
        app_data=app_data,
        do_periodic_update=False,
        env=env,
    )
    logging.warning(
        "upgrade %s for python %s with current %s",
        distribution,
        for_py_version,
        "" if current is None else current.name,
    )
    # NOTE(review): the log line above tolerates current being None, but
    # current.path below would then raise AttributeError - confirm
    # from_bundle always returns a wheel here.
    versions = do_update(
        distribution=distribution,
        for_py_version=for_py_version,
        embed_filename=current.path,
        app_data=app_data,
        search_dirs=[],
        periodic=False,
    )
    msg = "upgraded %s for python %s in %s {}".format(
        "new entries found:\n%s" if versions else "no new versions found",
    )
    args = [
        distribution,
        for_py_version,
        datetime.now() - start,
    ]
    if versions:
        args.append("\n".join("\t{}".format(v) for v in versions))
    logging.warning(msg, *args)
# explicit public API of this module
__all__ = (
    "add_wheel_to_update_log",
    "periodic_update",
    "do_update",
    "manual_upgrade",
    "NewVersion",
    "UpdateLog",
    "load_datetime",
    "dump_datetime",
    "trigger_update",
    "release_date_for_wheel_path",
)
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import stat
import unittest
import mock
from catapult_base import dependency_manager
from catapult_base import cloud_storage
from catapult_base.dependency_manager import exceptions
class DependencyManagerTest(unittest.TestCase):
@mock.patch(
'catapult_base.dependency_manager.DependencyManager._UpdateDependencies')
def testInit(self, update_mock):
self.assertRaises(ValueError, dependency_manager.DependencyManager, None)
self.assertFalse(update_mock.call_args)
self.assertRaises(ValueError, dependency_manager.DependencyManager,
'config_file?')
self.assertFalse(update_mock.call_args)
dependency_manager.DependencyManager([])
self.assertFalse(update_mock.call_args)
dependency_manager.DependencyManager(['config_file'])
update_mock.called_once_with_args('config_file')
update_mock.reset_mock()
dependency_manager.DependencyManager(
['config_file1', 'config_file2', 'config_file3', 'config_file4'])
expected_calls = [mock.call('config_file1'), mock.call('config_file2'),
mock.call('config_file3'), mock.call('config_file4')]
update_mock.assert_has_calls(expected_calls, any_order=True)
update_mock.reset_mock()
  @mock.patch('os.path')
  @mock.patch('catapult_base.support_binaries.FindPath')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._GetDependencyInfo')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._CloudStoragePath')
  @mock.patch('catapult_base.dependency_manager.DependencyManager._LocalPath')
  def testFetchPathSupportBinaries(self, local_path_mock, cs_path_mock,
                                   dep_info_mock, sb_find_path_mock, path_mock):
    """FetchPath falls back to support_binaries for unknown dependencies."""
    dep_manager = dependency_manager.DependencyManager([])
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(cs_path_mock.call_args)
    self.assertFalse(sb_find_path_mock.call_args)
    sb_path = 'sb_path'
    local_path = 'local_path'
    cs_path = 'cs_path'
    dep_info = 'dep_info'
    local_path_mock.return_value = local_path
    cs_path_mock.return_value = cs_path
    sb_find_path_mock.return_value = sb_path
    dep_info_mock.return_value = dep_info

    # Empty lookup_dict
    found_path = dep_manager.FetchPath('dep', 'plat_arch_x86')
    self.assertEqual(sb_path, found_path)
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(cs_path_mock.call_args)
    # the platform string 'plat_arch_x86' is split into 'plat' + 'arch_x86'
    sb_find_path_mock.assert_called_once_with('dep', 'arch_x86', 'plat')
    local_path_mock.reset_mock()
    cs_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()

    # Non-empty lookup dict that doesn't contain the dependency we're looking
    # for.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    found_path = dep_manager.FetchPath('dep', 'plat_arch_x86')
    self.assertEqual(sb_path, found_path)
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(cs_path_mock.call_args)
    sb_find_path_mock.assert_called_once_with('dep', 'arch_x86', 'plat')
    local_path_mock.reset_mock()
    cs_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()
  @mock.patch('os.path')
  @mock.patch('catapult_base.support_binaries.FindPath')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._GetDependencyInfo')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._CloudStoragePath')
  @mock.patch('catapult_base.dependency_manager.DependencyManager._LocalPath')
  def testFetchPathLocalFile(self, local_path_mock, cs_path_mock, dep_info_mock,
                             sb_find_path_mock, path_mock):
    """FetchPath returns the local path when it is known and exists on disk."""
    dep_manager = dependency_manager.DependencyManager([])
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(cs_path_mock.call_args)
    self.assertFalse(sb_find_path_mock.call_args)
    sb_path = 'sb_path'
    local_path = 'local_path'
    cs_path = 'cs_path'
    dep_info = 'dep_info'
    local_path_mock.return_value = local_path
    cs_path_mock.return_value = cs_path
    sb_find_path_mock.return_value = sb_path
    dep_info_mock.return_value = dep_info

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path exists.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    path_mock.exists.return_value = True
    found_path = dep_manager.FetchPath('dep1', 'plat')
    self.assertEqual(local_path, found_path)
    local_path_mock.assert_called_with('dep_info')
    self.assertFalse(cs_path_mock.call_args)
    self.assertFalse(sb_find_path_mock.call_args)
    # If the below assert fails, the ordering assumption that determined the
    # path_mock return values is incorrect, and should be updated.
    path_mock.exists.assert_called_once_with('local_path')
    local_path_mock.reset_mock()
    cs_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()
  @mock.patch('os.path')
  @mock.patch('catapult_base.support_binaries.FindPath')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._GetDependencyInfo')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._CloudStoragePath')
  @mock.patch('catapult_base.dependency_manager.DependencyManager._LocalPath')
  def testFetchPathRemoteFile(self, local_path_mock, cs_path_mock,
                              dep_info_mock, sb_find_path_mock, path_mock):
    """FetchPath downloads from cloud storage when no local copy is usable."""
    dep_manager = dependency_manager.DependencyManager([])
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(cs_path_mock.call_args)
    self.assertFalse(sb_find_path_mock.call_args)
    local_path = 'local_path'
    cs_path = 'cs_path'
    dep_info = 'dep_info'
    cs_path_mock.return_value = cs_path
    dep_info_mock.return_value = dep_info

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path doesn't exist, but cloud_storage_path is downloaded.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    # first exists() call is the local path (False), second the cs path (True)
    path_mock.exists.side_effect = [False, True]
    local_path_mock.return_value = local_path
    found_path = dep_manager.FetchPath('dep1', 'plat')
    self.assertEqual(cs_path, found_path)
    local_path_mock.assert_called_with(dep_info)
    cs_path_mock.assert_called_once_with(dep_info)
    self.assertFalse(sb_find_path_mock.call_args)
    # If the below assert fails, the ordering assumption that determined the
    # path_mock return values is incorrect, and should be updated.
    path_mock.exists.assert_has_calls([mock.call(local_path),
                                       mock.call(cs_path)], any_order=False)
    local_path_mock.reset_mock()
    cs_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path isn't found, but cloud_storage_path is downloaded.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    path_mock.exists.side_effect = [True]
    local_path_mock.return_value = None
    found_path = dep_manager.FetchPath('dep1', 'plat')
    self.assertEqual(cs_path, found_path)
    local_path_mock.assert_called_with(dep_info)
    cs_path_mock.assert_called_once_with(dep_info)
    self.assertFalse(sb_find_path_mock.call_args)
    # If the below assert fails, the ordering assumption that determined the
    # path_mock return values is incorrect, and should be updated.
    path_mock.exists.assert_has_calls([mock.call(local_path),
                                       mock.call(cs_path)], any_order=False)
@mock.patch('os.path')
@mock.patch('catapult_base.support_binaries.FindPath')
@mock.patch(
'catapult_base.dependency_manager.DependencyManager._GetDependencyInfo')
@mock.patch(
'catapult_base.dependency_manager.DependencyManager._CloudStoragePath')
@mock.patch('catapult_base.dependency_manager.DependencyManager._LocalPath')
def testFetchPathError(self, local_path_mock, cs_path_mock, dep_info_mock,
sb_find_path_mock, path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(local_path_mock.call_args)
self.assertFalse(cs_path_mock.call_args)
self.assertFalse(sb_find_path_mock.call_args)
local_path_mock.return_value = None
cs_path_mock.return_value = None
dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path doesn't exist, and cloud_storage path wasn't successfully
# found.
self.assertRaises(exceptions.NoPathFoundError,
dep_manager.FetchPath, 'dep1', 'plat')
cs_path_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
dep_manager.FetchPath, 'dep1', 'plat')
cs_path_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
dep_manager.FetchPath, 'dep1', 'plat')
cs_path_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
dep_manager.FetchPath, 'dep1', 'plat')
  @mock.patch('os.path')
  @mock.patch('catapult_base.support_binaries.FindLocallyBuiltPath')
  @mock.patch(
      'catapult_base.dependency_manager.DependencyManager._GetDependencyInfo')
  @mock.patch('catapult_base.dependency_manager.DependencyManager._LocalPath')
  def testLocalPath(self, local_path_mock, dep_info_mock, sb_find_path_mock,
                    path_mock):
    """LocalPath resolves only local files, never touching cloud storage."""
    dep_manager = dependency_manager.DependencyManager([])
    self.assertFalse(local_path_mock.call_args)
    self.assertFalse(sb_find_path_mock.call_args)
    sb_path = 'sb_path'
    local_path = 'local_path'
    dep_info = 'dep_info'
    local_path_mock.return_value = local_path
    sb_find_path_mock.return_value = sb_path
    dep_info_mock.return_value = dep_info

    # Empty lookup_dict
    found_path = dep_manager.LocalPath('dep', 'plat')
    self.assertEqual(sb_path, found_path)
    self.assertFalse(local_path_mock.call_args)
    sb_find_path_mock.assert_called_once_with('dep')
    local_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()

    # Non-empty lookup dict that doesn't contain the dependency we're looking
    # for.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    found_path = dep_manager.LocalPath('dep', 'plat')
    self.assertEqual(sb_path, found_path)
    self.assertFalse(local_path_mock.call_args)
    sb_find_path_mock.assert_called_once_with('dep')
    local_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path exists.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    path_mock.exists.return_value = True
    found_path = dep_manager.LocalPath('dep1', 'plat')
    self.assertEqual(local_path, found_path)
    local_path_mock.assert_called_with('dep_info')
    self.assertFalse(sb_find_path_mock.call_args)
    # If the below assert fails, the ordering assumption that determined the
    # path_mock return values is incorrect, and should be updated.
    path_mock.exists.assert_called_once_with('local_path')
    local_path_mock.reset_mock()
    sb_find_path_mock.reset_mock()

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path is found but doesn't exist.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    path_mock.exists.return_value = False
    local_path_mock.return_value = local_path
    self.assertRaises(exceptions.NoPathFoundError,
                      dep_manager.LocalPath, 'dep1', 'plat')

    # Non-empty lookup dict that contains the dependency we're looking for.
    # Local path isn't found.
    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
                                'dep2': mock.MagicMock()}
    local_path_mock.return_value = None
    self.assertRaises(exceptions.NoPathFoundError,
                      dep_manager.LocalPath, 'dep1', 'plat')
  def testInitialUpdateDependencies(self):
    """_UpdateDependencies builds the dependency->platform lookup dict."""
    dep_manager = dependency_manager.DependencyManager([])

    # Empty BaseConfig.
    dep_manager._lookup_dict = {}
    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
    base_config_mock.IterDependencyInfo.return_value = iter([])
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertFalse(dep_manager._lookup_dict)

    # One dependency/platform in a BaseConfig.
    dep_manager._lookup_dict = {}
    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep = 'dependency'
    plat = 'platform'
    dep_info.dependency = dep
    dep_info.platform = plat
    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
    expected_lookup_dict = {dep: {plat: dep_info}}
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
    # no pre-existing entries, so no merge via DependencyInfo.Update
    self.assertFalse(dep_info.Update.called)

    # One dependency multiple platforms in a BaseConfig.
    dep_manager._lookup_dict = {}
    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
    dep = 'dependency'
    plat1 = 'platform1'
    plat2 = 'platform2'
    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info1.dependency = dep
    dep_info1.platform = plat1
    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info2.dependency = dep
    dep_info2.platform = plat2
    base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
                                                            dep_info2])
    expected_lookup_dict = {dep: {plat1: dep_info1,
                                  plat2: dep_info2}}
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
    self.assertFalse(dep_info1.Update.called)
    self.assertFalse(dep_info2.Update.called)

    # Multiple dependencies, multiple platforms in a BaseConfig.
    dep_manager._lookup_dict = {}
    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
    dep1 = 'dependency1'
    dep2 = 'dependency2'
    plat1 = 'platform1'
    plat2 = 'platform2'
    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info1.dependency = dep1
    dep_info1.platform = plat1
    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info2.dependency = dep1
    dep_info2.platform = plat2
    dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info3.dependency = dep2
    dep_info3.platform = plat2
    base_config_mock.IterDependencyInfo.return_value = iter(
        [dep_info1, dep_info2, dep_info3])
    expected_lookup_dict = {dep1: {plat1: dep_info1,
                                   plat2: dep_info2},
                            dep2: {plat2: dep_info3}}
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
    self.assertFalse(dep_info1.Update.called)
    self.assertFalse(dep_info2.Update.called)
    self.assertFalse(dep_info3.Update.called)
def testFollowupUpdateDependenciesNoOverlap(self):
dep_manager = dependency_manager.DependencyManager([])
dep = 'dependency'
dep1 = 'dependency1'
dep2 = 'dependency2'
dep3 = 'dependency3'
plat1 = 'platform1'
plat2 = 'platform2'
plat3 = 'platform3'
dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_a.dependency = dep1
dep_info_a.platform = plat1
dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_b.dependency = dep1
dep_info_b.platform = plat2
dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_c.dependency = dep
dep_info_c.platform = plat1
start_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
# Empty BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
base_config_mock.IterDependencyInfo.return_value = iter([])
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(start_lookup_dict, dep_manager._lookup_dict)
# One dependency/platform in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep3
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep3: {plat3: dep_info}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep1 = 'dependency1'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep3
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2},
dep3: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Ensure the testing data wasn't corrupted.
self.assertEqual(start_lookup_dict,
{dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}})
  def testFollowupUpdateDependenciesWithCollisions(self):
    """_UpdateDependencies merges colliding entries via DependencyInfo.Update.

    When an incoming info shares a (dependency, platform) key with an
    existing entry, the EXISTING entry's Update() must be called with the
    incoming info and the lookup dict must keep the existing object.
    """
    dep_manager = dependency_manager.DependencyManager([])
    dep = 'dependency'
    dep1 = 'dependency1'
    dep2 = 'dependency2'
    plat1 = 'platform1'
    plat2 = 'platform2'
    # NOTE(review): the seed infos' .dependency attributes are swapped
    # relative to the keys they sit under in start_lookup_dict below
    # (dep_info_a/b say dep1 but live under dep, and vice versa for
    # dep_info_c); confirm this is intentional test data.
    dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info_a.dependency = dep1
    dep_info_a.platform = plat1
    dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info_b.dependency = dep1
    dep_info_b.platform = plat2
    dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info_c.dependency = dep
    dep_info_c.platform = plat1
    start_lookup_dict = {dep: {plat1: dep_info_a,
                               plat2: dep_info_b},
                         dep1: {plat1: dep_info_c}}
    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
    # One dependency/platform.
    dep_manager._lookup_dict = start_lookup_dict.copy()
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info.dependency = dep
    dep_info.platform = plat1
    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
    expected_lookup_dict = {dep: {plat1: dep_info_a,
                                  plat2: dep_info_b},
                            dep1: {plat1: dep_info_c}}
    dep_manager._UpdateDependencies(base_config_mock)
    # NOTE(review): assertItemsEqual compares only the top-level keys when
    # given dicts; assertEqual would also verify the stored values.
    self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
    dep_info_a.Update.assert_called_once_with(dep_info)
    self.assertFalse(dep_info.Update.called)
    self.assertFalse(dep_info_b.Update.called)
    self.assertFalse(dep_info_c.Update.called)
    dep_info_a.reset_mock()
    dep_info_b.reset_mock()
    dep_info_c.reset_mock()
    # One dependency multiple platforms in a BaseConfig.
    dep_manager._lookup_dict = start_lookup_dict.copy()
    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info1.dependency = dep1
    dep_info1.platform = plat1
    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info2.dependency = dep2
    dep_info2.platform = plat2
    base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
                                                             dep_info2])
    expected_lookup_dict = {dep: {plat1: dep_info_a,
                                  plat2: dep_info_b},
                            dep1: {plat1: dep_info_c},
                            dep2: {plat2: dep_info2}}
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
    self.assertFalse(dep_info1.Update.called)
    self.assertFalse(dep_info2.Update.called)
    self.assertFalse(dep_info_a.Update.called)
    self.assertFalse(dep_info_b.Update.called)
    # dep_info1 collides with dep_info_c (key dep1/plat1), so the existing
    # entry is updated in place.
    dep_info_c.Update.assert_called_once_with(dep_info1)
    dep_info_a.reset_mock()
    dep_info_b.reset_mock()
    dep_info_c.reset_mock()
    # Multiple dependencies, multiple platforms in a BaseConfig.
    dep_manager._lookup_dict = start_lookup_dict.copy()
    dep1 = 'dependency1'
    plat1 = 'platform1'
    plat2 = 'platform2'
    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info1.dependency = dep
    dep_info1.platform = plat1
    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info2.dependency = dep1
    dep_info2.platform = plat1
    dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info3.dependency = dep2
    dep_info3.platform = plat2
    base_config_mock.IterDependencyInfo.return_value = iter(
        [dep_info1, dep_info2, dep_info3])
    expected_lookup_dict = {dep: {plat1: dep_info_a,
                                  plat2: dep_info_b},
                            dep1: {plat1: dep_info_c},
                            dep2: {plat2: dep_info3}}
    dep_manager._UpdateDependencies(base_config_mock)
    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
    self.assertFalse(dep_info1.Update.called)
    self.assertFalse(dep_info2.Update.called)
    self.assertFalse(dep_info3.Update.called)
    self.assertFalse(dep_info_b.Update.called)
    dep_info_a.Update.assert_called_once_with(dep_info1)
    dep_info_c.Update.assert_called_once_with(dep_info2)
    # Collision error: an Update() that raises must propagate.
    dep_manager._lookup_dict = start_lookup_dict.copy()
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info.dependency = dep
    dep_info.platform = plat1
    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
    dep_info_a.Update.side_effect = ValueError
    self.assertRaises(ValueError,
                      dep_manager._UpdateDependencies, base_config_mock)
    # Ensure the testing data wasn't corrupted.
    self.assertEqual(start_lookup_dict,
                     {dep: {plat1: dep_info_a,
                            plat2: dep_info_b},
                      dep1: {plat1: dep_info_c}})
def testGetDependencyInfo(self):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(dep_manager._lookup_dict)
# No dependencies in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo('missing_dep',
'missing_plat'))
dep_manager._lookup_dict = {'dep1': {'plat1': 'dep_info11',
'plat2': 'dep_info12',
'plat3': 'dep_info13'},
'dep2': {'plat1': 'dep_info11',
'plat2': 'dep_info21',
'plat3': 'dep_info23',
'default': 'dep_info2d'},
'dep3': {'plat1': 'dep_info31',
'plat2': 'dep_info32',
'default': 'dep_info3d'}}
# Dependency not in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'missing_dep', 'missing_plat'))
# Dependency in the dependency manager, but not the platform. No default.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'dep1', 'missing_plat'))
# Dependency in the dependency manager, but not the platform, but a default
# exists.
self.assertEqual('dep_info2d', dep_manager._GetDependencyInfo(
'dep2', 'missing_plat'))
# Dependency and platform in the dependency manager. A default exists.
self.assertEqual('dep_info23', dep_manager._GetDependencyInfo(
'dep2', 'plat3'))
# Dependency and platform in the dependency manager. No default exists.
self.assertEqual('dep_info12', dep_manager._GetDependencyInfo(
'dep1', 'plat2'))
@mock.patch('os.path.exists')
def testLocalPathHelper(self, exists_mock):
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
# There is no local path for the given dependency.
dep_info.local_paths = {}
self.assertEqual(None,
dependency_manager.DependencyManager._LocalPath(dep_info))
# There is a local path for the given dependency, but it doesn't exist.
exists_mock.side_effect = [False]
dep_info.local_paths = {'local_path0'}
self.assertEqual(None,
dependency_manager.DependencyManager._LocalPath(dep_info))
exists_mock.assert_called_once_with('local_path0')
exists_mock.reset_mock()
# There is a local path for the given dependency, and it does exist.
exists_mock.side_effect = [True]
dep_info.local_paths = {'local_path0'}
self.assertEqual('local_path0',
dependency_manager.DependencyManager._LocalPath(dep_info))
exists_mock.assert_called_once_with('local_path0')
exists_mock.reset_mock()
# There are multiple local paths for the given dependency, and the first one
# exists.
exists_mock.side_effect = [True]
dep_info.local_paths = {'local_path0', 'local_path1', 'local_path2'}
self.assertEqual('local_path0',
dependency_manager.DependencyManager._LocalPath(dep_info))
exists_mock.assert_called_once_with('local_path0')
exists_mock.reset_mock()
# There are multiple local paths for the given dependency, and the first one
# doesn't exist but the second one does.
exists_mock.side_effect = [False, True]
dep_info.local_paths = {'local_path0', 'local_path1', 'local_path2'}
self.assertEqual('local_path1',
dependency_manager.DependencyManager._LocalPath(dep_info))
expected_calls = [mock.call('local_path0'), mock.call('local_path1')]
exists_mock.assert_has_calls(expected_calls, any_order=False)
exists_mock.reset_mock()
# There are multiple local paths for the given dependency, and the first and
# second ones don't exist but the third one does.
exists_mock.side_effect = [False, False, True]
dep_info.local_paths = {'local_path0', 'local_path1', 'local_path2'}
self.assertEqual('local_path2',
dependency_manager.DependencyManager._LocalPath(dep_info))
expected_calls = [mock.call('local_path0'), mock.call('local_path1'),
mock.call('local_path2')]
exists_mock.assert_has_calls(expected_calls, any_order=False)
exists_mock.reset_mock()
# There are multiple local paths for the given dependency, but none of them
# exist.
exists_mock.side_effect = [False, False, False]
dep_info.local_paths = {'local_path0', 'local_path1', 'local_path2'}
self.assertEqual(None,
dependency_manager.DependencyManager._LocalPath(dep_info))
expected_calls = [mock.call('local_path0'), mock.call('local_path1'),
mock.call('local_path2')]
exists_mock.assert_has_calls(expected_calls, any_order=False)
exists_mock.reset_mock()
  @mock.patch('os.path.exists')
  @mock.patch('os.chmod')
  @mock.patch(
      'catapult_base.cloud_storage.GetIfHashChanged')
  def testCloudStoragePathMissingData(
      self, cs_get_mock, chmod_mock, exists_mock):
    """_CloudStoragePath returns None when any required field is missing.

    Exercises each of remote_path / hash / bucket / download_path being
    None in turn (and the all-None and dep_info-is-None cases).
    """
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    cs_remote_path = 'cs_remote_path'
    cs_hash = 'cs_hash'
    cs_bucket = 'cs_bucket'
    download_path = 'download_path'
    # No dependency info.
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(None))
    # There is no cloud_storage information for the dependency.
    dep_info.cs_remote_path = None
    dep_info.cs_hash = None
    dep_info.cs_bucket = None
    dep_info.download_path = None
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(dep_info))
    # There is no cloud_storage remote_path the dependency.
    dep_info.cs_remote_path = None
    dep_info.cs_hash = cs_hash
    dep_info.cs_bucket = cs_bucket
    dep_info.download_path = download_path
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(dep_info))
    # There is no cloud_storage hash for the dependency.
    dep_info.cs_remote_path = cs_remote_path
    dep_info.cs_hash = None
    dep_info.cs_bucket = cs_bucket
    dep_info.download_path = download_path
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(dep_info))
    # There is no cloud_storage bucket for the dependency.
    dep_info.cs_remote_path = cs_remote_path
    dep_info.cs_hash = cs_hash
    dep_info.cs_bucket = None
    dep_info.download_path = download_path
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(dep_info))
    # There is no download_path for the dependency.
    dep_info.cs_remote_path = cs_remote_path
    dep_info.cs_hash = cs_hash
    dep_info.cs_bucket = cs_bucket
    dep_info.download_path = None
    self.assertEqual(
        None, dependency_manager.DependencyManager._CloudStoragePath(dep_info))
  @mock.patch('os.path.exists')
  @mock.patch('os.chmod')
  @mock.patch(
      'catapult_base.cloud_storage.GetIfHashChanged')
  def testCloudStoragePath(self, cs_get_mock, chmod_mock, exists_mock):
    """_CloudStoragePath downloads, chmods and returns the absolute path.

    With all cloud-storage fields present, the helper is expected to
    return os.path.abspath(download_path) and mark the file
    user-rwx / group-r.  If the file is still missing after the cloud
    storage fetch, it must raise FileNotFoundError.
    """
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    cs_remote_path = 'cs_remote_path'
    cs_hash = 'cs_hash'
    cs_bucket = 'cs_bucket'
    download_path = 'download_path'
    # All of the needed information is given, and the downloaded path exists
    # after calling cloud storage.
    dep_info.cs_remote_path = cs_remote_path
    dep_info.cs_hash = cs_hash
    dep_info.cs_bucket = cs_bucket
    dep_info.download_path = download_path
    exists_mock.return_value = True
    self.assertEqual(
        os.path.abspath(download_path),
        dependency_manager.DependencyManager._CloudStoragePath(dep_info))
    chmod_mock.assert_called_once_with(
        download_path,
        stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP)
    # All of the needed information is given, but the downloaded path doesn't
    # exists after calling cloud storage.
    dep_info.cs_remote_path = cs_remote_path
    dep_info.cs_hash = cs_hash
    dep_info.cs_bucket = cs_bucket
    dep_info.download_path = download_path
    exists_mock.return_value = False
    # makedirs is patched so the missing-file path doesn't touch the disk.
    with mock.patch(
        'catapult_base.dependency_manager.dependency_manager.os.makedirs'):
      self.assertRaises(
          exceptions.FileNotFoundError,
          dependency_manager.DependencyManager._CloudStoragePath, dep_info)
    exists_mock.assert_called_with(download_path)
  @mock.patch('os.path.exists')
  @mock.patch(
      'catapult_base.cloud_storage.GetIfHashChanged')
  def testCloudStoragePathCloudStorageErrors(self, cs_get_mock, exists_mock):
    """Every cloud_storage error type must propagate out of _CloudStoragePath."""
    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
    dep_info.download_path = 'download_path'
    # Each error raised by GetIfHashChanged is re-raised unwrapped.
    cs_get_mock.side_effect = cloud_storage.CloudStorageError
    self.assertRaises(
        cloud_storage.CloudStorageError,
        dependency_manager.DependencyManager._CloudStoragePath, dep_info)
    cs_get_mock.side_effect = cloud_storage.ServerError
    self.assertRaises(
        cloud_storage.ServerError,
        dependency_manager.DependencyManager._CloudStoragePath, dep_info)
    cs_get_mock.side_effect = cloud_storage.NotFoundError
    self.assertRaises(
        cloud_storage.NotFoundError,
        dependency_manager.DependencyManager._CloudStoragePath, dep_info)
    cs_get_mock.side_effect = cloud_storage.PermissionError
    self.assertRaises(
        cloud_storage.PermissionError,
        dependency_manager.DependencyManager._CloudStoragePath, dep_info)
    cs_get_mock.side_effect = cloud_storage.CredentialsError
    self.assertRaises(
        cloud_storage.CredentialsError,
        dependency_manager.DependencyManager._CloudStoragePath, dep_info)
| |
from crowdsourcing import models
from datetime import datetime
from rest_framework import serializers
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
import json
from crowdsourcing.serializers.template import TemplateSerializer
from crowdsourcing.serializers.task import TaskSerializer, TaskCommentSerializer
from rest_framework.exceptions import ValidationError
from crowdsourcing.serializers.requester import RequesterSerializer
from django.utils import timezone
from crowdsourcing.serializers.message import CommentSerializer
from django.db.models import F, Count, Q
from crowdsourcing.utils import get_model_or_none
class CategorySerializer(DynamicFieldsModelSerializer):
    """Serializer for Category objects (id, name, parent)."""

    class Meta:
        model = models.Category
        fields = ('id', 'name', 'parent')

    def update(self, instance, validated_data):
        """Apply any supplied fields, keeping current values otherwise."""
        for attr in ('name', 'parent'):
            setattr(instance, attr,
                    validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance

    def delete(self, instance):
        """Soft-delete: flag the row rather than removing it."""
        instance.deleted = True
        instance.save()
        return instance
class ModuleSerializer(DynamicFieldsModelSerializer):
    """Serializer for Module objects.

    Several read-only fields are computed per request through the
    SerializerMethodField accessors defined below.
    """
    deleted = serializers.BooleanField(read_only=True)
    template = TemplateSerializer(many=True)
    total_tasks = serializers.SerializerMethodField()
    # file_id points at a previously uploaded RequesterInputFile (CSV);
    # it is consumed and deleted in create().
    file_id = serializers.IntegerField(write_only=True, allow_null=True)
    age = serializers.SerializerMethodField()
    has_comments = serializers.SerializerMethodField()
    available_tasks = serializers.SerializerMethodField()
    comments = serializers.SerializerMethodField()
    # comments = TaskCommentSerializer(many=True, source='module_tasks__task_workers__taskcomment_task', read_only=True)

    class Meta:
        model = models.Module
        fields = ('id', 'name', 'owner', 'project', 'description', 'status', 'repetition', 'module_timeout',
                  'deleted', 'template', 'created_timestamp', 'last_updated', 'price', 'has_data_set',
                  'data_set_location', 'total_tasks', 'file_id', 'age', 'is_micro', 'is_prototype', 'task_time',
                  'allow_feedback', 'feedback_permissions', 'min_rating', 'has_comments', 'available_tasks', 'comments')
        read_only_fields = (
            'created_timestamp', 'last_updated', 'deleted', 'owner', 'has_comments', 'available_tasks', 'comments')

    def create(self, **kwargs):
        """Create the module plus its templates and tasks.

        Expects kwargs['owner'] (a user-like object with a .requester).
        If a file_id was supplied, one task is created per CSV row;
        otherwise a single static task is created.
        Raises ValidationError if any generated task is invalid.
        """
        templates = self.validated_data.pop('template')
        project = self.validated_data.pop('project')
        file_id = self.validated_data.pop('file_id')
        csv_data = []
        if file_id is not None:
            # The uploaded CSV is parsed once and then deleted.
            uploaded_file = models.RequesterInputFile.objects.get(id=file_id)
            csv_data = uploaded_file.parse_csv()
            uploaded_file.delete()
        # module_tasks = self.validated_data.pop('module_tasks')
        module = models.Module.objects.create(deleted=False, project=project,
                                              owner=kwargs['owner'].requester, **self.validated_data)
        for template in templates:
            # get_or_create returns a (obj, created) tuple; t[0] is the obj.
            template_items = template.pop('template_items')
            t = models.Template.objects.get_or_create(owner=kwargs['owner'], **template)
            models.ModuleTemplate.objects.get_or_create(module=module, template=t[0])
            for item in template_items:
                models.TemplateItem.objects.get_or_create(template=t[0], **item)
        if module.has_data_set:
            # One task per CSV row, carrying the row as JSON.
            for row in csv_data:
                task = {
                    'module': module.id,
                    'data': json.dumps(row)
                }
                task_serializer = TaskSerializer(data=task)
                if task_serializer.is_valid():
                    task_serializer.create(**kwargs)
                else:
                    raise ValidationError(task_serializer.errors)
        else:
            # No data set: create a single placeholder "static" task.
            task = {
                'module': module.id,
                'data': "{\"type\": \"static\"}"
            }
            task_serializer = TaskSerializer(data=task)
            if task_serializer.is_valid():
                task_serializer.create(**kwargs)
            else:
                raise ValidationError(task_serializer.errors)
        return module

    def delete(self, instance):
        # Soft delete: flag the row rather than removing it.
        instance.deleted = True
        instance.save()
        return instance

    def get_age(self, model):
        """Return a human-readable "Posted <delta>" string."""
        from crowdsourcing.utils import get_time_delta
        delta = get_time_delta(model.created_timestamp)
        return "Posted " + delta

    def get_total_tasks(self, obj):
        return obj.module_tasks.all().count()

    def get_has_comments(self, obj):
        return obj.modulecomment_module.count() > 0

    def get_available_tasks(self, obj):
        """Count tasks in this module the requesting worker can still take.

        Raw SQL: counts tasks whose assignment count is below the module's
        repetition, excluding tasks this worker already has a TaskWorker
        row for.  NOTE(review): the meaning of task_status values 4 and 6
        is defined by the TaskWorker model elsewhere -- confirm before
        changing.  Requires self.context['request'].
        """
        available_task_count = models.Module.objects.values('id').raw('''
            select count(*) id from (
            SELECT
                "crowdsourcing_task"."id"
            FROM "crowdsourcing_task"
                INNER JOIN "crowdsourcing_module" ON ("crowdsourcing_task"."module_id" = "crowdsourcing_module"."id")
                LEFT OUTER JOIN "crowdsourcing_taskworker" ON ("crowdsourcing_task"."id" =
            "crowdsourcing_taskworker"."task_id" and task_status not in (4,6))
            WHERE ("crowdsourcing_task"."module_id" = %s AND NOT (
                ("crowdsourcing_task"."id" IN (SELECT U1."task_id" AS Col1
                FROM "crowdsourcing_taskworker" U1 WHERE U1."worker_id" = %s and U1.task_status<>6))))
            GROUP BY "crowdsourcing_task"."id", "crowdsourcing_module"."repetition"
            HAVING "crowdsourcing_module"."repetition" > (COUNT("crowdsourcing_taskworker"."id"))) available_tasks
        ''', params=[obj.id, self.context['request'].user.userprofile.worker.id])[0].id
        return available_task_count

    def get_comments(self, obj):
        """Collect comments from every task of the module (N+1 queries)."""
        if obj:
            comments = []
            tasks = obj.module_tasks.all()
            for task in tasks:
                task_comments = task.taskcomment_task.all()
                for task_comment in task_comments:
                    comments.append(task_comment)
            serializer = TaskCommentSerializer(many=True, instance=comments, read_only=True)
            return serializer.data
        return []
class ProjectSerializer(DynamicFieldsModelSerializer):
    """Serializer for Project objects with nested modules and categories."""
    deleted = serializers.BooleanField(read_only=True)
    categories = serializers.PrimaryKeyRelatedField(queryset=models.Category.objects.all(), many=True)
    owner = RequesterSerializer(read_only=True)
    module_count = serializers.SerializerMethodField()
    modules = ModuleSerializer(many=True, fields=('id', 'name', 'description', 'status', 'repetition', 'module_timeout',
                                                  'price', 'template', 'total_tasks', 'file_id', 'has_data_set', 'age',
                                                  'is_micro', 'is_prototype', 'task_time', 'has_comments',
                                                  'allow_feedback', 'feedback_permissions', 'available_tasks'))
    # NOTE(review): no get_modules_filtered method is visible in this file
    # chunk; confirm it is defined elsewhere, otherwise DRF will raise.
    modules_filtered = serializers.SerializerMethodField()

    class Meta:
        model = models.Project
        fields = ('id', 'name', 'owner', 'description', 'deleted',
                  'categories', 'modules', 'module_count', 'modules_filtered')

    def create(self, **kwargs):
        """Create the project, its category links, and nested modules.

        Expects kwargs['owner'] (a user-like object with a .requester).
        Raises ValidationError if any nested module is invalid.
        """
        categories = self.validated_data.pop('categories')
        modules = self.validated_data.pop('modules')
        project = models.Project.objects.create(owner=kwargs['owner'].requester, deleted=False, **self.validated_data)
        for category in categories:
            models.ProjectCategory.objects.create(project=project, category=category)
        for module in modules:
            module['project'] = project.id
            module_serializer = ModuleSerializer(data=module)
            if module_serializer.is_valid():
                module_serializer.create(owner=kwargs['owner'])
            else:
                raise ValidationError(module_serializer.errors)
        return project

    def update(self, instance, validated_data):
        # Only the name is updatable through this serializer.
        instance.name = validated_data.get('name', instance.name)
        instance.save()
        return instance

    def delete(self, instance):
        # Soft delete: flag the row rather than removing it.
        instance.deleted = True
        instance.save()
        return instance

    def get_module_count(self, obj):
        return obj.modules.all().count()
class ProjectRequesterSerializer(serializers.ModelSerializer):
    """Serializer for ProjectRequester; no explicit fields declared, so the
    DRF default field set for the model is used."""
    class Meta:
        model = models.ProjectRequester
class ModuleReviewSerializer(serializers.ModelSerializer):
    """Serializer for ModuleReview entries (a worker's review of a module)."""
    class Meta:
        model = models.ModuleReview
        # 'annonymous' is spelled as on the model field -- don't "fix" it
        # here without a matching model/migration change.
        fields = ('id', 'worker', 'annonymous', 'module', 'comments')
        # Bug fix: ('last_updated') without a trailing comma is a plain
        # string, not a tuple, so DRF would iterate it char-by-char.
        read_only_fields = ('last_updated',)
class ModuleRatingSerializer(serializers.ModelSerializer):
    """Serializer for ModuleRating entries (a worker's rating of a module)."""
    class Meta:
        model = models.ModuleRating
        fields = ('id', 'worker', 'module', 'value')
        # Bug fix: ('last_updated') without a trailing comma is a plain
        # string, not a tuple, so DRF would iterate it char-by-char.
        read_only_fields = ('last_updated',)
class WorkerModuleApplicationSerializer(serializers.ModelSerializer):
    """Serializer for WorkerModuleApplication; uses the DRF default field set."""
    class Meta:
        model = models.WorkerModuleApplication
class QualificationApplicationSerializer(serializers.ModelSerializer):
    """Serializer over the Qualification model.

    NOTE(review): the class name says QualificationApplication but the
    model is Qualification -- confirm which was intended.
    """
    class Meta:
        model = models.Qualification
class QualificationItemSerializer(serializers.ModelSerializer):
    """Serializer for QualificationItem; uses the DRF default field set."""
    class Meta:
        model = models.QualificationItem
class BookmarkedProjectsSerializer(serializers.ModelSerializer):
    """Serializer for a user's project bookmarks."""
    class Meta:
        model = models.BookmarkedProjects
        fields = ('id', 'project')

    def create(self, **kwargs):
        """Create (or fetch) the bookmark and return it.

        Bug fix: the original returned None; returning the instance
        matches the create() convention used elsewhere in this module
        and lets callers serialize the result.  Backward-compatible for
        callers that ignored the return value.
        """
        bookmark, _ = models.BookmarkedProjects.objects.get_or_create(
            profile=kwargs['profile'], **self.validated_data)
        return bookmark
class ModuleCommentSerializer(DynamicFieldsModelSerializer):
    """Serializer for comments attached to a module."""
    comment = CommentSerializer()

    class Meta:
        model = models.ModuleComment
        fields = ('id', 'module', 'comment')
        read_only_fields = ('module',)

    def create(self, **kwargs):
        """Create the nested comment, then link it to kwargs['module'].

        Raises:
            ValidationError: if the nested comment payload is invalid.
                (Bug fix: the original silently returned None in that
                case; raising matches the other serializers here.)
        """
        comment_data = self.validated_data.pop('comment')
        comment_serializer = CommentSerializer(data=comment_data)
        if not comment_serializer.is_valid():
            raise ValidationError(comment_serializer.errors)
        comment = comment_serializer.create(sender=kwargs['sender'])
        module_comment = models.ModuleComment.objects.create(
            module_id=kwargs['module'], comment_id=comment.id)
        return {'id': module_comment.id, 'comment': comment}
'''
class ModuleSerializer(DynamicFieldsModelSerializer):
avg_rating = serializers.SerializerMethodField()
num_reviews = serializers.SerializerMethodField()
num_raters = serializers.SerializerMethodField()
avg_pay = serializers.SerializerMethodField()
min_pay = serializers.SerializerMethodField()
completed_on = serializers.SerializerMethodField()
total_submissions = serializers.SerializerMethodField()
num_contributors = serializers.SerializerMethodField()
num_accepted = serializers.SerializerMethodField()
num_rejected = serializers.SerializerMethodField()
total_tasks = serializers.SerializerMethodField()
average_time = serializers.SerializerMethodField()
deleted = serializers.BooleanField(read_only=True)
categories = CategorySerializer(many=True,read_only=True,fields=('id','name'))
project = ProjectSerializer(many = False, read_only = True, fields=('id','name'))
def create(self, validated_data):
categories = validated_data.pop('categories')
module = models.Module.objects.create(deleted = False,**validated_data)
for c in categories:
models.ModuleCategory.objects.create(module=module, category=c)
return module
def update(self,instance,validated_data):
instance.name = validated_data.get('name', instance.name)
instance.keywords = validated_data.get('keywords', instance.keywords)
instance.description = validated_data.get('description', instance.description)
instance.price = validated_data.get('price',instance.price)
instance.repetition = validated_data.get('repetition',instance.repetition)
instance.module_timeout = validated_data.get('module_timeout',instance.module_timeout)
return instance
def delete(self, instance):
instance.deleted = True
instance.save()
return instance
def get_num_reviews(self,model):
return model.modulereview_set.count()
def get_num_raters(self,model):
return model.modulerating_set.count()
def get_avg_rating(self, model):
return model.modulerating_set.all().aggregate(avg=Avg('value')).get('avg') # should be updated automatically
def get_avg_pay(self, model):
return model.task_set.all().aggregate(avg=Avg('price')).get('avg')
def get_min_pay(self, model):
return model.task_set.all().aggregate(min=Min('price')).get('min') # should be updated automatically
def get_num_accepted(self, model):
return models.TaskWorkerResult.objects.all().filter(task_worker__task__module = model,status = 2).count()
def get_num_rejected(self, model):
return models.TaskWorkerResult.objects.all().filter(task_worker__task__module = model,status = 3).count()
def get_total_tasks(self, model):
return model.task_set.all().count()
def get_completed_on(self, model):
if model.task_set.all().exclude(status = 4).count()>0:
return "Not Comlpeted"
else:
return model.task_set.all().aggregate(date=Max('last_updated')).get('date').date()
def get_total_submissions(self, model):
return models.TaskWorkerResult.objects.all().filter(task_worker__task__module=model).count()
def get_num_contributors(self,model):
acceptedTaskWorker = models.TaskWorker.objects.all().filter(task__module = model,taskworkerresult__status = 2)
return acceptedTaskWorker.order_by('worker').distinct('worker').count()
def get_average_time(self,model):
taskworkers = models.TaskWorker.objects.all().filter(task__module = model)
time_spent = 0
count = 0
for taskworker in taskworkers:
init = taskworker.created_timestamp
maxend = taskworker.taskworkerresult_set.all().aggregate(max = Max('created_timestamp')).get('max')
if maxend != None:
time_spent = time_spent+(((maxend - init).total_seconds())/3600)
count = count + 1
return time_spent/count
class Meta:
model = models.Module
fields = ('id', 'name', 'owner', 'project', 'categories', 'description', 'keywords', 'status',
'repetition','module_timeout','deleted','created_timestamp','last_updated','avg_rating',
'num_reviews','completed_on','total_submissions','num_contributors','num_raters','min_pay','avg_pay','num_accepted','num_rejected','total_tasks','average_time')
read_only_fields = ('created_timestamp','last_updated')
'''
| |
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
    """Split a '/'-delimited template path into segments and sanity-check it.
    Raises `TemplateNotFound` if any segment contains the OS path separator
    or is the parent-directory reference ('..').
    """
    segments = []
    for segment in template.split('/'):
        unsafe = (
            path.sep in segment
            or (path.altsep and path.altsep in segment)
            or segment == path.pardir
        )
        if unsafe:
            raise TemplateNotFound(template)
        # Drop empty segments and current-directory markers.
        if segment and segment != '.':
            segments.append(segment)
    return segments
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as unicode string or a ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
    """Look up *name* via :meth:`get_source` and return a template object.

    Compiled code is fetched from (and stored back into) the environment's
    bytecode cache when one is configured.  Subclasses should not override
    this method: collection loaders (:class:`PrefixLoader`,
    :class:`ChoiceLoader`) bypass it and call `get_source` directly.
    """
    if globals is None:
        globals = {}

    # Ask the concrete loader for the template text plus metadata.
    source, filename, uptodate = self.get_source(environment, name)

    # Prefer already-compiled code from the bytecode cache, if any.
    code = None
    cache = environment.bytecode_cache
    bucket = None
    if cache is not None:
        bucket = cache.get_bucket(environment, name, filename, source)
        code = bucket.code

    # Cache miss (or no cache at all): compile from source.
    if code is None:
        code = environment.compile(source, name, filename)

    # Write freshly compiled code back into an empty cache bucket.
    if bucket is not None and bucket.code is None:
        bucket.code = code
        cache.set_bucket(bucket)

    return environment.template_class.from_code(
        environment, code, globals, uptodate)
class FileSystemLoader(BaseLoader):
    """Loads templates from directories on the file system.

    Pass a single directory or a list of directories; they are searched
    in order for every requested template:

    >>> loader = FileSystemLoader('/path/to/templates')
    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Template files are decoded as ``'utf-8'`` unless a different
    `encoding` is supplied.
    """

    def __init__(self, searchpath, encoding='utf-8'):
        # Accept a bare string as a one-element search path.
        if isinstance(searchpath, string_types):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for directory in self.searchpath:
            filename = path.join(directory, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()

            mtime = path.getmtime(filename)

            def uptodate():
                # Closure over the mtime recorded at load time; a vanished
                # file (OSError) also counts as stale.
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for directory in self.searchpath:
            for dirpath, _dirnames, filenames in os.walk(directory):
                for name in filenames:
                    # Strip the search-path prefix and normalize to
                    # forward-slash template names.
                    relative = os.path.join(dirpath, name)[len(directory):]
                    template = relative.strip(os.path.sep) \
                                       .replace(os.path.sep, '/')
                    if template[:2] == './':
                        template = template[2:]
                    found.add(template)
        return sorted(found)
class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.

    Constructed with the name of a python package and the path to the
    templates inside it::

        loader = PackageLoader('mypackage', 'views')

    ``'templates'`` is assumed when no package path is given.  Files are
    decoded as ``'utf-8'`` unless another `encoding` is passed.  Because
    of how eggs work, templates can only be auto-reloaded when the
    package was loaded from the plain file system, not from a zip file.
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        # Reloading is only possible for real files on disk.
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        resource = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(resource):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(
                self.manager, resource)
            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, resource)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        # Normalize the package path so the prefix can be sliced off the
        # resource names below.
        base = self.package_path
        if base[:2] == './':
            base = base[2:]
        elif base == '.':
            base = ''
        offset = len(base)
        results = []

        def walk(directory):
            # Depth-first traversal through the resource provider.
            for name in self.provider.resource_listdir(directory):
                fullname = directory + '/' + name
                if self.provider.resource_isdir(fullname):
                    walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))

        walk(base)
        results.sort()
        return results
class DictLoader(BaseLoader):
    """Loads templates from a plain python dict mapping template names to
    unicode source strings.  Handy for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Auto reloading is rarely useful here, so the uptodate callback only
    reports a change when the mapping entry itself changed.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        try:
            source = self.mapping[template]
        except KeyError:
            raise TemplateNotFound(template)
        # Up to date as long as the stored source is still the same.
        return source, None, lambda: source == self.mapping.get(template)

    def list_templates(self):
        return sorted(self.mapping)
class FunctionLoader(BaseLoader):
    """A loader that delegates the lookup to a function.  The function is
    called with the template name and must return either a unicode string
    with the template source, a ``(source, filename, uptodatefunc)``
    tuple, or `None` if the template does not exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    `uptodatefunc` is called when autoreload is enabled and has to return
    `True` while the template is still current; see
    :meth:`BaseLoader.get_source`, which has the same return value.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        result = self.load_func(template)
        if result is None:
            raise TemplateNotFound(template)
        if isinstance(result, string_types):
            # Bare source string: no filename, no reload support.
            return result, None, None
        return result
class PrefixLoader(BaseLoader):
    """Dispatches to one of several loaders based on a name prefix.  The
    prefix is separated from the rest of the template name by a slash by
    default; pass `delimiter` to change that::

        loader = PrefixLoader({
            'app1':     PackageLoader('mypackage.app1'),
            'app2':     PackageLoader('mypackage.app2')
        })

    Loading ``'app1/index.html'`` resolves through the app1 package,
    ``'app2/index.html'`` through the second.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template):
        # ValueError: no delimiter in the name; KeyError: unknown prefix.
        # Both mean the template cannot be resolved here.
        try:
            prefix, local_name = template.split(self.delimiter, 1)
            loader = self.mapping[prefix]
        except (ValueError, KeyError):
            raise TemplateNotFound(template)
        return loader, local_name

    def get_source(self, environment, template):
        loader, local_name = self.get_loader(template)
        try:
            return loader.get_source(environment, local_name)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here
            # (the one that includes the prefix)
            raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        loader, local_name = self.get_loader(name)
        try:
            return loader.load(environment, local_name, globals)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here
            # (the one that includes the prefix)
            raise TemplateNotFound(name)

    def list_templates(self):
        # Every sub-loader's templates, re-qualified with its prefix.
        return [prefix + self.delimiter + template
                for prefix, loader in iteritems(self.mapping)
                for template in loader.list_templates()]
class ChoiceLoader(BaseLoader):
    """Works like :class:`PrefixLoader` but without prefixes: each loader
    in the list is tried in order until one finds the template.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    Useful for letting users override builtin templates from another
    location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        for loader in self.loaders:
            try:
                return loader.get_source(environment, template)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        for loader in self.loaders:
            try:
                return loader.load(environment, name, globals)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(name)

    def list_templates(self):
        names = set()
        for loader in self.loaders:
            names.update(loader.list_templates())
        return sorted(names)
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references.

    Needed because :class:`ModuleLoader` stores the only strong reference
    to the fake package module and registers a :func:`weakref.proxy` of it
    in ``sys.modules``.
    """
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    # Precompiled modules contain no original template source.
    has_source_access = False

    def __init__(self, path):
        # Unique fake package name per loader instance so several loaders
        # can coexist in sys.modules.
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, string_types):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        # Hash the template name so arbitrary names map to valid module
        # identifiers.
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)

        # Already imported templates live as attributes on the fake module.
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals)
| |
# -*- coding: utf-8 -*-
"""
Production Configurations

- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use SendGrid (via Anymail) to send emails
- Use Redis on Heroku
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals

from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six

import logging

from .common import *  # noqa

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')

# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )

# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ('whitenoise.middleware.WhiteNoiseMiddleware', )
# Prepend so WhiteNoise handles static requests before the rest of the
# middleware stack; Sentry's response-id middleware is prepended on top.
MIDDLEWARE_CLASSES = WHITENOISE_MIDDLEWARE + MIDDLEWARE_CLASSES
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware', )
MIDDLEWARE_CLASSES = RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
# BUGFIX: these two were False, which let browsers send the session and
# CSRF cookies over plain HTTP even though this production config forces
# HTTPS (SECURE_SSL_REDIRECT + HSTS below/above).  `manage.py check
# --deploy` flags exactly this; both must be True in production.
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
# NOTE(review): CSRF_COOKIE_HTTPONLY=True stops JavaScript reading the CSRF
# cookie, so AJAX clients must obtain the token from the DOM — confirm that
# is how the frontend works.
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['game.acubed.me'])
# END SITE CONFIGURATION

INSTALLED_APPS += ('gunicorn', )

# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += (
    'storages',
)

# S3 credentials and bucket come from the environment; env() without a
# default raises ImproperlyConfigured when the variable is missing.
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()

# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds

# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}

# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME

# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='Game <noreply@game.acubed.me>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Game] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)

# Anymail with SendGrid
# NOTE(review): env() without a default raises when the variable is unset,
# so DJANGO_SENDGRID_API_KEY must always be defined (it may be empty) —
# confirm, or switch to env('...', default='').
if env('DJANGO_SENDGRID_API_KEY'):
    INSTALLED_APPS += ("anymail", )
    ANYMAIL = {
        "SENDGRID_API_KEY": env('DJANGO_SENDGRID_API_KEY'),
    }
    EMAIL_BACKEND = "anymail.backends.sendgrid.SendGridBackend"

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Wrap the default loaders in the cached loader for production.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')

# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'IGNORE_EXCEPTIONS': True,  # mimics memcache behavior.
                                        # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}

# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Root logger goes to Sentry; noisy django/raven internals stay on console.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}

# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')

# Your production stuff: Below this line define 3rd party library settings
| |
###########################################################################
#
# OpenOPC for Python Library Module
#
# Copyright (c) 2007-2014 Barry Barnreiter (barry_b@users.sourceforge.net)
#
###########################################################################
import os
import sys
import time
import types
import string
import socket
import re
import Queue
__version__ = '1.3.1'

# Module-level handle to the client currently installing a COM event hook;
# GroupEvents.__init__ reads it because win32com instantiates the event
# sink class itself and cannot be passed constructor arguments.
current_client = None

# Win32 only modules not needed for 'open' protocol mode
if os.name == 'nt':
    try:
        import win32com.client
        import win32com.server.util
        import win32event
        import pythoncom
        import pywintypes
        import SystemHealth

        # Win32 variant types
        vt = dict([(pythoncom.__dict__[vtype], vtype) for vtype in pythoncom.__dict__.keys() if vtype[:2] == "VT"])

        # Allow gencache to create the cached wrapper objects
        win32com.client.gencache.is_readonly = False

        # Under p2exe the call in gencache to __init__() does not happen
        # so we use Rebuild() to force the creation of the gen_py folder
        win32com.client.gencache.Rebuild(verbose=0)

    # So we can work on Windows in "open" protocol mode without the need for the win32com modules
    except ImportError:
        win32com_found = False
    else:
        win32com_found = True
else:
    win32com_found = False

# OPC Constants

# Data source selectors used by SyncRead/AsyncRefresh.
SOURCE_CACHE = 1
SOURCE_DEVICE = 2

# Code-indexed lookup tables; index 0 is an unused placeholder so codes
# map directly onto positions.
OPC_STATUS = (0, 'Running', 'Failed', 'NoConfig', 'Suspended', 'Test')
BROWSER_TYPE = (0, 'Hierarchical', 'Flat')
ACCESS_RIGHTS = (0, 'Read', 'Write', 'Read/Write')

# Major quality names indexed by the two top quality bits (see quality_str).
OPC_QUALITY = ('Bad', 'Uncertain', 'Unknown', 'Good')

# Semicolon-separated candidate COM ProgIDs, tried in order on startup.
OPC_CLASS = 'Matrikon.OPC.Automation;Graybox.OPC.DAWrapper;HSCOPC.Automation;RSI.OPCAutomation;OPC.Automation'
OPC_SERVER = 'Hci.TPNServer;HwHsc.OPCServer;opc.deltav.1;AIM.OPC.1;Yokogawa.ExaopcDAEXQ.1;OSI.DA.1;OPC.PHDServerDA.1;Aspen.Infoplus21_DA.1;National Instruments.OPCLabVIEW;RSLinx OPC Server;KEPware.KEPServerEx.V4;Matrikon.OPC.Simulation;Prosys.OPC.Simulation'

# Default client name reported to the OPC server.
OPC_CLIENT = 'OpenOPC'
def quality_str(quality_bits):
    """Convert OPC quality bits to a descriptive string."""
    # The "major quality" lives in bits 6-7 of the quality word.
    major = (quality_bits >> 6) & 3
    return OPC_QUALITY[major]
def type_check(tags):
    """Normalize *tags* and report (tags_list, single, valid).

    A list/tuple passes through, None becomes an empty list, and a single
    tag is wrapped in a one-element list (with ``single`` set so callers
    can unwrap the result).  ``valid`` is true only when every entry is a
    string.
    """
    if type(tags) in (types.ListType, types.TupleType):
        single = False
    elif tags == None:
        tags, single = [], False
    else:
        tags, single = [tags], True

    # Valid only when every entry is a (byte or unicode) string.
    valid = all(type(t) in types.StringTypes for t in tags)
    return tags, single, valid
def wild2regex(string):
    """Convert a Unix wildcard glob into a regular expression."""
    # Order matters: literal dots must be escaped before '*' and '?'
    # introduce regex dots of their own.  '!' maps to '^' for negation.
    translated = string.replace('.', '\\.')
    translated = translated.replace('*', '.*')
    translated = translated.replace('?', '.')
    return translated.replace('!', '^')
def tags2trace(tags):
    """Convert a list of tags into a comma separated string for the trace
    callback log."""
    # The first element is a placeholder (COM arrays are 1-based), skip it.
    return ','.join('%s' % tag for tag in tags[1:])
def exceptional(func, alt_return=None, alt_exceptions=(Exception,), final=None, catch=None):
    """Wrap *func* so the exceptions in *alt_exceptions* become
    *alt_return* instead of propagating.

    Any other exception is handed to *catch* (called with
    ``sys.exc_info()`` and a retry thunk) when given, otherwise
    re-raised.  *final*, when given, runs unconditionally afterwards.
    """
    def wrapper(*args, **kwargs):
        try:
            try:
                return func(*args, **kwargs)
            except alt_exceptions:
                return alt_return
            except:
                # Unexpected exception: delegate or re-raise as-is.
                if catch:
                    return catch(sys.exc_info(), lambda: func(*args, **kwargs))
                raise
        finally:
            if final:
                final()
    return wrapper
def get_sessions(host=None, port=7766):
    """Return the client sessions known to an OpenOPC Gateway Service."""
    import Pyro.core
    if host is None:
        host = 'localhost'
    Pyro.core.initClient(banner=0)
    gateway = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port))
    return gateway.get_clients()
def close_session(guid, host=None, port=7766):
    """Force-close the gateway session identified by *guid*."""
    import Pyro.core
    if host is None:
        host = 'localhost'
    Pyro.core.initClient(banner=0)
    gateway = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port))
    return gateway.force_close(guid)
def open_client(host='localhost', port=7766):
    """Connect to the specified OpenOPC Gateway Service and return a new
    remote client object."""
    import Pyro.core
    Pyro.core.initClient(banner=0)
    gateway = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port))
    return gateway.create_client()
class TimeoutError(Exception):
    """Raised when an asynchronous OPC callback does not arrive in time."""

    def __init__(self, txt):
        super(TimeoutError, self).__init__(txt)
class OPCError(Exception):
    """Generic error raised for failures reported by the OPC layer."""

    def __init__(self, txt):
        super(OPCError, self).__init__(txt)
class GroupEvents:
    """win32com event sink receiving OPC group data-change callbacks."""
    def __init__(self):
        # win32com.client.WithEvents instantiates this class itself, so the
        # owning client is handed over via the module-level `current_client`
        # global (set just before WithEvents is called in client.iread).
        self.client = current_client
    def OnDataChange(self, TransactionID, NumItems, ClientHandles, ItemValues, Qualities, TimeStamps):
        # Forward the callback payload to the owning client's queue; iread()
        # polls this queue while pumping COM messages.
        self.client.callback_queue.put((TransactionID, ClientHandles, ItemValues, Qualities, TimeStamps))
class client():
def __init__(self, opc_class=None, client_name=None):
    """Instantiate OPC automation class.

    opc_class   -- semicolon separated list of COM ProgIDs to try in order
                   (defaults to $OPC_CLASS or the module OPC_CLASS list)
    client_name -- name reported to the OPC server on connect
    """
    # Queue fed by GroupEvents.OnDataChange for async reads.
    self.callback_queue = Queue.Queue()

    pythoncom.CoInitialize()

    if opc_class == None:
        if os.environ.has_key('OPC_CLASS'):
            opc_class = os.environ['OPC_CLASS']
        else:
            opc_class = OPC_CLASS

    # Try each candidate ProgID until one dispatches; only raise after
    # the last one has failed.
    opc_class_list = opc_class.split(';')
    for i,c in enumerate(opc_class_list):
        try:
            self._opc = win32com.client.gencache.EnsureDispatch(c, 0)
            self.opc_class = c
            break
        except pythoncom.com_error, err:
            if i == len(opc_class_list)-1:
                error_msg = 'Dispatch: %s' % self._get_error_str(err)
                raise OPCError, error_msg

    self._event = win32event.CreateEvent(None,0,0,None)

    self.opc_server = None
    self.opc_host = None
    self.client_name = client_name

    # Per-group bookkeeping, all keyed by sub-group name (see iread):
    # group names, validated tags, COM server handles, and the
    # client-handle -> tag map plus event hooks.
    self._groups = {}
    self._group_tags = {}
    self._group_valid_tags = {}
    self._group_server_handles = {}
    self._group_handles_tag = {}
    self._group_hooks = {}

    # State used when this client is served through the OpenOPC gateway
    # (presumably set by the gateway service — not populated here).
    self._open_serv = None
    self._open_self = None
    self._open_host = None
    self._open_port = None
    self._open_guid = None
    self._prev_serv_time = None

    # Transaction id for AsyncRefresh round-trips.
    self._tx_id = 0
    self.trace = None
    self.cpu = None
def set_trace(self, trace):
    # Install a callback invoked with a short description of every OPC
    # call.  Ignored when _open_serv is set (i.e. presumably when this
    # client is being served through the gateway — confirm).
    if self._open_serv == None:
        self.trace = trace
def connect(self, opc_server=None, opc_host='localhost'):
    """Connect to the specified OPC server.

    With no arguments, the first call tries $OPC_SERVER (or the module
    OPC_SERVER candidate list); later calls reconnect to the previously
    used server/host.
    """
    pythoncom.CoInitialize()

    if opc_server == None:
        # Initial connect using environment vars
        if self.opc_server == None:
            if os.environ.has_key('OPC_SERVER'):
                opc_server = os.environ['OPC_SERVER']
            else:
                opc_server = OPC_SERVER
        # Reconnect using previous server name
        else:
            opc_server = self.opc_server
            opc_host = self.opc_host

    # Try each candidate server name; a failure only raises when there
    # was exactly one candidate, otherwise the next one is tried.
    opc_server_list = opc_server.split(';')
    connected = False

    for s in opc_server_list:
        try:
            if self.trace: self.trace('Connect(%s,%s)' % (s, opc_host))
            self._opc.Connect(s, opc_host)
        except pythoncom.com_error, err:
            if len(opc_server_list) == 1:
                error_msg = 'Connect: %s' % self._get_error_str(err)
                raise OPCError, error_msg
        else:
            # Set client name since some OPC servers use it for security
            try:
                if self.client_name == None:
                    if os.environ.has_key('OPC_CLIENT'):
                        self._opc.ClientName = os.environ['OPC_CLIENT']
                    else:
                        self._opc.ClientName = OPC_CLIENT
                else:
                    self._opc.ClientName = self.client_name
            except:
                pass
            connected = True
            break

    if not connected:
        raise OPCError, 'Connect: Cannot connect to any of the servers in the OPC_SERVER list'

    # With some OPC servers, the next OPC call immediately after Connect()
    # will occationally fail.  Sleeping for 1/100 second seems to fix this.
    time.sleep(0.01)

    self.opc_server = opc_server
    if opc_host == 'localhost':
        opc_host = socket.gethostname()
    self.opc_host = opc_host

    # On reconnect we need to remove the old group names from OpenOPC's internal
    # cache since they are now invalid
    self._groups = {}
    self._group_tags = {}
    self._group_valid_tags = {}
    self._group_server_handles = {}
    self._group_handles_tag = {}
    self._group_hooks = {}
def close(self, del_object=True):
    """Disconnect from the currently connected OPC server.

    del_object -- also release this client from the open gateway service
                  when it was created through one.
    """
    try:
        # Drop all OPC groups first; OPCError from remove() is tolerated.
        self.remove(self.groups())

    except pythoncom.com_error, err:
        error_msg = 'Disconnect: %s' % self._get_error_str(err)
        raise OPCError, error_msg

    except OPCError:
        pass

    finally:
        # Always disconnect, even when group removal failed.
        if self.trace: self.trace('Disconnect()')
        self._opc.Disconnect()

        # Remove this object from the open gateway service
        if self._open_serv and del_object:
            self._open_serv.release_client(self._open_self)
def iread(self, tags=None, group=None, size=None, pause=0, source='hybrid', update=-1, timeout=5000, sync=False, include_error=False, rebuild=False):
    """Iterable version of read().

    Yields (value, quality, timestamp[, error]) for a single tag, or
    (tag, value, quality, timestamp[, error]) per tag for a list.

    tags          -- tag name or list of tag names
    group         -- named group to create/reuse (None = anonymous group)
    size          -- break the tags into sub-groups of this many items
    pause         -- ms delay between sub-group reads
    source        -- 'cache', 'device' or 'hybrid' (device on first read)
    update        -- group update rate passed to the server
    timeout       -- ms to wait for the async callback
    sync          -- use SyncRead instead of the async refresh/callback path
    include_error -- append the server's error string to each result
                     (forces sync mode)
    rebuild       -- re-validate/add/remove items of an existing group
    """

    def add_items(tags):
        # Validate tags, assign fresh client handles, AddItems them to
        # the current group and record the returned server handles.
        # COM arrays are 1-based, hence the 0 placeholders inserted and
        # popped around the calls.
        names = list(tags)

        names.insert(0,0)
        errors = []

        if self.trace: self.trace('Validate(%s)' % tags2trace(names))

        try:
            errors = opc_items.Validate(len(names)-1, names)
        except:
            pass

        valid_tags = []
        valid_values = []
        client_handles = []

        # Continue numbering client handles after the highest one used
        # so far for this sub-group.
        if not self._group_handles_tag.has_key(sub_group):
            self._group_handles_tag[sub_group] = {}
            n = 0
        elif len(self._group_handles_tag[sub_group]) > 0:
            n = max(self._group_handles_tag[sub_group]) + 1
        else:
            n = 0

        for i, tag in enumerate(tags):
            if errors[i] == 0:
                valid_tags.append(tag)
                client_handles.append(n)
                self._group_handles_tag[sub_group][n] = tag
                n += 1
            elif include_error:
                error_msgs[tag] = self._opc.GetErrorString(errors[i])

            if self.trace and errors[i] != 0: self.trace('%s failed validation' % tag)

        client_handles.insert(0,0)
        valid_tags.insert(0,0)
        server_handles = []
        errors = []

        if self.trace: self.trace('AddItems(%s)' % tags2trace(valid_tags))

        try:
            server_handles, errors = opc_items.AddItems(len(client_handles)-1, valid_tags, client_handles)
        except:
            pass

        valid_tags_tmp = []
        server_handles_tmp = []
        valid_tags.pop(0)

        if not self._group_server_handles.has_key(sub_group):
            self._group_server_handles[sub_group] = {}

        # Keep only the tags the server actually accepted.
        for i, tag in enumerate(valid_tags):
            if errors[i] == 0:
                valid_tags_tmp.append(tag)
                server_handles_tmp.append(server_handles[i])
                self._group_server_handles[sub_group][tag] = server_handles[i]
            elif include_error:
                error_msgs[tag] = self._opc.GetErrorString(errors[i])

        valid_tags = valid_tags_tmp
        server_handles = server_handles_tmp

        return valid_tags, server_handles

    def remove_items(tags):
        # Remove items from the current group by their server handles.
        if self.trace: self.trace('RemoveItems(%s)' % tags2trace(['']+tags))
        server_handles = [self._group_server_handles[sub_group][tag] for tag in tags]
        server_handles.insert(0,0)
        errors = []

        try:
            errors = opc_items.Remove(len(server_handles)-1, server_handles)
        except pythoncom.com_error, err:
            error_msg = 'RemoveItems: %s' % self._get_error_str(err)
            raise OPCError, error_msg

    try:
        # Error strings are only available through the sync path.
        if include_error:
            sync = True

        if sync:
            update = -1

        tags, single, valid = type_check(tags)
        if not valid:
            raise TypeError, "iread(): 'tags' parameter must be a string or a list of strings"

        # Group exists
        if self._groups.has_key(group) and not rebuild:
            num_groups = self._groups[group]
            data_source = SOURCE_CACHE
        # Group non-existant
        else:
            if size:
                # Break-up tags into groups of 'size' tags
                tag_groups = [tags[i:i+size] for i in range(0, len(tags), size)]
            else:
                tag_groups = [tags]

            num_groups = len(tag_groups)
            data_source = SOURCE_DEVICE

        # NOTE(review): `results` is never used below — historic leftover.
        results = []

        for gid in range(num_groups):
            if gid > 0 and pause > 0: time.sleep(pause/1000.0)

            error_msgs = {}
            opc_groups = self._opc.OPCGroups
            opc_groups.DefaultGroupUpdateRate = update

            # Anonymous group
            if group == None:
                try:
                    if self.trace: self.trace('AddGroup()')
                    opc_group = opc_groups.Add()
                except pythoncom.com_error, err:
                    error_msg = 'AddGroup: %s' % self._get_error_str(err)
                    raise OPCError, error_msg
                sub_group = group
                new_group = True
            else:
                sub_group = '%s.%d' % (group, gid)

                # Existing named group
                try:
                    if self.trace: self.trace('GetOPCGroup(%s)' % sub_group)
                    opc_group = opc_groups.GetOPCGroup(sub_group)
                    new_group = False

                # New named group
                except:
                    try:
                        if self.trace: self.trace('AddGroup(%s)' % sub_group)
                        opc_group = opc_groups.Add(sub_group)
                    except pythoncom.com_error, err:
                        error_msg = 'AddGroup: %s' % self._get_error_str(err)
                        raise OPCError, error_msg
                    self._groups[str(group)] = len(tag_groups)
                    new_group = True

            opc_items = opc_group.OPCItems

            if new_group:
                opc_group.IsSubscribed = 1
                opc_group.IsActive = 1
                if not sync:
                    if self.trace: self.trace('WithEvents(%s)' % opc_group.Name)
                    # GroupEvents reads this global to find its owner.
                    global current_client
                    current_client = self
                    self._group_hooks[opc_group.Name] = win32com.client.WithEvents(opc_group, GroupEvents)

                tags = tag_groups[gid]

                valid_tags, server_handles = add_items(tags)

                self._group_tags[sub_group] = tags
                self._group_valid_tags[sub_group] = valid_tags

            # Rebuild existing group
            elif rebuild:
                tags = tag_groups[gid]

                valid_tags = self._group_valid_tags[sub_group]
                add_tags = [t for t in tags if t not in valid_tags]
                del_tags = [t for t in valid_tags if t not in tags]

                if len(add_tags) > 0:
                    valid_tags, server_handles = add_items(add_tags)
                    valid_tags = self._group_valid_tags[sub_group] + valid_tags

                if len(del_tags) > 0:
                    remove_items(del_tags)
                    valid_tags = [t for t in valid_tags if t not in del_tags]

                self._group_tags[sub_group] = tags
                self._group_valid_tags[sub_group] = valid_tags

                if source == 'hybrid': data_source = SOURCE_DEVICE

            # Existing group
            else:
                tags = self._group_tags[sub_group]
                valid_tags = self._group_valid_tags[sub_group]
                if sync:
                    server_handles = [item.ServerHandle for item in opc_items]

            tag_value = {}
            tag_quality = {}
            tag_time = {}
            tag_error = {}

            # Sync Read
            if sync:
                values = []
                errors = []
                qualities = []
                timestamps= []

                if len(valid_tags) > 0:
                    server_handles.insert(0,0)

                    if source != 'hybrid':
                        data_source = SOURCE_CACHE if source == 'cache' else SOURCE_DEVICE

                    if self.trace: self.trace('SyncRead(%s)' % data_source)

                    try:
                        values, errors, qualities, timestamps = opc_group.SyncRead(data_source, len(server_handles)-1, server_handles)
                    except pythoncom.com_error, err:
                        error_msg = 'SyncRead: %s' % self._get_error_str(err)
                        raise OPCError, error_msg

                    for i,tag in enumerate(valid_tags):
                        tag_value[tag] = values[i]
                        tag_quality[tag] = qualities[i]
                        tag_time[tag] = timestamps[i]
                        tag_error[tag] = errors[i]

            # Async Read
            else:
                if len(valid_tags) > 0:
                    # 16-bit transaction id, wraps around at 0xFFFF.
                    if self._tx_id >= 0xFFFF:
                        self._tx_id = 0
                    self._tx_id += 1

                    if source != 'hybrid':
                        data_source = SOURCE_CACHE if source == 'cache' else SOURCE_DEVICE

                    if self.trace: self.trace('AsyncRefresh(%s)' % data_source)

                    try:
                        opc_group.AsyncRefresh(data_source, self._tx_id)
                    except pythoncom.com_error, err:
                        error_msg = 'AsyncRefresh: %s' % self._get_error_str(err)
                        raise OPCError, error_msg

                    # Pump COM messages until our transaction's callback
                    # lands in the queue or the timeout expires.
                    tx_id = 0
                    start = time.time() * 1000

                    while tx_id != self._tx_id:
                        now = time.time() * 1000
                        if now - start > timeout:
                            raise TimeoutError, 'Callback: Timeout waiting for data'

                        if self.callback_queue.empty():
                            pythoncom.PumpWaitingMessages()
                        else:
                            tx_id, handles, values, qualities, timestamps = self.callback_queue.get()

                    for i,h in enumerate(handles):
                        tag = self._group_handles_tag[sub_group][h]
                        tag_value[tag] = values[i]
                        tag_quality[tag] = qualities[i]
                        tag_time[tag] = timestamps[i]

            # Yield results in the order the tags were requested.
            for tag in tags:
                if tag_value.has_key(tag):
                    if (not sync and len(valid_tags) > 0) or (sync and tag_error[tag] == 0):
                        value = tag_value[tag]
                        if type(value) == pywintypes.TimeType:
                            value = str(value)
                        quality = quality_str(tag_quality[tag])
                        try:
                            timestamp = str(tag_time[tag])
                        except ValueError:
                            timestamp = None
                    else:
                        value = None
                        quality = 'Error'
                        timestamp = None
                        if include_error:
                            error_msgs[tag] = self._opc.GetErrorString(tag_error[tag]).strip('\r\n')
                else:
                    value = None
                    quality = 'Error'
                    timestamp = None
                    if include_error and not error_msgs.has_key(tag):
                        error_msgs[tag] = ''

                if single:
                    if include_error:
                        yield (value, quality, timestamp, error_msgs[tag])
                    else:
                        yield (value, quality, timestamp)
                else:
                    if include_error:
                        yield (tag, value, quality, timestamp, error_msgs[tag])
                    else:
                        yield (tag, value, quality, timestamp)

            # Anonymous groups are throw-away: unhook events and remove
            # the group once its values have been yielded.
            if group == None:
                try:
                    if not sync and self._group_hooks.has_key(opc_group.Name):
                        if self.trace: self.trace('CloseEvents(%s)' % opc_group.Name)
                        self._group_hooks[opc_group.Name].close()

                    if self.trace: self.trace('RemoveGroup(%s)' % opc_group.Name)
                    opc_groups.Remove(opc_group.Name)

                except pythoncom.com_error, err:
                    error_msg = 'RemoveGroup: %s' % self._get_error_str(err)
                    raise OPCError, error_msg

    except pythoncom.com_error, err:
        error_msg = 'read: %s' % self._get_error_str(err)
        raise OPCError, error_msg
    def read(self, tags=None, group=None, size=None, pause=0, source='hybrid', update=-1, timeout=5000, sync=False, include_error=False, rebuild=False):
        """Return list of (value, quality, time) tuples for the specified tag(s)"""
        # Normalize 'tags' to a list; 'single' remembers whether the caller
        # passed a bare string so the result can be unwrapped the same way.
        tags_list, single, valid = type_check(tags)
        if not valid:
            raise TypeError, "read(): 'tags' parameter must be a string or a list of strings"
        # Tags starting with '@' are virtual system-health tags serviced
        # locally; everything else is a real OPC item.  Mixing the two kinds
        # in one call is not supported.
        num_health_tags = len([t for t in tags_list if t[:1] == '@'])
        num_opc_tags = len([t for t in tags_list if t[:1] != '@'])
        if num_health_tags > 0:
            if num_opc_tags > 0:
                raise TypeError, "read(): system health and OPC tags cannot be included in the same group"
            results = self._read_health(tags)
        else:
            # iread() is a generator; it is materialized below.
            results = self.iread(tags, group, size, pause, source, update, timeout, sync, include_error, rebuild)
        if single:
            # Single-tag call: return the one result tuple, not a list.
            return list(results)[0]
        else:
            return list(results)
def _read_health(self, tags):
"""Return values of special system health monitoring tags"""
tags, single, valid = type_check(tags)
time_str = time.strftime('%x %H:%M:%S')
results = []
for t in tags:
if t == '@MemFree': value = SystemHealth.mem_free()
elif t == '@MemUsed': value = SystemHealth.mem_used()
elif t == '@MemTotal': value = SystemHealth.mem_total()
elif t == '@MemPercent': value = SystemHealth.mem_percent()
elif t == '@DiskFree': value = SystemHealth.disk_free()
elif t == '@SineWave': value = SystemHealth.sine_wave()
elif t == '@SawWave': value = SystemHealth.saw_wave()
elif t == '@CpuUsage':
if self.cpu == None:
self.cpu = SystemHealth.CPU()
time.sleep(0.1)
value = self.cpu.get_usage()
else:
value = None
m = re.match('@TaskMem\((.*?)\)', t)
if m:
image_name = m.group(1)
value = SystemHealth.task_mem(image_name)
m = re.match('@TaskCpu\((.*?)\)', t)
if m:
image_name = m.group(1)
value = SystemHealth.task_cpu(image_name)
m = re.match('@TaskExists\((.*?)\)', t)
if m:
image_name = m.group(1)
value = SystemHealth.task_exists(image_name)
if value == None:
quality = 'Error'
else:
quality = 'Good'
if single:
results.append((value, quality, time_str))
else:
results.append((t, value, quality, time_str))
return results
    def iwrite(self, tag_value_pairs, size=None, pause=0, include_error=False):
        """Iterable version of write()"""
        try:
            # A valid pair is a (tag, value, ...) sequence whose first
            # element is a string.
            def _valid_pair(p):
                if type(p) in (types.ListType, types.TupleType) and len(p) >= 2 and type(p[0]) in types.StringTypes:
                    return True
                else:
                    return False
            if type(tag_value_pairs) not in (types.ListType, types.TupleType):
                raise TypeError, "write(): 'tag_value_pairs' parameter must be a (tag, value) tuple or a list of (tag,value) tuples"
            # NOTE(review): this None branch is unreachable -- None fails the
            # type check above and raises before reaching this point.
            if tag_value_pairs == None:
                tag_value_pairs = ['']
                single = False
            elif type(tag_value_pairs[0]) in types.StringTypes:
                # A single (tag, value) pair was passed; wrap it in a list.
                tag_value_pairs = [tag_value_pairs]
                single = True
            else:
                single = False
            invalid_pairs = [p for p in tag_value_pairs if not _valid_pair(p)]
            if len(invalid_pairs) > 0:
                raise TypeError, "write(): 'tag_value_pairs' parameter must be a (tag, value) tuple or a list of (tag,value) tuples"
            # NOTE(review): 'names' and 'tags' are built identically.
            names = [tag[0] for tag in tag_value_pairs]
            tags = [tag[0] for tag in tag_value_pairs]
            values = [tag[1] for tag in tag_value_pairs]
            # Break-up tags & values into groups of 'size' tags
            if size:
                name_groups = [names[i:i+size] for i in range(0, len(names), size)]
                tag_groups = [tags[i:i+size] for i in range(0, len(tags), size)]
                value_groups = [values[i:i+size] for i in range(0, len(values), size)]
            else:
                name_groups = [names]
                tag_groups = [tags]
                value_groups = [values]
            num_groups = len(tag_groups)
            status = []
            for gid in range(num_groups):
                # Optional pacing between groups ('pause' is milliseconds).
                if gid > 0 and pause > 0: time.sleep(pause/1000.0)
                opc_groups = self._opc.OPCGroups
                opc_group = opc_groups.Add()
                opc_items = opc_group.OPCItems
                names = name_groups[gid]
                tags = tag_groups[gid]
                values = value_groups[gid]
                # The OPC automation arrays are 1-based, so slot 0 is a dummy.
                names.insert(0,0)
                errors = []
                try:
                    errors = opc_items.Validate(len(names)-1, names)
                except:
                    pass
                n = 1
                valid_tags = []
                valid_values = []
                client_handles = []
                error_msgs = {}
                # Keep only the tags that validated OK; assign sequential
                # client handles starting at 1.
                for i, tag in enumerate(tags):
                    if errors[i] == 0:
                        valid_tags.append(tag)
                        valid_values.append(values[i])
                        client_handles.append(n)
                        error_msgs[tag] = ''
                        n += 1
                    elif include_error:
                        error_msgs[tag] = self._opc.GetErrorString(errors[i])
                client_handles.insert(0,0)
                valid_tags.insert(0,0)
                server_handles = []
                errors = []
                try:
                    server_handles, errors = opc_items.AddItems(len(client_handles)-1, valid_tags, client_handles)
                except:
                    pass
                # Second filter pass: keep only tags the server accepted.
                valid_tags_tmp = []
                valid_values_tmp = []
                server_handles_tmp = []
                valid_tags.pop(0)
                for i, tag in enumerate(valid_tags):
                    if errors[i] == 0:
                        valid_tags_tmp.append(tag)
                        valid_values_tmp.append(valid_values[i])
                        server_handles_tmp.append(server_handles[i])
                        error_msgs[tag] = ''
                    elif include_error:
                        error_msgs[tag] = self._opc.GetErrorString(errors[i])
                valid_tags = valid_tags_tmp
                valid_values = valid_values_tmp
                server_handles = server_handles_tmp
                server_handles.insert(0,0)
                valid_values.insert(0,0)
                errors = []
                # Only write when at least one real value remains (index 0 is
                # the dummy slot).
                if len(valid_values) > 1:
                    try:
                        errors = opc_group.SyncWrite(len(server_handles)-1, server_handles, valid_values)
                    except:
                        pass
                n = 0
                # Yield one status per requested tag, in request order.
                for tag in tags:
                    if tag in valid_tags:
                        if errors[n] == 0:
                            status = 'Success'
                        else:
                            status = 'Error'
                        if include_error: error_msgs[tag] = self._opc.GetErrorString(errors[n])
                        n += 1
                    else:
                        status = 'Error'
                    # OPC servers often include newline and carriage return characters
                    # in their error message strings, so remove any found.
                    if include_error: error_msgs[tag] = error_msgs[tag].strip('\r\n')
                    if single:
                        if include_error:
                            yield (status, error_msgs[tag])
                        else:
                            yield status
                    else:
                        if include_error:
                            yield (tag, status, error_msgs[tag])
                        else:
                            yield (tag, status)
                opc_groups.Remove(opc_group.Name)
        except pythoncom.com_error, err:
            error_msg = 'write: %s' % self._get_error_str(err)
            raise OPCError, error_msg
    def write(self, tag_value_pairs, size=None, pause=0, include_error=False):
        """Write list of (tag, value) pair(s) to the server"""
        # A list/tuple whose first element is itself a list/tuple means a
        # batch of pairs was supplied; otherwise it is a single pair.
        if type(tag_value_pairs) in (types.ListType, types.TupleType) and type(tag_value_pairs[0]) in (types.ListType, types.TupleType):
            single = False
        else:
            single = True
        # iwrite() is a generator; materialize it before returning.
        status = self.iwrite(tag_value_pairs, size, pause, include_error)
        if single:
            return list(status)[0]
        else:
            return list(status)
def groups(self):
"""Return a list of active tag groups"""
return self._groups.keys()
    def remove(self, groups):
        """Remove the specified tag group(s)"""
        try:
            opc_groups = self._opc.OPCGroups
            if type(groups) in types.StringTypes:
                groups = [groups]
                single = True
            else:
                single = False
            # NOTE(review): 'single' and 'status' are computed but never used.
            status = []
            for group in groups:
                if self._groups.has_key(group):
                    # Large groups are split into numbered sub-groups named
                    # '<group>.<i>' at creation time; tear each one down.
                    for i in range(self._groups[group]):
                        sub_group = '%s.%d' % (group, i)
                        # Detach the async-callback event sink, if any.
                        if self._group_hooks.has_key(sub_group):
                            if self.trace: self.trace('CloseEvents(%s)' % sub_group)
                            self._group_hooks[sub_group].close()
                        try:
                            if self.trace: self.trace('RemoveGroup(%s)' % sub_group)
                            errors = opc_groups.Remove(sub_group)
                        except pythoncom.com_error, err:
                            error_msg = 'RemoveGroup: %s' % self._get_error_str(err)
                            raise OPCError, error_msg
                        # Drop all per-sub-group bookkeeping.
                        del(self._group_tags[sub_group])
                        del(self._group_valid_tags[sub_group])
                        del(self._group_handles_tag[sub_group])
                        del(self._group_server_handles[sub_group])
                    del(self._groups[group])
        except pythoncom.com_error, err:
            error_msg = 'remove: %s' % self._get_error_str(err)
            raise OPCError, error_msg
    def iproperties(self, tags, id=None):
        """Iterable version of properties()"""
        try:
            tags, single_tag, valid = type_check(tags)
            if not valid:
                raise TypeError, "properties(): 'tags' parameter must be a string or a list of strings"
            # A leading 0 in the id list requests the virtual "item name"
            # property; remove it and remember the request.  Any non-list id
            # (including None) lands in the except branch.
            try:
                id.remove(0)
                include_name = True
            except:
                include_name = False
            if id != None:
                descriptions= []
                if isinstance(id, list) or isinstance(id, tuple):
                    property_id = list(id)
                    single_property = False
                else:
                    property_id = [id]
                    single_property = True
                # Explicit ids get placeholder descriptions.
                for i in property_id:
                    descriptions.append('Property id %d' % i)
            else:
                single_property = False
            properties = []
            for tag in tags:
                if id == None:
                    descriptions = []
                    property_id = []
                    count, property_id, descriptions, datatypes = self._opc.QueryAvailableProperties(tag)
                    # Remove bogus negative property id (not sure why this sometimes happens)
                    # NOTE(review): map(None, a, b) is Python 2 only (acts
                    # like zip here); this file is Python 2 code.
                    tag_properties = map(None, property_id, descriptions)
                    property_id = [p for p, d in tag_properties if p > 0]
                    descriptions = [d for p, d in tag_properties if p > 0]
                # COM automation arrays are 1-based: prepend a dummy slot 0,
                # then strip it again after the call.
                property_id.insert(0, 0)
                values = []
                errors = []
                values, errors = self._opc.GetItemProperties(tag, len(property_id)-1, property_id)
                property_id.pop(0)
                # COM time values are not generally useful; stringify them.
                values = [str(v) if type(v) == pywintypes.TimeType else v for v in values]
                # Replace variant id with type strings
                try:
                    i = property_id.index(1)
                    values[i] = vt[values[i]]
                except:
                    pass
                # Replace quality bits with quality strings
                try:
                    i = property_id.index(3)
                    values[i] = quality_str(values[i])
                except:
                    pass
                # Replace access rights bits with strings
                try:
                    i = property_id.index(5)
                    values[i] = ACCESS_RIGHTS[values[i]]
                except:
                    pass
                if id != None:
                    if single_property:
                        if single_tag:
                            tag_properties = values
                        else:
                            tag_properties = [values]
                    else:
                        tag_properties = map(None, property_id, values)
                else:
                    tag_properties = map(None, property_id, descriptions, values)
                    tag_properties.insert(0, (0, 'Item ID (virtual property)', tag))
                if include_name: tag_properties.insert(0, (0, tag))
                # For multi-tag queries, prefix each tuple with the tag name.
                if not single_tag: tag_properties = [tuple([tag] + list(p)) for p in tag_properties]
                for p in tag_properties: yield p
        except pythoncom.com_error, err:
            error_msg = 'properties: %s' % self._get_error_str(err)
            raise OPCError, error_msg
    def properties(self, tags, id=None):
        """Return list of property tuples (id, name, value) for the specified tag(s) """
        # Only unwrap to a single scalar when both a single tag and a single
        # property id were requested.
        if type(tags) not in (types.ListType, types.TupleType) and type(id) not in (types.NoneType, types.ListType, types.TupleType):
            single = True
        else:
            single = False
        # iproperties() is a generator; materialize it before returning.
        props = self.iproperties(tags, id)
        if single:
            return list(props)[0]
        else:
            return list(props)
    def ilist(self, paths='*', recursive=False, flat=False, include_type=False):
        """Iterable version of list()"""
        try:
            try:
                browser = self._opc.CreateBrowser()
            # For OPC servers that don't support browsing
            except:
                return
            paths, single, valid = type_check(paths)
            if not valid:
                raise TypeError, "list(): 'paths' parameter must be a string or a list of strings"
            if len(paths) == 0: paths = ['*']
            # Set of nodes already yielded (used for de-duplication).
            nodes = {}
            for path in paths:
                if flat:
                    # Flat mode: match leaf item ids across the whole space.
                    browser.MoveToRoot()
                    browser.Filter = ''
                    browser.ShowLeafs(True)
                    pattern = re.compile('^%s$' % wild2regex(path) , re.IGNORECASE)
                    matches = filter(pattern.search, browser)
                    # NOTE(review): 'node_type' is not assigned on this branch
                    # -- flat=True with include_type=True would raise a
                    # NameError on the first path; confirm intended behavior.
                    if include_type: matches = [(x, node_type) for x in matches]
                    for node in matches: yield node
                    continue
                # Breadth-first walk of the namespace starting at 'path'.
                queue = []
                queue.append(path)
                while len(queue) > 0:
                    tag = queue.pop(0)
                    browser.MoveToRoot()
                    browser.Filter = ''
                    pattern = None
                    path_str = '/'
                    # Both '.' and '/' act as path separators.
                    path_list = tag.replace('.','/').split('/')
                    path_list = [p for p in path_list if len(p) > 0]
                    found_filter = False
                    path_postfix = '/'
                    for i, p in enumerate(path_list):
                        if found_filter:
                            # Everything after the first wildcard segment is
                            # re-appended when queueing child branches.
                            path_postfix += p + '/'
                        elif p.find('*') >= 0:
                            pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
                            found_filter = True
                        elif len(p) != 0:
                            pattern = re.compile('^.*$')
                            browser.ShowBranches()
                            # Branch node, so move down
                            if len(browser) > 0:
                                try:
                                    browser.MoveDown(p)
                                    path_str += p + '/'
                                except:
                                    if i < len(path_list)-1: return
                                    pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
                            # Leaf node, so append all remaining path parts together
                            # to form a single search expression
                            else:
                                p = string.join(path_list[i:], '.')
                                pattern = re.compile('^%s$' % wild2regex(p) , re.IGNORECASE)
                                break
                    browser.ShowBranches()
                    if len(browser) == 0:
                        browser.ShowLeafs(False)
                        lowest_level = True
                        node_type = 'Leaf'
                    else:
                        lowest_level = False
                        node_type = 'Branch'
                    matches = filter(pattern.search, browser)
                    if not lowest_level and recursive:
                        # Queue matching branches for further descent.
                        queue += [path_str + x + path_postfix for x in matches]
                    else:
                        # Resolve leaf names to fully-qualified item ids.
                        if lowest_level: matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
                        if include_type: matches = [(x, node_type) for x in matches]
                        for node in matches:
                            if not nodes.has_key(node): yield node
                            nodes[node] = True
        except pythoncom.com_error, err:
            error_msg = 'list: %s' % self._get_error_str(err)
            raise OPCError, error_msg
def list(self, paths='*', recursive=False, flat=False, include_type=False):
"""Return list of item nodes at specified path(s) (tree browser)"""
nodes = self.ilist(paths, recursive, flat, include_type)
return list(nodes)
    def servers(self, opc_host='localhost'):
        """Return list of available OPC servers"""
        try:
            # Ensure COM is initialized for the calling thread.
            pythoncom.CoInitialize()
            servers = self._opc.GetOPCServers(opc_host)
            # The COM call can return trailing None entries; drop them.
            servers = [s for s in servers if s != None]
            return servers
        except pythoncom.com_error, err:
            error_msg = 'servers: %s' % self._get_error_str(err)
            raise OPCError, error_msg
    def info(self):
        """Return list of (name, value) pairs about the OPC server"""
        try:
            info_list = []
            # _open_serv is set when connected through an OpenOPC gateway
            # rather than directly over DCOM.
            if self._open_serv:
                mode = 'OpenOPC'
            else:
                mode = 'DCOM'
            info_list += [('Protocol', mode)]
            if mode == 'OpenOPC':
                info_list += [('Gateway Host', '%s:%s' % (self._open_host, self._open_port))]
                info_list += [('Gateway Version', '%s' % __version__)]
            info_list += [('Class', self.opc_class)]
            info_list += [('Client Name', self._opc.ClientName)]
            info_list += [('OPC Host', self.opc_host)]
            info_list += [('OPC Server', self._opc.ServerName)]
            info_list += [('State', OPC_STATUS[self._opc.ServerState])]
            info_list += [('Version', '%d.%d (Build %d)' % (self._opc.MajorVersion, self._opc.MinorVersion, self._opc.BuildNumber))]
            # Browsing is optional in the OPC DA spec; report support.
            try:
                browser = self._opc.CreateBrowser()
                browser_type = BROWSER_TYPE[browser.Organization]
            except:
                browser_type = 'Not Supported'
            info_list += [('Browser', browser_type)]
            info_list += [('Start Time', str(self._opc.StartTime))]
            info_list += [('Current Time', str(self._opc.CurrentTime))]
            info_list += [('Vendor', self._opc.VendorInfo)]
            return info_list
        except pythoncom.com_error, err:
            error_msg = 'info: %s' % self._get_error_str(err)
            raise OPCError, error_msg
def ping(self):
"""Check if we are still talking to the OPC server"""
try:
# Convert OPC server time to milliseconds
opc_serv_time = int(float(self._opc.CurrentTime) * 1000000.0)
if opc_serv_time == self._prev_serv_time:
return False
else:
self._prev_serv_time = opc_serv_time
return True
except pythoncom.com_error:
return False
    def _get_error_str(self, err):
        """Return the error string for a OPC or COM error code"""
        # pythoncom.com_error unpacks to (hresult, message, exc_info, arg_err).
        hr, msg, exc, arg = err
        if exc == None:
            error_str = str(msg)
        else:
            # exc_info[5] is the server-specific status code (scode).
            scode = exc[5]
            try:
                opc_err_str = unicode(self._opc.GetErrorString(scode)).strip('\r\n')
            except:
                opc_err_str = None
            try:
                com_err_str = unicode(pythoncom.GetScodeString(scode)).strip('\r\n')
            except:
                com_err_str = None
            # OPC error codes and COM error codes are overlapping concepts,
            # so we combine them together into a single error message.
            if opc_err_str == None and com_err_str == None:
                error_str = str(scode)
            elif opc_err_str == com_err_str:
                error_str = opc_err_str
            elif opc_err_str == None:
                error_str = com_err_str
            elif com_err_str == None:
                error_str = opc_err_str
            else:
                error_str = '%s (%s)' % (opc_err_str, com_err_str)
        return error_str
def __getitem__(self, key):
"""Read single item (tag as dictionary key)"""
value, quality, time = self.read(key)
return value
def __setitem__(self, key, value):
"""Write single item (tag as dictionary key)"""
self.write((key, value))
return
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import re
from collections import defaultdict
from skbio._base import SkbioObject
from skbio.sequence import Protein, InvalidCodonError, GeneticCodeInitError
# py3k compatibility
try:
    # Python 2: maketrans lives in the string module.
    from string import maketrans
except ImportError:
    # Python 3: the equivalent is a staticmethod on str.
    maketrans = str.maketrans
# Watson-Crick complement table for unambiguous uppercase DNA
# (T<->A, C<->G), used by _simple_rc below.
_dna_trans = maketrans('TCAG', 'AGTC')
def _simple_rc(seq):
    """Reverse-complement `seq` (unambiguous uppercase DNA only)."""
    # Complementing and reversing commute, since translate() is per-char.
    complemented = seq.translate(_dna_trans)
    return complemented[::-1]
class GeneticCode(SkbioObject):
    """Class to hold codon to amino acid mapping, and vice versa.
    Attributes
    ----------
    code_sequence
    id
    name
    start_codon_sequence
    start_codons
    codons
    synonyms
    sense_codons
    anticodons
    blocks
    Parameters
    ----------
    code_sequence : str
        64-character string containing NCBI representation.
    id : str, optional
        identifier for the object.
    name : str, optional
        name for the object.
    start_codon_sequence : str, optional
        starting point for the codon sequence.
    Returns
    -------
    GeneticCode
        initialized ``GeneticCode`` object.
    Raises
    ------
    GeneticCodeInitError
        If the length of `code_sequence` is different from `64`.
    Methods
    -------
    changes
    get_stop_indices
    is_start
    is_stop
    translate_six_frames
    translate
    __repr__
    __getitem__
    __str__
    __eq__
    Examples
    --------
    >>> from skbio.sequence import GeneticCode
    >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSR'
    ...                   'RVVVVAAAADDEEGGGG')
    .. note:: `*` is used to denote termination as per the NCBI standard.
    Although the genetic code objects convert DNA to RNA and vice versa,
    lists of codons that they produce will be provided in DNA format.
    """
    # class data: need the bases, the list of codons in UUU -> GGG order, and
    # a mapping from positions in the list back to codons. These should be the
    # same for all GeneticCode instances, and are immutable (therefore
    # private).
    _codons = [a + b + c for a in "TCAG" for b in "TCAG" for c in "TCAG"]
    def __init__(self, code_sequence, id=None, name=None,
                 start_codon_sequence=None):
        if len(code_sequence) != 64:
            raise GeneticCodeInitError("code_sequence: %s has length %d, but "
                                       "expected 64" % (code_sequence,
                                                        len(code_sequence)))
        self.code_sequence = code_sequence
        self.id = id
        self.name = name
        self.start_codon_sequence = start_codon_sequence
        # Map codon -> start amino acid for positions marked (non '-') in
        # the NCBI start-codon string; empty dict when no string given.
        start_codons = {}
        if start_codon_sequence is not None:
            for codon, aa in zip(self._codons, start_codon_sequence):
                if aa != '-':
                    start_codons[codon] = aa
        self.start_codons = start_codons
        # codon -> amino acid lookup, positions matched in UUU -> GGG order.
        codon_lookup = {key: value for (key, value) in zip(self._codons,
                                                           code_sequence)}
        self.codons = codon_lookup
        # create synonyms for each aa
        aa_lookup = defaultdict(list)
        for codon in self._codons:
            aa = codon_lookup[codon]
            aa_lookup[aa].append(codon)
        self.synonyms = dict(aa_lookup)
        sense_codons = codon_lookup.copy()
        # create sense codons
        stop_codons = self['*']
        for c in stop_codons:
            del sense_codons[c]
        self.sense_codons = sense_codons
        # create anticodons
        ac = {}
        for aa, codons in self.synonyms.items():
            ac[aa] = [_simple_rc(element) for element in codons]
        self.anticodons = ac
    def _analyze_quartet(self, codons, aa):
        """Analyzes a quartet of codons and amino acids: returns list of lists.
        Each list contains one block, splitting at purine/pyrimidine boundary
        if necessary.
        codons should be a list of 4 codons.
        aa should be a list of 4 amino acid symbols.
        Possible states:
        - All amino acids are the same: returns list of one quartet.
        - Two groups of 2 aa: returns list of two doublets.
        - One group of 2 and 2 groups of 1: list of one doublet, 2 singles.
        - 4 groups of 1: four singles.
        Note: codon blocks like Ile in the standard code (AUU, AUC, AUA) will
        be split when they cross the R/Y boundary, so [[AUU, AUC], [AUA]]. This
        would also apply to a block like AUC AUA AUG -> [[AUC],[AUA,AUG]],
        although this latter pattern is not observed in the standard code.
        """
        # Positions 0/1 share a pyrimidine (T/C) third base, positions 2/3 a
        # purine (A/G); doublets may only form within each half.
        if aa[0] == aa[1]:
            first_doublet = True
        else:
            first_doublet = False
        if aa[2] == aa[3]:
            second_doublet = True
        else:
            second_doublet = False
        if first_doublet and second_doublet and aa[1] == aa[2]:
            return [codons]
        else:
            blocks = []
            if first_doublet:
                blocks.append(codons[:2])
            else:
                blocks.extend([[codons[0]], [codons[1]]])
            if second_doublet:
                blocks.append(codons[2:])
            else:
                blocks.extend([[codons[2]], [codons[3]]])
            return blocks
    def _get_blocks(self):
        """Returns list of lists of codon blocks in the genetic code.
        A codon block can be:
        - a quartet, if all 4 XYn codons have the same amino acid.
        - a doublet, if XYt and XYc or XYa and XYg have the same aa.
        - a singlet, otherwise.
        Returns
        -------
        list
            Returns a list of the quartets, doublets, and singlets in the order
            UUU -> GGG.
        Notes
        -----
        A doublet cannot span the purine/pyrimidine boundary, and a quartet
        cannot span the boundary between two codon blocks whose first two bases
        differ.
        """
        # Result is computed lazily and cached on the instance.
        if hasattr(self, '_blocks'):
            return self._blocks
        else:
            blocks = []
            curr_codons = []
            curr_aa = []
            for index, codon, aa in zip(range(64), self._codons,
                                        self.code_sequence):
                # we're in a new block if it's a new quartet or a different aa
                new_quartet = not index % 4
                if new_quartet and curr_codons:
                    blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
                    curr_codons = []
                    curr_aa = []
                curr_codons.append(codon)
                curr_aa.append(aa)
            # don't forget to append last block
            if curr_codons:
                blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
            self._blocks = blocks
            return self._blocks
    blocks = property(_get_blocks)
    def __str__(self):
        """Returns code_sequence that constructs the GeneticCode
        .. shownumpydoc
        """
        return self.code_sequence
    def __repr__(self):
        """Returns reconstructable representation of the GeneticCode
        .. shownumpydoc
        """
        return 'GeneticCode(%s)' % str(self)
    def __eq__(self, other):
        """ Allows two GeneticCode objects to be compared to each other.
        Two GeneticCode objects are equal if they have equal code_sequences.
        .. shownumpydoc
        """
        if not isinstance(other, GeneticCode):
            return False
        return self.code_sequence == other.code_sequence
    def __ne__(self, other):
        """Required in Py2."""
        return not self == other
    def __getitem__(self, item):
        """Returns amino acid corresponding to codon, or codons for an aa.
        Returns [] for empty list of codons, 'X' for unknown amino acid.
        .. shownumpydoc
        """
        item = str(item)
        if len(item) == 1:  # amino acid
            return self.synonyms.get(item, [])
        elif len(item) == 3:  # codon
            key = item.upper()
            # Codons are stored in the DNA alphabet; accept RNA input too.
            key = key.replace('U', 'T')
            return self.codons.get(key, 'X')
        else:
            raise InvalidCodonError("Codon or aa %s has wrong length" % item)
    def translate(self, nucleotide_sequence, start=0):
        """Translate nucleotide to protein sequence
        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated
        start : int, optional
            position to begin translation
        Returns
        -------
        ProteinSequence
            translation of nucleotide_sequence
        Notes
        -----
        ``translate`` returns the translation of the entire sequence, (i.e., of
        ``nucleotide_sequence[start:]``). It is the user's responsibility to
        trim to an open reading frame, either from the input or using the
        output, if that is desired.
        See Also
        --------
        translate_six_frames
        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.translate('AUGCAUGACUUUUGA', 1)
        <ProteinSequence: CMTF (length: 4)>
        """
        if len(nucleotide_sequence) == 0:
            return Protein('')
        if start + 1 > len(nucleotide_sequence):
            # NOTE(review): these adjacent string literals concatenate with
            # no separating space ("...end ofNucleotideSequence") -- looks
            # like a missing trailing space in the first literal.
            raise ValueError("Translation starts after end of"
                             "NucleotideSequence")
        translation = []
        # Walk complete codons only; a trailing partial codon is ignored.
        for i in range(start, len(nucleotide_sequence) - 2, 3):
            translation.append(self[nucleotide_sequence[i:i + 3]])
        translation = Protein(''.join(translation))
        return translation
    def get_stop_indices(self, nucleotide_sequence, start=0):
        """returns indexes for stop codons in the specified frame
        Parameters
        ----------
        nucleotide_sequence : str, NucleotideSequence
            sequence to be scanned for stop codons
        start : int, optional
            position where the search begins.
        Returns
        -------
        list
            indices of the stop codons.
        Examples
        --------
        >>> from skbio.sequence import GeneticCode, DNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> seq = DNA('ATGCTAACATAAA')
        >>> sgc.get_stop_indices(seq, 0)
        [9]
        """
        # Build an alternation pattern over this code's stop codons, then
        # keep only hits that are in the requested reading frame.
        stops = self['*']
        stop_pattern = '(%s)' % '|'.join(stops)
        stop_pattern = re.compile(stop_pattern)
        seq = str(nucleotide_sequence)
        found = [hit.start() for hit in stop_pattern.finditer(seq)]
        found = [index for index in found if index % 3 == start]
        return found
    def translate_six_frames(self, nucleotide_sequence):
        """Translate nucleotide to protein sequences for all six reading frames
        Parameters
        ----------
        nucleotide_sequence : NucleotideSequence
            sequence to be translated
        Returns
        -------
        list
            the six translated ProteinSequence objects
        See Also
        --------
        translate
        Examples
        --------
        >>> from skbio.sequence import GeneticCode, RNA
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> results = sgc.translate_six_frames(RNA('AUGCUAACAUAAA'))
        >>> for e in results: e
        <ProteinSequence: MLT* (length: 4)>
        <ProteinSequence: C*HK (length: 4)>
        <ProteinSequence: ANI (length: 3)>
        <ProteinSequence: FMLA (length: 4)>
        <ProteinSequence: LC*H (length: 4)>
        <ProteinSequence: YVS (length: 3)>
        """
        # Frames 0-2 of the forward strand, then frames 0-2 of the
        # reverse complement.
        rc_nucleotide_sequence = nucleotide_sequence.rc()
        results = []
        for start in range(3):
            translation = self.translate(nucleotide_sequence, start)
            results.append(translation)
        for start in range(3):
            translation = self.translate(rc_nucleotide_sequence, start)
            results.append(translation)
        return results
    def is_start(self, codon):
        """Checks if codon is a start codon
        Parameters
        ----------
        codon : str
            codon string
        Returns
        -------
        bool
            ``True`` if codon is a start codon, ``False`` otherwise.  Always
            ``False`` when the code was built without a start_codon_sequence.
        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_start('ATG')
        False
        >>> sgc.is_start('AAA')
        False
        """
        # Normalize to uppercase DNA before the lookup.
        fixed_codon = codon.upper().replace('U', 'T')
        return fixed_codon in self.start_codons
    def is_stop(self, codon):
        """Checks if codon is a stop codon
        Parameters
        ----------
        codon : str
            codon string
        Returns
        -------
        bool
            ``True`` if codon is a stop codon, ``False`` otherwise
        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
        ...                   'RRVVVVAAAADDEEGGGG')
        >>> sgc.is_stop('UAA')
        True
        >>> sgc.is_stop('AAA')
        False
        """
        return self[codon] == '*'
    def changes(self, other):
        """Returns dictionary of codons that differ
        Parameters
        ----------
        other : GeneticCode
            genetic code object
        Returns
        -------
        dict
            Returns a dictionary of the form ``{codon:'XY'}`` for codons that
            differ. X is the string representation of the amino acid in the
            object calling this method, Y is the string representation of the
            amino acid in `other`. Always returns a 2-character string.
        Examples
        --------
        >>> from skbio.sequence import GeneticCode
        >>> from pprint import pprint
        >>> sgc = GeneticCode('FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS*'
        ...                   '*VVVVAAAADDEEGGGG')
        >>> pprint(sgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
        ...                    'TNNKKSSRRVVVVAAAADDEEGGGG'))
        {'AGA': '*R', 'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'}
        """
        changes = {}
        try:
            other_code = other.code_sequence
        except AttributeError:  # try using other directly as sequence
            other_code = other
        for codon, old, new in zip(self._codons, self.code_sequence,
                                   other_code):
            if old != new:
                changes[codon] = old + new
        return changes
_ncbi_genetic_code_data = [
[
'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
1,
'Standard Nuclear',
'---M---------------M---------------M----------------------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG',
2,
'Vertebrate Mitochondrial',
'--------------------------------MMMM---------------M------------',
],
[
'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
3,
'Yeast Mitochondrial',
'----------------------------------MM----------------------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
4,
'Mold, Protozoan, and Coelenterate Mitochondrial, and Mycoplasma/'
'Spiroplasma Nuclear',
'--MM---------------M------------MMMM---------------M------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG',
5,
'Invertebrate Mitochondrial',
'---M----------------------------MMMM---------------M------------',
],
[
'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
6,
'Ciliate, Dasycladacean and Hexamita Nuclear',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
9,
'Echinoderm and Flatworm Mitochondrial',
'-----------------------------------M---------------M------------',
],
[
'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
10,
'Euplotid Nuclear',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
11,
'Bacterial Nuclear and Plant Plastid',
'---M---------------M------------MMMM---------------M------------',
],
[
'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
12,
'Alternative Yeast Nuclear',
'-------------------M---------------M----------------------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG',
13,
'Ascidian Mitochondrial',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
14,
'Alternative Flatworm Mitochondrial',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
15,
'Blepharisma Nuclear',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
16,
'Chlorophycean Mitochondrial',
'-----------------------------------M----------------------------',
],
[
'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
20,
'Trematode Mitochondrial',
'-----------------------------------M---------------M------------',
],
[
'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
22,
'Scenedesmus obliquus Mitochondrial',
'-----------------------------------M----------------------------',
],
[
'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
23,
'Thraustochytrium Mitochondrial',
],
]
def genetic_code(*id):
    """``skbio.sequence.GeneticCode`` factory given an optional id.

    Parameters
    ----------
    id : int or str optional
        Indicates the ``skbio.sequence.GeneticCode`` to return. Must be one
        of the NCBI translation-table ids present in
        ``_ncbi_genetic_code_data`` (a subset of [1, 23]). If `id` is not
        provided, the Standard Nuclear genetic code will be returned.

    Returns
    -------
    skbio.sequence.GeneticCode

    Raises
    ------
    TypeError
        If more than one positional argument is supplied.
    ValueError
        If no genetic code with the requested id exists.
    """
    # Reject wrong arity up front, before doing any other work.
    if len(id) > 1:
        raise TypeError('genetic_code takes 0 or 1 arguments (%d given)'
                        % len(id))
    # Default to NCBI table 1 (Standard Nuclear).
    key = int(id[0]) if id else 1
    for entry in _ncbi_genetic_code_data:
        if entry[1] == key:
            return GeneticCode(*entry)
    # Bug fix: the original formatted this message with the raw *args tuple
    # ('%d' % id), which only worked by accident for a 1-tuple; use the
    # resolved integer key instead.
    raise ValueError('Genetic code could not be found for %d.' % key)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script for running experiments.
Example to run locally:
python bih.py --output_dir=bih_may21 --channel=both\
--hdim=3 --num_clusters=2
The outputs will show up in the directory given by --output_dir (e.g. bih_may21).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import logging
import os
from absl import app
from absl import flags
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
import six
import sklearn
# pylint: disable=g-bad-import-order
import arma
import clustering
import lds
# Command-line configuration (absl flags). Values are read lazily by the
# functions below at call time, after app.run() has parsed argv.
FLAGS = flags.FLAGS
# Flags for IO and plotting.
flags.DEFINE_string('output_dir', None, 'Output filepath.')
flags.DEFINE_boolean(
    'load_results', False, 'Whether to skip experiments '
    'and only plot existing results from output_dir.')
flags.DEFINE_boolean(
    'plot_clusters', False, 'Whether to visualize each '
    'experiment run and plot clustering results.')
flags.DEFINE_integer('sample_size', None, 'Sample size of signals for each '
                     'clustering run.')
flags.DEFINE_boolean(
    'filter_type', False, 'Whether to select only certain '
    'types of labels according to prior work.')
flags.DEFINE_integer(
    'label_count_threshold', 0, 'Threshold for label counts, '
    'label as `other` if below the threshold.')
flags.DEFINE_integer('num_repeat', 1,
                     'Number of repeated runs for bootstrapping neg examples.')
flags.DEFINE_integer('subsample_step_size', 1, '1 for not subsampling')
flags.DEFINE_string('channel', 'both', 'Which channel to use, both or 0 or 1.')
# Flags for hparams in clustering algorithms.
flags.DEFINE_integer('hdim', 0, 'Hidden state dimension.')
flags.DEFINE_integer('num_clusters', 0, 'Desired number of clusters.')
flags.DEFINE_integer(
    'LDS_GIBBS_num_update_samples', 100, 'Number of update '
    'samples used for fitting LDS in pylds package.')
flags.DEFINE_integer('random_seed', 0, 'Random seed.')
# Flags for whether to include certain baselines.
flags.DEFINE_boolean(
    'include_LDS_MLE', False, 'Whether to include MLE '
    'estimation for LDS in the experiments. Could be slow.')
flags.DEFINE_boolean(
    'include_tslearn', True, 'Whether to include time series '
    'clustering methods from the tslearn package in the '
    'experiments.')
flags.DEFINE_boolean(
    'include_tslearn_slow', False, 'Whether to include time '
    'series clustering methods from the tslearn package '
    'that are slow: DTW and GAK.')
flags.DEFINE_boolean('include_LDS_GIBBS', True, 'Whether to include the '
                     'Gibbs sampling method for LDS.')
flags.DEFINE_boolean('include_ARMA_MLE', False, 'Whether to include the '
                     'MLE method for ARMA.')
def _drop_nan_rows(arr):
return arr[~np.isnan(arr).any(axis=1)]
def _replace_nan_with_0(arr):
return np.where(np.isnan(arr), 0.0, arr)
def create_model_fns(hdim):
  """Util function to create model fns to fit model params to sequences.

  Args:
    hdim: Guessed hidden dimension for model fitting.

  Returns:
    A dictionary mapping method names to model_fns. Each model_fn
    takes output seq and input seq, and returns fitted model params.
  """
  model_fns = collections.OrderedDict()
  # Using raw outputs.
  # model_fns['raw_output'] = lambda s: _replace_nan_with_0(s.outputs)
  # pylint: disable=g-long-lambda
  # NOTE: the lambdas close over `hdim`; FLAGS values referenced inside a
  # lambda (e.g. LDS_GIBBS_num_update_samples) are read when the lambda runs.
  # Pure AR.
  model_fns['AR'] = lambda s: arma.fit_ar(
      _replace_nan_with_0(s.outputs), None, hdim)
  # Iterated regression without regularization and with regularization.
  model_fns['ARMA_OLS'] = lambda s: arma.fit_arma_iter(s.outputs, None, hdim)
  model_fns['ARMA'] = lambda s: arma.fit_arma_iter(
      s.outputs, None, hdim, l2_reg=0.01)
  model_fns['ARMA_roots'] = lambda s: arma.get_eig_from_arparams(
      arma.fit_arma_iter(s.outputs, None, hdim, l2_reg=0.01))
  # Optional, slower baselines are gated behind flags.
  if FLAGS.include_LDS_GIBBS:
    model_fns['LDS'] = lambda s: lds.fit_lds_gibbs(
        _replace_nan_with_0(s.outputs),
        None,
        hdim,
        num_update_samples=FLAGS.LDS_GIBBS_num_update_samples)
  if FLAGS.include_ARMA_MLE:
    model_fns['ARMA_MLE'] = lambda s: arma.fit_arma_mle(
        _replace_nan_with_0(s.outputs), None, hdim)
  if FLAGS.include_LDS_MLE:
    model_fns['LDS_MLE'] = lambda s: lds.fit_lds_mle(
        _replace_nan_with_0(s.outputs), None, hdim)
  return model_fns
def parse_csv(filename, hdim):
  """Reads ECG data from csv file.

  Each row is: key, channel ('0' or '1'), label, signal values... Rows come
  in channel pairs (channel 0 first, then channel 1); a pair is merged into
  a single sequence according to FLAGS.channel.

  Args:
    filename: Path to the csv file.
    hdim: Guessed hidden dimension (sizes the placeholder hidden states).

  Returns:
    (seqs, labels): parallel lists of lds sequences and label strings.
  """
  labels = []
  seqs = []
  # State carried from a channel-0 row until its channel-1 partner arrives.
  unprocessed_key = None
  unprocessed_label = None
  unprocessed_ch0 = None
  not_full_length = 0
  # NOTE(review): 'rb' with csv.reader is the Python 2 convention (this file
  # also uses xrange); Python 3 would need mode 'r' with newline=''.
  with open(filename, 'rb') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
      key = row[0]
      channel = row[1]
      label = row[2]
      channel_signal = np.array(row[3:]).reshape(-1, 1)
      try:
        channel_signal = channel_signal.astype(np.float32)
      except ValueError:
        # Empty cells mark a partial-length signal: keep them as NaN.
        channel_signal = np.array([float(x) if x else np.nan for x in row[3:]
                                  ]).reshape(-1, 1)
        # logging.info('Partial signal of len %d with key %s',
        #              sum(~np.isnan(channel_signal)), key)
        not_full_length += 1
      if channel == '0':
        assert unprocessed_ch0 is None
        unprocessed_ch0 = channel_signal
        unprocessed_key = key
        unprocessed_label = label
      if channel == '1':
        assert unprocessed_ch0 is not None
        seq_len = len(channel_signal)
        assert len(unprocessed_ch0) == seq_len
        if FLAGS.channel == 'both':
          vals = np.concatenate([unprocessed_ch0, channel_signal], axis=1)
        elif FLAGS.channel == '0':
          vals = unprocessed_ch0
        elif FLAGS.channel == '1':
          vals = channel_signal
        else:
          raise ValueError('Unexpected FLAGS.channel value: %s' % FLAGS.channel)
        seqs.append(
            lds.LinearDynamicalSystemSequence(
                np.zeros((seq_len, 1)), np.zeros((seq_len, hdim)), vals))
        # Both halves of the pair must agree on label and record key.
        assert label == unprocessed_label
        assert key.split(':')[:2] == unprocessed_key.split(':')[:2]
        labels.append(label)
        unprocessed_label = None
        unprocessed_key = None
        unprocessed_ch0 = None
  logging.info('Total seqs: %d, partial length seqs: %d.', len(seqs),
               not_full_length)
  if FLAGS.filter_type:
    seqs, labels = filter_type(seqs, labels)
  seqs, labels = drop_infreq_labels(seqs, labels)
  return seqs, labels
def _subsample_rows(arr, step_size):
return np.concatenate(
[arr[j:j + 1, :] for j in xrange(0, arr.shape[0], step_size)], axis=0)
def subsample(sequences, step_size=5):
  """Subsample every sequence's rows (inputs, hidden states, outputs)."""
  return [
      lds.LinearDynamicalSystemSequence(
          _subsample_rows(seq.inputs, step_size),
          _subsample_rows(seq.hidden_states, step_size),
          _subsample_rows(seq.outputs, step_size))
      for seq in sequences
  ]
def print_label_info(labels):
  """Print a LaTeX table of label frequencies, most common first."""
  vocab, counts = np.unique(labels, return_counts=True)
  table = pd.DataFrame(index=vocab, data={'count': counts})
  table = table.sort_values('count', ascending=False)
  print(table.to_latex())
def filter_type(seqs, labels):
  """Keep only examples whose label is one of the beat types from prior work.

  Fixes the py2-only `xrange` (NameError on Python 3) by pairing the two
  parallel lists with zip instead of indexing.

  Args:
    seqs: List of sequences.
    labels: List of label strings, parallel to `seqs`.

  Returns:
    Filtered (seqs, labels), order preserved and still parallel.
  """
  types = ['N', 'AFIB', 'VT', 'P', 'AFL']
  kept = [(s, l) for s, l in zip(seqs, labels) if l in types]
  seqs = [s for s, _ in kept]
  labels = [l for _, l in kept]
  return seqs, labels
def drop_infreq_labels(seqs, labels):
  """Filter out infrequent labels.

  Drops every example whose label occurs fewer than
  FLAGS.label_count_threshold times. Fixes the py2-only `xrange`
  (NameError on Python 3) by zipping the vocab/count arrays.

  Args:
    seqs: List of sequences.
    labels: List of label strings, parallel to `seqs`.

  Returns:
    Filtered (seqs, labels), still parallel.
  """
  label_vocab, label_counts = np.unique(labels, return_counts=True)
  is_dropped = {}
  for vocab_label, count in zip(label_vocab, label_counts):
    logging.info('Found label %s, with count %d.', vocab_label, count)
    is_dropped[vocab_label] = count < FLAGS.label_count_threshold
    if is_dropped[vocab_label]:
      logging.info('Dropped label %s.', vocab_label)
  seqs = [s for s, l in zip(seqs, labels) if not is_dropped[l]]
  labels = [l for l in labels if not is_dropped[l]]
  return seqs, labels
def sample_rebalance(seqs, labels):
  """Resample the data to have equal prevalence for each label.

  Draws (with replacement) FLAGS.sample_size / num_labels examples for each
  label, then shuffles. Fixes the py2-only `xrange` (NameError on Python 3)
  by selecting per-label subsets with zip/comprehensions.

  Args:
    seqs: List of sequences.
    labels: List of label strings, parallel to `seqs`.

  Returns:
    Shuffled, rebalanced (seqs, labels) lists.
  """
  label_vocab = np.unique(labels)
  n_samples_per_class = int(FLAGS.sample_size / len(label_vocab))
  sampled_seqs = []
  sampled_labels = []
  for l in label_vocab:
    l_seqs = [s for s, lab in zip(seqs, labels) if lab == l]
    l_labels = [lab for lab in labels if lab == l]
    sampled_l_seqs, sampled_l_labels = sklearn.utils.resample(
        l_seqs, l_labels, n_samples=n_samples_per_class)
    sampled_seqs.extend(sampled_l_seqs)
    sampled_labels.extend(sampled_l_labels)
  return sklearn.utils.shuffle(sampled_seqs, sampled_labels)
def get_results_bih_dataset(seqs, labels, hdim, num_clusters):
  """Returns a dataframe of clustering results on the ECG dataset.

  Args:
    seqs: List of lds.LinearDynamicalSystemSequence examples.
    labels: List of ground-truth label strings, parallel to `seqs`.
    hdim: Guessed hidden dimension for model fitting.
    num_clusters: Desired number of clusters.

  Returns:
    A pandas DataFrame of per-method clustering metrics.
  """
  label_vocab, label_counts = np.unique(labels, return_counts=True)
  logging.info('Counts of labels in current run: %s',
               str(label_vocab) + ' ' + str(label_counts))
  # Map string labels to integer ids for the clustering metrics.
  label_lookup = {l: i for i, l in enumerate(label_vocab)}
  cluster_ids = [label_lookup[l] for l in labels]
  model_fns = create_model_fns(hdim)
  padded = clustering.pad_seqs_to_matrix(seqs)
  max_seq_len = np.max([s.seq_len for s in seqs])
  # PCA baseline: fit on NaN-free rows, apply to zero-filled padded seqs.
  pca = sklearn.decomposition.PCA(n_components=hdim).fit(_drop_nan_rows(padded))
  # pylint: disable=g-long-lambda
  model_fns['PCA'] = lambda s: pca.transform(
      _replace_nan_with_0(clustering.pad_seqs_to_matrix([s], max_seq_len))
  ).flatten()
  # Get clustering results.
  results_df = clustering.get_results(
      seqs,
      num_clusters,
      cluster_ids,
      None,
      model_fns,
      include_tslearn=FLAGS.include_tslearn,
      include_slow_methods=FLAGS.include_tslearn_slow)
  logging.info(results_df)
  if FLAGS.plot_clusters:
    clustering.visualize_clusters(
        seqs, None, labels, model_fns,
        os.path.join(FLAGS.output_dir, 'visualization.png'))
  return results_df
def get_agg_stats(df):
  """Write one `<metric>_agg.csv` per metric with per-method mean and 95% CI.

  For every metric column (everything except 'method'), aggregates by
  method, attaches normal-approximation 95% confidence bounds, logs the
  formatted mean (lo-hi) strings, and saves the table to FLAGS.output_dir.
  """
  for metric in df.columns.values:
    if metric == 'method':
      continue
    stats = df.groupby(['method'])[metric].agg(['mean', 'count', 'std'])
    # 1.96 * std / sqrt(count) is the half-width of a normal 95% CI.
    half_widths = [1.96 * s / np.sqrt(c)
                   for _, c, s in stats.itertuples(index=False, name=None)]
    means = list(stats['mean'])
    stats['ci95_hi'] = [m + h for m, h in zip(means, half_widths)]
    stats['ci95_lo'] = [m - h for m, h in zip(means, half_widths)]
    stats['mean_w_ci'] = ['%.2f (%.2f-%.2f)' % (m, m - h, m + h)
                          for m, h in zip(means, half_widths)]
    logging.info(metric)
    logging.info(stats[['mean_w_ci']])
    stats.to_csv(os.path.join(FLAGS.output_dir, metric + '_agg.csv'))
def plot_results(results_df, output_dir):
  """Plots metrics and saves plots as png files.

  Draws one seaborn lineplot per metric column (x=seq_len, hue=method)
  and writes it to `<output_dir>/<metric>.png`.
  """
  for metric_name in results_df.columns:
    # seq_len and method are axes/groupers, not metrics.
    if metric_name == 'seq_len' or metric_name == 'method':
      continue
    pylab.figure()
    sns.lineplot(
        x='seq_len',
        y=metric_name,
        data=results_df,
        hue='method',
        estimator=np.mean,
        err_style='bars')
    # NOTE(review): buffers the PNG through six.StringIO and a text-mode
    # file -- that only works on Python 2 (this file also uses xrange);
    # Python 3 would need io.BytesIO and mode 'wb'. Confirm target version.
    output = six.StringIO()
    pylab.savefig(output, format='png')
    image = output.getvalue()
    with open(os.path.join(output_dir, metric_name + '.png'), 'w+') as f:
      f.write(image)
def main(unused_argv):
  """Run the MIT-BIH clustering experiments and save/aggregate results.

  Fixes the py2-only `xrange` (NameError on Python 3) in the repeat loop.
  """
  np.random.seed(0)
  if FLAGS.load_results:
    # Plot-only mode: reuse results from a previous run.
    with open(os.path.join(FLAGS.output_dir, 'results.csv'), 'r') as f:
      results_df = pd.read_csv(f, index_col=False)
    plot_results(results_df, FLAGS.output_dir)
    return
  if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)
  combined_results_list = []
  # Record the flag configuration alongside the results.
  with open(os.path.join(FLAGS.output_dir, 'flags.txt'), 'w+') as f:
    f.write(str(FLAGS.flag_values_dict()))
  seqs, labels = parse_csv('mit-bih/all_classes.csv', FLAGS.hdim)
  for _ in range(FLAGS.num_repeat):
    # NOTE(review): each repeat resamples from the previously resampled
    # set rather than the original data -- confirm this is intended.
    seqs, labels = sample_rebalance(seqs, labels)
    results_df = get_results_bih_dataset(seqs, labels, FLAGS.hdim,
                                         FLAGS.num_clusters)
    combined_results_list.append(results_df)
  results_df = pd.concat(combined_results_list)
  with open(os.path.join(FLAGS.output_dir, 'results.csv'), 'w+') as f:
    results_df.to_csv(f, index=False)
  get_agg_stats(results_df)
  # plot_results(results_df, FLAGS.output_dir)
if __name__ == '__main__':
  # output_dir has no default, so insist on it before running the app.
  flags.mark_flag_as_required('output_dir')
  app.run(main)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class MatrixDiagTest(test.TestCase):
  """Tests for array_ops.matrix_diag (vector -> diagonal matrix)."""

  @test_util.run_deprecated_v1
  def testVector(self):
    with self.session(use_gpu=True):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      v_diag = array_ops.matrix_diag(v)
      self.assertEqual((3, 3), v_diag.get_shape())
      self.assertAllEqual(v_diag.eval(), mat)

  def _testBatchVector(self, dtype):
    # Batched case: one diagonal matrix per leading batch entry.
    with self.cached_session(use_gpu=True):
      v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
      mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
                            [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
                             [0.0, 0.0, 6.0]]]).astype(dtype)
      v_batch_diag = array_ops.matrix_diag(v_batch)
      self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
      self.assertAllEqual(v_batch_diag.eval(), mat_batch)

  @test_util.run_deprecated_v1
  def testBatchVector(self):
    self._testBatchVector(np.float32)
    self._testBatchVector(np.float64)
    self._testBatchVector(np.int32)
    self._testBatchVector(np.int64)
    # BUG FIX: the np.bool alias was removed in NumPy 1.24; np.bool_ is the
    # supported spelling and behaves identically for astype here.
    self._testBatchVector(np.bool_)

  @test_util.run_deprecated_v1
  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      array_ops.matrix_diag(0)

  @test_util.run_deprecated_v1
  def testInvalidShapeAtEval(self):
    with self.session(use_gpu=True):
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 1-dim"):
        array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})

  @test_util.run_deprecated_v1
  def testGrad(self):
    # Numerically verify the gradient of matrix_diag for 1-D and 2-D input.
    shapes = ((3,), (7, 4))
    with self.session(use_gpu=True):
      for shape in shapes:
        x = constant_op.constant(np.random.rand(*shape), np.float32)
        y = array_ops.matrix_diag(x)
        error = gradient_checker.compute_gradient_error(x,
                                                        x.get_shape().as_list(),
                                                        y,
                                                        y.get_shape().as_list())
        self.assertLess(error, 1e-4)
class MatrixSetDiagTest(test.TestCase):
  """Tests for array_ops.matrix_set_diag (replace a matrix's main diagonal)."""

  @test_util.run_deprecated_v1
  def testSquare(self):
    with self.session(use_gpu=True):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
      mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                               [1.0, 1.0, 3.0]])
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag, self.evaluate(output))

  @test_util.run_deprecated_v1
  def testRectangular(self):
    # Non-square input: the diagonal has length min(rows, cols).
    with self.session(use_gpu=True):
      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
      expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((2, 3), output.get_shape())
      self.assertAllEqual(expected, self.evaluate(output))
      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
      expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((3, 2), output.get_shape())
      self.assertAllEqual(expected, self.evaluate(output))

  def _testSquareBatch(self, dtype):
    with self.cached_session(use_gpu=True):
      v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
      mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
                            [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
                             [2.0, 0.0, 6.0]]]).astype(dtype)
      mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0],
                                      [1.0, 0.0, -3.0]],
                                     [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0],
                                      [2.0, 0.0, -6.0]]]).astype(dtype)
      output = array_ops.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))

  @test_util.run_deprecated_v1
  def testSquareBatch(self):
    self._testSquareBatch(np.float32)
    self._testSquareBatch(np.float64)
    self._testSquareBatch(np.int32)
    self._testSquareBatch(np.int64)
    # BUG FIX: the np.bool alias was removed in NumPy 1.24; np.bool_ is the
    # supported spelling and behaves identically for astype here.
    self._testSquareBatch(np.bool_)

  @test_util.run_deprecated_v1
  def testRectangularBatch(self):
    with self.session(use_gpu=True):
      v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
      mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
                            [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
      mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
                                     [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
      output = array_ops.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 2, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))

  @test_util.run_deprecated_v1
  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
      array_ops.matrix_set_diag(0, [0])
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      array_ops.matrix_set_diag([[0]], 0)

  @test_util.run_deprecated_v1
  def testInvalidShapeAtEval(self):
    with self.session(use_gpu=True):
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
      with self.assertRaisesOpError(
          r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
        array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})

  @test_util.run_deprecated_v1
  def testGrad(self):
    # Gradient flows to both the matrix and the diagonal operand.
    shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
    with self.session(use_gpu=True):
      for shape in shapes:
        x = constant_op.constant(
            np.random.rand(*shape), dtype=dtypes_lib.float32)
        diag_shape = shape[:-2] + (min(shape[-2:]),)
        x_diag = constant_op.constant(
            np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
        y = array_ops.matrix_set_diag(x, x_diag)
        error_x = gradient_checker.compute_gradient_error(
            x,
            x.get_shape().as_list(), y,
            y.get_shape().as_list())
        self.assertLess(error_x, 1e-4)
        error_x_diag = gradient_checker.compute_gradient_error(
            x_diag,
            x_diag.get_shape().as_list(), y,
            y.get_shape().as_list())
        self.assertLess(error_x_diag, 1e-4)

  @test_util.run_deprecated_v1
  def testGradWithNoShapeInformation(self):
    with self.session(use_gpu=True) as sess:
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      mat = array_ops.placeholder(dtype=dtypes_lib.float32)
      grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
      output = array_ops.matrix_set_diag(mat, v)
      grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
      grad_input_val = np.random.rand(3, 3).astype(np.float32)
      grad_vals = sess.run(
          grads,
          feed_dict={
              v: 2 * np.ones(3),
              mat: np.ones((3, 3)),
              grad_input: grad_input_val
          })
      # d(output)/d(v) picks out the diagonal of the upstream gradient;
      # d(output)/d(mat) is the upstream gradient with its diagonal zeroed.
      self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
      self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
                          grad_vals[0])
class MatrixDiagPartTest(test.TestCase):
  """Tests for array_ops.matrix_diag_part (extract the main diagonal)."""

  @test_util.run_deprecated_v1
  def testSquare(self):
    with self.session(use_gpu=True):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertEqual((3,), mat_diag.get_shape())
      self.assertAllEqual(mat_diag.eval(), v)

  @test_util.run_deprecated_v1
  def testRectangular(self):
    # Non-square input: the diagonal has length min(rows, cols).
    with self.session(use_gpu=True):
      mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
      mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))

  def _testSquareBatch(self, dtype):
    with self.cached_session(use_gpu=True):
      v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
      mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
                            [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
                             [0.0, 0.0, 6.0]]]).astype(dtype)
      self.assertEqual(mat_batch.shape, (2, 3, 3))
      mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
      self.assertEqual((2, 3), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

  @test_util.run_deprecated_v1
  def testSquareBatch(self):
    self._testSquareBatch(np.float32)
    self._testSquareBatch(np.float64)
    self._testSquareBatch(np.int32)
    self._testSquareBatch(np.int64)
    # BUG FIX: the np.bool alias was removed in NumPy 1.24; np.bool_ is the
    # supported spelling and behaves identically for astype here.
    self._testSquareBatch(np.bool_)

  @test_util.run_deprecated_v1
  def testRectangularBatch(self):
    with self.session(use_gpu=True):
      v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
      mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
                            [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
      self.assertEqual(mat_batch.shape, (2, 2, 3))
      mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
      self.assertEqual((2, 2), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

  @test_util.run_deprecated_v1
  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
      array_ops.matrix_diag_part(0)

  @test_util.run_deprecated_v1
  def testInvalidShapeAtEval(self):
    with self.session(use_gpu=True):
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})

  @test_util.run_deprecated_v1
  def testGrad(self):
    shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
    with self.session(use_gpu=True):
      for shape in shapes:
        x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
        y = array_ops.matrix_diag_part(x)
        error = gradient_checker.compute_gradient_error(x,
                                                        x.get_shape().as_list(),
                                                        y,
                                                        y.get_shape().as_list())
        self.assertLess(error, 1e-4)
class DiagTest(test.TestCase):
  """Round-trip tests for array_ops.diag and array_ops.diag_part."""

  def _diagOp(self, diag, dtype, expected_ans, use_gpu):
    # Checks diag(diag) == expected_ans AND diag_part(expected_ans) == diag.
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
      out = self.evaluate(tf_ans)
      tf_ans_inv = array_ops.diag_part(expected_ans)
      inv_out = self.evaluate(tf_ans_inv)
      self.assertAllClose(out, expected_ans)
      self.assertAllClose(inv_out, diag)
      self.assertShapeEqual(expected_ans, tf_ans)
      self.assertShapeEqual(diag, tf_ans_inv)

  def diagOp(self, diag, dtype, expected_ans):
    # Run the round-trip check on both CPU and GPU.
    self._diagOp(diag, dtype, expected_ans, False)
    self._diagOp(diag, dtype, expected_ans, True)

  def testEmptyTensor(self):
    x = np.array([])
    expected_ans = np.empty([0, 0])
    self.diagOp(x, np.int32, expected_ans)

  def testRankOneIntTensor(self):
    x = np.array([1, 2, 3])
    expected_ans = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    self.diagOp(x, np.int32, expected_ans)
    self.diagOp(x, np.int64, expected_ans)

  def testRankOneFloatTensor(self):
    x = np.array([1.1, 2.2, 3.3])
    expected_ans = np.array([[1.1, 0, 0], [0, 2.2, 0], [0, 0, 3.3]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankOneComplexTensor(self):
    for dtype in [np.complex64, np.complex128]:
      x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype=dtype)
      expected_ans = np.array(
          [[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 2.2 + 2.2j, 0 + 0j],
           [0 + 0j, 0 + 0j, 3.3 + 3.3j]],
          dtype=dtype)
      self.diagOp(x, dtype, expected_ans)

  def testRankTwoIntTensor(self):
    # Rank-k input produces a rank-2k output whose shape is the input
    # shape repeated twice.
    x = np.array([[1, 2, 3], [4, 5, 6]])
    expected_ans = np.array([[[[1, 0, 0], [0, 0, 0]], [[0, 2, 0], [0, 0, 0]],
                              [[0, 0, 3], [0, 0, 0]]],
                             [[[0, 0, 0], [4, 0, 0]], [[0, 0, 0], [0, 5, 0]],
                              [[0, 0, 0], [0, 0, 6]]]])
    self.diagOp(x, np.int32, expected_ans)
    self.diagOp(x, np.int64, expected_ans)

  def testRankTwoFloatTensor(self):
    x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
    expected_ans = np.array(
        [[[[1.1, 0, 0], [0, 0, 0]], [[0, 2.2, 0], [0, 0, 0]],
          [[0, 0, 3.3], [0, 0, 0]]], [[[0, 0, 0], [4.4, 0, 0]],
                                      [[0, 0, 0], [0, 5.5, 0]], [[0, 0, 0],
                                                                 [0, 0, 6.6]]]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankTwoComplexTensor(self):
    for dtype in [np.complex64, np.complex128]:
      x = np.array(
          [[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],
           [4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]],
          dtype=dtype)
      expected_ans = np.array(
          [[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], [
              [0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]
          ], [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]], [[
              [0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]
          ], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]
             ], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],
          dtype=dtype)
      self.diagOp(x, dtype, expected_ans)

  def testRankThreeFloatTensor(self):
    x = np.array([[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]])
    expected_ans = np.array([[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
                               [[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
                              [[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
                               [[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
                             [[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
                               [[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
                              [[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
                               [[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankThreeComplexTensor(self):
    for dtype in [np.complex64, np.complex128]:
      x = np.array(
          [[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],
           [[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],
          dtype=dtype)
      expected_ans = np.array(
          [[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
              0 + 0j, 0 + 0j
          ]]], [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
              0 + 0j, 0 + 0j
          ]]]], [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
              0 + 0j, 0 + 0j
          ]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]], [[0 + 0j, 0 + 0j], [
              0 + 0j, 0 + 0j
          ]]]]], [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [
              [5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]
          ]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 6.6 + 6.6j], [
              0 + 0j, 0 + 0j
          ]]]], [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
              7.7 + 7.7j, 0 + 0j
          ]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
                [[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
          dtype=dtype)
      self.diagOp(x, dtype, expected_ans)

  def testRankFourNumberTensor(self):
    for dtype in [np.float32, np.float64, np.int64, np.int32]:
      # Input with shape [2, 1, 2, 3]
      x = np.array(
          [[[[1, 2, 3], [4, 5, 6]]], [[[7, 8, 9], [10, 11, 12]]]], dtype=dtype)
      # Output with shape [2, 1, 2, 3, 2, 1, 2, 3]
      expected_ans = np.array(
          [[[[[[[[1, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
              [[[0, 2, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
          ], [[[[0, 0, 3], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]]], [[
              [[[0, 0, 0], [4, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
          ], [[[[0, 0, 0], [0, 5, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
              [[[0, 0, 0], [0, 0, 6]]], [[[0, 0, 0], [0, 0, 0]]]
          ]]]], [[[[[[[0, 0, 0], [0, 0, 0]]], [[[7, 0, 0], [0, 0, 0]]]], [
              [[[0, 0, 0], [0, 0, 0]]], [[[0, 8, 0], [0, 0, 0]]]
          ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 9], [0, 0, 0]]]]], [[
              [[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [10, 0, 0]]]
          ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 11, 0]]]
             ], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 12]]]]]]]],
          dtype=dtype)
      self.diagOp(x, dtype, expected_ans)

  @test_util.run_deprecated_v1
  def testInvalidRank(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      array_ops.diag(0.0)
class DiagPartOpTest(test.TestCase):
  """Tests for array_ops.diag_part on even-rank tensors."""

  def setUp(self):
    # Deterministic random fixtures across test methods.
    np.random.seed(0)

  def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      tensor = ops.convert_to_tensor(tensor.astype(dtype))
      tf_ans_inv = array_ops.diag_part(tensor)
      inv_out = self.evaluate(tf_ans_inv)
      self.assertAllClose(inv_out, expected_ans)
      self.assertShapeEqual(expected_ans, tf_ans_inv)

  def diagPartOp(self, tensor, dtype, expected_ans):
    # Run the check on both CPU and GPU.
    self._diagPartOp(tensor, dtype, expected_ans, False)
    self._diagPartOp(tensor, dtype, expected_ans, True)

  def testRankTwoFloatTensor(self):
    x = np.random.rand(3, 3)
    i = np.arange(3)
    # diag_part of a rank-2k tensor selects entries x[i1..ik, i1..ik].
    expected_ans = x[i, i]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testRankFourFloatTensorUnknownShape(self):
    x = np.random.rand(3, 3)
    i = np.arange(3)
    expected_ans = x[i, i]
    # Partially-known static shapes must not break shape inference.
    for shape in None, (None, 3), (3, None):
      with self.cached_session(use_gpu=False):
        t = ops.convert_to_tensor(x.astype(np.float32))
        t.set_shape(shape)
        tf_ans = array_ops.diag_part(t)
        out = self.evaluate(tf_ans)
        self.assertAllClose(out, expected_ans)
        self.assertShapeEqual(expected_ans, tf_ans)

  def testRankFourFloatTensor(self):
    x = np.random.rand(2, 3, 2, 3)
    i = np.arange(2)[:, None]
    j = np.arange(3)
    expected_ans = x[i, j, i, j]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testRankSixFloatTensor(self):
    x = np.random.rand(2, 2, 2, 2, 2, 2)
    i = np.arange(2)[:, None, None]
    j = np.arange(2)[:, None]
    k = np.arange(2)
    expected_ans = x[i, j, k, i, j, k]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testRankEightComplexTensor(self):
    x = np.random.rand(2, 2, 2, 3, 2, 2, 2, 3)
    i = np.arange(2)[:, None, None, None]
    j = np.arange(2)[:, None, None]
    k = np.arange(2)[:, None]
    l = np.arange(3)
    expected_ans = x[i, j, k, l, i, j, k, l]
    self.diagPartOp(x, np.complex64, expected_ans)
    self.diagPartOp(x, np.complex128, expected_ans)

  @test_util.run_deprecated_v1
  def testOddRank(self):
    # diag_part requires even rank (shape must split into two equal halves).
    w = np.random.rand(2)
    x = np.random.rand(2, 2, 2)
    self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
    with self.assertRaises(ValueError):
      array_ops.diag_part(0.0)

  @test_util.run_deprecated_v1
  def testUnevenDimensions(self):
    # The first and second halves of the shape must match.
    w = np.random.rand(2, 5)
    x = np.random.rand(2, 1, 2, 3)
    self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(test.TestCase):
  """Numerical gradient checks for array_ops.diag."""

  @test_util.run_deprecated_v1
  def testDiagGrad(self):
    np.random.seed(0)
    shapes = ((3,), (3, 3), (3, 3, 3))
    dtypes = (dtypes_lib.float32, dtypes_lib.float64)
    with self.session(use_gpu=False):
      # Removed the unused `errors = []` local: every error is asserted
      # immediately, so nothing was ever read from the list.
      for shape in shapes:
        for dtype in dtypes:
          x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
          y = array_ops.diag(x1)
          error = gradient_checker.compute_gradient_error(
              x1,
              x1.get_shape().as_list(), y,
              y.get_shape().as_list())
          tf_logging.info("error = %f", error)
          self.assertLess(error, 1e-4)
class DiagGradPartOpTest(test.TestCase):
  """Numerical gradient checks for array_ops.diag_part."""

  @test_util.run_deprecated_v1
  def testDiagPartGrad(self):
    np.random.seed(0)
    shapes = ((3, 3), (3, 3, 3, 3))
    dtypes = (dtypes_lib.float32, dtypes_lib.float64)
    with self.session(use_gpu=False):
      # Removed the unused `errors = []` local: every error is asserted
      # immediately, so nothing was ever read from the list.
      for shape in shapes:
        for dtype in dtypes:
          x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
          y = array_ops.diag_part(x1)
          error = gradient_checker.compute_gradient_error(
              x1,
              x1.get_shape().as_list(), y,
              y.get_shape().as_list())
          tf_logging.info("error = %f", error)
          self.assertLess(error, 1e-4)
if __name__ == "__main__":
  # Delegate discovery and execution to the TensorFlow test runner.
  test.main()
| |
# general class for exoplanet populations
from ..imports import *
from ..telescopes import *
from ..models import *
import string
# Column sets that define the standardized population table schema.
basic_columns = [
    'name',
    'hostname',
    'ra', 'dec',
    'distance',
    'discoverer']
transit_columns = [
    'period',
    'semimajoraxis',
    'e', 'omega', 'inclination',
    'transit_epoch',
    'transit_duration',
    'transit_depth',
    'stellar_teff',
    'stellar_mass',
    'stellar_radius',
    'radius',
    'mass',
    'transit_ar',
    'transit_b']
# quantities derived from the table columns (computed, not stored)
calculated_columns = [
    'a_over_rs',
    'b',
    'insolation',
    'relative_insolation',
    'log_relative_insolation',
    'teq',
    'planet_luminosity',
    'density',
    'surface_gravity',
    'distance_modulus',
    'escape_velocity',
    'escape_parameter',
    'angular_separation',
    'imaging_contrast',
    'stellar_luminosity',
]
table_columns = basic_columns + transit_columns
attribute_columns = table_columns + calculated_columns
method_columns = ['scale_height',
                  'transmission_signal', 'transmission_snr',
                  'emission_signal', 'emission_snr',
                  'reflection_signal', 'reflection_snr',
                  'stellar_brightness',
                  'stellar_brightness_in_telescope_units',
                  'depth_uncertainty']
desired_columns = [
    'mass_uncertainty_upper',
    'mass_uncertainty_lower',
    'radius_uncertainty_upper',
    'radius_uncertainty_lower',
    'distance_uncertainty_upper',
    'distance_uncertainty_lower']
# these are keywords that can be set for a population
default_plotkw = dict(color='black',
                      alpha=1,
                      zorder=0,
                      marker='o',
                      linewidth=1,
                      respond_to_color=True,
                      respond_to_size=True,
                      exact=False,
                      label_planets=False,
                      filled=True,
                      outlined=False)
# what keywords can we set for the population plotkw?
allowed_plotkw = list(default_plotkw.keys())
allowed_plotkw += ['s',
                   'c',
                   'cmap',
                   'norm',
                   'vmin',
                   # BUG FIX: a missing comma after 'vmax' used to merge the
                   # two adjacent string literals into 'vmaxoutlined', so
                   # neither 'vmax' nor 'outlined' was actually recognized.
                   'vmax',
                   'outlined',
                   'filled']
class Population(Talker):
    '''
    Create a population from a standardized table.
    '''
    # kludge? -- default to terse logging; __init__ overwrites this
    # per-instance based on its `verbose` argument.
    _pithy = True
    def __init__(self, standard, label='unknown', verbose=False, **plotkw):
        '''
        Initialize a Population of exoplanets from a standardized table.

        Parameters
        ----------
        standard : astropy.table.Table
            A table that contains all the necessary columns.
        label : str
            A human-friendly name for this population.
        verbose : bool
            Should this population print chatty progress messages?
        **plotkw : dict
            All other keyword arguments will be stored as this
            population's default plotting keywords.
        '''
        # a standardized table with a minimum set of columns we can expect
        self.standard = Table(standard)
        # store a label for this population
        self.label = label
        # keywords to use for plotting
        self.plotkw = plotkw
        # stay quiet unless explicitly asked to be verbose
        self._pithy = verbose == False
        # define some cleaned names and hostnames, for indexing
        try:
            self.standard['tidyname']
        except KeyError:
            self.standard['tidyname'] = [clean(x).lower() for x in self.standard['name']]
        try:
            self.standard['tidyhostname']
        except KeyError:
            self.standard['tidyhostname'] = [clean(x).lower() for x in self.standard['hostname']]
        # make sure the table is searchable via names
        self.standard.add_index('tidyname')
        self.standard.add_index('tidyhostname')
def sort(self, x, reverse=False):
'''
Sort this population by some key or attribute.
'''
to_sort = getattr(self, x)
i = np.argsort(to_sort)
if reverse:
i = i[::-1]
self.standard = self.standard[i]
return self
def __add__(self, other):
'''
Create a new population by adding two together:
`bigger = this + other`
Parameters
----------
other : Population
The population to be tacked onto this one.
Returns
-------
bigger : Population
A new population, consisting of all the planets
in `this` population and some extra ones added
from `other`.
'''
# skip any warnings that pop up
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# create a new table, joining both together
table = join(self.standard,
other.standard,
join_type='outer')
# create an informative label
label = f'{self.label} + {other.label}'
# create and return the new population
return Population(table, label=label)
def remove_by_key(self, other, key='tidyname'):
'''
Create a new population by removing some rows from here:
`smaller = this - other`
Parameters
----------
other : Population
The population of planets to be removed from
`this` population to create a new `smaller` one.
Returns
-------
smaller : Population
A subset of `this` population, where some rows
have been removed.
'''
# skip any warnings that pop up
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# create a new table, joining both together
table = setdiff(self.standard,
other.standard,
keys=key)
# create an informative label
label = f'{self.label} - {other.label}'
# create and return the new population
return Population(table, label=label)
def __sub__(self, other):
'''
Create a new population by removing some rows from here:
`smaller = this - other`
Parameters
----------
other : Population
The population of planets to be removed from
`this` population to create a new `smaller` one.
Returns
-------
smaller : Population
A subset of `this` population, where some rows
have been removed.
'''
return self.remove_by_key(other)
def __getitem__(self, key):
'''
Create a subpopulation of planets by indexing, slicing, or masking.
'''
# FIXME -- maybe make it easier to pull out intermediate masks?
try:
# if the key is an index/slice/mask, return it
if self.label is None:
label = None
else:
label = f'Subset of {self.label}'
subset = Population(standard=self.standard[key],
label=label,
**self.plotkw)
# if the key is a column, raise an error
if type(key) in self.standard.colnames:
raise IndexError(f'''
You seem to be trying to access a column from this
population via `pop[{key}]`. For clarity, all `[]`
indexing is reserved for selecting subsets of the
population.
To access your particular column, please try either
`pop.{key}` or `pop.standard[{key}]` to return a
1D array of the entries in that column.
''')
except KeyError:
# use a string or a list of strings make a subset by planet name
# FIXME - maybe we should make this say more when it's making a sneaky choice for us?
try:
subset = self.create_subset_by_name(key)
except KeyError:
subset = self.create_subset_by_hostname(key)
return subset
def create_subset_by_name(self, key):
'''
Extract a subset of this population,
based on one or more planet names.
Parameters
----------
key : strings, list of strings
The name of a planet ("GJ1132b")
or a list of planet names.
(All names will be stripped of
special characters and converted
to lower case before indexing.)
Returns
-------
subset : Population
A new population containing
some subset of the original.
'''
# use a (list of) string(s) to index population by name
if isinstance(key, str):
# is it just one name?
key = clean(key).lower()
elif isinstance(key[0], str):
# is it a list of names?
key = [clean(k).lower() for k in key]
# pull out rows by planet name
subset = self.standard.loc['tidyname', key]
# create a useful label for the population
if isinstance(key, str):
label = key
elif isinstance(key[0], str):
label = '+'.join(key)
# create that new sub-population
return Population(standard=subset,
label=label,
**self.plotkw)
def create_subset_by_hostname(self, key):
'''
Extract a subset of this population,
based on one or more planet hostnames.
Parameters
----------
key : strings, list of strings
The hostname of a planet ("GJ1132")
or a list of planet hostnames.
(All names will be stripped of
special characters and converted
to lower case before indexing.)
Returns
-------
subset : Population
A new population containing
some subset of the original.
'''
# use a string or a list of strings to index the population by name
if isinstance(key, str):
# is it just one name?
key = clean(key).lower()
elif isinstance(key[0], str):
# is it a list of names?
key = [clean(k).lower() for k in key]
# pull out rows by planet name
subset = self.standard.loc['tidyhostname', key]
# create a useful label for the population
if isinstance(key, str):
label = key
elif isinstance(key[0], str):
label = '+'.join(key)
# create that new sub-population
return Population(standard=subset,
label=label,
**self.plotkw)
    def create_subset_by_position(self,
                                  coordinates,
                                  radius=1*u.arcmin,
                                  use_proper_motions=False,
                                  return_indices=False):
        '''
        Extract a subset of this population,
        by performing a spatial cross-match by
        RA and Dec. This will return all planets
        from this population that fall within
        the specified radius of at least one of
        the specified coordinates.

        Parameters
        ----------
        coordinates : astropy.coordinates.SkyCoord
            The sky coordinate (RA, Dec) or list
            of coordinates we want to search for
            nearby objects.
        radius : astropy.units.Quantity
            The angular radius around each position
            that we should include in each search.
        use_proper_motions : bool
            Should we use available proper motions,
            embedded in the skycoords, to propagate
            positions to a shared epoch before
            cross-matching? Alas, this ability
            is *not yet implemented*. FIXME!
        return_indices : bool
            Should we also return the indices
            of the original coordinates that
            were matched to existing positions?

        Returns
        -------
        subset : Population
            A new population containing a subset
            of the original, including *all* planets
            that fall within the 2D sky search space.
        '''
        if use_proper_motions:
            raise NotImplementedError('No cross-matching with proper motions yet :-(')

        # create astropy coordinates for this population
        population_coords = SkyCoord(ra=self.ra, dec=self.dec)

        # do a spatial cross match on the sky
        # (idx gives the index into coordinates,
        #  each corresponding to an entry in population_coords)
        idx, d2d, d3d = population_coords.match_to_catalog_sky(coordinates)

        # identify which systems are actually close on the sky
        match = d2d < radius

        # create new populations that are linked by spatial position
        i_match = match.nonzero()[0]
        #matched_coordinates = coordinates[idx[i_match]]
        subset = self.standard[i_match]

        # define a meaningful label
        label = f'Spatial Cross-Match ({len(coordinates)} positions, {radius} radius)'

        # create that new sub-population
        new_population = Population(standard=subset,
                                    label=label,
                                    **self.plotkw)

        # choose what to return
        if return_indices:
            # also hand back which input coordinates were matched
            i_from_original_coordinates = idx[i_match]
            return new_population, i_from_original_coordinates
        else:
            return new_population
def __getattr__(self, key):
'''
If an attribute/method isn't defined for a population,
look for it as a column of the standardized table.
For example, `population.stellar_radius` will try to
access `population.standard['stellar_radius']`.
Parameters
----------
key : str
The attribute we're trying to get.
'''
if key == 'label':
raise RuntimeError('Yikes!')
try:
# extract the column from the standardized table
try:
# try to return the array of quantities (with units)
return self.standard[key].quantity
except TypeError:
# columns without units don't have quantities
return self.standard[key].data
except KeyError:
# try to get a plotkw from this pop, from the plotting defaults, from None
try:
assert(key in allowed_plotkw)
return self.plotkw.get(key, default_plotkw[key])
except (AssertionError, KeyError):
raise AttributeError(f"""
Alas, there seems to be no way to find `.{key}`
as an attribute or propetry of {self}.
""") #AtlasError
    def __setattr__(self, key, value):
        '''
        Define what happens when we try to set an attribute via `pop.attr = x`.

        If the keyword is a pre-defined "plotting" keyword in `allowed_plotkw`,
        then we should save it in a special `plotkw` dictionary. Otherwise,
        the attribute should be set as normal.

        Parameters
        ----------
        key : str
            The attribute we're trying to set.
        value : anything
            The value we're trying to give that attribute.
        '''
        if key in allowed_plotkw:
            # store plotting keywords in a separate plotting dictionary
            # (NOTE: this assumes `self.plotkw` already exists; __init__
            # sets it early, as a normal attribute, via the else branch)
            self.plotkw[key] = value
        else:
            # otherwise, store attributes as normal for objects
            self.__dict__[key] = value
def __repr__(self):
'''
How should this object appear as a repr/str?
'''
return f'<{self.label} | population of {self.n} planets>'
    def uncertainty(self, key):
        '''
        Return an array of symmetric uncertainties on a column.

        Parameters
        ----------
        key : str
            The column for which we want errors.
        '''
        # first try for a symmetric `{key}_uncertainty` column
        try:
            return self.__getattr__(f'{key}_uncertainty')
        except (KeyError, AssertionError, AtlasError, AttributeError): # is including AttributeError a kludge?
            # this can be removed after debugging
            self.speak(f'no symmetric uncertainties found for "{key}"')

        # then try for crudely averaging asymmetric uncertainties
        try:
            lower = self.__getattr__(f'{key}_uncertainty_lower')
            upper = self.__getattr__(f'{key}_uncertainty_upper')
            avg = 0.5*(np.abs(lower) + np.abs(upper))
            return avg
        except (KeyError, AssertionError, AtlasError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no asymmetric uncertainties found for "{key}"')

        # then give up and return nans
        return np.nan*self.standard[key]
    def uncertainty_lowerupper(self, key):
        '''
        Return two arrays of lower and upper uncertainties on a column.

        Parameters
        ----------
        key : str
            The column for which we want errors.

        Returns
        -------
        lower : np.array
            The magnitude of the lower uncertainties (x_{-lower}^{+upper})
        upper : np.array
            The magnitude of the upper uncertainties (x_{-lower}^{+upper})
        '''
        # first try for actual asymmetric uncertainties
        try:
            lower = self.__getattr__(f'{key}_uncertainty_lower')
            upper = self.__getattr__(f'{key}_uncertainty_upper')
            return np.abs(lower), np.abs(upper)
        except (KeyError, AssertionError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no asymmetric uncertainties found for "{key}"')

        # then try for a symmetric `{key}_uncertainty` column
        try:
            sym = self.__getattr__(f'{key}_uncertainty')
            return np.abs(sym), np.abs(sym)
        except (KeyError, AssertionError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no symmetric uncertainties found for "{key}"')

        # then give up and return nans
        unc = np.nan*self.__getattr__(key)
        return unc, unc
def single(self, name):
'''
Create a subpopulation of a single planet.
'''
# create a subset of the standardized table
subset = self.standard.loc[name]
# create a new object, from this subset
return Population(standard=subset, label=name, **self.plotkw)
    def validate_columns(self):
        '''
        Make sure this standardized table has all the necessary columns.
        Summarize the amount of good data in each.
        '''
        N = len(self.standard)
        for k in table_columns:
            # count the non-empty entries, depending on the column type:
            # masked columns, then numeric columns, then string columns
            try:
                n = sum(self.standard[k].mask == False)
            except AttributeError:
                try:
                    n = sum(np.isfinite(self.standard[k]))
                except TypeError:
                    n = sum(self.standard[k] != '')
            self.speak(f'{k:>25} | {n:4}/{N} rows = {n/N:4.0%} are not empty')
def find(self, name):
'''
Return index of a particular planet in the population.
??? = maybe this could/should be replaced with some table cleverness?
'''
return np.array([clean(name) in clean(x) for x in self.name]).nonzero()[0]
    def update_planet(self, planet_name, **kwargs):
        '''
        Correct the properties of a particular planet,
        modifying its values in the standardized table.

        Parameters
        ----------
        planet_name : str
            The name of the planet to fix.
        **kwargs : dict
            Keyword arguments will go into modifying
            the properties of that planet.
        '''
        # find the entry to replace
        match = self.find(planet_name)

        # quietly bail out unless exactly one planet matches
        if len(match) != 1:
            self.speak(f'failed when trying to modify parameters for {planet_name}')
            return
        else:
            match = match[0]

        # loop over the keys, modifying each
        self.speak(f'for planet "{planet_name}"')
        for k, new in kwargs.items():
            old = self.standard[k][match]
            self.speak(f' {k} changed from {old} to {new}')
            self.standard[k][match] = new

            # keep the tidy index columns in sync with any name changes
            if k == 'name':
                self.standard['tidyname'][match] = clean(new).lower()
            if k == 'hostname':
                self.standard['tidyhostname'][match] = clean(new).lower()
    def removeRows(self, indices):
        '''
        (Removed.) Use `[]` slicing/masking to make sub-populations instead.
        '''
        raise NotImplementedError('''
        The `removeRows` method has been removed. Please use something like
        `population[0:42]` or `population[ok]` to use slices, indices, or masks
        to create new sub-populations that extract subsets from this one.
        ''')
    @property
    def n(self):
        '''
        How many planets are in this population?
        '''
        # simply the number of rows in the standardized table
        return len(self.standard)
    def __len__(self):
        '''
        How many planets are in this population?
        (Same count as `.n`, but enables `len(pop)`.)
        '''
        return len(self.standard)
    @property
    def semimajor_axis(self):
        '''
        Have a safe way to calculate the semimajor axis of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from NVK3L.
            Then from a/R*.
        '''
        # pull out the actual values from the table
        a = self.standard['semimajoraxis'].copy().quantity

        # try to replace bad ones with NVK3L
        # (NVK3L = Newton's version of Kepler's third law, presumably)
        bad = np.isfinite(a) == False
        self.speak(f'{sum(bad)}/{self.n} semimajoraxes are missing')

        # calculate from the period and the stellar mass
        P = self.period[bad]
        M = self.stellar_mass[bad]
        G = con.G
        a[bad] = ((G*M*P**2/4/np.pi**2)**(1/3)).to('AU')

        # replace those that are still bad with the a/R*
        stillbad = np.isfinite(a) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after NVK3L')
        # (pull from table to avoid potential for recursion)
        a_over_rs = self.standard['transit_ar'][stillbad].quantity
        rs = self.standard['stellar_radius'][stillbad].quantity
        a[stillbad] = a_over_rs*rs

        return a
@property
def angular_separation(self):
'''
Calculate the angular separation,
simply as theta = a/D
'''
a = self.semimajor_axis
D = self.distance
theta = np.arctan(a/D).to(u.arcsec)
return theta
@property
def imaging_contrast(self):
'''
What is the reflected light eclipse depth,
for an albedo of 100%?
But use a kludged radius
'''
return 0.25*(self.kludge_radius/self.semimajor_axis).decompose()**2
    @property
    def a_over_rs(self):
        '''
        Have a safe way to calculate the scaled semimajor axis of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table, mostly derived from transit.
            Then from the semimajor axis.
        '''
        # pull out the values from the table
        a_over_rs = self.standard['transit_ar'].copy()

        # try to replace bad ones with a/R*
        bad = np.isfinite(a_over_rs) == False
        self.speak(f'{sum(bad)}/{self.n} values for a/R* are missing')

        # fill in from the (possibly itself estimated) a and R*
        a = self.semimajor_axis[bad]
        R = self.stellar_radius[bad]
        a_over_rs[bad] = a/R

        stillbad = np.isfinite(a_over_rs) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after a and R*')

        return a_over_rs
    @property
    def stellar_luminosity(self):
        '''
        The bolometric luminosity of the star, from the
        Stefan-Boltzmann law: L = 4*pi*R^2*sigma*T^4.
        '''
        T = self.stellar_teff
        R = self.stellar_radius
        sigma = con.sigma_sb
        return (4*np.pi*R**2*sigma*T**4).to(u.Lsun)
    @property
    def e(self):
        '''
        The orbital eccentricity of the planets.

        FIXME -- assumes all missing eccentricities are 0!
        '''
        # pull out the actual values from the table
        e = self.standard['e'].copy().quantity

        # replace missing eccentricities with zero
        bad = np.isfinite(e) == False
        self.speak(f'{sum(bad)}/{self.n} eccentricities are missing')
        self.speak(f'assuming they are all zero')
        e[bad] = 0

        return e
    @property
    def omega(self):
        '''
        The longitude of periastron of the planets' orbits.

        (FIXME! we need better longitudes of periastron)
        '''
        # pull out the actual values from the table
        omega = self.standard['omega'].copy().quantity

        # count how many are missing
        bad = np.isfinite(omega) == False
        self.speak(f'{sum(bad)}/{self.n} longitudes of periastron are missing')

        # NOTE(review): only entries with e == 0 are filled in here
        # (omega is irrelevant for circular orbits); missing omegas
        # for eccentric orbits remain NaN
        e_zero = self.e == 0
        self.speak(f'{sum(e_zero)} have eccentricities assumed to be 0')
        omega[e_zero] = 0*u.deg

        return omega
    @property
    def b(self):
        '''
        Transit impact parameter.
        (FIXME! split this into transit and occultation)
        '''
        # pull out the actual values from the table
        b = self.standard['transit_b'].copy().quantity

        # try to estimate missing ones from the orbital geometry
        bad = np.isfinite(b) == False
        self.speak(f'{sum(bad)}/{self.n} impact parameters are missing')

        # b = (a/R*)*cos(i)*(1-e^2)/(1+e*sin(omega))
        a_over_rs = self.a_over_rs[bad]
        i = self.standard['inclination'][bad].quantity
        e = self.e[bad]
        omega = self.omega[bad]
        b[bad] = a_over_rs*np.cos(i)*((1-e**2)/(1+e*np.sin(omega)))

        # report those that are still bad
        stillbad = np.isfinite(b) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after using i')

        return b
# the 1360 W/m^2 that Earth receives from the Sun
earth_insolation = (1*u.Lsun/4/np.pi/u.AU**2).to(u.W/u.m**2)
@property
def insolation(self):
'''
The insolation the planet receives, in W/m^2.
'''
# calculate the average insolation the planet receives
insolation = self.stellar_luminosity/4/np.pi/self.semimajor_axis**2
return insolation.to(u.W/u.m**2)
    @property
    def relative_insolation(self):
        '''
        The insolation the planet receives, relative to Earth.
        '''
        # normalize by the flux Earth receives from the Sun
        return self.insolation/self.earth_insolation
    @property
    def log_relative_insolation(self):
        '''
        log10 of the insolation the planet receives, relative to Earth.
        '''
        return np.log10(self.relative_insolation)
    @property
    def relative_cumulative_xuv(self):
        '''
        A proxy for the cumulative XUV irradiation the planet
        has received, relative to Earth.

        NOTE(review): the (L/Lsun)**-0.6 factor appears to be an
        empirical scaling for XUV history -- confirm its source
        before relying on it quantitatively.
        '''
        xuv_proxy = (self.stellar_luminosity/u.Lsun)**-0.6
        return self.relative_insolation*xuv_proxy
@property
def teq(self):
'''
The equilibrium temperature of the planet.
'''
f = self.insolation
sigma = con.sigma_sb
A = 1
return ((f*A/4/sigma)**(1/4)).to(u.K)
@property
def planet_luminosity(self):
'''
The bolometric luminosity of the planet (assuming zero albedo).
'''
return (self.teq**4*con.sigma_sb*4*np.pi*self.radius**2).to(u.W)
    @property
    def transit_depth(self):
        '''
        The depth of the transit
        (FIXME, clarify if this is 1.5-3.5 or what)
        '''
        # pull out the actual values from the table
        d = self.standard['transit_depth'].copy().quantity

        # try to estimate the missing ones as (Rp/Rs)^2
        bad = np.isfinite(d) == False
        self.speak(f'{sum(bad)}/{self.n} transit depths are missing')

        Rp = self.radius[bad]
        Rs = self.stellar_radius[bad]
        d[bad] = (Rp/Rs).decompose()**2

        # report those that are still bad
        stillbad = np.isfinite(d) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after Rp/Rs')

        return d
    @property
    def transit_duration(self):
        '''
        The duration of the transit
        (FIXME, clarify if this is 1.5-3.5 or what)
        '''
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            # pull out the actual values from the table
            d = self.standard['transit_duration'].copy().quantity

            # try to estimate missing durations from the geometry
            bad = np.isfinite(d) == False
            self.speak(f'{sum(bad)}/{self.n} transit durations are missing')

            # T0 = P/(pi*a/R*) would be the duration of a central transit
            P = self.period[bad]
            a_over_rs = self.a_over_rs[bad]
            b = self.b[bad]

            T0 = P/np.pi/a_over_rs
            # shorten for a non-zero impact parameter
            T = T0*np.sqrt(1-b**2)

            # correct for orbital eccentricity
            e = self.e[bad]
            omega = self.omega[bad]
            factor = np.sqrt(1 - e**2)/(1 + e*np.sin(omega))

            d[bad] = (T*factor).to(u.day)

            # report those that are still bad
            stillbad = np.isfinite(d) == False
            self.speak(f'{sum(stillbad)}/{self.n} are still missing after P, a/R*, b')

            return d
    @property
    def kludge_mass(self):
        '''
        Have a safe way to calculate the mass of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from msini.
        '''
        # pull out the actual values from the table
        M = self.standard['mass'].copy().quantity

        # try to replace bad ones with msini
        bad = np.isfinite(M) == False
        self.speak(f'{sum(bad)}/{self.n} masses are missing')

        # estimate from the msini (a lower limit on the true mass)
        try:
            M[bad] = self.msini[bad]
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(M) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after msini')

        return M
    @property
    def kludge_radius(self):
        '''
        Have a safe way to calculate the radii of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from mass, via Chen & Kipping (2017).
        '''
        # pull out the actual values from the table
        R = self.standard['radius'].copy().quantity

        # try to replace bad ones with a mass-radius estimate
        bad = np.isfinite(R) == False
        self.speak(f'{sum(bad)}/{self.n} radii are missing')

        # estimate from Chen and Kipping
        try:
            M = self.kludge_mass
            R[bad] = estimate_radius(M[bad])
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(R) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after Chen & Kipping (2017)')

        return R
    @property
    def kludge_age(self):
        '''
        Have a safe way to calculate the age of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then assume 5 Gyr.
        '''
        # pull out the actual values from the table
        age = self.standard['stellar_age'].copy().quantity

        # count the missing ages
        bad = np.isfinite(age) == False
        self.speak(f'{sum(bad)}/{self.n} ages are missing')

        # blindly fill in a default of 5 Gyr
        try:
            age[bad] = 5*u.Gyr
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(age) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after blindly assuming 5Gyr for missing ages')

        return age
@property
def surface_gravity(self):
'''
(FIXME) -- make an assumption for planets without masses
'''
G = con.G
M = self.mass
R = self.radius
g = (G*M/R**2).to('m/s**2')
return g
@property
def density(self):
'''
The density of the planet.
'''
mass = self.mass
volume = 4/3*np.pi*(self.radius)**3
return (mass/volume).to('g/cm**3')
@property
def escape_velocity(self):
'''
The escape velocity of the planet.
'''
G = con.G
M = self.mass
R = self.radius
return np.sqrt(2*G*M/R).to('km/s')
@property
def escape_parameter(self):
'''
The Jeans atmospheric escape parameter for atomic hydrogen,
at the equilibrium temperature of the planet.
'''
k = con.k_B
T = self.teq
mu = 1
m_p = con.m_p
G = con.G
M = self.mass
R = self.radius
e_thermal = k*T
e_grav = G*M*m_p/R
return (e_grav/e_thermal).decompose()
@property
def distance_modulus(self):
'''
The distance modulus to the system, in magnitudes.
'''
mu = 5*np.log10(self.distance/(10*u.pc))
return mu
def scale_height(self, mu=2.32):
'''
The scale height of the atmosphere, at equilibrium temperature.
'''
k = con.k_B
T = self.teq
m_p = con.m_p
g = self.surface_gravity
return (k*T/mu/m_p/g).to('km')
    def transmission_signal(self, mu=2.32, threshold=2):
        '''
        What is the transit depth of 1 scale height of an
        atmosphere transiting in front of the star.

        Parameters
        ----------
        mu : float
            Mean molecular weight (default 2.32, H/He-dominated)
        threshold : float
            By how many sigma must the planet mass be detected?
        '''
        with np.errstate(invalid='ignore'):
            # depth of one scale height of atmosphere: 2*H*Rp/Rs^2
            H = self.scale_height(mu)
            Rp = self.radius
            Rs = self.stellar_radius
            depth = (2*H*Rp/Rs**2).decompose()

            # hide planets whose masses are too uncertain,
            # since a bad mass gives a meaningless scale height
            dlnm = self.uncertainty('mass')/self.mass
            bad = dlnm > 1/threshold
            depth[bad] = np.nan
            return depth
def reflection_signal(self, albedo=1.0):
'''
What is the reflected light eclipse depth,
for an albedo of 100%?
'''
return albedo*0.25*(self.radius/self.semimajor_axis).decompose()**2
    def emission_signal(self, wavelength=5*u.micron):
        '''
        What is the thermal emission eclipse depth,
        assuming Planck spectra for both star and planet?

        This calculation assumes a Bond albedo of 0
        and that heat is uniformly distributed over the planet.

        Parameters
        ----------
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        '''
        # create thermal emission sources for both star and planet
        import rainbowconnection as rc
        star = rc.Thermal(teff=self.stellar_teff, radius=self.stellar_radius)
        planet = rc.Thermal(teff=self.teq, radius=self.radius)

        # calculate the depth as the luminosity ratio
        depths = planet.spectrum(wavelength)/star.spectrum(wavelength)

        return depths
    def stellar_brightness(self, wavelength=5*u.micron):
        '''
        How many photons/s/m^2/micron do we receive from the star?

        This is calculated from the distance, radius, and
        stellar effective temperature of the stars.

        (It could be potentially be improved with PHOENIX
        model grids and/or cleverness with photometry.)

        Parameters
        ----------
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        '''
        # import some tools for easy cartoon spectra
        import rainbowconnection as rc

        # create source with right temperature, size, distance
        teff, radius = self.stellar_teff, self.stellar_radius
        star = rc.Thermal(teff=teff,
                          radius=radius).at(self.distance)

        # calculate the energy flux
        flux_in_energy = star.spectrum(wavelength)

        # convert to photon flux, by dividing by the energy per photon
        photon_energy = con.h*con.c/wavelength/u.ph
        flux_in_photons = flux_in_energy/photon_energy

        # return the photon flux, in convenient units
        return flux_in_photons.to('ph s^-1 m^-2 micron^-1')
    def stellar_brightness_in_telescope_units(self, telescope_name='JWST', **kw):
        '''
        The stellar brightness, converted to telescope units.

        Parameters
        ----------
        telescope_name : str
            The name of the telescope.
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        R : float
            The spectral resolution at which the
            telescope will bin wavelengths.
        dt : astropy.units.quantity.Quantity
            The time over which the telescope exposes.
        '''
        # what counts as 1 "telescope unit" (e.g. JWST at R=20 at 5 microns for 1 hour)
        telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)

        # what's the photon flux (photons/m**2/s)
        flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)

        # quote the brightness as (for example) gigaphotons/JWST at R=20 at 5 microns in 1 hour
        unit = lotsofphotons_unit/telescope_unit
        return flux_in_photons.to(unit)
    def depth_uncertainty(self, telescope_name='JWST',
                                per_transit=False,
                                dt=1*u.hour,
                                **kw):
        '''
        What is the transit/eclipse depth uncertainty
        with a particular telescope
        at a particular wavelength
        at a particular resolution?

        By default, this will be calculated for one transit.
        Optionally, it can be calculated for a given amount of time instead.

        Parameters
        ----------
        telescope_name : str
            The name of the telescope.
        per_transit : bool
            If True, calculate the depth uncertainty for one transit.
            If False, calculate the depth uncertainty for a certain amount
            of in-transit time. You likely want to specify `dt` as a
            keyword argument to set that amount of in-transit time.
            In either case, an out-of-transit baseline equal to the
            total in-transit time will be assumed. This means the actual
            time cost will be twice the transit duration or `dt` chosen,
            and the depth uncertainty will be a factor sqrt(2) larger
            than the pure photon noise binned to the relevant timescale.
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        R : float
            The spectral resolution at which the
            telescope will bin wavelengths.
        dt : astropy.units.quantity.Quantity
            The time over which the telescope exposes. If `per_transit=True`,
            this will be ignored. Otherwise, it will set the total amount
            of in-transit time observed, assuming that an equal amount of
            time will *also* be observed out of transit.
        '''
        # what counts as 1 "telescope unit" (e.g. JWST at R=20 at 5 microns for 1 hour)
        telescope_unit = define_telescope_unit_by_name(telescope_name,
                                                       dt=dt,
                                                       **kw)

        # what's the photon flux (photons/m**2/s)
        flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)

        # what's the total collecting power?
        # (scale by transit duration when quoting per-transit noise)
        if per_transit:
            ratio_of_collecting_time = self.transit_duration/dt
        else:
            ratio_of_collecting_time = 1
        collecting_power = 1*telescope_unit*ratio_of_collecting_time

        # what's the total number of photons collected during transit
        N = (flux_in_photons*collecting_power).to(u.ph).value

        # what's the flux uncertainty on the time scale of one transit?
        # (pure Poisson photon noise)
        sigma = 1/np.sqrt(N)

        # inflate by a factor of sqrt(2) for equal out-of-transit
        oot = np.sqrt(2)
        sigma_depth = sigma*oot

        return sigma_depth
def _get_noise_and_unit(self, telescope_name='JWST',
per_transit=False,
**kw):
'''
Tiny helper to get the noise and the telescope_unit
for a telescope observation of a planet.
'''
# figure out the noise
noise = self.depth_uncertainty(telescope_name=telescope_name, per_transit=per_transit, **kw)
# create a telescope unit (mostly to get a default wavelength)
telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)
return noise, telescope_unit
def emission_snr(self, telescope_name='JWST', **kw):
'''
What's the approximate S/N for the detection of the
thermal emission eclipse of a planet?
'''
noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
signal = self.emission_signal(wavelength=telescope_unit.wavelength)
return signal/noise
def reflection_snr(self, telescope_name='JWST', albedo=1, **kw):
'''
What's the approximate S/N for the detection of the
reflected light eclipse of a planet?
'''
noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
signal = self.reflection_signal(albedo=albedo)
return signal/noise
    def transmission_snr(self, telescope_name='JWST', mu=2.32, threshold=2, **kw):
        '''
        What's the approximate S/N for the detection of one
        scale height of transmission signal through a planet's
        atmosphere? (docstring was a copy-paste of the reflection one)
        '''
        noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
        signal = self.transmission_signal(mu=mu, threshold=threshold)
        return signal/noise
def scatter(self, xname, yname, c=None, s=None, names=True, xlog=True, ylog=True, **kw):
'''
Quick tool to plot one parameter against another.
'''
plt.ion()
x, y = self.__getattr__(xname), self.__getattr__(yname)
try:
self.ax.cla()
except:
self.figure = plt.figure('Exoplanet Population')
self.ax = plt.subplot()
self.ax.set_xlabel(xname)
self.ax.set_ylabel(yname)
self.ax.scatter(x, y, c=c, s=s, **kw)
if False:
for i in range(len(x)):
self.ax.text(x[i], y[i], self.table['NAME'][i])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
plt.draw()
    def thumbtack(self, maxr=1000, dr=100, labels=False):
        '''
        Plot the planets as thumbtacks, in a 2D projection of
        (distance, RA) with distance-grid circles.

        Parameters
        ----------
        maxr : float
            The maximum distance out to which to set the plot limits.
        dr : float
            The spacing between distance grid circles.
        labels : bool
            Should planet names be drawn next to their points?
        '''
        # stretch distances as d**1.5 for more even visual spacing
        def scale(d):
            return np.array(d)**1.5
        r = scale(self.distance)
        x, y = r*np.cos(self.ra*np.pi/180), r*np.sin(self.ra*np.pi/180)
        plt.ion()
        plt.figure('thumbtacks')
        ax = plt.subplot()
        ax.cla()
        ax.set_aspect('equal')
        theta = np.linspace(0,2*np.pi,1000)
        angle = -90*np.pi/180

        # draw the distance grid circles, each labeled in pc
        gridkw = dict(alpha=0.25, color='green')
        for originalradius in np.arange(dr,maxr*2,dr):
            radii = scale(originalradius)
            ax.plot(radii*np.cos(theta), radii*np.sin(theta), linewidth=3, **gridkw)
            ax.text(radii*np.cos(angle), radii*np.sin(angle), '{0:.0f} pc'.format(originalradius), rotation=90+ angle*180/np.pi, va='bottom', ha='center', size=13, weight='extra bold', **gridkw)

        # plot the planets themselves
        ax.plot(x, y, marker='o', alpha=0.5, color='gray', linewidth=0, markeredgewidth=0)

        # NOTE(review): `close` is hard-coded to a single planet here,
        # instead of the commented-out distance cut
        close = (self.name == 'WASP-94A b').nonzero()[0]#(self.distance < maxr).nonzero()[0]
        if labels:
            for c in close:
                plt.text(x[c], y[c], self.name[c])
        ax.set_xlim(-scale(maxr), scale(maxr))
        ax.set_ylim(-scale(maxr), scale(maxr))
def compare(self, x='teq', y='radius', area='depth', color='stellar_radius'):
xplot = self.__dict__[x]
yplot = self.__dict__[y]
sizeplot = self.__dict__[size]
colorplot = self.__dict__[color]
maxarea = 1000
area = self.__dict__[area]
sizeplot = np.sqrt(area/np.nanmax(area)*maxarea)
plt.scatter(xplot, yplot, linewidth=0, marker='o', markersize=sizeplot)
class PredefinedPopulation(Population):
    '''
    Population object keeps track of an exoplanet population
    that can be ingested from raw data and cached to disk
    as a standardized table.
    '''

    # how old a cached standardized file may get before it's
    # considered stale -- TODO confirm units against
    # `check_if_needs_updating`
    expiration = 0.00001
    def __init__(self, label='exoplanets', remake=False, skip_update=False, **plotkw):
        '''
        Initialize a population, by trying the following steps:
                1) Load a standardized ascii table.
                2) Ingest a raw table, and standardize it.

        Parameters
        ----------
        label : str
            The name of this population, for use both in filenames
            and labeling points on plots.
        remake : bool
            Should we re-ingest this table from its raw ingredients?
        skip_update : bool
            Should we skip checking for updates in the existing data?
        **plotkw : dict
            All other keywords are stored as plotting suggestions.
        '''
        # set the name for this population
        self.label = label
        try:
            # try to load the standardized table
            # (the assertion forces a fresh ingest when remake=True)
            assert(remake == False)
            standard = self.load_standard(skip_update=skip_update)
        except (IOError,FileNotFoundError,AssertionError):
            # or create a new standardized table and save it
            standard = self.ingest_table(remake=remake)

        # initialize with a standard table
        Population.__init__(self,
                            standard=standard,
                            label=label,
                            **plotkw)
@property
def fileprefix(self):
'''
Define a fileprefix for this population, to be used
for setting the filename of the standardized population.
'''
return clean(self.label)
def ingest_table(self, **kwargs):
'''
Ingest a new population table of arbitrary format,
and then standardize it, using the tools defined in
inherited population classes.'''
# load the raw table
raw = self.load_raw()
# trim elements from raw table as necessary
trimmed = self.trim_raw(raw)
# create a standardized table from the array
standard = self.create_standard(trimmed)
# save the standardized table
self.save_standard(standard)
return standard
@property
def standard_path(self):
'''
Define the filepath for the standardized table.
'''
return os.path.join(directories['data'],
f'standardized-{self.fileprefix}.txt')
def load_raw(self):
raise NotImplementedError('''
Yikes! The `.load_raw` method has not been defined
for whatever object is trying to call it!
''')
def trim_raw(self, raw):
'''
Trim bad/unnecessary rows out of a raw table of planet properties.
'''
# no trimming necessary
trimmed = raw
# for debugging, hang onto the trimmed table as a hidden attribute
self._trimmed = trimmed
# a trimmed table
return self._trimmed
def load_standard(self, skip_update=False):
'''
Load a standardized population table. Generally this
will be from a file like ~/.exoatlas/standardized-*.txt
Returns
-------
standard : astropy.table.Table
A table of planet properties,
with a minimal set of columns.
skip_update : bool
Should we skip checks to see if the data are too stale?
'''
# make sure this file is recent enough (unless we're skipping updates)
if not skip_update:
old = check_if_needs_updating(self.standard_path, self.expiration)
assert(old == False)
# keywords for reading a standardized table
readkw = dict(format='ecsv', fill_values=[('',np.nan), ('--', np.nan)])
standard = ascii.read(self.standard_path, **readkw)
self.speak(f'Loaded standardized table from {self.standard_path}')
# ??? change this to do something more clever with tables
# masked = np.ma.filled(standard, fill_value = np.nan)
return standard
def save_standard(self, standard):
'''
Save the standardized table out to a text file
like ~/exoatlas/standardized-*.txt
'''
# save it as an ascii table for humans to read
standard.write(self.standard_path,
format='ascii.ecsv',
overwrite=True )
self.speak(f'Saved a standardized text table to {self.standard_path}')
def create_table(self, desired_columns=['name',
'radius', 'relative_insolation',
'stellar_radius', 'stellar_teff',
'ra', 'dec', 'distance']):
'''
Create an astropy table based on this population,
using a subset of columns, which may include ones
that have been calculated as Population properties.
Parameters
----------
desired_columns : list
The columns you want to include. Anything that
can be accessed via Population.??? can be provided
here as a string.
Returns
-------
table : astropy.table.Table
A table, with those columns, in the same order
as the Population itself.
'''
# FIXME! need to add method support for arguments
# create a dictionary with the desired columns
d = {c:getattr(self, c) for c in desired_columns}
# turn that into an astropy Table
t = Table(d)
return t
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
@test_util.with_c_api
class TestUtilTest(test_util.TensorFlowTestCase):
  """Exercises the assertion helpers and utilities provided by test_util."""

  def test_assert_ops_in_graph(self):
    # assert_ops_in_graph should accept a matching {name: op_type} mapping
    # and raise ValueError for missing names or mismatched op types.
    with self.test_session():
      constant_op.constant(["hello", "taffy"], name="hello")
      test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())

    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"bye": "Const"}, ops.get_default_graph())

    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"hello": "Variable"}, ops.get_default_graph())

  def test_assert_equal_graph_def(self):
    # Build the same two constants in opposite orders in two graphs.
    with ops.Graph().as_default() as g:
      def_empty = g.as_graph_def()
      constant_op.constant(5, name="five")
      constant_op.constant(7, name="seven")
      def_57 = g.as_graph_def()
    with ops.Graph().as_default() as g:
      constant_op.constant(7, name="seven")
      constant_op.constant(5, name="five")
      def_75 = g.as_graph_def()
    # Comparing strings is order dependent
    self.assertNotEqual(str(def_57), str(def_75))
    # assert_equal_graph_def doesn't care about order
    test_util.assert_equal_graph_def(def_57, def_75)
    # Compare two unequal graphs
    with self.assertRaisesRegexp(AssertionError,
                                 r"^Found unexpected node 'seven"):
      test_util.assert_equal_graph_def(def_57, def_empty)

  def testIsGoogleCudaEnabled(self):
    # The test doesn't assert anything. It ensures the py wrapper
    # function is generated correctly.
    if test_util.IsGoogleCudaEnabled():
      print("GoogleCuda is enabled")
    else:
      print("GoogleCuda is disabled")

  def testIsMklEnabled(self):
    # This test doesn't assert anything.
    # It ensures the py wrapper function is generated correctly.
    if test_util.IsMklEnabled():
      print("MKL is enabled")
    else:
      print("MKL is disabled")

  def testAssertProtoEqualsStr(self):
    graph_str = "node { name: 'w1' op: 'params' }"
    graph_def = graph_pb2.GraphDef()
    text_format.Merge(graph_str, graph_def)

    # test string based comparison
    self.assertProtoEquals(graph_str, graph_def)

    # test original comparison
    self.assertProtoEquals(graph_def, graph_def)

  def testAssertProtoEqualsAny(self):
    # Test assertProtoEquals with a protobuf.Any field.
    meta_graph_def_str = """
    meta_info_def {
      meta_graph_version: "outer"
      any_info {
        [type.googleapis.com/tensorflow.MetaGraphDef] {
          meta_info_def {
            meta_graph_version: "inner"
          }
        }
      }
    }
    """
    meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
    meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
    meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
    self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
    self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)

    # Check if the assertion failure message contains the content of
    # the inner proto.
    with self.assertRaisesRegexp(AssertionError,
                                 r'meta_graph_version: "inner"'):
      self.assertProtoEquals("", meta_graph_def_outer)

  def testNDArrayNear(self):
    a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
    self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
    self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))

  def testCheckedThreadSucceeds(self):

    def noop(ev):
      ev.set()

    event_arg = threading.Event()

    self.assertFalse(event_arg.is_set())
    t = self.checkedThread(target=noop, args=(event_arg,))
    t.start()
    t.join()
    self.assertTrue(event_arg.is_set())

  def testCheckedThreadFails(self):
    # An exception raised inside the thread should surface when joining.
    def err_func():
      return 1 // 0

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("integer division or modulo by zero" in str(fe.exception))

  def testCheckedThreadWithWrongAssertionFails(self):
    x = 37

    def err_func():
      self.assertTrue(x < 10)

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("False is not true" in str(fe.exception))

  def testMultipleThreadsWithOneFailure(self):
    # Only the thread whose assertion fails (i == 7) should raise on join.
    def err_func(i):
      self.assertTrue(i != 7)

    threads = [
        self.checkedThread(
            target=err_func, args=(i,)) for i in range(10)
    ]
    for t in threads:
      t.start()
    for i, t in enumerate(threads):
      if i == 7:
        with self.assertRaises(self.failureException):
          t.join()
      else:
        t.join()

  def _WeMustGoDeeper(self, msg):
    # Helper: raises an op error whose traceback chains through an
    # "original" op, so assertRaisesOpError must match only the true
    # error message, not names leaked from the op stack.
    with self.assertRaisesOpError(msg):
      with ops.Graph().as_default():
        node_def = ops._NodeDef("IntOutput", "name")
        node_def_orig = ops._NodeDef("IntOutput", "orig")
        op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
        op = ops.Operation(node_def, ops.get_default_graph(),
                           original_op=op_orig)
        raise errors.UnauthenticatedError(node_def, op, "true_err")

  def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
    with self.assertRaises(AssertionError):
      self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")

    self._WeMustGoDeeper("true_err")
    self._WeMustGoDeeper("name")
    self._WeMustGoDeeper("orig")

  def testAllCloseScalars(self):
    self.assertAllClose(7, 7 + 1e-8)
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(7, 7 + 1e-5)

  def testAllCloseDictToNonDict(self):
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose(1, {"a": 1})
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose({"a": 1}, 1)

  def testAllCloseNamedtuples(self):
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])

    # Identity.
    self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
    self.assertAllClose(
        my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))

  def testAllCloseDicts(self):
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}

    # Identity.
    self.assertAllClose(expected, expected)
    self.assertAllClose(expected, dict(expected))

    # With each item removed.
    for k in expected:
      actual = dict(expected)
      del actual[k]
      with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
        self.assertAllClose(expected, actual)

    # With each item changed.
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
    with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
      self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
    c_copy = np.array(c)
    c_copy[1, 1, 1] += 1e-5
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})

  def testAllCloseListOfNamedtuples(self):
    # Namedtuples compare against plain tuples field-by-field.
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
    l1 = [
        my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
        my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
    ]
    l2 = [
        ([[2.3, 2.5]], [[0.97, 0.96]]),
        ([[3.3, 3.5]], [[0.98, 0.99]]),
    ]
    self.assertAllClose(l1, l2)

  def testAllCloseNestedStructure(self):
    a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
    self.assertAllClose(a, a)

    b = copy.deepcopy(a)
    self.assertAllClose(a, b)

    # Test mismatched values: the error message should spell out the
    # path to the differing leaf.
    b["y"][1][0]["nested"]["n"] = 4.2
    with self.assertRaisesRegexp(AssertionError,
                                 r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
      self.assertAllClose(a, b)

  def testArrayNear(self):
    a = [1, 2]
    b = [1, 2, 5]
    with self.assertRaises(AssertionError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [[1, 2], [3, 4]]
    with self.assertRaises(TypeError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [1, 2]
    self.assertArrayNear(a, b, 0.001)

  def testForceGPU(self):
    with self.assertRaises(errors.InvalidArgumentError):
      with self.test_session(force_gpu=True):
        # this relies on us not having a GPU implementation for assert, which
        # seems sensible
        x = constant_op.constant(True)
        y = [15]
        control_flow_ops.Assert(x, y).run()

  def testAssertAllCloseAccordingToType(self):
    # test plain int
    self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)

    # test float64
    self.assertAllCloseAccordingToType(
        np.asarray([1e-8], dtype=np.float64),
        np.asarray([2e-8], dtype=np.float64),
        rtol=1e-8, atol=1e-8
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-7], dtype=np.float64),
          np.asarray([2e-7], dtype=np.float64),
          rtol=1e-8, atol=1e-8
      )

    # test float32: the looser float_rtol/float_atol should apply
    self.assertAllCloseAccordingToType(
        np.asarray([1e-7], dtype=np.float32),
        np.asarray([2e-7], dtype=np.float32),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-6], dtype=np.float32),
          np.asarray([2e-6], dtype=np.float32),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7
      )

    # test float16: the even looser half_rtol/half_atol should apply
    self.assertAllCloseAccordingToType(
        np.asarray([1e-4], dtype=np.float16),
        np.asarray([2e-4], dtype=np.float16),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7,
        half_rtol=1e-4, half_atol=1e-4
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-3], dtype=np.float16),
          np.asarray([2e-3], dtype=np.float16),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7,
          half_rtol=1e-4, half_atol=1e-4
      )

  def testRandomSeed(self):
    # Call setUp again for WithCApi case (since it makes a new default graph
    # after setup).
    # TODO(skyewm): remove this when C API is permanently enabled.
    self.setUp()
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    with self.test_session():
      a_rand = random_ops.random_normal([1]).eval()
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    with self.test_session():
      b_rand = random_ops.random_normal([1]).eval()
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertEqual(a_rand, b_rand)

  @test_util.run_in_graph_and_eager_modes()
  def test_callable_evaluate(self):
    def model():
      return resource_variable_ops.ResourceVariable(
          name="same_name",
          initial_value=1) + 1
    with context.eager_mode():
      self.assertEqual(2, self.evaluate(model))

  @test_util.run_in_graph_and_eager_modes()
  def test_nested_tensors_evaluate(self):
    expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
    nested = {"a": constant_op.constant(1),
              "b": constant_op.constant(2),
              "nested": {"d": constant_op.constant(3),
                         "e": constant_op.constant(4)}}

    self.assertEqual(expected, self.evaluate(nested))

  def test_get_node_def_from_graph(self):
    graph_def = graph_pb2.GraphDef()
    node_foo = graph_def.node.add()
    node_foo.name = "foo"
    self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
    self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
@test_util.with_c_api
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for test_util's garbage/leak-detection decorators."""

  def test_no_reference_cycle_decorator(self):
    # assert_no_garbage_created should fail only for the method that
    # actually creates a reference cycle.
    class ReferenceCycleTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        a = []
        a.append(a)

      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass

    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()

    ReferenceCycleTest().test_has_no_cycle()

  def test_no_leaked_tensor_decorator(self):
    # assert_no_new_tensors should fail only when a Tensor outlives the
    # decorated method (here, stashed on self).
    class LeakedTensorTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        self.a = constant_op.constant([3.])

      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.])

    with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()

    LeakedTensorTest().test_has_no_leak()

  def test_no_new_objects_decorator(self):
    # assert_no_new_pyobjects_executing_eagerly should fail only when the
    # decorated method accumulates objects that survive the call.
    class LeakedObjectTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
        inner_self.accumulation = []

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        self.accumulation.append([1.])

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        self.not_accumulating = [1.]

    with self.assertRaises(AssertionError):
      LeakedObjectTest().test_has_leak()
    LeakedObjectTest().test_has_no_leak()
if __name__ == "__main__":
  # Discover and run all test cases in this module.
  googletest.main()
| |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class representing a Cloudstack instance. This module uses the csapi
library, which calls the CloudStack API. For more information on the
CloudStack API, refer to the Apache CloudStack documentation at
https://cloudstack.apache.org/api.html
"""
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.cloudstack import cloudstack_disk
from perfkitbenchmarker.providers.cloudstack import cloudstack_network
from perfkitbenchmarker.providers.cloudstack import util
from perfkitbenchmarker import providers
# Template names of the base OS images available in the CloudStack deployment.
UBUNTU_IMAGE = 'Ubuntu 14.04.2 HVM base (64bit)'
RHEL_IMAGE = 'CentOS 7 HVM base (64bit)'

# Parsed command-line flags (CS_API_URL, CS_API_KEY, CS_API_SECRET,
# project, run_uri, ...).
FLAGS = flags.FLAGS
class CloudStackVirtualMachine(virtual_machine.BaseVirtualMachine):
  """Object representing a CloudStack Virtual Machine."""

  CLOUD = providers.CLOUDSTACK
  # Defaults used when the vm_spec does not specify these values.
  DEFAULT_ZONE = 'QC-1'
  DEFAULT_MACHINE_TYPE = '1vCPU.1GB'
  DEFAULT_IMAGE = 'Ubuntu 14.04.2 HVM base (64bit)'
  DEFAULT_USER_NAME = 'cca-user'
  DEFAULT_PROJECT = 'cloudops-Engineering'

  def __init__(self, vm_spec):
    """Initialize a CloudStack virtual machine.

    Args:
      vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
    """
    super(CloudStackVirtualMachine, self).__init__(vm_spec)
    self.network = cloudstack_network.CloudStackNetwork.GetNetwork(self)

    self.cs = util.CsClient(FLAGS.CS_API_URL,
                            FLAGS.CS_API_KEY,
                            FLAGS.CS_API_SECRET)

    # Resolve the (optional) CloudStack project and the zone to their ids.
    self.project_id = None
    if FLAGS.project:
      project = self.cs.get_project(FLAGS.project)
      assert project, "Project not found"
      self.project_id = project['id']

    zone = self.cs.get_zone(self.zone)
    assert zone, "Zone not found"

    self.zone_id = zone['id']
    self.user_name = self.DEFAULT_USER_NAME
    self.image = self.image or self.DEFAULT_IMAGE
    # Counts attached scratch disks so each gets a unique name.
    self.disk_counter = 0

  @vm_util.Retry(max_retries=3)
  def _CreateDependencies(self):
    """Create VM dependencies: an ssh keypair and a public IP."""
    # Create an ssh keypair
    with open(self.ssh_public_key) as keyfd:
      self.ssh_keypair_name = 'perfkit-sshkey-%s' % FLAGS.run_uri
      pub_key = keyfd.read()

      if not self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):

        res = self.cs.register_ssh_keypair(self.ssh_keypair_name,
                                           pub_key,
                                           self.project_id)

        assert res, "Unable to create ssh keypair"

    # Allocate a public ip (on the VPC when the network is one).
    network_id = self.network.id
    if self.network.is_vpc:
      network_id = self.network.vpc_id

    public_ip = self.cs.alloc_public_ip(network_id, self.network.is_vpc)

    if public_ip:
      self.ip_address = public_ip['ipaddress']
      self.ip_address_id = public_ip['id']
    else:
      # FIX: logging.warn is a deprecated alias of logging.warning.
      logging.warning("Unable to allocate public IP")

  def _DeleteDependencies(self):
    """Delete VM dependencies."""
    # Remove the keypair
    if self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
      self.cs.unregister_ssh_keypair(self.ssh_keypair_name, self.project_id)

    # Remove the IP
    if self.ip_address_id:
      self.cs.release_public_ip(self.ip_address_id)

  @vm_util.Retry(max_retries=3)
  def _Create(self):
    """Create a Cloudstack VM instance."""
    service_offering = self.cs.get_serviceoffering(self.machine_type)
    assert service_offering, "No service offering found"

    template = self.cs.get_template(self.image, self.project_id)
    assert template, "No template found"

    network_id = self.network.id

    vm = None
    vm = self.cs.create_vm(self.name,
                           self.zone_id,
                           service_offering['id'],
                           template['id'],
                           [network_id],
                           self.ssh_keypair_name,
                           self.project_id)

    assert vm, "Unable to create VM"

    self._vm = vm
    self.id = vm['virtualmachine']['id']

  @vm_util.Retry(max_retries=3)
  def _PostCreate(self):
    """Get the instance's data and wire up static NAT to the public IP."""
    # associate the public ip created with the VM id
    network_interface = self._vm['virtualmachine']['nic'][0]
    self.internal_ip = network_interface['ipaddress']

    # Create a Static NAT rule
    if not self.cs.snat_rule_exists(self.ip_address_id, self.id):

      snat_rule = self.cs.enable_static_nat(self.ip_address_id,
                                            self.id,
                                            self.network.id)

      assert snat_rule, "Unable to create static NAT"

  def _Delete(self):
    """Delete the VM instance."""
    # Delete the VM
    self.cs.delete_vm(self.id)

  def _Exists(self):
    """Returns true if the VM exists."""
    # Check if VM exists
    vm = self.cs.get_virtual_machine(self.name, self.project_id)
    if vm and 'id' in vm:
      return True

    return False

  def CreateScratchDisk(self, disk_spec):
    """Create a VM's scratch disk.

    Args:
      disk_spec: virtual_machine.BaseDiskSpec object of the disk.
    """
    # Cloudstack doesn't really have a concept of local or remote disks A VM
    # starts with one disk and all other volumes have to be attached via the
    # API
    self.disks = []

    # FIX: use range() instead of the Python-2-only xrange() so this module
    # also runs under Python 3; behavior is identical for this small loop.
    for i in range(disk_spec.num_striped_disks):

      name = 'disk-%s-%d-%d' % (self.name, i + 1, self.disk_counter)
      scratch_disk = cloudstack_disk.CloudStackDisk(disk_spec,
                                                    name,
                                                    self.zone_id,
                                                    self.project_id)

      self.disks.append(scratch_disk)
      self.disk_counter += 1

    self._CreateScratchDiskFromDisks(disk_spec, self.disks)
class DebianBasedCloudStackVirtualMachine(CloudStackVirtualMachine,
                                          linux_vm.DebianMixin):
  # Debian-flavored CloudStack VMs boot from the Ubuntu template by default.
  DEFAULT_IMAGE = UBUNTU_IMAGE
class RhelBasedCloudStackVirtualMachine(CloudStackVirtualMachine,
                                        linux_vm.RhelMixin):
  # RHEL-flavored CloudStack VMs boot from the CentOS template by default.
  DEFAULT_IMAGE = RHEL_IMAGE
| |
"""Utility functions for computing contour features.
Each of these functions computes single sets of contour features using
information such as the times, frequencies, salience, sample rate, etc.
Each function returns a flattened numpy array for easy concatenation.
@author: mariapanteli, rabitt
"""
from __future__ import print_function

import librosa
import numpy as np
import numpy.polynomial.polynomial as Poly
import scipy
import scipy.signal
def hz_to_cents(freq_hz, ref_hz=32.0):
    '''Convert frequencies from Hz to cents above a reference frequency.

    Parameters
    ----------
    freq_hz : np.array
        Contour frequencies in Hz.
    ref_hz : float, default=32.0
        Reference frequency in Hz.

    Returns
    -------
    freq_cents : np.array
        Contour frequencies in cents relative to ``ref_hz``.
    '''
    # 1200 cents per octave; log2 of the frequency ratio counts octaves
    return 1200.0 * np.log2(freq_hz / ref_hz)
def get_contour_onset(times):
    '''Return the contour's first time stamp.

    Parameters
    ----------
    times : np.array
        Array of contour times.

    Returns
    -------
    onset : np.array
        Length-1 array holding the contour onset in seconds.
    '''
    return np.atleast_1d(times[0])
def get_contour_offset(times):
    '''Return the contour's last time stamp.

    Parameters
    ----------
    times : np.array
        Array of contour times.

    Returns
    -------
    offset : np.array
        Length-1 array holding the contour offset in seconds.
    '''
    return np.atleast_1d(times[-1])
def get_contour_duration(times):
    '''Return the contour duration in seconds.

    Parameters
    ----------
    times : np.array
        Array of contour times.

    Returns
    -------
    duration : np.array
        Length-1 array holding last minus first time stamp.
    '''
    span = times[-1] - times[0]
    return np.atleast_1d(span)
def get_mean(signal):
    '''Return the mean of a signal as a length-1 array.

    Parameters
    ----------
    signal : np.array
        Array of values.

    Returns
    -------
    mean : np.array
        Length-1 array holding the mean of the signal.
    '''
    return np.atleast_1d(np.mean(signal))
def get_std(signal):
    '''Return the standard deviation of a signal as a length-1 array.

    Parameters
    ----------
    signal : np.array
        Array of values.

    Returns
    -------
    std : np.array
        Length-1 array holding the (population) standard deviation.
    '''
    return np.atleast_1d(np.std(signal))
def get_sum(signal):
    '''Return the sum of a signal as a length-1 array.

    Parameters
    ----------
    signal : np.array
        Array of values.

    Returns
    -------
    sum : np.array
        Length-1 array holding the sum of the signal.
    '''
    return np.atleast_1d(np.sum(signal))
def get_range(signal):
    '''Return the range (max minus min) of a signal as a length-1 array.

    Parameters
    ----------
    signal : np.array
        Array of values.

    Returns
    -------
    range : np.array
        Length-1 array holding max(signal) - min(signal).
    '''
    # np.ptp ("peak to peak") is exactly max - min
    return np.atleast_1d(np.ptp(signal))
def get_total_variation(signal):
    '''Return the total variation of a signal as a length-1 array.

    The total variation is the sum of absolute sample-to-sample changes.

    Parameters
    ----------
    signal : np.array
        Array of values.

    Returns
    -------
    total_variation : np.array
        Length-1 array holding sum(|signal[i+1] - signal[i]|).
    '''
    deltas = signal[1:] - signal[:-1]
    return np.atleast_1d(np.sum(np.abs(deltas)))
def get_polynomial_fit_features(times, signal, n_deg=5, norm=False):
    '''Fit a signal to a polynomial and summarize the fit.

    Parameters
    ----------
    times : np.array
        Array of contour times.
    signal : np.array
        Array of values to fit.
    n_deg : int, default=5
        Number of polynomial degrees to fit.
    norm : bool, default=False
        If True, scales the signal to be between 0 and 1.
        If False, the signal is not altered.

    Returns
    -------
    features : np.array
        The flattened polynomial coefficients followed by the L2 norm
        of the pointwise fit residual.
    '''
    coeffs, _, residual = _fit_poly(n_deg, signal, grid=times, norm=norm)
    residual_norm = np.linalg.norm(residual)
    # coefficients first, then a single scalar summarizing the error
    return np.append(coeffs.flatten(), residual_norm)
def _fit_poly(n_poly_degrees, signal, grid=None, norm=False):
'''Fit a signal to a polynomial. If grid is not given, assumes a uniform
grid between 0 and 1 of length len(signal).
Parameters
----------
n_poly_degrees : int
Number of polynomial degrees to fit.
signal : np.array
Array of values to fit.
grid : np.array or None, default=None
Array of x-values, or None.
If None, uses a uniform time grid between 0 and 1.
norm : bool, default=False
If True, scales the signal to be between 0 and 1
If False, the signal is not altered.
Returns
-------
poly_coeff : np.array
The coefficients of the polynomial.
poly_approx : np.array
The polynomial approximation of the signal.
residual : np.array
The pointwise difference between the signal and the polynomial.
'''
n_points = len(signal)
if n_points < n_poly_degrees + 1:
raise ValueError('signal must be at least as long as n_poly_degrees')
if norm:
signal = signal - np.min(signal)
max_val = np.max(signal)
if max_val > 0:
signal = signal / max_val
if grid is None:
grid = np.linspace(-1, 1, num=n_points)
else:
grid = grid - np.mean(grid)
poly_coeff = Poly.polyfit(grid, signal, n_poly_degrees)
poly_approx = Poly.polyval(grid, poly_coeff)
residual = signal - poly_approx
return poly_coeff, poly_approx, residual
def _fit_normalized_cosine(x, y, min_freq, max_freq, step):
'''Assuming the amplitude is 1, find the optimal frequency and phase of a
to fit a cosine. Searches within the frequency range (min_freq, max_freq).
Fits to a cosine of the form:
y = cos((2pi * freq * x) - phase)
Parameters
----------
x : np.array
Array of evenly spaced x-values.
y : np.array
Array of y-values.
min_freq : float
The minimum allowed vibrato frequency.
max_freq : float
The maximum allowed vibrato frequency.
step : float
The step size between vibrato search frequencies.
Returns
-------
freq : float
The estimated optimal frequency.
phase : float
The estimated optimal phase (in radians)
'''
freqs = np.arange(min_freq, max_freq, step)
dot_prod = np.dot(
np.exp(2.0 * np.pi * 1j * np.multiply.outer(freqs, x)),
y
)
dot_prod_mag = np.abs(dot_prod)
peak_idx = list(scipy.signal.argrelmax(dot_prod_mag)[0])
if len(peak_idx) == 0:
return 0, 0
idx = peak_idx[np.argmax(dot_prod_mag[peak_idx])]
freq = freqs[idx]
phase = np.angle(dot_prod[idx])
return freq, phase
def _compute_coverage_array(y_sinfit_diff, cycle_length, vibrato_threshold):
'''Given an array of residual differences, compute the vibrato coverage
over time by splitting the interval up chunks of size cycle_length.
Parameters
----------
y_sinfit_diff : np.array
Array of residual differences between 0 and 1.
cycle_length : float
Optimal number of intervals for the estimated vibrato frequency
vibrato_threshold : float
Value between 0 and 1 to determine whether the fit is good enough.
Returns
-------
coverage : np.array
Array of booleans indicating if the current frame contains vibrato.
'''
n_points = len(y_sinfit_diff)
half_period_idx = list(
np.round(
cycle_length * np.arange(
0, int(np.ceil(float(n_points) / float(cycle_length)) + 1)
)
).astype(int)
)
if half_period_idx[-1] > n_points:
half_period_idx = half_period_idx[:-1]
if half_period_idx[-1] < n_points:
half_period_idx.append(n_points)
# compute the goodness of fit for each half period
diff_thresh = np.zeros(y_sinfit_diff.shape)
diffs = np.zeros((len(half_period_idx) - 1, ))
for k, (i, j) in enumerate(zip(half_period_idx[:-1], half_period_idx[1:])):
diffs[k] = np.mean(y_sinfit_diff[i:j])
diff_thresh[i:j] = diffs[k]
# vibrato is active when the fit diff is below a threshold
coverage = np.less_equal(diff_thresh, vibrato_threshold)
diff_coverage = np.less_equal(diffs, vibrato_threshold)
# If vibrato is active for less than 2 full periods, set coverage to None
if sum(diff_coverage) <= 3:
coverage[:] = False
return coverage
def get_contour_shape_features(times, freqs, sample_rate, poly_degree=5,
                               min_freq=3, max_freq=30, freq_step=0.1,
                               vibrato_threshold=0.25):
    '''Fit contour to a low order polynomial plus sinusoidal vibrato.

    Parameters
    ----------
    times : np.array
        Sequence of contour times
    freqs : np.array
        Sequence of contour frequencies
    sample_rate : float
        Contour sample rate
    poly_degree : float, default=5
        Low order polynomial degree
    min_freq : float, default=3
        The minimum allowed vibrato frequency
    max_freq : float, default=30
        The maximum allowed vibrato frequency
    freq_step : float, default=0.1
        The step size between vibrato search frequencies
    vibrato_threshold : float, default=0.25
        The fitness threshold for a half period to be considered vibrato.
        Regions with normalized fitness differences below vibrato_threshold
        are considered to have vibrato.

    Returns
    -------
    features : np.array
        Array of features. Elements (in order) are:
            - vibrato rate (in Hz)
            - vibrato extent (in the same units as freqs)
            - vibrato coverage (between 0 and 1)
            - vibrato coverage beginning (between 0 and 1)
            - vibrato coverage middle (between 0 and 1)
            - vibrato coverage end (between 0 and 1)
            - 0th polynomial coefficient
            - 1st polynomial coefficient
            - ...
            - Kth polynomial coefficient (K = poly_degree)
            - polynomial fit residual
            - overall model fit residual
    '''
    n_points = len(freqs)
    # center the time axis so the polynomial fit is well conditioned
    times_shifted = times - np.mean(times)

    # fit contour to a low order polynomial
    poly_coeffs, y_poly, y_diff = _fit_poly(
        poly_degree, freqs, grid=times_shifted
    )

    # remove amplitude envelope using hilbert transform
    y_hilbert = np.abs(scipy.signal.hilbert(y_diff))
    # guard against division by zero where the envelope vanishes
    y_hilbert[y_hilbert == 0] = 1.0
    y_sin = y_diff / y_hilbert

    # get ideal vibrato parameters from resulting signal
    vib_freq, vib_phase = _fit_normalized_cosine(
        times_shifted, y_sin, min_freq=min_freq, max_freq=max_freq,
        step=freq_step
    )
    y_sinfit = np.cos(2. * np.pi * vib_freq * times_shifted - vib_phase)

    # get residual of sinusoidal fit
    y_sinfit_diff = np.abs(y_sin - y_sinfit)

    # compute vibrato coverage
    if vib_freq > 0:
        # half a vibrato period, measured in samples
        cycle_length = 0.5 * ((sample_rate) / vib_freq)
        coverage = _compute_coverage_array(
            y_sinfit_diff, cycle_length, vibrato_threshold
        )
    else:
        # no vibrato frequency found: nothing is covered
        coverage = np.zeros((n_points, )).astype(bool)

    # compute percentage of coverage
    vib_coverage = coverage.mean()

    # if vibrato is present, set extent and rate. Otherwise they are zero.
    if vib_coverage > 0:
        # extent is the mean amplitude envelope over the vibrato region
        vib_extent = np.mean(y_hilbert[coverage])
        vib_rate = vib_freq
    else:
        vib_extent = 0.0
        vib_rate = 0.0

    # compute the overall model fit: polynomial trend + gated vibrato
    y_vib = vib_extent * y_sinfit
    y_vib[~coverage] = 0
    y_modelfit = y_vib + y_poly

    # compute residuals (normalized by the number of samples)
    polyfit_residual = np.linalg.norm(y_diff) / float(n_points)
    modelfit_residual = np.linalg.norm(freqs - y_modelfit) / float(n_points)

    # aggregate features, reporting coverage separately over the
    # beginning / middle / end thirds of the contour
    thirds = int(np.round(n_points / 3.0))
    return np.concatenate([
        np.array([vib_rate, vib_extent, vib_coverage]),
        np.array([coverage[:thirds].mean(), coverage[thirds:2 * thirds].mean(),
                  coverage[2 * thirds:].mean()]),
        poly_coeffs,
        np.array([polyfit_residual, modelfit_residual])
    ])
def vibrato_essentia(freqs, sample_rate, hop_size=1):
    """Estimate vibrato parameters as in essentia.
    Warning: These features work but aren't very precise (e.g a perfect
    12 Hz sine wav estimates a rate of 9.8).
    Parameters
    ----------
    freqs : np.array
        Sequence of contour frequencies
    sample_rate : float
        Contour sample rate
    hop_size : int, default=1
        Number of samples to advance each frame
    Returns
    -------
    features : np.array
        Array of features. Elements (in order) are:
        - vibrato active (1 if active, 0 if not)
        - vibrato rate (in Hz, 0 if inactive)
        - vibrato extent (in the same units as freqs, 0 if inactive)
        - vibrato coverage (between 0 and 1, 0 if inactive)
    """
    # Remove the DC component so FFT peaks reflect modulation, not pitch height.
    contour = freqs - np.mean(freqs)
    # 0.35 s analysis window, zero-padded x4 for finer frequency resolution.
    frame_size = int(np.round(0.35 * sample_rate))
    fft_size = 4 * frame_size
    n_frames = len(contour) - frame_size
    # NOTE: `freqs` is re-bound here to the positive FFT bin frequencies,
    # shadowing the input parameter (already copied into `contour` above).
    freqs = np.fft.fftfreq(fft_size, 1. / sample_rate)[0:int(fft_size / 2.)]
    vib_inds = np.where((freqs >= 2) & (freqs <= 20))[0]  # vibrato 2-20 Hz
    rate = []
    extent = []
    coverage = []
    for frame in np.arange(0, n_frames, hop_size):
        # Mean-subtract each windowed segment before taking its spectrum.
        contour_segment = (
            contour[frame:frame + frame_size] -
            np.mean(contour[frame:frame + frame_size])
        )
        spec = np.abs(np.fft.fft(contour_segment, n=fft_size))
        peak_inds = librosa.util.peak_pick(spec, 3, 3, 3, 5, 0.5, 10)
        # keep only the top 3 peaks by magnitude
        if len(peak_inds) > 0:
            top_peak_idx = np.argsort(spec[peak_inds])[::-1][:3]
            peak_inds = peak_inds[top_peak_idx]
        # peaks that fall inside the 2-20 Hz vibrato band
        vib_peaks = list(
            np.intersect1d(vib_inds, peak_inds, assume_unique=True)
        )
        if len(vib_peaks) > 0:
            rate.append(np.mean(freqs[vib_peaks]))
            # extent: peak-to-peak range of the raw segment
            extent.append(
                np.max(contour[frame:frame + frame_size]) -
                np.min(contour[frame:frame + frame_size])
            )
            # append '1' if current frame has vibrato
            coverage.append(1.0)
    # Aggregate per-frame measurements; zeros mean "no vibrato detected".
    rate = np.mean(rate) if len(rate) > 0 else 0.0
    extent = np.mean(extent) if len(extent) > 0 else 0.0
    coverage = sum(coverage) / n_frames if len(coverage) > 0 else 0.0
    vib_params = [rate, extent, coverage]
    if vib_params == [0.0, 0.0, 0.0]:
        feats = np.array([0] + vib_params)
    else:
        feats = np.array([1] + vib_params)
    return feats
| |
"""
This code is an example of how to train an MNIST Variational Autoencoder (VAE).
The VAE includes the reparameterization trick, a trainer, and a sample generator.
"""
import os
import time
import numpy as np
import theano
import theano.tensor as T
import scipy.misc
from theano.sandbox.rng_mrg import MRG_RandomStreams as MRG
from lemontree.data.mnist import MNIST
from lemontree.graphs.graph import SimpleGraph
from lemontree.generators.generator import SimpleGenerator
from lemontree.controls.history import HistoryWithEarlyStopping
from lemontree.controls.scheduler import LearningRateMultiplyScheduler
from lemontree.graphs.graph import SimpleGraph
from lemontree.layers.activation import ReLU, Sigmoid, Tanh
from lemontree.layers.dense import DenseLayer
from lemontree.layers.normalization import BatchNormalization1DLayer
from lemontree.layers.variational import Latent1DLayer
from lemontree.initializers import HeNormal
from lemontree.objectives import BinaryCrossentropy, KLGaussianNormal
from lemontree.optimizers import Adam
from lemontree.parameters import SimpleParameter
from lemontree.utils.param_utils import filter_params_by_tags, print_tags_in_params, print_params_num
from lemontree.utils.type_utils import merge_dicts
from lemontree.utils.graph_utils import get_inputs_of_variables
from lemontree.utils.data_utils import split_data
np.random.seed(9999)  # fixed seed for reproducibility
# base_datapath = 'C:/Users/skhu2/Dropbox/Project/data/'
# base_datapath = 'D:/Dropbox/Project/data/'
base_datapath = '/home/khshim/data/'
experiment_name = 'mnist_vae'
#================Prepare data================#
# Flattened MNIST (784-dim vectors); 90/10 train/valid split, batch size 250.
mnist = MNIST(base_datapath, 'flat')
train_data = mnist.train_data
train_data, valid_data = split_data(train_data, 0.9)
test_data = mnist.test_data
train_gen = SimpleGenerator([train_data], 250, 'train')
valid_gen = SimpleGenerator([valid_data], 250, 'valid')
test_gen = SimpleGenerator([test_data], 250, 'test')
rng = MRG(9999)
#================Build graph================#
# x: input images, z: latent codes fed directly to the decoder for sampling.
x = T.fmatrix('X')
z = T.fmatrix('Z')
graph = SimpleGraph(experiment_name, 250)
# Encoder: 784 -> 1024 -> 1024 -> 256, then a 128-dim latent layer
# (layer 7) implementing the reparameterization trick.
graph.add_layer(DenseLayer((784,), (1024,), use_bias=False), get_from=[])  # 0
graph.add_layer(BatchNormalization1DLayer((1024,)))  # 1
graph.add_layer(ReLU(0.1))  # 2
graph.add_layer(DenseLayer((1024,), (1024,), use_bias=False))  # 3
graph.add_layer(BatchNormalization1DLayer((1024,)))  # 4
graph.add_layer(ReLU(0.1))  # 5
graph.add_layer(DenseLayer((1024,), (256,)))  # 6
graph.add_layer(Latent1DLayer((256,), (128,)))  # 7
# Decoder: 128 -> 1024 -> 1024 -> 784 with sigmoid output (pixel probabilities).
graph.add_layer(DenseLayer((128,), (1024,), use_bias=False))  # 8
graph.add_layer(BatchNormalization1DLayer((1024,)))  # 9
graph.add_layer(ReLU(0.1))  # 10
graph.add_layer(DenseLayer((1024,), (1024,), use_bias=False))  # 11
graph.add_layer(BatchNormalization1DLayer((1024,)))  # 12
graph.add_layer(ReLU(0.1))  # 13
graph.add_layer(DenseLayer((1024,), (784,)))  # 14
graph.add_layer(Sigmoid())  # 15
# Full reconstruction output, and the pre-latent activations (layer 6 output)
# used by the KL term.
graph_output, graph_layers = graph.get_output({0:[x]}, -1, 0)
latent_output, latent_layers = graph.get_output({0:[x]}, 6, 0)
# Loss = KL(q(z|x) || N(0, I)) + binary cross-entropy reconstruction.
reconstruct_loss = BinaryCrossentropy().get_output(graph_output, x)
latent_loss = KLGaussianNormal((256,), (128,)).get_output(latent_output)
loss = latent_loss + reconstruct_loss
graph_params = graph.get_params()
graph_updates = graph.get_updates()
# generator: run only the decoder (layers 8..15) from a sampled z
gen_output, _ = graph.get_output({8:[z]}, -1, 8)
#================Prepare arguments================#
HeNormal().initialize_params(filter_params_by_tags(graph_params, ['weight']))
print_tags_in_params(graph_params)
print_params_num(graph_params)
optimizer = Adam(0.001)
optimizer_updates = optimizer.get_updates(loss, graph_params)
optimizer_params = optimizer.get_params()
total_params = optimizer_params + graph_params
total_updates = merge_dicts([optimizer_updates, graph_updates])
params_saver = SimpleParameter(total_params, experiment_name + '_params/')
params_saver.save_params()
# lr x0.1 on plateau; early stopping with patience 5 (x2 thresholds)
lr_scheduler = LearningRateMultiplyScheduler(optimizer.lr, 0.1)
hist = HistoryWithEarlyStopping(experiment_name + '_history/', 5, 5)
hist.add_keys(['train_latent_loss', 'train_reconstruct_loss'])
hist.add_keys(['valid_latent_loss', 'valid_reconstruct_loss'])
hist.add_keys(['test_latent_loss', 'test_reconstruct_loss'])
#================Compile functions================#
#test_func = theano.function(inputs=[x],
#                            outputs=[enc_mu, enc_var, latent],
#                            allow_input_downcast=True)
#test_output = test_func(train_gen.get_minibatch(0)[0])
#print(test_output[0])
#print(test_output[1])
#print(test_output[2])
train_func = theano.function(inputs=[x],
                             outputs=[loss, latent_loss, reconstruct_loss],
                             updates=total_updates,
                             allow_input_downcast=True)
test_func = theano.function(inputs=[x],
                            outputs=[loss, latent_loss, reconstruct_loss],
                            allow_input_downcast=True)
gen_func = theano.function(inputs=[z],
                           outputs=gen_output,
                           allow_input_downcast=True)
#================Convenient functions================#
def train_trainset():
    """Run one training epoch over every minibatch and record mean losses."""
    totals, latents, reconstructs = [], [], []
    for batch_idx in range(train_gen.max_index):
        batch = train_gen.get_minibatch(batch_idx)
        total, latent, reconstruct = train_func(batch[0])
        totals.append(total)
        latents.append(latent)
        reconstructs.append(reconstruct)
    hist.history['train_loss'].append(np.mean(np.asarray(totals)))
    hist.history['train_latent_loss'].append(np.mean(np.asarray(latents)))
    hist.history['train_reconstruct_loss'].append(np.mean(np.asarray(reconstructs)))
def test_validset():
    """Evaluate the model on the validation generator and record mean losses."""
    totals, latents, reconstructs = [], [], []
    for batch_idx in range(valid_gen.max_index):
        batch = valid_gen.get_minibatch(batch_idx)
        total, latent, reconstruct = test_func(batch[0])
        totals.append(total)
        latents.append(latent)
        reconstructs.append(reconstruct)
    hist.history['valid_loss'].append(np.mean(np.asarray(totals)))
    hist.history['valid_latent_loss'].append(np.mean(np.asarray(latents)))
    hist.history['valid_reconstruct_loss'].append(np.mean(np.asarray(reconstructs)))
def test_testset():
    """Evaluate the model on the test generator and record mean losses."""
    totals, latents, reconstructs = [], [], []
    for batch_idx in range(test_gen.max_index):
        batch = test_gen.get_minibatch(batch_idx)
        total, latent, reconstruct = test_func(batch[0])
        totals.append(total)
        latents.append(latent)
        reconstructs.append(reconstruct)
    hist.history['test_loss'].append(np.mean(np.asarray(totals)))
    hist.history['test_latent_loss'].append(np.mean(np.asarray(latents)))
    hist.history['test_reconstruct_loss'].append(np.mean(np.asarray(reconstructs)))
result_folder = experiment_name + '_result/'
if not os.path.exists(result_folder):
    os.makedirs(result_folder)
def generate(epoch):
    """Sample the decoder and save an 8x8 grid of generated digits as a PNG."""
    # Sample latent codes from the standard-normal prior (only 64 of the
    # 250 generated images are placed on the grid below).
    random_z = np.random.normal(0, 1, (250, 128))
    generated = gen_func(random_z)
    manifold = np.zeros((28*8, 28*8), dtype=theano.config.floatX)
    for indx in range(8):
        for indy in range(8):
            # tile image (indx * 8 + indy) into grid cell (indx, indy)
            current_img = np.reshape(generated[indx * 8 + indy], (28,28))
            manifold[indx * 28:(indx+1) * 28, indy * 28:(indy+1) * 28] = current_img
    manifold = np.asarray(manifold * 255, dtype='int32')
    # NOTE(review): scipy.misc.toimage/imsave were removed in SciPy >= 1.2;
    # this requires an old SciPy (with PIL) -- consider imageio for new code.
    manifold = scipy.misc.toimage(manifold, cmin=0, cmax=255)
    scipy.misc.imsave(result_folder + str(epoch) + '.png', manifold)
#================Train================#
# Flags set by the early-stopping controller after each epoch.
change_lr = False
end_train = False
for epoch in range(1000):
    # stopping: restore the best saved parameters and leave the loop
    if end_train:
        params_saver.load_params()
        break
    # plateau: reload best parameters and decay the learning rate
    if change_lr:
        params_saver.load_params()
        lr_scheduler.change_learningrate(epoch)
        # optimizer.reset_params()
    train_gen.shuffle()
    print('...Epoch', epoch)
    # NOTE(review): time.clock() was removed in Python 3.8 -- this script
    # targets the Python 2 era; use time.perf_counter() if ever ported.
    start_time = time.clock()
    train_trainset()
    test_validset()
    generate(epoch)
    end_time = time.clock()
    print('......time:', end_time - start_time)
    hist.print_history_of_epoch()
    # the controller inspects 'valid_loss' and tells us what to do next
    checker = hist.check_earlystopping('valid_loss')
    if checker == 'save_param':
        time.sleep(1)
        hist.save_history_to_csv()
        params_saver.save_params()
        change_lr = False
        end_train = False
    elif checker == 'change_lr':
        change_lr = True
        end_train = False
    elif checker == 'end_train':
        change_lr = False
        end_train = True
    elif checker == 'keep_train':
        change_lr = False
        end_train = False
    else:
        raise NotImplementedError('Not supported checker type')
#================Test================#
# Final evaluation with the best parameters restored above.
test_testset()
best_loss, best_epoch = hist.best_loss_and_epoch_of_key('valid_loss')
hist.print_history_of_epoch(best_epoch, ['train_loss', 'valid_loss'])
hist.print_history_of_epoch(-1, ['test_loss'])
hist.save_history_to_csv()
| |
import os
import codecs
from collections import namedtuple
from posixpath import normpath
try:
    # Python 3
    from urllib.request import urlopen
    from urllib.parse import quote
    unicode = str
except ImportError:
    # Python 2 fallback. Narrowed from a bare 'except:' which would also
    # have swallowed SystemExit/KeyboardInterrupt raised during import.
    from urllib import urlopen
    from urllib import quote
__version__ = '0.3.2'
__all__ = ['URL', 'SplitResult', 'parse', 'extract', 'construct', 'normalize',
           'compare', 'normalize_host', 'normalize_path', 'normalize_query',
           'normalize_fragment', 'encode', 'unquote', 'split', 'split_netloc',
           'split_host']
# Source of the Public Suffix List used by split_host().
PSL_URL = 'https://publicsuffix.org/list/effective_tld_names.dat'
def _get_public_suffix_list():
"""Return a set containing all Public Suffixes.
If the env variable PUBLIC_SUFFIX_LIST does not point to a local copy of the
public suffix list it is downloaded into memory each time urltools is
imported.
"""
local_psl = os.environ.get('PUBLIC_SUFFIX_LIST')
if local_psl:
with codecs.open(local_psl, 'r', 'utf-8') as f:
psl_raw = f.readlines()
else:
psl_raw = unicode(urlopen(PSL_URL).read(), 'utf-8').split('\n')
psl = set()
for line in psl_raw:
item = line.strip()
if item != '' and not item.startswith('//'):
psl.add(item)
return psl
# Public Suffix List, built once at import time.
# NOTE(review): when PUBLIC_SUFFIX_LIST is unset this performs a network
# download on every import -- consider caching the list to disk.
PSL = _get_public_suffix_list()
assert len(PSL) > 0, 'Public Suffix List is empty!'
# Schemes treated as hierarchical, i.e. followed by '://' (see construct()).
SCHEMES = ['http', 'https', 'ftp', 'sftp', 'file', 'gopher', 'imap', 'mms',
           'news', 'nntp', 'telnet', 'prospero', 'rsync', 'rtsp', 'rtspu',
           'svn', 'git', 'ws', 'wss']
# Characters permitted in a scheme name (used by split()).
SCHEME_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Characters that can appear in an IPv4 address plus ':' for a port.
IP_CHARS = '0123456789.:'
# Default ports dropped from normalized URLs (see _normalize_port()).
DEFAULT_PORT = {
    'http': '80',
    'https': '443',
    'ws': '80',
    'wss': '443',
    'ftp': '21',
    'sftp': '22',
    'ldap': '389'
}
# Per-component characters that must remain percent-encoded when unquoting.
QUOTE_EXCEPTIONS = {
    'path': ' /?+#',
    'query': ' &=+#',
    'fragment': ' +#'
}
SplitResult = namedtuple('SplitResult', ['scheme', 'netloc', 'path', 'query',
                                         'fragment'])
URL = namedtuple('URL', ['scheme', 'username', 'password', 'subdomain',
                         'domain', 'tld', 'port', 'path', 'query', 'fragment',
                         'url'])
def normalize(url):
    """Normalize a URL: lowercase host, strip default port, collapse path,
    sort query parameters.

    >>> normalize('hTtp://ExAMPLe.COM:80')
    'http://example.com/'
    """
    url = url.strip()
    if not url:
        return ''
    parts = split(url)
    if parts.scheme:
        netloc = parts.netloc
        # only paths of known hierarchical schemes are normalized
        path = normalize_path(parts.path) if parts.scheme in SCHEMES else parts.path
    else:
        # relative URL: split() leaves the would-be netloc inside .path
        netloc, slash, raw_path = parts.path.partition('/')
        path = normalize_path('/' + raw_path) if slash else ''
    username, password, host, port = split_netloc(netloc)
    normalized = URL(parts.scheme, username, password, None,
                     normalize_host(host), None,
                     _normalize_port(parts.scheme, port),
                     path,
                     normalize_query(parts.query),
                     normalize_fragment(parts.fragment),
                     None)
    return construct(normalized)
def compare(url1, url2):
    """Return True if url1 and url2 are the same after normalizing both.

    >>> compare("http://examPLe.com:80/abc?x=&b=1", "http://eXAmple.com/abc?b=1")
    True
    """
    left = normalize(url1)
    right = normalize(url2)
    return left == right
def _idna_encode(x):
return x.encode('idna').decode('utf-8')
def _encode_query(query):
"""Quote all values of a query string."""
if query == '':
return query
query_args = []
for query_kv in query.split('&'):
k, v = query_kv.split('=')
query_args.append(k + "=" + quote(v.encode('utf-8')))
return '&'.join(query_args)
def encode(url):
    """Encode URL: IDNA-encode the host parts, percent-encode the rest."""
    parts = extract(url)
    encoded = parts._replace(
        subdomain=_idna_encode(parts.subdomain),
        domain=_idna_encode(parts.domain),
        tld=_idna_encode(parts.tld),
        path=quote(parts.path.encode('utf-8')),
        query=_encode_query(parts.query),
        fragment=quote(parts.fragment.encode('utf-8')),
        url=None)
    return construct(encoded)
def construct(parts):
    """Assemble a URL string from a URL namedtuple."""
    pieces = []
    if parts.scheme:
        # known hierarchical schemes take '://', everything else just ':'
        separator = '://' if parts.scheme in SCHEMES else ':'
        pieces.append(parts.scheme + separator)
    if parts.username and parts.password:
        pieces.append(parts.username + ':' + parts.password + '@')
    elif parts.username:
        pieces.append(parts.username + '@')
    if parts.subdomain:
        pieces.append(parts.subdomain + '.')
    pieces.append(parts.domain)
    if parts.tld:
        pieces.append('.' + parts.tld)
    if parts.port:
        pieces.append(':' + parts.port)
    if parts.path:
        pieces.append(parts.path)
    if parts.query:
        pieces.append('?' + parts.query)
    if parts.fragment:
        pieces.append('#' + parts.fragment)
    return ''.join(pieces)
def _idna_decode(x):
return codecs.decode(x.encode('utf-8'), 'idna')
def normalize_host(host):
    """Return *host* with any IDNA (punycode) labels decoded."""
    if 'xn--' not in host:
        return host
    decoded_labels = (_idna_decode(label) for label in host.split('.'))
    return '.'.join(decoded_labels)
def _normalize_port(scheme, port):
    """Return *port* unless it is the scheme's default port, else None.

    >>> _normalize_port('http', '80')
    >>> _normalize_port('http', '8080')
    '8080'
    """
    if not scheme:
        return port
    # .get() instead of [] -- schemes such as 'git' or 'telnet' are valid
    # (they appear in SCHEMES) but have no DEFAULT_PORT entry; indexing
    # raised KeyError for them.
    if port and port != DEFAULT_PORT.get(scheme):
        return port
    return None
def normalize_path(path):
    """Normalize a path: unquote and collapse duplicate slashes etc.

    >>> normalize_path('/a/b///c')
    '/a/b/c'
    """
    if path in ('', '/', '//'):
        return '/'
    unquoted = unquote(path, exceptions=QUOTE_EXCEPTIONS['path'])
    collapsed = normpath(unquoted)
    # normpath drops a trailing slash; restore it when the input had one
    if path.endswith('/') and not collapsed.endswith('/'):
        collapsed += '/'
    return collapsed
def normalize_query(query):
    """Normalize query: sort params by name, remove params without value.

    >>> normalize_query('z=3&y=&x=1')
    'x=1&z=3'
    """
    # anything of length <= 2 ('', 'a', 'a=') cannot hold a key=value pair
    if len(query) <= 2:
        return ''
    unquoted = unquote(query, exceptions=QUOTE_EXCEPTIONS['query'])
    kept = []
    for param in unquoted.split('&'):
        key, sep, value = param.partition('=')
        if sep and key and value:
            kept.append(key + '=' + value)
    return '&'.join(sorted(kept))
def normalize_fragment(fragment):
    """Normalize fragment: unquote it, keeping reserved characters encoded."""
    reserved = QUOTE_EXCEPTIONS['fragment']
    return unquote(fragment, reserved)
# Map both all-lowercase and all-uppercase two-digit hex escapes to their
# character. Mixed-case escapes (e.g. '%aB') are deliberately absent and
# therefore stay percent-encoded.
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(dict(('%02X' % i, chr(i)) for i in range(256)))

def unquote(text, exceptions=()):
    """Unquote a text but ignore the exceptions.

    Parameters
    ----------
    text : str
        Text whose percent-escapes should be decoded.
    exceptions : container of str, optional
        Characters that must stay percent-encoded. (The default was a
        mutable list literal; changed to an immutable tuple -- only
        membership tests are performed, so any container still works.)

    Raises
    ------
    TypeError
        If *text* is None.

    >>> unquote('foo%23bar')
    'foo#bar'
    >>> unquote('foo%23bar', ['#'])
    'foo%23bar'
    """
    if not text:
        if text is None:
            raise TypeError('None object cannot be unquoted')
        else:
            return text
    if '%' not in text:
        return text
    s = text.split('%')
    res = [s[0]]
    for h in s[1:]:
        c = _hextochr.get(h[:2])
        if c and c not in exceptions:
            if len(h) > 2:
                res.append(c + h[2:])
            else:
                res.append(c)
        else:
            # unknown or excepted escape: keep it verbatim
            res.append('%' + h)
    return ''.join(res)
def parse(url):
    """Parse a URL into a URL namedtuple.

    >>> parse('http://example.com/foo/')
    URL(scheme='http', ..., domain='example', tld='com', ..., path='/foo/', ...)
    """
    parts = split(url)
    if not parts.scheme:
        # relative URL: there is no authority component to decompose
        return URL(parts.scheme, '', '', '', '', '', '',
                   parts.path, parts.query, parts.fragment, url)
    username, password, host, port = split_netloc(parts.netloc)
    subdomain, domain, tld = split_host(host)
    return URL(parts.scheme, username, password, subdomain, domain, tld,
               port, parts.path, parts.query, parts.fragment, url)
def extract(url):
    """Extract as much information from a (relative) URL as possible.

    >>> extract('example.com/abc')
    URL(..., domain='example', tld='com', ..., path='/abc', ...)
    """
    parts = split(url)
    if parts.scheme:
        netloc, path = parts.netloc, parts.path
    else:
        # relative URL: the leading chunk of the path is really the netloc
        netloc, slash, remainder = parts.path.partition('/')
        path = '/' + remainder if slash else ''
    username, password, host, port = split_netloc(netloc)
    subdomain, domain, tld = split_host(host)
    return URL(parts.scheme, username, password, subdomain, domain, tld,
               port, path, parts.query, parts.fragment, url)
def split(url):
    """Split URL into scheme, netloc, path, query and fragment.

    Pure string scanning; no validation is performed.
    >>> split('http://www.example.com/abc?x=1&y=2#foo')
    SplitResult(scheme='http', netloc='www.example.com', path='/abc', query='x=1&y=2', fragment='foo')
    """
    scheme = netloc = path = query = fragment = ''
    ip6_start = url.find('[')
    scheme_end = url.find(':')
    # a ':' inside an IPv6 literal (e.g. '[::1]:80') is not a scheme delimiter
    if ip6_start > 0 and ip6_start < scheme_end:
        scheme_end = -1
    if scheme_end > 0:
        for c in url[:scheme_end]:
            if c not in SCHEME_CHARS:
                break
        else:
            # every char before ':' is a legal scheme char -> accept as scheme
            scheme = url[:scheme_end].lower()
            rest = url[scheme_end:].lstrip(':/')
    if not scheme:
        rest = url
    # positions of the first path/query/fragment delimiters in the remainder
    l_path = rest.find('/')
    l_query = rest.find('?')
    l_frag = rest.find('#')
    # netloc ends at whichever delimiter comes first
    if l_path > 0:
        if l_query > 0 and l_frag > 0:
            netloc = rest[:l_path]
            path = rest[l_path:min(l_query, l_frag)]
        elif l_query > 0:
            if l_query > l_path:
                netloc = rest[:l_path]
                path = rest[l_path:l_query]
            else:
                # '?' precedes '/': the slash belongs to the query string
                netloc = rest[:l_query]
                path = ''
        elif l_frag > 0:
            netloc = rest[:l_path]
            path = rest[l_path:l_frag]
        else:
            netloc = rest[:l_path]
            path = rest[l_path:]
    else:
        if l_query > 0:
            netloc = rest[:l_query]
        elif l_frag > 0:
            netloc = rest[:l_frag]
        else:
            netloc = rest
    if l_query > 0:
        if l_frag > 0:
            query = rest[l_query+1:l_frag]
        else:
            query = rest[l_query+1:]
    if l_frag > 0:
        fragment = rest[l_frag+1:]
    # without a scheme there is no authority: what was scanned as the
    # netloc is really the start of the path
    if not scheme:
        path = netloc + path
        netloc = ''
    return SplitResult(scheme, netloc, path, query, fragment)
def _clean_netloc(netloc):
"""Remove trailing '.' and ':' and tolower.
>>> _clean_netloc('eXample.coM:')
'example.com'
"""
try:
return netloc.rstrip('.:').lower()
except:
return netloc.rstrip('.:').decode('utf-8').lower().encode('utf-8')
def split_netloc(netloc):
    """Split netloc into (username, password, host, port).

    >>> split_netloc('foo:bar@www.example.com:8080')
    ('foo', 'bar', 'www.example.com', '8080')
    """
    username = password = host = port = ''
    if '@' in netloc:
        credentials, netloc = netloc.split('@', 1)
        username, sep, password = credentials.partition(':')
        if not sep:
            password = ''
    netloc = _clean_netloc(netloc)
    # a trailing ']' means the ':' belongs to an IPv6 literal, not a port
    if ':' in netloc and not netloc.endswith(']'):
        host, port = netloc.rsplit(':', 1)
    else:
        host = netloc
    return username, password, host, port
def split_host(host):
    """Use the Public Suffix List to split host into subdomain, domain and tld.
    >>> split_host('foo.bar.co.uk')
    ('foo', 'bar', 'co.uk')
    """
    # host is IPv6?
    if '[' in host:
        return '', host, ''
    # host is IPv4?
    for c in host:
        if c not in IP_CHARS:
            break
    else:
        return '', host, ''
    # host is a domain name: walk candidate suffixes from longest to
    # shortest and stop at the first PSL match. For each candidate the
    # '!' exception rule wins over an exact match, which wins over the
    # '*.' wildcard rule.
    domain = subdomain = tld = ''
    parts = host.split('.')
    for i in range(len(parts)):
        tld = '.'.join(parts[i:])
        wildcard_tld = '*.' + tld
        exception_tld = '!' + tld
        if exception_tld in PSL:
            # exception rule: the matched label itself is registrable
            domain = '.'.join(parts[:i+1])
            tld = '.'.join(parts[i+1:])
            break
        if tld in PSL:
            domain = '.'.join(parts[:i])
            break
        if wildcard_tld in PSL:
            # wildcard rule: one extra label belongs to the tld
            domain = '.'.join(parts[:i-1])
            tld = '.'.join(parts[i-1:])
            break
    # everything left of the registrable domain is the subdomain
    if '.' in domain:
        subdomain, domain = domain.rsplit('.', 1)
    return subdomain, domain, tld
| |
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic system module, executing statements on local node
"""
import os
import uuid
import time
from ConfigParser import RawConfigParser
from subprocess import check_output
from StringIO import StringIO
from ovs.log.logHandler import LogHandler
logger = LogHandler('lib', name='system')
class System(object):
    """
    Generic helper class executing statements on the local (or a remote) node
    """
    OVS_CONFIG = '/opt/OpenvStorage/config/ovs.cfg'
    # class-level caches, filled lazily by the getters below
    my_machine_id = ''
    my_storagerouter_guid = ''
    my_storagedriver_id = ''
    def __init__(self):
        """
        Dummy init method
        """
        _ = self
    @staticmethod
    def get_my_ips(client=None):
        """
        Returns configured ip addresses for this host
        """
        cmd = "ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1"
        output = System.run(cmd, client)
        my_ips = output.split('\n')
        # drop the loopback address and surrounding whitespace
        my_ips = [found_ip.strip() for found_ip in my_ips if found_ip.strip() != '127.0.0.1']
        return my_ips
    @staticmethod
    def get_my_machine_id(client=None):
        """
        Returns unique machine id based on mac address
        """
        if not System.my_machine_id or client:
            cmd = """ip a | grep link/ether | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | sed 's/://g' | sort"""
            output = System.run(cmd, client)
            # first non-zero MAC (sorted) is taken as the machine id
            for mac in output.split('\n'):
                if mac.strip() != '000000000000':
                    if client:
                        # remote query: return directly, don't poison the local cache
                        return mac.strip()
                    else:
                        System.my_machine_id = mac.strip()
                        break
        return System.my_machine_id
    @staticmethod
    def get_my_storagerouter():
        """
        Returns unique machine storagerouter id
        """
        # imported here to avoid circular imports at module load time
        from ovs.dal.hybrids.storagerouter import StorageRouter
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        if not System.my_storagerouter_guid:
            for storagerouter in StorageRouterList.get_storagerouters():
                if storagerouter.machine_id == System.get_my_machine_id():
                    System.my_storagerouter_guid = storagerouter.guid
        return StorageRouter(System.my_storagerouter_guid)
    @staticmethod
    def get_my_storagedriver_id(vpool_name):
        """
        Returns unique machine storagedriver_id based on vpool_name and machineid
        """
        return vpool_name + System.get_my_machine_id()
    @staticmethod
    def get_storagedriver(vpool_name):
        """
        Returns storagedriver object based on vpool_name
        """
        my_storagedriver_id = System.get_my_storagedriver_id(vpool_name)
        my_storagerouter = System.get_my_storagerouter()
        for storagedriver in my_storagerouter.storagedrivers:
            if storagedriver.name == my_storagedriver_id:
                return storagedriver
        raise ValueError('No storagedriver found for vpool_name: {0}'.format(vpool_name))
    @staticmethod
    def update_hosts_file(hostname, ip):
        """
        Update/add entry for hostname ip in /etc/hosts
        """
        import re
        with open('/etc/hosts', 'r') as hosts_file:
            contents = hosts_file.read()
        if isinstance(hostname, list):
            hostnames = ' '.join(hostname)
        else:
            hostnames = hostname
        result = re.search('^{0}\s.*\n'.format(ip), contents, re.MULTILINE)
        if result:
            # ip already present: replace the whole existing line
            contents = contents.replace(result.group(0), '{0} {1}\n'.format(ip, hostnames))
        else:
            contents += '{0} {1}\n'.format(ip, hostnames)
        with open('/etc/hosts', 'wb') as hosts_file:
            hosts_file.write(contents)
    @staticmethod
    def exec_remote_python(client, script):
        """
        Executes a python script on a client
        """
        return client.run('python -c """{0}"""'.format(script))
    @staticmethod
    def read_remote_config(client, key):
        """
        Reads remote configuration key
        """
        # NOTE: embedded script uses a Python 2 print statement
        read = """
from ovs.plugin.provider.configuration import Configuration
print Configuration.get('{0}')
""".format(key)
        return System.exec_remote_python(client, read)
    @staticmethod
    def set_remote_config(client, key, value):
        """
        Sets remote configuration key
        """
        write = """
from ovs.plugin.provider.configuration import Configuration
Configuration.set('{0}', '{1}')
""".format(key, value)
        System.exec_remote_python(client, write)
    @staticmethod
    def ports_in_use(client=None):
        """
        Returns the ports in use (generator of ints, IPv4 listening sockets)
        """
        cmd = """netstat -ln4 | sed 1,2d | sed 's/\s\s*/ /g' | cut -d ' ' -f 4 | cut -d ':' -f 2"""
        output = System.run(cmd, client)
        for found_port in output.split('\n'):
            yield int(found_port.strip())
    @staticmethod
    def get_free_ports(selected_range, exclude=None, nr=1, client=None):
        """
        Return requested nr of free ports not currently in use and not within excluded range
        :param selected_range: e.g. '2000-2010' or '50000-6000, 8000-8999' ; note single port extends to [port -> 65535]
        :param exclude: excluded list
        :param nr: nr of free ports requested
        :return: sorted incrementing list of nr of free ports
        """
        requested_range = list()
        selected_range = str(selected_range)
        for port_range in str(selected_range).split(','):
            port_range = port_range.strip()
            if '-' in port_range:
                current_range = (int(port_range.split('-')[0]), int(port_range.split('-')[1]))
            else:
                # a single port means "this port and everything above it"
                current_range = (int(port_range), 65535)
            if 0 <= current_range[0] <= 1024:
                # never hand out privileged ports
                current_range = (1025, current_range[1])
            requested_range.extend(xrange(current_range[0], current_range[1] + 1))
        free_ports = list()
        if exclude is None:
            exclude = list()
        exclude_list = list(exclude)
        ports_in_use = System.ports_in_use(client)
        for port in ports_in_use:
            exclude_list.append(port)
        # also avoid the kernel's ephemeral port range to prevent clashes
        cmd = """cat /proc/sys/net/ipv4/ip_local_port_range"""
        output = System.run(cmd, client)
        start_end = list(output.split())
        ephemeral_port_range = xrange(int(min(start_end)), int(max(start_end)))
        for possible_free_port in requested_range:
            if possible_free_port not in ephemeral_port_range and possible_free_port not in exclude_list:
                free_ports.append(possible_free_port)
                if len(free_ports) == nr:
                    return free_ports
        raise ValueError('Unable to find requested nr of free ports')
    @staticmethod
    def run(cmd, client=None):
        # Execute a shell command locally (or via the given remote client).
        # NOTE(review): shell=True with interpolated command strings --
        # callers must never pass untrusted input here.
        if client is None:
            output = check_output(cmd, shell=True).strip()
        else:
            output = client.run(cmd).strip()
        return output
    @staticmethod
    def get_arakoon_cluster_names(client=None, arakoon_config_dir=None):
        """
        :param client: optional remote client
        :param arakoon_config_dir: default /opt/OpenvStorage/config/arakoon for ovs
        :return: list of configured arakoon cluster names on this client
        """
        if arakoon_config_dir is None:
            arakoon_config_dir = '/opt/OpenvStorage/config/arakoon'
        cmd = """ls {0} """.format(arakoon_config_dir)
        output = System.run(cmd, client)
        return list(output.split())
    @staticmethod
    def read_config(filename, client=None):
        # Read an ini-style config file, locally or through a remote client,
        # and return a RawConfigParser.
        if client is None:
            cp = RawConfigParser()
            with open(filename, 'r') as config_file:
                cfg = config_file.read()
            cp.readfp(StringIO(cfg))
            return cp
        else:
            contents = client.file_read(filename)
            cp = RawConfigParser()
            cp.readfp(StringIO(contents))
            return cp
    @staticmethod
    def write_config(config, filename, client=None):
        # Write a RawConfigParser to disk; for remote clients, stage the file
        # in /var/tmp first and upload it.
        if client is None:
            with open(filename, 'w') as config_file:
                config.write(config_file)
        else:
            temp_filename = '/var/tmp/{0}'.format(str(uuid.uuid4()).replace('-', ''))
            with open(temp_filename, 'w') as config_file:
                config.write(config_file)
            time.sleep(1)
            client.file_upload(filename, temp_filename)
            os.remove(temp_filename)
    @staticmethod
    def read_ovs_config():
        # Convenience wrapper: read the main Open vStorage config file.
        return System.read_config(System.OVS_CONFIG)
| |
"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
Example:
------------------
from netmiko.snmp_autodetect import SNMPDetect
my_snmp = SNMPDetect(hostname='1.1.1.70', user='pysnmp', auth_key='key1', encrypt_key='key2')
device_type = my_snmp.autodetect()
------------------
autodetect will return None if no match.
SNMPDetect class defaults to SNMPv3
Note, pysnmp is a required dependency for SNMPDetect and is intentionally not included in
netmiko requirements. So installation of pysnmp might be required.
"""
from __future__ import unicode_literals
import re
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
raise ImportError("pysnmp not installed; please install it: 'pip install pysnmp'")
from netmiko.ssh_dispatcher import CLASS_MAPPER
# Each entry maps a Netmiko device_type to the SNMP OID to query (usually
# MIB-2 sysDescr), a regex the response must match, and a priority.
# Higher priority indicates a better match.
SNMP_MAPPER_BASE = {
    'arista_eos': {"oid": ".1.3.6.1.2.1.1.1.0",
                   "expr": re.compile(r".*Arista Networks EOS.*", re.IGNORECASE),
                   "priority": 99},
    'hp_comware': {"oid": ".1.3.6.1.2.1.1.1.0",
                   "expr": re.compile(r".*HP Comware.*", re.IGNORECASE),
                   "priority": 99},
    'cisco_ios': {"oid": ".1.3.6.1.2.1.1.1.0",
                  "expr": re.compile(r".*Cisco IOS Software,.*", re.IGNORECASE),
                  "priority": 60},
    'cisco_xe': {"oid": ".1.3.6.1.2.1.1.1.0",
                 "expr": re.compile(r".*IOS-XE Software,.*", re.IGNORECASE),
                 "priority": 99},
    'cisco_xr': {"oid": ".1.3.6.1.2.1.1.1.0",
                 "expr": re.compile(r".*Cisco IOS XR Software.*", re.IGNORECASE),
                 "priority": 99},
    'cisco_asa': {"oid": ".1.3.6.1.2.1.1.1.0",
                  "expr": re.compile(r".*Cisco Adaptive Security Appliance.*", re.IGNORECASE),
                  "priority": 99},
    'cisco_nxos': {"oid": ".1.3.6.1.2.1.1.1.0",
                   "expr": re.compile(r".*Cisco NX-OS.*", re.IGNORECASE),
                   "priority": 99},
    'cisco_wlc': {"oid": ".1.3.6.1.2.1.1.1.0",
                  "expr": re.compile(r".*Cisco Controller.*", re.IGNORECASE),
                  "priority": 99},
    'f5_ltm': {"oid": ".1.3.6.1.4.1.3375.2.1.4.1.0",
               "expr": re.compile(r".*BIG-IP.*", re.IGNORECASE),
               "priority": 99},
    'fortinet': {"oid": ".1.3.6.1.2.1.1.1.0",
                 "expr": re.compile(r"Forti.*", re.IGNORECASE),
                 "priority": 80},
    'checkpoint': {"oid": ".1.3.6.1.4.1.2620.1.6.16.9.0",
                   "expr": re.compile(r"CheckPoint"),
                   "priority": 79},
}
# Ensure all SNMP device types are supported by Netmiko
SNMP_MAPPER = {}
std_device_types = list(CLASS_MAPPER.keys())
for device_type in std_device_types:
    if SNMP_MAPPER_BASE.get(device_type):
        SNMP_MAPPER[device_type] = SNMP_MAPPER_BASE[device_type]
class SNMPDetect(object):
"""
The SNMPDetect class tries to automatically determine the device type.
Typically this will use the MIB-2 SysDescr and regular expressions.
Parameters
----------
hostname: str
The name or IP address of the hostname we want to guess the type
snmp_version : str, optional ('v1', 'v2c' or 'v3')
The SNMP version that is running on the device (default: 'v3')
snmp_port : int, optional
The UDP port on which SNMP is listening (default: 161)
community : str, optional
The SNMP read community when using SNMPv2 (default: None)
user : str, optional
The SNMPv3 user for authentication (default: '')
auth_key : str, optional
The SNMPv3 authentication key (default: '')
encrypt_key : str, optional
The SNMPv3 encryption key (default: '')
    auth_proto : str, optional ('sha', 'md5')
        The SNMPv3 authentication protocol (default: 'sha')
    encrypt_proto : str, optional ('des', '3des', 'aes128', 'aes192', 'aes256')
        The SNMPv3 encryption protocol (default: 'aes128')
Attributes
----------
hostname: str
The name or IP address of the device we want to guess the type
snmp_version : str
The SNMP version that is running on the device
snmp_port : int
The UDP port on which SNMP is listening
community : str
The SNMP read community when using SNMPv2
user : str
The SNMPv3 user for authentication
auth_key : str
The SNMPv3 authentication key
encrypt_key : str
The SNMPv3 encryption key
auth_proto : str
The SNMPv3 authentication protocol
encrypt_proto : str
The SNMPv3 encryption protocol
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, hostname, snmp_version="v3", snmp_port=161, community=None, user="",
auth_key="", encrypt_key="", auth_proto="sha", encrypt_proto="aes128"):
# Check that the SNMP version is matching predefined type or raise ValueError
if snmp_version == "v1" or snmp_version == "v2c":
if not community:
raise ValueError("SNMP version v1/v2c community must be set.")
elif snmp_version == "v3":
if not user:
raise ValueError("SNMP version v3 user and password must be set")
else:
raise ValueError("SNMP version must be set to 'v1', 'v2c' or 'v3'")
# Check that the SNMPv3 auth & priv parameters match allowed types
self._snmp_v3_authentication = {"sha": cmdgen.usmHMACSHAAuthProtocol,
"md5": cmdgen.usmHMACMD5AuthProtocol}
self._snmp_v3_encryption = {"des": cmdgen.usmDESPrivProtocol,
"3des": cmdgen.usm3DESEDEPrivProtocol,
"aes128": cmdgen.usmAesCfb128Protocol,
"aes192": cmdgen.usmAesCfb192Protocol,
"aes256": cmdgen.usmAesCfb256Protocol}
if auth_proto not in self._snmp_v3_authentication.keys():
raise ValueError("SNMP V3 'auth_proto' argument must be one of the following: {}"
.format(self._snmp_v3_authentication.keys()))
if encrypt_proto not in self._snmp_v3_encryption.keys():
raise ValueError("SNMP V3 'encrypt_proto' argument must be one of the following: {}"
.format(self._snmp_v3_encryption.keys()))
self.hostname = hostname
self.snmp_version = snmp_version
self.snmp_port = snmp_port
self.community = community
self.user = user
self.auth_key = auth_key
self.encrypt_key = encrypt_key
self.auth_proto = self._snmp_v3_authentication[auth_proto]
self.encryp_proto = self._snmp_v3_encryption[encrypt_proto]
self._response_cache = {}
def _get_snmpv3(self, oid):
"""
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(self.user, self.auth_key, self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid, lookupNames=True, lookupValues=True)
if not error_detected and snmp_data[0][1]:
return str(snmp_data[0][1])
return ""
def _get_snmpv2c(self, oid):
"""
Try to send an SNMP GET operation using SNMPv2 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid, lookupNames=True, lookupValues=True)
if not error_detected and snmp_data[0][1]:
return str(snmp_data[0][1])
return ""
def _get_snmp(self, oid):
"""Wrapper for generic SNMP call."""
if self.snmp_version in ["v1", "v2c"]:
return self._get_snmpv2c(oid)
else:
return self._get_snmpv3(oid)
def autodetect(self):
"""
Try to guess the device_type using SNMP GET based on the SNMP_MAPPER dict. The type which
is returned is directly matching the name in *netmiko.ssh_dispatcher.CLASS_MAPPER_BASE*
dict.
Thus you can use this name to retrieve automatically the right ConnectionClass
Returns
-------
potential_type : str
The name of the device_type that must be running.
"""
# Convert SNMP_MAPPER to a list and sort by priority
snmp_mapper_list = []
for k, v in SNMP_MAPPER.items():
snmp_mapper_list.append({k: v})
snmp_mapper_list = sorted(snmp_mapper_list, key=lambda x: list(x.values())[0]['priority'])
snmp_mapper_list.reverse()
for entry in snmp_mapper_list:
for device_type, v in entry.items():
oid = v['oid']
regex = v['expr']
# Used cache data if we already queryied this OID
if self._response_cache.get(oid):
snmp_response = self._response_cache.get(oid)
else:
snmp_response = self._get_snmp(oid)
self._response_cache[oid] = snmp_response
# See if we had a match
if re.search(regex, snmp_response):
return device_type
return None
| |
# -*- coding: utf-8 -*-
import ckan.plugins as plugins
import ckan.lib.helpers as h
import ckan.logic as logic
import ckan.model as model
import collections
import dbsuggest as db
import constants
import datetime
import cgi
import logging
import validator
from ckan.common import response, request, json
c = plugins.toolkit.c
log = logging.getLogger(__name__)
tk = plugins.toolkit
# Avoid user_show lag
USERS_CACHE = {}
def _get_user(user_id):
    """Return the CKAN user dict for ``user_id``, memoised in USERS_CACHE.

    Best-effort: any failure is logged and None is returned.
    """
    try:
        if user_id not in USERS_CACHE:
            USERS_CACHE[user_id] = tk.get_action('user_show')(
                {'ignore_auth': True}, {'id': user_id})
        return USERS_CACHE[user_id]
    except Exception as e:
        log.warn(e)
def tnstats_dataset_count(self, id):
    """Return a ``(views, downloads)`` namedtuple of tracking stats for dataset ``id``."""
    _ViewCount = collections.namedtuple("ViewCount", "views downloads")
    # Parameterized query: ``id`` is bound by the driver, not interpolated.
    sql = '''
        SELECT
        COALESCE(SUM(s.count), 0) AS views,
        --COALESCE((SELECT SUM(resource_count) FROM v_dataset_count WHERE dataset_id=p.id), 0) AS views,
        COALESCE((SELECT SUM(resource_count) FROM v_dataset_download WHERE dataset_id=p.id), 0) AS downloads
        FROM package AS p LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
        WHERE p.id = %s GROUP BY p.id ; '''
    rows = model.meta.engine.execute(sql, id).fetchall()
    return [_ViewCount(*row) for row in rows][0]
def suggest_index(context, data_dict):
    """List suggestions ordered by date, optionally filtered by closed state.

    Returns a dict with ``count``, ``facets`` and the paginated ``result``.
    """
    db.init_db(context['model'])
    tk.check_access(constants.SUGGEST_INDEX, context, data_dict)

    # Optional filter on open/closed state
    params = {}
    if data_dict.get('closed', None) is not None:
        params['closed'] = data_dict.get('closed', None)

    db_suggests = db.Suggest.get_ordered_by_date(**params)

    # Paginate and dictize the requested window
    offset = data_dict.get('offset', 0)
    limit = data_dict.get('limit', constants.SUGGESTS_PER_PAGE)
    page = db_suggests[offset:offset + limit]
    return {
        'count': len(db_suggests),
        'facets': {},
        'result': [_dictize_suggest_list(item) for item in page]
    }
def _dictize_suggest_list(suggest):
    """Convert a Suggest DB record into the summary dict used in index listings."""
    org = model.Session.query(model.Group).filter(
        model.Group.id == suggest.org_id).first()
    return {
        'id': suggest.id,
        'title': suggest.title,
        'user_id': _star_id(suggest.user_id),
        'open_time': str(suggest.open_time),
        'views': suggest.views,
        'comments': db.Comment.get_count_by_suggest(suggest_id=suggest.id),
        'org_id': suggest.org_id,
        'org': '' if org is None else org.title,
        'send_mail': suggest.send_mail,
        'email': suggest.email,
        'mail_time': suggest.mail_time.strftime("%Y-%m-%d %H:%M") if suggest.mail_time else ''
    }
def _star_id(uid):
if len(uid) < 3:
return '**'
ap = '*'
for i in uid[1:-1]:
ap += i
ap += '*'
return ap
def suggest_create(context, data_dict):
    """Validate and persist a new Suggest record; return it dictized."""
    session = context['session']
    db.init_db(context['model'])
    tk.check_access(constants.SUGGEST_CREATE, context, data_dict)
    validator.validate_suggest(context, data_dict)

    # Build and store the new record, stamping the creation time
    suggest = db.Suggest()
    _undictize_suggest_basic(suggest, data_dict)
    suggest.open_time = datetime.datetime.now()
    session.add(suggest)
    session.commit()
    return _dictize_suggest(suggest)
def _dictize_suggest(suggest):
    """Convert a Suggest DB record into its full API dict."""
    # close_time / mail_time may be None; only real dates are stringified
    close_time = str(suggest.close_time) if suggest.close_time else suggest.close_time
    mail_time = str(suggest.mail_time) if suggest.mail_time else suggest.mail_time
    org = model.Session.query(model.Group).filter(
        model.Group.id == suggest.org_id).first()
    return {
        'id': suggest.id,
        'title': suggest.title,
        'description': suggest.description,
        'user_id': _star_id(suggest.user_id),
        'dataset_name': suggest.dataset_name,
        'suggest_columns': suggest.suggest_columns,
        'open_time': str(suggest.open_time),
        'close_time': close_time,
        'closed': suggest.closed,
        'views': suggest.views,
        'org_id': suggest.org_id,
        'org': '' if org is None else org.title,
        'send_mail': suggest.send_mail,
        'email': suggest.email,
        'mail_time': mail_time
    }
def _undictize_suggest_basic(suggest, data_dict):
suggest.title = data_dict['title']
suggest.description = data_dict['description']
suggest.user_id = data_dict['user_id']
suggest.dataset_name = data_dict['dataset_name']
suggest.suggest_columns = data_dict['suggest_columns']
suggest.org_id = data_dict['org_id']
suggest.email = data_dict['email']
#suggest.mail_time = data_tict['mail_time']
def suggest_show(context, data_dict):
    """Return one suggestion (with its comments) identified by ``data_dict['id']``."""
    suggest_id = data_dict.get('id', '')
    if not suggest_id:
        raise tk.ValidationError('Data Request ID has not been included')

    db.init_db(context['model'])
    tk.check_access(constants.SUGGEST_SHOW, context, data_dict)

    result = db.Suggest.get(id=suggest_id)
    if not result:
        raise tk.ObjectNotFound('Data Request %s not found in the data base' % suggest_id)

    data_dict = _dictize_suggest(result[0])
    # Attach the dictized comments, ordered by date
    data_dict['comments'] = [
        _dictize_comment(comment)
        for comment in db.Comment.get_ordered_by_date(suggest_id=data_dict['id'])
    ]
    return data_dict
def get_domail_content(context, params):
    """Build the mail-content dict for the suggestion identified by ``params['id']``."""
    suggest_id = params.get('id', '')
    log.info("Suggest_action_get_domail_content: " + suggest_id)
    if not suggest_id:
        raise tk.ValidationError('Data Request ID has not been included')

    db.init_db(context['model'])
    db_suggests = db.Suggest.get(id=suggest_id)
    if not db_suggests:
        raise tk.ObjectNotFound('Data Request %s not found in the data base' % suggest_id)

    suggest = db_suggests[0]
    org = model.Session.query(model.Group).filter(
        model.Group.id == suggest.org_id).first()
    return {
        'id': suggest.id,
        'user_name': suggest.user_id,
        'title': suggest.title,
        'description': suggest.description,
        'dataset_name': suggest.dataset_name,
        'suggest_columns': suggest.suggest_columns,
        'open_time': suggest.open_time,
        'org_id': suggest.org_id,
        'org': '' if org is None else org.title,
        'send_mail': suggest.send_mail,
        'email': suggest.email
    }
def suggest_views(context, data_dict):
    """Increment the view counter of the suggestion given by ``data_dict['id']``."""
    # The previous `model = context['model']` binding was unused and removed.
    suggest_id = data_dict.get('id', '')
    db.Suggest.views_plus(suggest_id)
def suggest_mailed(context, data_dict):
    """Record that the notification mail for the suggestion has been sent."""
    # The previous `model = context['model']` binding was unused and removed.
    suggest_id = data_dict.get('id', '')
    db.Suggest.suggest_mailed(suggest_id)
def suggest_comment(context, data_dict):
    """Validate and store a new comment on a suggestion; return it dictized."""
    session = context['session']
    suggest_id = data_dict.get('suggest_id', '')
    if not suggest_id:
        raise tk.ValidationError(['Data Request ID has not been included'])

    db.init_db(context['model'])
    tk.check_access(constants.SUGGEST_COMMENT, context, data_dict)
    validator.validate_comment(context, data_dict)

    # Build and persist the comment, stamping author and time
    comment = db.Comment()
    _undictize_comment_basic(comment, data_dict)
    comment.user_id = context['auth_user_obj'].id
    comment.time = datetime.datetime.now()
    session.add(comment)
    session.commit()
    return _dictize_comment(comment)
def _undictize_comment_basic(comment, data_dict):
    """Copy user-supplied comment fields onto the Comment record.

    The comment body is HTML-escaped to neutralise markup in user input.
    NOTE(review): ``cgi.escape`` was removed in Python 3.8 -- this code needs
    Python 2; migrate to ``html.escape`` (mind its different ``quote``
    default) when porting.
    """
    comment.comment = cgi.escape(data_dict.get('comment', ''))
    comment.suggest_id = data_dict.get('suggest_id', '')
def _dictize_comment(comment):
    """Convert a Comment DB record into its API dict, resolving the author."""
    data = {key: getattr(comment, key)
            for key in ('id', 'suggest_id', 'user_id', 'comment')}
    data['user'] = _get_user(comment.user_id)
    data['time'] = str(comment.time)
    return data
| |
from revelation.instruction import Instruction
from revelation.isa import decode
from revelation.test.machine import StateChecker, new_state
from pydgin.misc import FatalError
import opcode_factory
import pytest
def test_64bit_load_disppm():
    """Double-word (bb=0b11) post-modify-displacement load fills rd and rd+1."""
    state = new_state(rf5=8)
    # Two consecutive words starting at the base address in rf5.
    for address in (8, 12):
        state.mem.write(address, 4, 0xffffffff)
    instr = opcode_factory.ldstrpmd32(rd=0, rn=5, imm=0, bb=0b11, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff).check(state)
def test_64bit_store_disppm():
    """Double-word (bb=0b11) post-modify-displacement store writes rd and rd+1."""
    state = new_state(rf0=0xffffffff, rf1=0xffffffff, rf5=8)
    instr = opcode_factory.ldstrpmd32(rd=0, rn=5, sub=0, imm=1, bb=0b11, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff).check(
        state, memory=[(8, 4, 0xffffffff), (12, 4, 0xffffffff)])
@pytest.mark.parametrize('opcode', [opcode_factory.ldstrdisp16,
                                    opcode_factory.ldstrdisp32,
                                    ])
def test_64bit_load_disp(opcode):
    """Double-word (bb=0b11) displacement load fills rd and rd+1 (16/32-bit forms)."""
    state = new_state(rf5=8)
    for address in (8, 12):
        state.mem.write(address, 4, 0xffffffff)
    instr = opcode(rd=2, rn=5, imm=0, bb=0b11, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf2=0xffffffff, rf3=0xffffffff).check(state)
@pytest.mark.parametrize('opcode', [opcode_factory.ldstrdisp16,
                                    opcode_factory.ldstrdisp32,
                                    ])
def test_64bit_store_disp(opcode):
    """Double-word (bb=0b11) displacement store writes rd and rd+1 (16/32-bit forms)."""
    state = new_state(rf0=0xffffffff, rf1=0xffffffff, rf5=8)
    instr = opcode(rd=0, rn=5, imm=0, bb=0b11, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff).check(
        state, memory=[(8, 4, 0xffffffff), (12, 4, 0xffffffff)])
@pytest.mark.parametrize('opcode', [opcode_factory.ldstrind16,
                                    opcode_factory.ldstrind32,
                                    ])
def test_64bit_load_index(opcode):
    """Double-word (bb=0b11) indexed load fills rd and rd+1 (16/32-bit forms)."""
    state = new_state(rf5=8)
    for address in (8, 12):
        state.mem.write(address, 4, 0xffffffff)
    instr = opcode(rd=0, rn=5, sub=0, bb=0b11, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff).check(state)
@pytest.mark.parametrize('opcode', [opcode_factory.ldstrind16,
                                    opcode_factory.ldstrind32,
                                    ])
def test_64bit_store_index(opcode):
    """Double-word (bb=0b11) indexed store writes rd and rd+1 (16/32-bit forms)."""
    state = new_state(rf0=0xffffffff, rf1=0xffffffff, rf5=8)
    instr = opcode(rd=0, rn=5, rm=6, sub=0, bb=0b11, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff).check(
        state, memory=[(8, 4, 0xffffffff), (12, 4, 0xffffffff)])
@pytest.mark.parametrize('sub,new_rn', [(1, 8 - 4),
                                        (0, 8 + 4)])
def test_execute_ldr_disp_pm(sub, new_rn):
    """Post-modify load: rd receives memory at rn, then rn is updated by
    +/- (imm << size).

    The parameter list previously repeated both (sub, new_rn) cases verbatim,
    running each identical test twice; the duplicates were removed.
    """
    state = new_state(rf5=8)
    state.mem.write(8, 4, 42)  # Start address, number of bytes, value
    # bb: 00=byte, 01=half-word, 10=word, 11=double-word
    instr = opcode_factory.ldstrpmd32(rd=0, rn=5, sub=sub, imm=1, bb=0b10, s=0)
    name, executefn = decode(instr)
    executefn(state, Instruction(instr, None))
    expected_state = StateChecker(rf0=42, rf5=new_rn)
    expected_state.check(state)
@pytest.mark.parametrize('sub,new_rn', [(1, 8 - 4),
                                        (0, 8 + 4)])
def test_execute_str_disp_pm(sub, new_rn):
    """Post-modify store: rd is written to memory at rn, then rn is updated
    by +/- (imm << size).

    The parameter list previously repeated both (sub, new_rn) cases verbatim,
    running each identical test twice; the duplicates were removed.
    """
    state = new_state(rf0=0xffffffff, rf5=8)
    # bb: 00=byte, 01=half-word, 10=word, 11=double-word
    instr = opcode_factory.ldstrpmd32(rd=0, rn=5, sub=sub, imm=1, bb=0b10, s=1)
    name, executefn = decode(instr)
    executefn(state, Instruction(instr, None))
    expected_state = StateChecker(rf0=0xffffffff, rf5=new_rn)
    expected_state.check(state, memory=[(8, 4, 0xffffffff)])
@pytest.mark.parametrize('is16bit,sub,address', [(False, 1, 8 - (1 << 2)),
                                                 (False, 0, 8 + (1 << 2)),
                                                 (True, 0, 8 + (1 << 2))])
def test_execute_ldr_disp(is16bit, sub, address):
    """Displacement load reads from rn +/- (imm << size); rn itself is unchanged."""
    state = new_state(rf0=0, rf5=8)
    state.mem.write(address, 4, 0xffffffff)  # Start address, number of bytes, value
    # bb: 00=byte, 01=half-word, 10=word, 11=double-word
    if is16bit:
        instr = opcode_factory.ldstrdisp16(rd=0, rn=5, imm=1, bb=0b10, s=0)
    else:
        instr = opcode_factory.ldstrdisp32(rd=0, rn=5, sub=sub, imm=1, bb=0b10, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=8).check(state)
@pytest.mark.parametrize('is16bit,sub,expected', [(False, 1, 8 - (1 << 2)),
                                                  (False, 0, 8 + (1 << 2)),
                                                  (True, 0, 8 + (1 << 2))])
def test_execute_str_disp(is16bit, sub, expected):
    """Displacement store writes rd to rn +/- (imm << size); rn is unchanged.

    The parametrized ``expected`` address was previously ignored and recomputed
    in the body; it is now used directly (the values are identical).
    """
    state = new_state(rf0=0xffffffff, rf5=8)
    # bb: 00=byte, 01=half-word, 10=word, 11=double-word
    if is16bit:
        instr = opcode_factory.ldstrdisp16(rd=0, rn=5, imm=1, bb=0b10, s=1)
    else:
        instr = opcode_factory.ldstrdisp32(rd=0, rn=5, sub=sub, imm=1, bb=0b10, s=1)
    name, executefn = decode(instr)
    executefn(state, Instruction(instr, None))
    expected_state = StateChecker(rf0=0xffffffff, rf5=8)
    expected_state.check(state, memory=[(expected, 4, 0xffffffff)])
@pytest.mark.parametrize('is16bit', [True, False])
def test_ldr_index(is16bit):
    """Indexed load reads from rn + rm; neither base register is modified."""
    state = new_state(rf0=0, rf5=8, rf6=8)
    state.mem.write(16, 4, 0xffffffff)  # rn + rm == 16
    if is16bit:
        instr = opcode_factory.ldstrind16(rd=0, rn=5, rm=6, bb=0b10, s=0)
    else:
        instr = opcode_factory.ldstrind32(rd=0, rn=5, rm=6, sub=0, bb=0b10, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=8, rf6=8).check(state)
@pytest.mark.parametrize('is16bit', [True, False])
def test_str_index(is16bit):
    """Indexed store writes rd to rn + rm; neither base register is modified."""
    state = new_state(rf0=0xffffffff, rf5=8, rf6=8)
    if is16bit:
        instr = opcode_factory.ldstrind16(rd=0, rn=5, rm=6, bb=0b10, s=1)
    else:
        instr = opcode_factory.ldstrind32(rd=0, rn=5, rm=6, sub=0, bb=0b10, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=8, rf6=8).check(
        state, memory=[(16, 4, 0xffffffff)])
@pytest.mark.parametrize('is16bit', [True, False])
def test_ldr_pm(is16bit):
    """Post-modify load reads from rn, then advances rn by rm (8 + 8 = 16)."""
    state = new_state(rf0=0, rf5=8, rf6=8)
    state.mem.write(8, 4, 0xffffffff)
    if is16bit:
        instr = opcode_factory.ldstrpm16(rd=0, rn=5, rm=6, bb=0b10, s=0)
    else:
        instr = opcode_factory.ldstrpm32(rd=0, rn=5, rm=6, sub=0, bb=0b10, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=16, rf6=8).check(state)
@pytest.mark.parametrize('is16bit', [True, False])
def test_str_pm(is16bit):
    """Post-modify store writes rd at rn, then advances rn by rm (8 + 8 = 16)."""
    state = new_state(rf0=0xffffffff, rf5=8, rf6=8)
    if is16bit:
        instr = opcode_factory.ldstrpm16(rd=0, rn=5, rm=6, bb=0b10, s=1)
    else:
        instr = opcode_factory.ldstrpm32(rd=0, rn=5, rm=6, sub=0, bb=0b10, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=16, rf6=8).check(
        state, memory=[(8, 4, 0xffffffff)])
@pytest.mark.parametrize('is16bit', [True, False])
def test_ldr_pm_double(is16bit):
    """Double-word (bb=0b11) post-modify load fills rd/rd+1 and advances rn."""
    state = new_state(rf0=0, rf5=8, rf6=8)
    state.mem.write(8, 8, 0xffffffffffffffff)
    if is16bit:
        instr = opcode_factory.ldstrpm16(rd=0, rn=5, rm=6, bb=0b11, s=0)
    else:
        instr = opcode_factory.ldstrpm32(rd=0, rn=5, rm=6, sub=0, bb=0b11, s=0)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf1=0xffffffff, rf5=16, rf6=8).check(state)
@pytest.mark.parametrize('is16bit', [True, False])
def test_str_pm_double(is16bit):
    """Double-word (bb=0b11) post-modify store writes rd/rd+1 and advances rn."""
    state = new_state(rf0=0xffffffff, rf1=0xffffffff, rf5=8, rf6=8)
    if is16bit:
        instr = opcode_factory.ldstrpm16(rd=0, rn=5, rm=6, bb=0b11, s=1)
    else:
        instr = opcode_factory.ldstrpm32(rd=0, rn=5, rm=6, sub=0, bb=0b11, s=1)
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffffffff, rf5=16, rf6=8).check(
        state, memory=[(8, 8, 0xffffffffffffffff)])
def test_testset32_zero():
    """testset32 on a zero word: rd is stored to memory and rd is cleared."""
    state = new_state(rf0=0xffff, rf1=0x80002, rf2=0x80002)
    state.mem.write(0x00100004, 4, 0x0)  # target word initially zero ("free")
    instr = opcode_factory.testset32(rd=0, rn=1, rm=2, sub=0, bb=0b10)  # word
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0, rf1=0x80002, rf2=0x80002).check(
        state, memory=[(0x00100004, 4, 0xffff)])
def test_testset32_nonzero():
    """testset32 on a non-zero word: memory is untouched and rd gets its value."""
    state = new_state(rf0=0, rf1=0x80002, rf2=0x80002)
    state.mem.write(0x00100004, 4, 0xffff)  # target word already "taken"
    instr = opcode_factory.testset32(rd=0, rn=1, rm=2, sub=0, bb=0b10)  # word
    _, execute = decode(instr)
    execute(state, Instruction(instr, None))
    StateChecker(rf0=0xffff, rf1=0x80002, rf2=0x80002,).check(
        state, memory=[(0x00100004, 4, 0xffff)])
def test_testset32_fail():
    """testset32 below 0x00100000 must raise FatalError with the exact message."""
    expected_text = """testset32 has failed to write to address 0x4.
The absolute address used for the test and set instruction must be located
within the on-chip local memory and must be greater than 0x00100000 (2^20).
"""
    # rn + rm == 0x4, well below the legal on-chip range.
    state = new_state(rf0=0, rf1=0x2, rf2=0x2)
    state.mem.write(0x00100004, 4, 0xffff)
    instr = opcode_factory.testset32(rd=0, rn=1, rm=2, sub=0, bb=0b10)  # word
    _, execute = decode(instr)
    with pytest.raises(FatalError) as exninfo:
        execute(state, Instruction(instr, None))
    assert expected_text == exninfo.value.msg
| |
# Copyright (c) 2011, Enthought, Ltd.
# Author: Pietro Berkes <pberkes@enthought.com>
# License: Modified BSD license (2-clause)
"""View for model and data pair."""
from traits.has_traits import HasTraits, on_trait_change
from traits.trait_types import (Any, File, Instance, Button, Enum, Str, Bool,
Float, Event, Int)
from traits.traits import Property
from traitsui.editors.range_editor import RangeEditor
from traitsui.group import HGroup, VGroup, Tabbed
from traitsui.handler import ModelView
from traitsui.item import Item, Label, Spring, UItem
from traitsui.menu import OKCancelButtons
from traitsui.view import View
from traitsui.message import error
from pyanno.modelA import ModelA
from pyanno.modelB import ModelB
from pyanno.modelBt import ModelBt
from pyanno.modelBt_loopdesign import ModelBtLoopDesign
from pyanno.annotations import AnnotationsContainer
from pyanno.plots.annotations_plot import PosteriorPlot
from pyanno.ui.annotation_stat_view import AnnotationsStatisticsView
from pyanno.ui.annotations_view import AnnotationsView, CreateNewAnnotationsDialog
from pyanno.ui.appbase.long_running_call import LongRunningCall
from pyanno.ui.appbase.wx_utils import is_display_small
from pyanno.ui.model_a_view import ModelAView
from pyanno.ui.model_bt_view import ModelBtView
from pyanno.ui.model_btloop_view import ModelBtLoopDesignView
from pyanno.ui.model_b_view import ModelBView
import numpy as np
# TODO remember last setting of parameters
from pyanno.ui.posterior_view import PosteriorView
from pyanno.util import PyannoValueError
from traitsui.message import message
import logging
logger = logging.getLogger(__name__)
class ModelDataView(ModelView):
#### Information about available models
model_name = Enum(
'Model B-with-theta',
'Model B-with-theta (loop design)',
'Model B',
'Model A (loop design)',
)
_model_name_to_class = {
'Model B-with-theta': ModelBt,
'Model B-with-theta (loop design)': ModelBtLoopDesign,
'Model B': ModelB,
'Model A (loop design)': ModelA
}
_model_class_to_view = {
ModelBt: ModelBtView,
ModelBtLoopDesign: ModelBtLoopDesignView,
ModelB: ModelBView,
ModelA: ModelAView
}
#### Application-related traits
# reference to pyanno application
application = Any
#### Model-related traits
# the annotations model
model = Any
# Traits UI view of the model
model_view = Instance(ModelView)
    # fired when the model is updated
model_updated = Event
# parameters view should not update when this trait is False
model_update_suspended = Bool(False)
#### Annotation-related traits
# File trait to load a new annotations file
annotations_file = File
    # True when annotations have been loaded correctly
annotations_are_defined = Bool(False)
# fired when annotations are updated
annotations_updated = Event
# Traits UI view of the annotations
annotations_view = Instance(AnnotationsView)
# Traits UI view of the annotations' statistics
annotations_stats_view = Instance(AnnotationsStatisticsView)
# shortcut to the annotations
annotations = Property
    def _get_annotations(self):
        """Getter for the ``annotations`` Property: the raw annotations array."""
        return self.annotations_view.annotations_container.annotations
# property that combines information from the model and the annotations
# to give a consistent number of classes
nclasses = Property
    def _get_nclasses(self):
        """Getter for ``nclasses``: max of the model's classes and observed labels."""
        return max(self.model.nclasses, self.annotations.max() + 1)
# info string -- currently not used
info_string = Str
# used to display the current log likelihood
log_likelihood = Float
def _annotations_view_default(self):
anno = AnnotationsContainer.from_array([[0]], name='<undefined>')
return AnnotationsView(annotations_container = anno,
nclasses = self.model.nclasses,
application = self.application,
model=HasTraits())
@on_trait_change('annotations_file')
def _update_annotations_file(self):
logger.info('Load file {}'.format(self.annotations_file))
anno = AnnotationsContainer.from_file(self.annotations_file)
self.set_annotations(anno)
    @on_trait_change('annotations_updated,model_updated')
    def _update_log_likelihood(self):
        """Recompute the displayed log likelihood when model or data change."""
        if self.annotations_are_defined:
            # Incompatible data (e.g. class mismatch) is shown as NaN
            if not self.model.are_annotations_compatible(self.annotations):
                self.log_likelihood = np.nan
            else:
                self.log_likelihood = self.model.log_likelihood(
                    self.annotations)
    @on_trait_change('model.nclasses')
    def _update_nclasses(self):
        """Propagate a change in the model's number of classes to the data view."""
        self.annotations_view.nclasses = self.model.nclasses
        self.annotations_view.annotations_updated = True
@on_trait_change('model,model:theta,model:gamma')
def _fire_model_updated(self):
if not self.model_update_suspended:
self.model_updated = True
if self.model_view is not None:
self.model_view.model_updated = True
### Control content #######################################################
    def set_model(self, model):
        """Update window with a new model.
        """
        self.model = model
        # Pick the Traits UI view class registered for this model type
        model_view_class = self._model_class_to_view[model.__class__]
        self.model_view = model_view_class(model=model)
        self.model_updated = True
def set_annotations(self, annotations_container):
"""Update window with a new set of annotations."""
self.annotations_view = AnnotationsView(
annotations_container = annotations_container,
nclasses = self.model.nclasses,
application = self.application,
model = HasTraits()
)
self.annotations_stats_view = AnnotationsStatisticsView(
annotations = self.annotations,
nclasses = self.nclasses
)
self.annotations_are_defined = True
self.annotations_updated = True
    def set_from_database_record(self, record):
        """Set main window model and annotations from a database record."""
        # Install the model first: set_annotations reads self.model.nclasses
        self.set_model(record.model)
        self.set_annotations(record.anno_container)
### Actions ##############################################################
#### Model creation actions
# create a new model
new_model = Button(label='Create...')
# show informations about the selected model
get_info_on_model = Button(label='Info...')
#### Annotation creation actions
# create new annotations
new_annotations = Button(label='Create...')
#### Model <-> data computations
# execute Maximum Likelihood estimation of parameters
ml_estimate = Button(label='ML estimate...',
desc=('Maximum Likelihood estimate of model '
'parameters'))
# execute MAP estimation of parameters
map_estimate = Button(label='MAP estimate...')
# draw samples from the posterior over accuracy
sample_posterior_over_accuracy = Button(label='Sample parameters...')
# compute posterior over label classes
estimate_labels = Button(label='Estimate labels...')
#### Database actions
# open database window
open_database = Button(label="Open database")
# add current results to database
add_to_database = Button(label="Add to database")
def _new_model_fired(self):
"""Create new model."""
# delegate creation to associated model_view
model_name = self.model_name
model_class = self._model_name_to_class[model_name]
responsible_view = self._model_class_to_view[model_class]
# if annotations are loaded, set default values for number of
# annotations and annotators to the ones in the data set
kwargs = {}
if self.annotations_are_defined:
anno = self.annotations_view.annotations_container
kwargs['nclasses'] = anno.nclasses
kwargs['nannotators'] = anno.nannotators
# model == None if the user cancelled the action
model = responsible_view.create_model_dialog(self.info.ui.control,
**kwargs)
if model is not None:
self.set_model(model)
def _new_annotations_fired(self):
"""Create an empty annotations set."""
annotations = CreateNewAnnotationsDialog.create_annotations_dialog()
if annotations is not None:
name = self.application.database.get_available_id()
anno_cont = AnnotationsContainer.from_array(annotations,
name=name)
self.set_annotations(anno_cont)
    def _open_database_fired(self):
        """Open database window."""
        # No-op when no application object is attached
        if self.application is not None:
            self.application.open_database_window()
    def _get_info_on_model_fired(self):
        """Open dialog with model description."""
        # The model class docstring doubles as its user-facing description
        model_class = self._model_name_to_class[self.model_name]
        message(message = model_class.__doc__, title='Model info')
    def _add_to_database_fired(self):
        """Add current results to database."""
        # No-op when no application object is attached
        if self.application is not None:
            self.application.add_current_state_to_database()
    def _action_finally(self):
        """Operations that need to be executed both in case of a success and
        a failure of the long-running action.
        """
        # Re-enable the parameter-view updates that _action_on_model suspended
        self.model_update_suspended = False
    def _action_success(self, result):
        """Default success callback: resume view updates, then notify listeners."""
        self._action_finally()
        self._fire_model_updated()
    def _action_failure(self, err):
        """Default failure callback: resume view updates, report known errors."""
        self._action_finally()
        if isinstance(err, PyannoValueError):
            errmsg = err.args[0]
            if 'Annotations' in errmsg:
                # raised when annotations are incompatible with the model
                error('Error: ' + errmsg)
        else:
            # re-raise exception if it has not been handled
            raise err
def _action_on_model(self, message, method, args=None, kwargs=None,
on_success=None, on_failure=None):
"""Call long running method on model.
While the call is running, a window with a pulse progress bar is
displayed.
An error message is displayed if the call raises a PyannoValueError
(raised when annotations are incompatible with the current model).
"""
if args is None: args = []
if kwargs is None: kwargs = {}
if on_success is None: on_success = self._action_success
if on_failure is None: on_failure = self._action_failure
self.model_update_suspended = True
call = LongRunningCall(
parent = None,
title = 'Calculating...',
message = message,
callable = method,
args = args,
kw = kwargs,
on_success = on_success,
on_failure = on_failure,
)
call()
def _ml_estimate_fired(self):
"""Run ML estimation of parameters."""
message = 'Computing maximum likelihood estimate'
self._action_on_model(message, self.model.mle, args=[self.annotations])
    def _map_estimate_fired(self):
        """Run MAP (maximum a posteriori) estimation of parameters.

        (The previous docstring said "ML estimation" -- a copy/paste error;
        this handler calls ``self.model.map``.)
        """
        message = 'Computing maximum a posteriori estimate'
        self._action_on_model(message, self.model.map, args=[self.annotations])
    def _sample_posterior_success(self, samples):
        """Success callback for sampling: plot theta samples if the view can."""
        # Not every model view implements plot_theta_samples
        if (samples is not None
            and hasattr(self.model_view, 'plot_theta_samples')):
            self.model_view.plot_theta_samples(samples)
        self._action_finally()
    def _sample_posterior_over_accuracy_fired(self):
        """Sample the posterior of the parameters `theta`."""
        message = 'Sampling from the posterior over accuracy'
        # open dialog asking for number of samples
        params = _SamplingParamsDialog()
        dialog_ui = params.edit_traits(kind='modal')
        if not dialog_ui.result:
            # user pressed "Cancel"
            return
        nsamples = params.nsamples
        # Plotting of the samples happens in the dedicated success callback
        self._action_on_model(
            message,
            self.model.sample_posterior_over_accuracy,
            args = [self.annotations, nsamples],
            kwargs = {'burn_in_samples': params.burn_in_samples,
                      'thin_samples' : params.thin_samples},
            on_success=self._sample_posterior_success
        )
    def _estimate_labels_success(self, posterior):
        # Callback invoked when `infer_labels` completes: open a new window
        # showing a plot of the posterior over label classes, then run the
        # standard cleanup step.
        if posterior is not None:
            post_plot = PosteriorPlot(posterior=posterior,
                                      title='Posterior over classes')
            post_view = PosteriorView(posterior_plot=post_plot,
                                      annotations=self.annotations)
            post_view.edit_traits()
        self._action_finally()
def _estimate_labels_fired(self):
"""Compute the posterior over annotations and show it in a new window"""
message = 'Computing the posterior over classes'
self._action_on_model(
message,
self.model.infer_labels,
args=[self.annotations],
on_success=self._estimate_labels_success
)
    ### Views ################################################################

    def traits_view(self):
        """Build the main application view.

        The layout has three parts: a model panel (left), a data panel
        (right), and a row of action buttons below.  Widget sizes are chosen
        based on the display size (see `is_display_small`).
        """
        ## Model view
        # adjust sizes to display size
        if is_display_small():
            # full view size
            w_view, h_view = 1024, 768
            w_data_create_group = 350
            w_data_info_group = 500
            h_annotations_stats = 270
        else:
            w_view, h_view = 1300, 850
            w_data_create_group = 400
            w_data_info_group = 700
            h_annotations_stats = 330

        # Controls to select and instantiate a new model.
        model_create_group = (
            VGroup(
                HGroup(
                    UItem(name='model_name',width=200),
                    UItem(name='new_model', width=100),
                    UItem(name='get_info_on_model', width=100, height=25),
                ),
                label = 'Create new model'
            )
        )

        model_group = (
            VGroup (
                model_create_group,
                VGroup(
                    Item(
                        'model_view',
                        style='custom',
                        show_label=False,
                        width=400
                    ),
                    label = 'Model view',
                ),
            ),
        )

        ## Data view
        # Controls to load annotations from file or create new ones.
        data_create_group = VGroup(
            #Label('Open annotation file:', width=800),
            HGroup(
                Item('annotations_file', style='simple', label='Open file:',
                     width=w_data_create_group, height=25),
                UItem('new_annotations', height=25)
            ),
            label = 'Load/create annotations',
            show_border = False,
        )

        # Annotations table and summary statistics; shown only once
        # annotations are defined.
        data_info_group = VGroup(
            Item('annotations_view',
                 style='custom',
                 show_label=False,
                 visible_when='annotations_are_defined',
                 width=w_data_info_group,
            ),
            Item('annotations_stats_view',
                 style='custom',
                 show_label=False,
                 visible_when='annotations_are_defined',
                 height=h_annotations_stats),
            label = 'Data view',
        )

        data_group = (
            VGroup (
                data_create_group,
                data_info_group,
            ),
        )

        ## (Model,Data) view
        # Action buttons; most are enabled only when annotations are defined.
        model_data_group = (
            VGroup(
                #Item('info_string', show_label=False, style='readonly'),
                Item('log_likelihood', label='Log likelihood', style='readonly'),
                HGroup(
                    Item('ml_estimate',
                         enabled_when='annotations_are_defined'),
                    Item('map_estimate',
                         enabled_when='annotations_are_defined'),
                    Item('sample_posterior_over_accuracy',
                         enabled_when='annotations_are_defined'),
                    Item('estimate_labels',
                         enabled_when='annotations_are_defined'),
                    Spring(),
                    Item('add_to_database',
                         enabled_when='annotations_are_defined'),
                    Item('open_database'),
                    show_labels=False,
                ),
                label = 'Model-data view',
            )
        )

        ## Full view
        full_view = View(
            VGroup(
                HGroup(
                    model_group,
                    data_group
                ),
                model_data_group,
            ),
            title='PyAnno - Models of data annotations by multiple curators',
            width = w_view,
            height = h_view,
            resizable = False
        )

        return full_view
class _SamplingParamsDialog(HasTraits):
    """Modal dialog asking the user for MCMC sampling parameters."""

    # total number of posterior samples to draw
    nsamples = Int(200)
    # number of initial samples discarded as burn-in
    burn_in_samples = Int(100)
    # thinning factor: keep one sample every `thin_samples`
    thin_samples = Int(1)

    traits_view = View(
        VGroup(
            Item('nsamples',
                 label = 'Number of samples',
                 editor = RangeEditor(mode='spinner',
                                      low=100, high=50000,
                                      is_float=False),
                 width = 100
            ),
            Item('burn_in_samples',
                 label = 'Number of samples in burn-in phase',
                 editor = RangeEditor(mode='spinner',
                                      low=1, high=50000,
                                      is_float=False),
                 width = 100
            ),
            Item('thin_samples',
                 label = 'Thinning (keep 1 samples every N)',
                 editor = RangeEditor(mode='spinner',
                                      low=1, high=50000,
                                      is_float=False),
                 width = 100
            ),
        ),
        buttons = OKCancelButtons
    )
#### Testing and debugging ####################################################

def main():
    """ Entry point for standalone testing/debugging.

    Creates a demo model (5 label classes), wraps it in a ModelDataView,
    and opens the main window.  Returns the model and the view so they can
    be inspected interactively.
    """
    from pyanno.ui.model_data_view import ModelDataView

    model = ModelBtLoopDesign.create_initial_state(5)
    model_data_view = ModelDataView()
    model_data_view.set_model(model)

    # open model_data_view
    model_data_view.configure_traits(view='traits_view')
    return model, model_data_view
if __name__ == '__main__':
    # Run the standalone debugging entry point; keep references so the
    # interactive session can inspect the model and its view.
    m, mdv = main()
| |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 MPNet model. """
import math
import warnings
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_mpnet import MPNetConfig
logger = logging.get_logger(__name__)

# Identifiers interpolated into the auto-generated code-sample docstrings below.
_CHECKPOINT_FOR_DOC = "microsoft/mpnet-base"
_CONFIG_FOR_DOC = "MPNetConfig"
_TOKENIZER_FOR_DOC = "MPNetTokenizer"

# Pretrained checkpoints compatible with this architecture.
TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/mpnet-base",
]
class TFMPNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MPNetConfig
    base_model_prefix = "mpnet"

    # SavedModel serving signature: int32 `input_ids` and `attention_mask`
    # of shape (batch_size, sequence_length).
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
            }
        ]
    )
    def serving(self, inputs):
        # Run the model and convert its output dataclass into serving-friendly
        # tensors (each subclass defines its own `serving_output`).
        output = self.call(inputs)
        return self.serving_output(output)
class TFMPNetEmbeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position embeddings."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        # Index 1 is reserved for padding (same convention as RoBERTa).
        self.padding_idx = 1
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        self.initializer_range = config.initializer_range
        self.embeddings_sum = tf.keras.layers.Add()
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def build(self, input_shape: tf.TensorShape):
        # Embedding tables are created here (not in __init__) so the variable
        # names match the checkpoint layout expected on load.
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.vocab_size, self.hidden_size],
                initializer=get_initializer(initializer_range=self.initializer_range),
            )

        with tf.name_scope("position_embeddings"):
            self.position_embeddings = self.add_weight(
                name="embeddings",
                shape=[self.max_position_embeddings, self.hidden_size],
                initializer=get_initializer(initializer_range=self.initializer_range),
            )

        super().build(input_shape)

    def create_position_ids_from_input_ids(self, input_ids):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
        Args:
            input_ids: tf.Tensor
        Returns: tf.Tensor
        """
        # Zero out padding tokens, then give each remaining token a
        # consecutive position via a running count, offset past padding_idx.
        mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
        incremental_indices = tf.math.cumsum(mask, axis=1) * mask

        return incremental_indices + self.padding_idx

    def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
        """
        Applies embedding based on inputs tensor.
        Returns:
            final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
        """
        assert not (input_ids is None and inputs_embeds is None)

        if input_ids is not None:
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)
            else:
                # No token ids available: assume no padding and use sequential
                # positions starting just past the padding index.
                position_ids = tf.expand_dims(
                    tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
                )
                position_ids = tf.tile(input=position_ids, multiples=(input_shape[0], 1))

        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
        final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds])
        final_embeddings = self.LayerNorm(inputs=final_embeddings)
        final_embeddings = self.dropout(inputs=final_embeddings, training=training)

        return final_embeddings
# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->MPNet
class TFMPNetPooler(tf.keras.layers.Layer):
    # Produces a fixed-size sequence representation from the first token's
    # hidden state via a tanh-activated dense projection.
    def __init__(self, config: MPNetConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(inputs=first_token_tensor)

        return pooled_output
class TFMPNetSelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention used by MPNet.

    Unlike BERT-style attention, `call` accepts an optional `position_bias`
    (relative position embeddings, precomputed once in `TFMPNetEncoder`)
    that is added to the raw attention scores.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        if config.hidden_size % config.num_attention_heads != 0:
            # Fix: the original message was missing the closing parenthesis
            # after the number of heads.
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        # Note: divisibility is guaranteed by the check above (the original
        # code repeated it as a redundant assert).
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Query/key/value/output projections.
        self.q = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="q"
        )
        self.k = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="k"
        )
        self.v = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="v"
        )
        self.o = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="o"
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, batch_size):
        """Reshape (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))

        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
        batch_size = shape_list(hidden_states)[0]

        q = self.q(hidden_states)
        k = self.k(hidden_states)
        v = self.v(hidden_states)

        q = self.transpose_for_scores(q, batch_size)
        k = self.transpose_for_scores(k, batch_size)
        v = self.transpose_for_scores(v, batch_size)

        # Scaled dot-product attention scores.
        attention_scores = tf.matmul(q, k, transpose_b=True)
        dk = tf.cast(shape_list(k)[-1], attention_scores.dtype)
        attention_scores = attention_scores / tf.math.sqrt(dk)

        # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
        if position_bias is not None:
            attention_scores += position_bias

        if attention_mask is not None:
            # `attention_mask` is additive: ~0 for kept positions, large
            # negative values for masked ones (see TFMPNetMainLayer.call).
            attention_scores = attention_scores + attention_mask

        attention_probs = tf.nn.softmax(attention_scores, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        c = tf.matmul(attention_probs, v)
        c = tf.transpose(c, perm=[0, 2, 1, 3])
        c = tf.reshape(c, (batch_size, -1, self.all_head_size))
        o = self.o(c)

        outputs = (o, attention_probs) if output_attentions else (o,)
        return outputs
class TFMPNetAttention(tf.keras.layers.Layer):
    # Attention block: self-attention followed by dropout, residual
    # connection and layer normalization.
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.attn = TFMPNetSelfAttention(config, name="attn")
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def prune_heads(self, heads):
        # Head pruning is not implemented for MPNet.
        raise NotImplementedError

    def call(self, input_tensor, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
        self_outputs = self.attn(
            input_tensor, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
        )
        # Residual connection + layer norm around the attention output.
        attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + input_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->MPNet
class TFMPNetIntermediate(tf.keras.layers.Layer):
    # Feed-forward expansion: hidden_size -> intermediate_size with the
    # configured activation.
    def __init__(self, config: MPNetConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )

        # `hidden_act` may be given as a string name or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->MPNet
class TFMPNetOutput(tf.keras.layers.Layer):
    # Feed-forward projection back to hidden_size, with dropout, residual
    # connection and layer normalization.
    def __init__(self, config: MPNetConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        # Residual connection + layer norm.
        hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)

        return hidden_states
class TFMPNetLayer(tf.keras.layers.Layer):
    # One transformer block: attention -> intermediate -> output.
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.attention = TFMPNetAttention(config, name="attention")
        self.intermediate = TFMPNetIntermediate(config, name="intermediate")
        self.out = TFMPNetOutput(config, name="output")

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
        self_attention_outputs = self.attention(
            hidden_states, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        intermediate_output = self.intermediate(attention_output)
        layer_output = self.out(intermediate_output, attention_output, training=training)
        outputs = (layer_output,) + outputs  # add attentions if we output them

        return outputs
class TFMPNetEncoder(tf.keras.layers.Layer):
    """Stack of MPNet transformer layers.

    A single relative-position bias, shared by all layers, is computed once
    per forward pass and handed down to every layer's attention module.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.n_heads = config.num_attention_heads
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        # Fix: this attribute was assigned twice in the original code; keep a
        # single assignment.
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.initializer_range = config.initializer_range

        self.layer = [TFMPNetLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]

    def build(self, input_shape):
        # Bias table: one scalar per (distance bucket, attention head).
        with tf.name_scope("relative_attention_bias"):
            self.relative_attention_bias = self.add_weight(
                name="embeddings",
                shape=[self.relative_attention_num_buckets, self.n_heads],
                initializer=get_initializer(self.initializer_range),
            )

        return super().build(input_shape)

    def call(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        output_attentions,
        output_hidden_states,
        return_dict,
        training=False,
    ):
        # Compute the shared relative position bias once for all layers.
        position_bias = self.compute_position_bias(hidden_states)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                head_mask[i],
                output_attentions,
                position_bias=position_bias,
                training=training,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )

    @staticmethod
    def _relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
        """Map signed relative positions to bucket indices (T5-style).

        Half of the buckets encode the sign of the offset; within each half,
        small distances get exact buckets and larger distances are binned
        logarithmically up to `max_distance`.
        """
        ret = 0
        n = -relative_position

        num_buckets //= 2
        # Use the upper half of the buckets for negative n.
        ret += tf.cast(tf.math.less(n, 0), dtype=relative_position.dtype) * num_buckets
        n = tf.math.abs(n)

        # now n is in the range [0, inf)
        max_exact = num_buckets // 2
        is_small = tf.math.less(n, max_exact)

        # Logarithmic binning for larger distances, capped at the last bucket.
        val_if_large = max_exact + tf.cast(
            tf.math.log(n / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact),
            dtype=relative_position.dtype,
        )

        val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
        ret += tf.where(is_small, n, val_if_large)
        return ret

    def compute_position_bias(self, x, position_ids=None):
        """Compute binned relative position bias"""
        input_shape = shape_list(x)
        qlen, klen = input_shape[1], input_shape[1]

        if position_ids is not None:
            context_position = position_ids[:, :, None]
            memory_position = position_ids[:, None, :]
        else:
            context_position = tf.range(qlen)[:, None]
            memory_position = tf.range(klen)[None, :]

        relative_position = memory_position - context_position  # shape (qlen, klen)

        rp_bucket = self._relative_position_bucket(
            relative_position,
            num_buckets=self.relative_attention_num_buckets,
        )
        values = tf.gather(self.relative_attention_bias, rp_bucket)  # shape (qlen, klen, num_heads)
        values = tf.expand_dims(tf.transpose(values, [2, 0, 1]), axis=0)  # shape (1, num_heads, qlen, klen)
        return values
@keras_serializable
class TFMPNetMainLayer(tf.keras.layers.Layer):
    # Core MPNet transformer (embeddings + encoder + pooler), shared by the
    # bare model and all task-specific heads.
    config_class = MPNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.num_hidden_layers = config.num_hidden_layers
        self.initializer_range = config.initializer_range
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.return_dict = config.use_return_dict
        self.encoder = TFMPNetEncoder(config, name="encoder")
        self.pooler = TFMPNetPooler(config, name="pooler")
        # The embeddings must be the last declaration in order to follow the weights order
        self.embeddings = TFMPNetEmbeddings(config, name="embeddings")

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        return self.embeddings

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError

    def call(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        # Normalize positional/keyword/legacy inputs into a single dict.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs["attention_mask"] is None:
            # Default: attend to every position.
            inputs["attention_mask"] = tf.fill(input_shape, 1)

        embedding_output = self.embeddings(
            inputs["input_ids"],
            inputs["position_ids"],
            inputs["inputs_embeds"],
            training=inputs["training"],
        )

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if inputs["head_mask"] is not None:
            raise NotImplementedError
        else:
            inputs["head_mask"] = [None] * self.num_hidden_layers

        encoder_outputs = self.encoder(
            embedding_output,
            extended_attention_mask,
            inputs["head_mask"],
            inputs["output_attentions"],
            inputs["output_hidden_states"],
            inputs["return_dict"],
            training=inputs["training"],
        )

        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        if not inputs["return_dict"]:
            # Tuple output: (sequence_output, pooled_output, hidden_states?, attentions?)
            return (
                sequence_output,
                pooled_output,
            ) + encoder_outputs[1:]

        return TFBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
MPNET_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensor in
the first positional argument :
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "attention_mask": attention_mask})`
Args:
config (:class:`~transformers.MPNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
MPNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.MPNetTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.",
    MPNET_START_DOCSTRING,
)
class TFMPNetModel(TFMPNetPreTrainedModel):
    # Thin wrapper exposing TFMPNetMainLayer as a standalone model.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.mpnet = TFMPNetMainLayer(config, name="mpnet")

    @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        # Normalize inputs, then delegate everything to the main layer.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        outputs = self.mpnet(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            position_ids=inputs["position_ids"],
            head_mask=inputs["head_mask"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        return outputs

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
    def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
        # Convert optional tuple fields to tensors (or drop them) so the
        # output is representable in a SavedModel signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFBaseModelOutputWithPooling(
            last_hidden_state=output.last_hidden_state,
            pooler_output=output.pooler_output,
            hidden_states=hs,
            attentions=attns,
        )
class TFMPNetLMHead(tf.keras.layers.Layer):
    """MPNet head for masked and permuted language modeling"""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.act = get_tf_activation("gelu")

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = input_embeddings

    def build(self, input_shape):
        # Per-token output bias (the projection itself is tied to the input
        # embedding matrix).
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")

        super().build(input_shape)

    def get_output_embeddings(self):
        return self.decoder

    def set_output_embeddings(self, value):
        self.decoder.weight = value
        self.decoder.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.layer_norm(hidden_states)

        # project back to size of vocabulary with bias
        seq_length = shape_list(tensor=hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)

        return hidden_states
@add_start_docstrings("""MPNet Model with a `language modeling` head on top. """, MPNET_START_DOCSTRING)
class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss):
_keys_to_ignore_on_load_missing = [r"pooler"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mpnet = TFMPNetMainLayer(config, name="mpnet")
self.lm_head = TFMPNetLMHead(config, self.mpnet.embeddings, name="lm_head")
def get_lm_head(self):
return self.lm_head
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.lm_head.name
@add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mpnet(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFMPNetClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
def call(self, features, training=False):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x, training=training)
x = self.dense(x)
x = self.dropout(x, training=training)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
MPNET_START_DOCSTRING,
)
class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassificationLoss):
_keys_to_ignore_on_load_missing = [r"pooler"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mpnet = TFMPNetMainLayer(config, name="mpnet")
self.classifier = TFMPNetClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mpnet(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output, training=training)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
MPNET_START_DOCSTRING,
)
class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mpnet = TFMPNetMainLayer(config, name="mpnet")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.mpnet(
flat_input_ids,
flat_attention_mask,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, training=inputs["training"])
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
MPNET_START_DOCSTRING,
)
class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificationLoss):
_keys_to_ignore_on_load_missing = [r"pooler"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mpnet = TFMPNetMainLayer(config, name="mpnet")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mpnet(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MPNET_START_DOCSTRING,
)
class TFMPNetForQuestionAnswering(TFMPNetPreTrainedModel, TFQuestionAnsweringLoss):
_keys_to_ignore_on_load_missing = [r"pooler"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mpnet = TFMPNetMainLayer(config, name="mpnet")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.mpnet(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a trackable object from a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sys
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import function_deserialization
from tensorflow.python.saved_model import load_options
from tensorflow.python.saved_model import load_v1_in_v2
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import registration
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# API label for SavedModel metrics.
_LOAD_V2_LABEL = "load_v2"
def _unused_handle():
"""Returns a placeholder as a handle that is not supposed to be accessed."""
error_message = ("Trying to access a placeholder that is not supposed to be "
"executed. This means you are executing a graph generated "
"from the cross-replica context in an in-replica context.")
assert_op = control_flow_ops.Assert(
array_ops.placeholder_with_default(False, shape=()),
[error_message])
with ops.control_dependencies([assert_op]):
return array_ops.placeholder(dtype=dtypes.resource)
class _WrapperFunction(function.ConcreteFunction):
"""A class wraps a concrete function to handle different distributed contexts.
The reason for wrapping a concrete function is because the _captured_inputs
fields used for in-replica context and cross-replica context are different.
When `load()` is called from within a tf.distribute.strategy scope, the
captured inputs are distributed variables. When using these distributed
variables during calling the function, we need different approaches when it is
in-replica and when it is not in-replica. When it is in replica, naturally we
should use the corresponding component of the distributed variable; when it is
not in-replica, calling the function should mean that it is constructing a
graph that is not actually going to be used. A typical use case is when
constructing a functional model. In this case, return a placeholder with a
control dependency to ensure that is never accessed.
"""
def __init__(self, concrete_function):
# Shallow copy the concrete_function
self.__dict__.update(vars(concrete_function))
def _call_flat(self, args, captured_inputs, cancellation_manager=None):
def get_handle(x):
return x.handle if distribute_utils.is_distributed_variable(x) else x
def get_unused_handle(x):
return _unused_handle() if distribute_utils.is_distributed_variable(x) \
else x
if (ds_context.get_replica_context() is not None or
values_util.is_saving_non_distributed()):
# If we're in the replica context or are saving a non-distributed version
# of the model, we resolve the captured variables to the corresponding
# resource handle. In both situation we call var.handle, but it has
# different behavior. In the replica context, var.handle resolves the
# replica local variable handle if the variable is replicated. When saving
# a non-distributed version of the model, var.handle resolves to the
# primary variable handle, since we only save one copy of a replicated
# variable.
captured_inputs = list(map(get_handle, captured_inputs))
else: # cross-replica context
captured_inputs = list(map(get_unused_handle, captured_inputs))
return super(_WrapperFunction, self)._call_flat(args, captured_inputs,
cancellation_manager)
class Loader(object):
"""Helper class to load an object-based SavedModel."""
def __init__(self, object_graph_proto, saved_model_proto, export_dir,
ckpt_options, save_options, filters):
meta_graph = saved_model_proto.meta_graphs[0]
self._asset_file_def = meta_graph.asset_file_def
self._operation_attributes = {
node.name: node.attr for node in meta_graph.graph_def.node}
self._proto = object_graph_proto
self._export_dir = export_dir
self._concrete_functions = (
function_deserialization.load_function_def_library(
meta_graph.graph_def.library, wrapper_function=_WrapperFunction))
self._checkpoint_options = ckpt_options
self._save_options = save_options
# Stores user-defined node_filters argument.
self._node_filters = filters
# Stores map of string paths to integers.
self._node_path_to_id = self._convert_node_paths_to_ints()
self._loaded_nodes = {}
if isinstance(filters, dict):
# If node_filters is a dict, then the values may contain already created
# trackable objects. In this case, create a dictionary mapping node IDs to
# the already created nodes. This dict will be updated in
# `_retrieve_all_filtered_nodes` with tracked dependencies.
for node_path, node in filters.items():
if isinstance(node, tuple):
self._loaded_nodes[self._node_path_to_id[node_path]] = node
else:
self._loaded_nodes[self._node_path_to_id[node_path]] = (node, setattr)
# Get a list of all integer node ids to load, or None if all nodes should be
# loaded. This list includes ids of child nodes.
self._filtered_nodes = self._retrieve_all_filtered_nodes()
self._load_all()
if not save_options.experimental_skip_checkpoint:
self._restore_checkpoint()
for node in self._nodes:
if isinstance(node, tracking.CapturableResource):
init_op = node._initialize() # pylint: disable=protected-access
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
def _convert_node_paths_to_ints(self):
"""Maps all string node paths in node_filters to the int node ids."""
if self._node_filters is None:
return None
path_to_int = {}
for node_id in self._node_filters:
int_node_id = None
if isinstance(node_id, str):
node_path = node_id.split(".")
if node_path[0] != "root":
raise ValueError(
"When passing string identifiers to node_filters, the first name"
f" must be root. Received {node_path[0]}.")
int_node_id = 0
for n, name in enumerate(node_path[1:]):
int_node_id = self._find_node_child(
int_node_id, name, ".".join(node_path[:n+2]))
path_to_int[node_id] = int_node_id
else:
raise TypeError("Elements in node_filters must be strings.")
return path_to_int
def _retrieve_all_filtered_nodes(self):
"""Traverses through the object graph to get the IDs of all nodes to load.
As a side-effect, if node_filters is a dictionary that contains already-
created objects, then the dependencies tracked by those objects will be
added to node_filters.
Returns:
List of all nodes to load, or None if all nodes should be loaded.
"""
if self._node_filters is None:
return None # All nodes should be loaded.
all_filtered_nodes = set()
nodes_to_visit = list(self._node_filters)
while nodes_to_visit:
node_path = nodes_to_visit.pop(0)
node_id = self._node_path_to_id[node_path]
if node_id in all_filtered_nodes:
continue
all_filtered_nodes.add(node_id)
node, setter = self._loaded_nodes.get(node_id, (None, None))
if node is not None:
if not isinstance(node, base.Trackable):
raise TypeError(
"Error when processing dictionary values passed to nodes_to_load."
f"Object at {node_path} is expected to be a checkpointable (i.e. "
"'trackable') TensorFlow object (e.g. tf.Variable, tf.Module or "
"Keras layer).")
node._maybe_initialize_trackable() # pylint: disable=protected-access
for reference in self._proto.nodes[node_id].children:
child_object, _ = self._loaded_nodes.get(
reference.node_id, (None, None))
# See if node already tracks the child reference, in which case add the
# child to the loaded_nodes dict.
if child_object is None and node is not None:
child_object = node._lookup_dependency(reference.local_name) # pylint: disable=protected-access
if isinstance(child_object, data_structures.TrackableDataStructure):
# Make setattr a noop to avoid overwriting already existing data
# structures.
setter = lambda *args: None
self._loaded_nodes[reference.node_id] = (child_object, setter)
child_path = "{}.{}".format(node_path, reference.local_name)
self._node_path_to_id[child_path] = reference.node_id
nodes_to_visit.append(child_path)
if 0 in all_filtered_nodes:
return None
return all_filtered_nodes
def _find_node_child(self, node_id, child_name, path):
for reference in self._proto.nodes[node_id].children:
if reference.local_name == child_name:
return reference.node_id
raise ValueError(f"Unable to find node {path}.")
def _load_all(self):
"""Loads all nodes and functions from the SavedModel and their edges."""
self._load_nodes()
self._load_edges()
# TODO(b/124045874): There are limitations with functions whose captures
# trigger other functions to be executed. For now it is only guaranteed to
# work if the captures of a function only trigger functions without
# captures.
self._setup_functions_structures()
self._setup_functions_captures()
self._create_saveable_object_factories()
def _create_saveable_object_factories(self):
for node_id, proto in self._iter_all_nodes():
node = self.get(node_id)
node._self_saveable_object_factories = {} # pylint: disable=protected-access
for name, saveable_object_proto in proto.saveable_objects.items():
node._self_saveable_object_factories[name] = ( # pylint: disable=protected-access
saveable_object_util.restored_saved_object_factory(
self.get(saveable_object_proto.save_function),
self.get(saveable_object_proto.restore_function)))
def _load_edges(self):
"""Adds edges from objects to other objects and functions."""
for node_id, object_proto in self._iter_all_nodes():
self._add_object_graph_edges(object_proto, node_id)
# If root object isn't loaded, then create edges from the root for
# checkpoint compatibility.
if self._filtered_nodes is not None and 0 not in self._filtered_nodes:
root = self.get(0)
for node_path in self._node_filters:
loaded_node = self._nodes[self._node_path_to_id[node_path]]
path = node_path.split(".")
current_node = root
for name in path[1:-1]:
if not hasattr(current_node, name):
setattr(current_node, name, self._recreate_base_user_object()[0])
current_node = getattr(current_node, name)
if not hasattr(current_node, path[-1]):
setattr(current_node, path[-1], loaded_node)
def _add_object_graph_edges(self, proto, node_id):
"""Adds edges from an object to its children."""
obj = self._nodes[node_id]
setter = self._node_setters[node_id]
for reference in proto.children:
setter(obj, reference.local_name, self._nodes[reference.node_id])
# Note: if an object has an attribute `__call__` add a class method
# that allows `obj()` syntax to work. This is done per-instance to
# allow `callable` to be used to find out if an object is callable.
if reference.local_name == "__call__" and not callable(obj):
setattr(type(obj), "__call__", _call_attribute)
def _setup_functions_structures(self):
"""Setup structure for inputs and outputs of restored functions."""
coder = nested_structure_coder.StructureCoder()
for name, proto in sorted(self._proto.concrete_functions.items()):
concrete_function = self._concrete_functions[name]
# By setting the structured_outputs directly, we can rely on this
# function_lib.ConcreteFunction object to perform the output repacking
# logic. The only limitation of that logic is that it only works
# with output that is convertible to Tensors and the conversion
# always happens. For example tf.TensorShape([2, 3]) will be
# converted to Tensor representing [2, 3].
original_outputs = coder.decode_proto(proto.output_signature)
# The original_outputs here had Tensors converted to TensorSpecs, so
# the restored function's structured_outputs field will not be
# exactly the same. Fortunately the repacking logic cares only about
# the structure; and the unpacking logic cares only about structure
# and types.
concrete_function._func_graph.structured_outputs = original_outputs # pylint: disable=protected-access
concrete_function._func_graph.structured_input_signature = ( # pylint: disable=protected-access
coder.decode_proto(proto.canonicalized_input_signature))
concrete_function._initialize_function_spec() # pylint: disable=protected-access
  def _setup_functions_captures(self):
    """Setup captures and variables in restored functions.

    For each restored concrete function, resolves its saved `bound_inputs`
    into live tensors/handles and injects them as the function's captured
    inputs; variable-kind nodes are also attached to the FuncGraph's
    `variables` list.
    """
    concrete_functions = sorted(self._proto.concrete_functions.items())
    for name, proto in concrete_functions:
      concrete_function = self._concrete_functions[name]
      bound_inputs = [
          self._get_tensor_from_node(node_id, name)
          for node_id in proto.bound_inputs]
      bound_variables = [
          self._nodes[node_id]
          for node_id in proto.bound_inputs
          if self._proto.nodes[node_id].WhichOneof("kind") == "variable"
      ]
      # TODO(andresp): This is only injecting the captured inputs into the
      # concrete function, note that we did not modify the FuncGraph
      # itself.
      concrete_function._captured_inputs = bound_inputs  # pylint: disable=protected-access
      concrete_function._func_graph.variables = bound_variables  # pylint: disable=protected-access
      if bound_inputs:
        for bound_input, internal_capture in zip(
            bound_inputs, concrete_function.inputs[-len(bound_inputs):]):
          if distribute_utils.is_distributed_variable(bound_input):
            concrete_function.graph.capture_distributed_variable(
                bound_input, internal_capture)
          else:
            concrete_function.graph.replace_capture(bound_input,
                                                    internal_capture)
            if internal_capture.dtype == dtypes.resource:
              if resource_variable_ops.is_resource_variable(bound_input):
                try:
                  handle = bound_input.handle
                except ValueError:
                  # For mirrored variables we'll copy handle data for components
                  # as they get captured.
                  pass
                else:
                  handle_data_util.copy_handle_data(handle, internal_capture)
              else:
                handle_data_util.copy_handle_data(bound_input, internal_capture)
            # Setting "captures" first means "capture" won't create a new
            # placeholder for this input.
            concrete_function.graph.capture(bound_input)
  def _get_tensor_from_node(self, node_id, fn_name):
    """Resolves a node id into a tensor to be captured for a function.

    Args:
      node_id: int, index of the node providing the captured value.
      fn_name: str, name of the function requiring the capture (used only
        for the error message when the node was filtered out).

    Returns:
      A tensor-like value suitable for capture (the object itself, its
      resource handle, or its asset path, depending on the node's type).

    Raises:
      ValueError: if the node was excluded by `nodes_to_load` filters or
        cannot be converted to a tensor.
    """
    if self._node_filters is not None and self._nodes[node_id] is None:
      raise ValueError(
          f"Error when processing nodes_to_load. Function '{fn_name}' requires "
          "inputs/variables that are not loaded when nodes_to_load="
          f"{self._node_filters}.")

    with ops.init_scope():
      obj = self._nodes[node_id]
      if distribute_utils.is_distributed_variable(obj):
        return obj
      elif resource_variable_ops.is_resource_variable(obj):
        return obj.handle
      elif isinstance(obj, tracking.Asset):
        return obj.asset_path
      elif tensor_util.is_tf_type(obj):
        return obj
      elif isinstance(obj, tracking.CapturableResource):
        # Note: this executes restored functions in the CapturableResource.
        return obj.resource_handle
      raise ValueError(f"Cannot convert node {obj} to tensor.")
def _initialize_loaded_nodes(self):
nodes = {}
node_setters = {}
for node_id, (node, setter) in self._loaded_nodes.items():
nodes[node_id] = node
node_setters[node_id] = setter
return nodes, node_setters
def _iter_all_nodes(self):
if self._filtered_nodes is None:
return enumerate(self._proto.nodes)
else:
return [(node_id, self._proto.nodes[node_id])
for node_id in self._filtered_nodes]
  def _load_nodes(self):
    """Load all saved objects."""
    # `nodes` maps from node ids to recreated objects
    # `node_setters` maps from node ids to setter functions
    # (same signature as setattr) for setting dependencies.
    nodes, node_setters = self._initialize_loaded_nodes()

    # Figure out which objects are slot variables. These objects are created
    # with Optimizer.add_slot rather than _recreate_variable.
    slot_variable_node_ids = set()
    for _, proto in self._iter_all_nodes():
      for slot_variable_proto in proto.slot_variables:
        slot_variable_node_ids.add(slot_variable_proto.slot_variable_node_id)

    # Re-create everything except slot variables.
    for node_id, proto in self._iter_all_nodes():
      if node_id in slot_variable_node_ids or nodes.get(node_id) is not None:
        # Defer recreating slot variables so we can use the public Optimizer
        # interface.
        continue
      node, setter = self._recreate(proto, node_id)
      nodes[node_id] = node
      node_setters[node_id] = setter

    # Now that we have created the variables being optimized, we have enough
    # information to re-create slot variables for them.
    for node_id, proto in self._iter_all_nodes():
      optimizer_object = nodes[node_id]
      for slot_variable_proto in proto.slot_variables:
        optimized_variable = nodes[
            slot_variable_proto.original_variable_node_id]
        slot_variable = optimizer_object.add_slot(
            var=optimized_variable,
            slot_name=slot_variable_proto.slot_name)
        nodes[slot_variable_proto.slot_variable_node_id] = slot_variable
        node_setters[slot_variable_proto.slot_variable_node_id] = setattr

    # If root object is not loaded, add a dummy root object for checkpoint
    # compatibility.
    if 0 not in nodes:
      nodes[0] = self._recreate_base_user_object()[0]

    # Dense list indexed by node id; nodes excluded by filters remain None.
    self._nodes = [nodes.get(node_id)
                   for node_id in range(len(self._proto.nodes))]
    self._node_setters = node_setters
  def _restore_checkpoint(self):
    """Load state from checkpoint into the deserialized objects."""
    variables_path = saved_model_utils.get_variables_path(self._export_dir)
    # TODO(andresp): Clean use of private methods of TrackableSaver.
    # pylint: disable=protected-access
    saver = util.TrackableSaver(graph_view.ObjectGraphView(self.get(0)))
    with ops.device("CPU"):
      saver._file_prefix_placeholder = constant_op.constant(variables_path)
    if self._save_options.allow_partial_checkpoint:
      # Partial checkpoints may legitimately leave some objects unmatched.
      load_status = saver.restore(variables_path,
                                  self._checkpoint_options).expect_partial()
      load_status.assert_nontrivial_match()
    else:
      load_status = saver.restore(variables_path, self._checkpoint_options)
      load_status.assert_existing_objects_matched()
    checkpoint = load_status._checkpoint

    if not context.executing_eagerly():
      # When running in eager mode, the `restore` call above has already run and
      # restored the state of trackables, and calling `position.restore_ops()`
      # would re-run the restore. In graph mode, that will return a cached list
      # of ops that must run to restore the object on that position. We have to
      # wire them in the initializers of the objects so that they get
      # initialized properly when using common practices (e.g. the ones used by
      # ManagedSession) without further user action.
      for object_id, obj in dict(checkpoint.object_by_proto_id).items():
        position = base.CheckpointPosition(checkpoint=checkpoint,
                                           proto_id=object_id)
        restore_ops = position.restore_ops()
        if restore_ops:
          if resource_variable_ops.is_resource_variable(obj):
            if len(restore_ops) == 1:
              obj._initializer_op = restore_ops[0]
            else:
              obj._initializer_op = control_flow_ops.group(*restore_ops)
          elif isinstance(obj, lookup_ops.LookupInterface):
            # We don't need to check for eager execution here, since this code
            # path should only be taken if we are restoring in graph mode.
            ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, restore_ops)
          else:
            raise NotImplementedError(
                f"Unable to restore state of object {obj} from the checkpoint.")
  def adjust_debug_info_func_names(self, debug_info):
    """Rewrite func names in the debug info by using the concrete func names.

    Args:
      debug_info: a GraphDebugInfo proto whose trace keys have the form
        "<node>@<func>".

    Returns:
      A new GraphDebugInfo proto with each known function name replaced by
      the restored concrete function's signature name (unknown functions
      are mapped to an empty name).
    """
    output_debug_info = graph_debug_info_pb2.GraphDebugInfo()
    output_debug_info.files[:] = debug_info.files
    for key in debug_info.traces:
      node, func = key.split("@")
      new_func = ""
      if func in self._concrete_functions:
        new_func = self._concrete_functions[func].function_def.signature.name
      output_debug_info.traces[node + "@" + new_func].CopyFrom(
          debug_info.traces[key])
    return output_debug_info
def get(self, node_id):
if isinstance(node_id, str):
node_id = self._node_path_to_id[node_id]
return self._nodes[node_id]
  def _recreate(self, proto, node_id):
    """Creates a Python object from a SavedObject protocol buffer.

    Args:
      proto: a SavedObject proto
      node_id: int, the index of this object in the SavedObjectGraph node list.

    Returns:
      The recreated object, and the set-attribute function for reconnecting
      the trackable children.
    """
    registered_class = registration.get_registered_class(proto.registered_name)
    if registered_class:
      # A registered class supplies its own deserialization and its own
      # child-reattachment hook.
      obj = registered_class._deserialize_from_proto(  # pylint: disable=protected-access
          proto=proto.serialized_user_proto)
      return obj, type(obj)._add_trackable_child  # pylint: disable=protected-access
    else:
      return self._recreate_default(proto, node_id)
  def _recreate_default(self, proto, node_id):
    """Creates a Python object from a SavedObject protocol buffer.

    Dispatches on the proto's `kind` oneof; every factory returns an
    `(object, setter)` pair like `_recreate`.
    """
    factory = {
        "user_object": (
            lambda: self._recreate_user_object(proto.user_object, node_id)),
        "asset": lambda: self._recreate_asset(proto.asset),
        "function": lambda: self._recreate_function(proto.function),
        "bare_concrete_function": functools.partial(
            self._recreate_bare_concrete_function,
            proto.bare_concrete_function),
        "variable": lambda: self._recreate_variable(proto.variable),
        "constant": lambda: self._recreate_constant(proto.constant),
        "resource": lambda: self._recreate_resource(proto.resource),
        "captured_tensor": functools.partial(
            self._get_tensor_from_fn, proto.captured_tensor),
    }
    kind = proto.WhichOneof("kind")
    if kind not in factory:
      raise ValueError(f"Unknown SavedObject type: {kind}. Expected one of "
                       f"{list(factory.keys())}.")
    return factory[kind]()
  def _recreate_user_object(self, proto, node_id):
    """Instantiates a SavedUserObject."""
    looked_up = revived_types.deserialize(proto)
    if looked_up is None:
      # No registered revived type matched; fall back to a generic
      # trackable placeholder object.
      return self._recreate_base_user_object(proto, node_id)
    return looked_up
  def _recreate_base_user_object(self, proto=None, node_id=None):
    """Creates a generic AutoTrackable placeholder; args are ignored."""
    del proto, node_id
    # Note: each user object has its own class. This allows making each one
    # individually callable by adding a `__call__` method to the classes of
    # the objects instances that have a `__call__` property.

    class _UserObject(tracking.AutoTrackable):
      pass

    return _UserObject(), setattr
  def _recreate_asset(self, proto):
    """Recreates an Asset pointing into the SavedModel's assets directory."""
    filename = os.path.join(
        saved_model_utils.get_assets_dir(self._export_dir),
        self._asset_file_def[proto.asset_file_def_index].filename)
    asset = tracking.Asset(filename)
    if not context.executing_eagerly():
      # In graph mode, record the asset path in the ASSET_FILEPATHS
      # collection.
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset.asset_path)
    return asset, setattr
  def _recreate_function(self, proto):
    """Restores a function from its SavedFunction proto and the already
    loaded concrete functions."""
    return function_deserialization.recreate_function(
        proto, self._concrete_functions), setattr
  def _recreate_bare_concrete_function(self, proto):
    """Sets up a bare concrete function from its proto."""
    return function_deserialization.setup_bare_concrete_function(
        proto, self._concrete_functions), setattr
  def _recreate_variable(self, proto):
    """Recreates an (uninitialized) variable from a SavedVariable proto.

    The variable's values are filled in later by checkpoint restoration.
    """
    name = proto.name if proto.name else None
    if name is not None:
      dbg_name = name
    else:
      # Placeholder name used only in validation error messages.
      dbg_name = "<variable loaded from saved model>"
    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            proto.synchronization, proto.aggregation, proto.trainable,
            name=dbg_name))

    def uninitialized_variable_creator(next_creator, **kwargs):
      """A variable creator that creates uninitialized variables."""
      del next_creator
      return resource_variable_ops.UninitializedVariable(**kwargs)

    # Create a variable_creator_scope that creates uninitialized variables with
    # a lower priority such that a potential distributed variable_creator_scope
    # can take precedence.
    with ops.get_default_graph()._variable_creator_scope(  # pylint: disable=protected-access
        uninitialized_variable_creator,
        priority=50):
      return variables.Variable(
          shape=proto.shape,
          dtype=proto.dtype,
          name=name,
          trainable=trainable,
          synchronization=synchronization,
          aggregation=aggregation), setattr
  def _recreate_constant(self, proto):
    """Recreates a constant from the operation attributes saved for it."""
    tensor_proto = self._operation_attributes[proto.operation]["value"].tensor
    ndarray = tensor_util.MakeNdarray(tensor_proto)
    if dtypes.as_dtype(tensor_proto.dtype) == dtypes.string:
      # String constants are placed on CPU.
      with ops.device("CPU"):
        imported_constant = constant_op.constant(ndarray)
    else:
      imported_constant = constant_op.constant(ndarray)
    return imported_constant, setattr
  def _get_tensor_from_fn(self, proto):
    """Looks up a captured tensor by name in its concrete function's graph."""
    outer_graph = self._concrete_functions[proto.concrete_function].graph
    captured_tensor = outer_graph.get_tensor_by_name(proto.name)
    return captured_tensor, setattr
  def _recreate_resource(self, proto):
    """Creates a placeholder resource; its handle comes from restored
    functions (see `_RestoredResource`), and children are tracked via
    `_setattr_and_track`."""
    return _RestoredResource(device=proto.device), _setattr_and_track
# TODO(b/124205571,b/124092991): Solve destruction of resources.
class _RestoredResource(tracking.TrackableResource):
  """Restored SavedResource."""

  def __init__(self, device=""):
    super(_RestoredResource, self).__init__(device=device)

  def _create_resource(self):
    # A restored resource cannot recreate its handle from Python; the
    # restored functions are expected to supply it.
    raise RuntimeError()

  def _initialize(self):
    raise RuntimeError()

  # _list_functions_for_serialization expects Function objects, but unlike
  # _create_resource and _initialize, _destroy_function didn't always exist in
  # older TrackableResource implementations, so this default stub must be a
  # Function.
  @def_function.function
  def _destroy_resource(self):
    raise RuntimeError()

  def _list_functions_for_serialization(self, unused_serialization_cache):
    # Overwrite this method to avoid the implementation of
    # base class to re-wrap the polymorphic functions into
    # another layer of `tf.function`.
    functions = {
        "_create_resource": self._create_resource,
        "_initialize": self._initialize,
        "_destroy_resource": self._destroy_resource,
    }
    return functions
def _call_attribute(instance, *args, **kwargs):
return instance.__call__(*args, **kwargs)
def _setattr_and_track(obj, name, value):
  """Sets new attribute and marks it as a dependency if Trackable."""
  setattr(obj, name, value)
  if isinstance(value, base.Trackable):
    # Register the value as a checkpoint dependency under the same name.
    obj._track_trackable(value, name)  # pylint:disable=protected-access
@tf_export("__internal__.saved_model.load_partial", v1=[])
def load_partial(export_dir, filters, tags=None, options=None):
  """Partially load a SavedModel (saved from V2).

  Similar to `tf.saved_model.load`, but with an additional argument that
  lets you specify which nodes to load.
  `tf.saved_model.load_partial(export_dir, ["root"])` and
  `tf.saved_model.load(export_dir)` are equivalent.

  Note: This only works for SavedModels saved with TensorFlow V2 from
  `tf.saved_model.save` or Keras. This will not load SavedModels saved from
  the Estimator API.

  In Tensorflow V2, SavedModel stores the **object graph** of the saved object.
  The graph contains nodes (`tf.Module`, `tf.Variable`, `tf.function`, Keras
  layers, etc.) and edges that are the name of the attributes connecting the
  objects.

  *Example 1*

  ```
  model = tf.Module()
  model.child_layer = tf.Module()
  model.child_layer.v = tf.Variable(5.)
  tf.saved_model.save(model, '/tmp/model')
  loaded = tf.__internal__.saved_model.load_partial(
  ...   '/tmp/model',
  ...   ['root.child_layer', 'root.child_layer.v'])
  loaded['root.child_layer'].v.numpy()
  5.
  loaded['root.child_layer'].v is loaded['root.child_layer.v']
  True
  ```

  *Example 2*

  ```
  model = tf.Module()
  model.child_layer = tf.Module()
  model.child_layer.v = tf.Variable(5.)
  tf.saved_model.save(model, '/tmp/model')
  # Create a variable
  new_variable = tf.Variable(0.)
  loaded = tf.__internal__.saved_model.load_partial(
  ...   '/tmp/model',
  ...   {'root.child_layer': None, 'root.child_layer.v': new_variable})
  loaded['root.child_layer'].v.numpy()
  5.
  new_variable.numpy()
  5.
  ```

  **Loading under different distribution strategies**
  You can load different parts of the model under different distribution
  strategies. Note that this is very experimental so use with care.

  ```
  model = tf.Module()
  model.layer_1 = tf.Module()
  model.layer_1.v = tf.Variable(5.)
  model.layer_2 = tf.Module()
  model.layer_2.v = tf.Variable(7.)
  tf.saved_model.save(model, '/tmp/model')
  # Load with no strategy
  loaded = tf.__internal__.saved_model.load_partial(
  ...   '/tmp/model',
  ...   ['root.layer_1'])
  loaded['root.layer_1'].v
  <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
  ...   loaded2 = tf.__internal__.saved_model.load_partial(
  ...       '/tmp/model',
  ...       ['root.layer_2'])
  loaded2['root.layer_2'].v
  MirroredVariable:{
    0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=7.0>
  }
  ```

  Args:
    export_dir: The SavedModel directory to load from.
    filters: A list or dictionary where each element or key is a string
      path to nodes that should be loaded. Node paths consist of all the child
      attribute names to reach that node in the form: `root.{attribute_name}`.
      The loader will load all of the specified nodes and their recursive
      descendants. When this option is defined, the loader will return a
      dictionary mapping the node paths to the loaded objects.
    tags: A tag or sequence of tags identifying the MetaGraph to load. Optional
      if the SavedModel contains a single MetaGraph, as for those exported from
      `tf.saved_model.save`.
    options: `tf.saved_model.LoadOptions` object that specifies options for
      loading.

  Returns:
    A dictionary mapping node paths from the filter to loaded objects.
  """
  return load_internal(export_dir, tags, options, filters=filters)
@tf_export("saved_model.load", v1=["saved_model.load_v2"])
def load(export_dir, tags=None, options=None):
  """Load a SavedModel from `export_dir`.

  Signatures associated with the SavedModel are available as functions:

  ```python
  imported = tf.saved_model.load(path)
  f = imported.signatures["serving_default"]
  print(f(x=tf.constant([[1.]])))
  ```

  Objects exported with `tf.saved_model.save` additionally have trackable
  objects and functions assigned to attributes:

  ```python
  exported = tf.train.Checkpoint(v=tf.Variable(3.))
  exported.f = tf.function(
      lambda x: exported.v * x,
      input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
  tf.saved_model.save(exported, path)
  imported = tf.saved_model.load(path)
  assert 3. == imported.v.numpy()
  assert 6. == imported.f(x=tf.constant(2.)).numpy()
  ```

  _Loading Keras models_

  Keras models are trackable, so they can be saved to SavedModel. The object
  returned by `tf.saved_model.load` is not a Keras object (i.e. doesn't have
  `.fit`, `.predict`, etc. methods). A few attributes and functions are still
  available: `.variables`, `.trainable_variables` and `.__call__`.

  ```python
  model = tf.keras.Model(...)
  tf.saved_model.save(model, path)
  imported = tf.saved_model.load(path)
  outputs = imported(inputs)
  ```

  Use `tf.keras.models.load_model` to restore the Keras model.

  _Importing SavedModels from TensorFlow 1.x_

  SavedModels from `tf.estimator.Estimator` or 1.x SavedModel APIs have a flat
  graph instead of `tf.function` objects. These SavedModels will be loaded with
  the following attributes:

  * `.signatures`: A dictionary mapping signature names to functions.
  * `.prune(feeds, fetches)`: A method which allows you to extract
    functions for new subgraphs. This is equivalent to importing the SavedModel
    and naming feeds and fetches in a Session from TensorFlow 1.x.

    ```python
    imported = tf.saved_model.load(path_to_v1_saved_model)
    pruned = imported.prune("x:0", "out:0")
    pruned(tf.ones([]))
    ```

    See `tf.compat.v1.wrap_function` for details.
  * `.variables`: A list of imported variables.
  * `.graph`: The whole imported graph.
  * `.restore(save_path)`: A function that restores variables from a checkpoint
    saved from `tf.compat.v1.Saver`.

  _Consuming SavedModels asynchronously_

  When consuming SavedModels asynchronously (the producer is a separate
  process), the SavedModel directory will appear before all files have been
  written, and `tf.saved_model.load` will fail if pointed at an incomplete
  SavedModel. Rather than checking for the directory, check for
  "saved_model_dir/saved_model.pb". This file is written atomically as the last
  `tf.saved_model.save` file operation.

  Args:
    export_dir: The SavedModel directory to load from.
    tags: A tag or sequence of tags identifying the MetaGraph to load. Optional
      if the SavedModel contains a single MetaGraph, as for those exported from
      `tf.saved_model.save`.
    options: `tf.saved_model.LoadOptions` object that specifies options for
      loading.

  Returns:
    A trackable object with a `signatures` attribute mapping from signature
    keys to functions. If the SavedModel was exported by `tf.saved_model.save`,
    it also points to trackable objects, functions and debug info with which
    it has been saved.

  Raises:
    ValueError: If `tags` don't match a MetaGraph in the SavedModel.
  """
  result = load_internal(export_dir, tags, options)["root"]
  return result
def load_internal(export_dir, tags=None, options=None, loader_cls=Loader,
                  filters=None):
  """Loader implementation.

  Args:
    export_dir: The SavedModel directory to load from.
    tags: Optional tag (or sequence of tags) selecting the MetaGraph to load.
    options: Optional `tf.saved_model.LoadOptions`.
    loader_cls: Class used to deserialize the object graph (injectable for
      testing/specialization).
    filters: Optional node-path filters; when set, a dict mapping each
      filter entry to its loaded object is returned (V2 SavedModels only).

  Returns:
    A dict: either `{"root": root_object}` or, when `filters` is set, a
    mapping from the filter node paths to the loaded objects.

  Raises:
    ValueError: if `tags` don't match the MetaGraph, or `filters` is used
      with a non-V2 SavedModel.
    FileNotFoundError: if the SavedModel's variables cannot be found on
      this device.
  """
  options = options or load_options.LoadOptions()
  if tags is not None and not isinstance(tags, set):
    # Supports e.g. tags=SERVING and tags=[SERVING]. Sets aren't considered
    # sequences for nest.flatten, so we put those through as-is.
    tags = nest.flatten(tags)
  saved_model_proto, debug_info = (
      loader_impl.parse_saved_model_with_debug_info(export_dir))

  if (len(saved_model_proto.meta_graphs) == 1 and
      saved_model_proto.meta_graphs[0].HasField("object_graph_def")):
    metrics.IncrementReadApi(_LOAD_V2_LABEL)
    meta_graph_def = saved_model_proto.meta_graphs[0]
    # tensor_content field contains raw bytes in little endian format
    # which causes problems when loaded on big-endian systems
    # requiring byteswap
    if sys.byteorder == "big":
      saved_model_utils.swap_function_tensor_content(meta_graph_def, "little",
                                                     "big")
    if (tags is not None
        and set(tags) != set(meta_graph_def.meta_info_def.tags)):
      # Bug fix: the first fragment was a plain string, so `{tags}` was
      # emitted literally instead of the offending value.
      raise ValueError(
          f"Got an incompatible argument to `tags`: {tags}. The SavedModel at "
          f"{export_dir} has one MetaGraph with tags "
          f"{meta_graph_def.meta_info_def.tags}. You may omit the argument, "
          "pass 'None', or pass matching tags.")
    object_graph_proto = meta_graph_def.object_graph_def

    ckpt_options = checkpoint_options.CheckpointOptions(
        experimental_io_device=options.experimental_io_device)
    with ops.init_scope():
      try:
        loader = loader_cls(object_graph_proto, saved_model_proto, export_dir,
                            ckpt_options, options, filters)
      except errors.NotFoundError as err:
        raise FileNotFoundError(
            str(err) + "\n You may be trying to load on a different device "
            "from the computational device. Consider setting the "
            "`experimental_io_device` option in `tf.saved_model.LoadOptions` "
            "to the io_device such as '/job:localhost'.")
      root = loader.get(0)
      if isinstance(loader, Loader):
        root.graph_debug_info = loader.adjust_debug_info_func_names(debug_info)
    root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version
    root.tensorflow_git_version = (
        meta_graph_def.meta_info_def.tensorflow_git_version)
    metrics.IncrementRead(write_version="2")
  else:
    if filters:
      raise ValueError("SavedModels saved from Tensorflow 1.x or Estimator (any"
                       " version) cannot be loaded with node filters.")
    with ops.init_scope():
      root = load_v1_in_v2.load(export_dir, tags)
      root.graph_debug_info = debug_info

  if filters:
    return {node_id: loader.get(node_id) for node_id in filters}
  else:
    return {"root": root}
| |
from django.db import models
from datetime import datetime
import traceback
import os
from django.conf import settings
from datasets.models import Dataset
from models.models import ArtmModel
from assessment.models import AssessmentProblem
from django.contrib.auth.models import User
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from shutil import rmtree
from django.contrib import admin
class Research(models.Model):
    """One run of a research script over a dataset/model/assessment problem.

    ``run()`` executes the script found in ``algo/research/<script_name>``
    with this instance exposed as the global ``research``, streaming an
    HTML report into the research folder. The ``report*`` helpers append
    to that report; the ``get_*`` helpers lazily create output folders.
    """
    dataset = models.ForeignKey(Dataset, null=True, blank=True)
    model = models.ForeignKey(ArtmModel, null=True, blank=True)
    problem = models.ForeignKey(AssessmentProblem, null=True, blank=True)
    researcher = models.ForeignKey(User, null=False)
    script_name = models.TextField(null=False)
    start_time = models.DateTimeField(null=False, default=datetime.now)
    finish_time = models.DateTimeField(null=True, blank=True)
    sealed = models.BooleanField(default=False)
    # Status codes: 1 - running, 2 - OK, 3 - error, 4 - interrupted,
    # 5 - backup.
    status = models.IntegerField(null=False, default=0)
    error_message = models.TextField(null=True, blank=True)
    is_private = models.BooleanField(default=False)

    def run(self):
        """Executes the research script, recording status and the report."""
        with open(self.get_report_file(), "w", encoding="utf-8") as f:
            f.write("<html>\n<head><meta charset='utf-8'></head>\n<body>")
            f.write("<h1>Research report</h1>\n")
            f.write("<p>Research id: %d<br>\n" % self.id)
            f.write("Dataset: %s<br>\n" % str(self.dataset))
            if self.model:
                f.write("Model: %s (id=%d)<br>\n" %
                        (str(self.model), self.model.id))
            if self.problem:
                f.write("Assesment problem: %s<br>\n" % str(self.problem))
            f.write("Script: %s<br>\n" % self.script_name)
            f.write("Researcher: %s<br>\n" % self.researcher.username)
            f.write("Research started: %s</p>\n" %
                    self.start_time.strftime("%d.%m.%y %H:%M:%S"))
            f.write("<hr>\n")
        script_file_name = os.path.join(
            settings.BASE_DIR, "algo", "research", self.script_name)
        # Counter used to name pictures/tables emitted by the script.
        self.img_counter = 0
        try:
            with open(script_file_name, "r", encoding="utf-8") as f:
                code = compile(f.read(), script_file_name, "exec")
                # NOTE(security): the script runs with full interpreter
                # privileges; only trusted scripts may live in algo/research.
                exec(code, {"research": self})
        except BaseException:
            # Catch everything (including KeyboardInterrupt) so the failure
            # is recorded on the model instead of killing the worker.
            self.status = 3
            self.error_message = traceback.format_exc()
            self.finish_time = datetime.now()
            self.save()
            return
        self.finish_time = datetime.now()
        self.status = 2
        self.save()
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write("<hr>\n")
            f.write("<p>Research finished: %s</p>\n" %
                    self.finish_time.strftime("%d.%m.%y %H:%M:%S"))
            f.write("</body>\n</html>\n")

    def report_html(self, text):
        """Appends raw HTML to the report."""
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write(text + "\n")

    def report(self, text):
        """Appends a line of HTML terminated by a line break."""
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write(text + "<br>\n")

    def log(self, text):
        """Appends a [LOG]-prefixed line to the report."""
        self.report("[LOG] %s" % text)

    def report_p(self, text=""):
        """Appends a paragraph to the report."""
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write("<p>" + text + "</p>\n")

    def gca(self, figsize=None):
        """Returns the current axes of a fresh matplotlib figure."""
        self.figure = self.get_figure(figsize=figsize)
        return self.figure.gca()

    def get_figure(self, figsize=None):
        """Creates (and remembers) a new matplotlib figure.

        Matplotlib is imported lazily with the non-interactive Agg backend
        so reports can be rendered on headless servers.
        """
        import matplotlib as mpl
        mpl.use("Agg")
        import matplotlib.pyplot as plt
        self.figure = plt.figure(figsize=figsize)
        return self.figure

    def show_matrix(self, m):
        """Renders matrix ``m`` as an image and embeds it in the report."""
        self.gca().imshow(m, interpolation="nearest")
        self.report_picture()

    def report_picture(
            self,
            height=400,
            width=400,
            align='left',
            bbox_extra_artists=None,
            name=None):
        """Saves the current figure as PNG + EPS and embeds the PNG.

        Requires ``run()`` (or a caller) to have initialized
        ``self.img_counter`` and ``self.figure``.
        """
        self.img_counter += 1
        file_name = str(self.img_counter) + '.png'
        eps_file_name = str(self.img_counter) + '.eps'
        if name:
            eps_file_name = name + ".eps"
        self.figure.savefig(
            os.path.join(
                self.get_pic_folder(),
                eps_file_name),
            bbox_extra_artists=bbox_extra_artists,
            bbox_inches='tight')
        self.figure.savefig(
            os.path.join(
                self.get_pic_folder(),
                file_name),
            bbox_extra_artists=bbox_extra_artists,
            bbox_inches='tight')
        self.figure.clf()
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write(
                ("<div align='%s'><a href='pic/%s'>"
                 "<img src='pic/%s' width='%d' heigth='%d' />"
                 "</a></div>\n") %
                (align, eps_file_name, file_name, width, height))
        del self.figure

    def latex_table(self, table, format):
        """Returns ``table`` rendered as LaTeX ``tabular`` source."""
        ncols = len(table[0])
        ans = "\\begin{tabular}{|%s|}\n" % "|".join(["c"] * ncols)
        for row in table:
            ans += "\\hline\n"
            for i in range(ncols):
                ans += (format % row[i])
                if i == ncols - 1:
                    ans += " \\\\\n"
                else:
                    ans += " & "
        ans += "\\hline\n"
        ans += "\\end{tabular}\n"
        return ans

    def report_table(self, table, format="%s"):
        """Writes ``table`` to the report and saves a LaTeX copy."""
        with open(self.get_report_file(), "a", encoding="utf-8") as f:
            f.write('<table border="1" cellpadding="0" cellspacing="0">\n')
            for row in table:
                f.write("<tr>\n")
                for cell in row:
                    if format:
                        f.write("<td>")
                        f.write(format % cell)
                        f.write("</td>")
                f.write("</tr>\n")
            f.write("</table>\n")
            self.img_counter += 1
            f.write(
                "<p><a href='pic/%d.txt'>Table in LaTeX</a></p>" %
                self.img_counter)
        table_file = os.path.join(self.get_pic_folder(),
                                  str(self.img_counter) + '.txt')
        with open(table_file, "w", encoding='utf-8') as f:
            f.write(self.latex_table(table, format))

    def get_folder(self):
        """Returns (creating if needed) this research's data folder."""
        path = os.path.join(settings.DATA_DIR, "research", str(self.id))
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    def get_pic_folder(self):
        """Returns (creating if needed) the folder for pictures/tables."""
        path = os.path.join(settings.DATA_DIR, "research", str(self.id), "pic")
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    def get_report_file(self):
        """Returns the path of the HTML report file."""
        return os.path.join(self.get_folder(), "report.html")

    def __str__(self):
        return "Research %d (%s, %s)" % (
            self.id, str(self.dataset), self.script_name)

    def duration(self):
        """Returns elapsed (or total, if finished) run time as MM:SS."""
        if self.finish_time:
            dt = self.finish_time - self.start_time
        else:
            dt = datetime.now() - self.start_time
        # Bug fix: dt.seconds silently drops whole days for runs longer
        # than 24 hours; total_seconds() covers the full span.
        seconds = int(dt.total_seconds())
        return "{:02}:{:02}".format(seconds // 60, seconds % 60)
def on_start():
    """Marks researches left in the 'running' state as interrupted.

    Intended to be invoked at application startup so stale status-1 rows
    from a previous process do not appear to still be running.
    """
    stale_researches = Research.objects.filter(status=1)
    for research in stale_researches:
        research.status = 4
        research.save()
@receiver(pre_delete, sender=Research, dispatch_uid='research_delete_signal')
def remove_research_files(sender, instance, using, **kwargs):
    """Cleans up a research's on-disk folder before the row is deleted.

    Sealed researches are preserved: their folder is re-homed under a fresh
    backup record (status 5) instead of being removed. Unsealed researches
    have their folder deleted outright.
    """
    if instance.sealed:
        backup = Research()
        backup.researcher = instance.researcher
        backup.status = 5
        backup.sealed = True
        backup.start_time = instance.start_time
        backup.finish_time = instance.finish_time
        backup.script_name = instance.script_name
        backup.save()
        # Move the folder so it is addressed by the backup record's id.
        os.rename(
            instance.get_folder(), os.path.join(
                settings.DATA_DIR, "research", str(
                    backup.id)))
    else:
        try:
            rmtree(instance.get_folder())
        except BaseException:
            # Best-effort cleanup: a missing/locked folder must not block
            # deletion of the database row.
            pass
# Expose Research records in the Django admin interface.
admin.site.register(Research)
| |
import glob
from itertools import product
import os
import openmc
import openmc.lib
import numpy as np
import pytest
from tests.testing_harness import PyAPITestHarness
# Each voxel of the test mesh corresponds to 12 tetrahedral elements in the
# unstructured mesh (used to group tet bins back into voxels).
TETS_PER_VOXEL = 12
class UnstructuredMeshTest(PyAPITestHarness):
    """Harness comparing unstructured-mesh tallies to a regular-mesh tally."""

    def __init__(self, statepoint_name, model, inputs_true, holes):
        super().__init__(statepoint_name, model, inputs_true)
        self.holes = holes  # holes in the test mesh

    def _compare_results(self):
        """Checks that both mesh tallies in the statepoint agree."""
        with openmc.StatePoint(self._sp_name) as sp:
            # loop over the tallies and get data
            for tally in sp.tallies.values():
                # find the regular and unstructured meshes
                if tally.contains_filter(openmc.MeshFilter):
                    flt = tally.find_filter(openmc.MeshFilter)
                    if isinstance(flt.mesh, openmc.RegularMesh):
                        reg_mesh_data, reg_mesh_std_dev = self.get_mesh_tally_data(tally)
                        if self.holes:
                            # Drop voxels that are absent from the test mesh.
                            reg_mesh_data = np.delete(reg_mesh_data, self.holes)
                            reg_mesh_std_dev = np.delete(reg_mesh_std_dev, self.holes)
                    else:
                        umesh_tally = tally
                        unstructured_data, unstructured_std_dev = self.get_mesh_tally_data(tally, True)
            # we expect these results to be the same to within at least ten
            # decimal places
            # NOTE(review): assumes the statepoint always contains one regular
            # and one unstructured mesh tally; otherwise the locals used
            # below would be unbound — confirm against the model setup.
            decimals = 10 if umesh_tally.estimator == 'collision' else 8
            np.testing.assert_array_almost_equal(unstructured_data,
                                                 reg_mesh_data,
                                                 decimals)

    @staticmethod
    def get_mesh_tally_data(tally, structured=False):
        """Returns (mean, std_dev) arrays, summing tet bins per voxel when
        ``structured`` is True."""
        data = tally.get_reshaped_data(value='mean')
        std_dev = tally.get_reshaped_data(value='std_dev')
        if structured:
            # Group the TETS_PER_VOXEL tetrahedra belonging to each voxel so
            # the two meshes can be compared element-by-element.
            data.shape = (data.size // TETS_PER_VOXEL, TETS_PER_VOXEL)
            std_dev.shape = (std_dev.size // TETS_PER_VOXEL, TETS_PER_VOXEL)
        else:
            data.shape = (data.size, 1)
            std_dev.shape = (std_dev.size, 1)
        return np.sum(data, axis=1), np.sum(std_dev, axis=1)

    def _cleanup(self):
        """Removes mesh tally output files in addition to the base cleanup."""
        super()._cleanup()
        output = glob.glob('tally*.vtk')
        output += glob.glob('tally*.e')
        for f in output:
            if os.path.exists(f):
                os.remove(f)
# Parameter matrix for the unstructured-mesh tests.
param_values = (['libmesh', 'moab'],          # mesh libraries
                ['collision', 'tracklength'],  # estimators
                [True, False],                 # geometry outside of the mesh
                [(333, 90, 77), None])         # location of holes in the mesh

# One test case per combination of the parameters above; each case gets a
# distinct reference inputs file. (Comprehension instead of an append loop.)
test_cases = [{'library': lib,
               'estimator': estimator,
               'external_geom': ext_geom,
               'holes': holes,
               'inputs_true': 'inputs_true{}.dat'.format(i)}
              for i, (lib, estimator, ext_geom, holes)
              in enumerate(product(*param_values))]
@pytest.mark.parametrize("test_opts", test_cases)
def test_unstructured_mesh(test_opts):
    """Build a fuel/clad/water nested-cube model, tally flux on both a
    regular mesh and an unstructured tet mesh, and check they agree.

    ``test_opts`` selects the mesh library, estimator, whether geometry
    extends beyond the mesh, and optional holes in the test mesh.
    """
    openmc.reset_auto_ids()

    # skip the test if the library is not enabled
    if test_opts['library'] == 'moab' and not openmc.lib._dagmc_enabled():
        # fixed typo in skip message: "enbaled" -> "enabled"
        pytest.skip("DAGMC (and MOAB) mesh not enabled in this build.")

    if test_opts['library'] == 'libmesh' and not openmc.lib._libmesh_enabled():
        pytest.skip("LibMesh is not enabled in this build.")

    # skip the tracklength test for libmesh
    if test_opts['library'] == 'libmesh' and \
       test_opts['estimator'] == 'tracklength':
        pytest.skip("Tracklength tallies are not supported using libmesh.")

    ### Materials ###
    materials = openmc.Materials()

    fuel_mat = openmc.Material(name="fuel")
    fuel_mat.add_nuclide("U235", 1.0)
    fuel_mat.set_density('g/cc', 4.5)
    materials.append(fuel_mat)

    zirc_mat = openmc.Material(name="zircaloy")
    zirc_mat.add_element("Zr", 1.0)
    zirc_mat.set_density("g/cc", 5.77)
    materials.append(zirc_mat)

    water_mat = openmc.Material(name="water")
    water_mat.add_nuclide("H1", 2.0)
    water_mat.add_nuclide("O16", 1.0)
    water_mat.set_density("atom/b-cm", 0.07416)
    materials.append(water_mat)

    materials.export_to_xml()

    ### Geometry ###
    # inner 10 cm fuel cube centered at the origin
    fuel_min_x = openmc.XPlane(-5.0, name="minimum x")
    fuel_max_x = openmc.XPlane(5.0, name="maximum x")

    fuel_min_y = openmc.YPlane(-5.0, name="minimum y")
    fuel_max_y = openmc.YPlane(5.0, name="maximum y")

    fuel_min_z = openmc.ZPlane(-5.0, name="minimum z")
    fuel_max_z = openmc.ZPlane(5.0, name="maximum z")

    fuel_cell = openmc.Cell(name="fuel")
    fuel_cell.region = +fuel_min_x & -fuel_max_x & \
                       +fuel_min_y & -fuel_max_y & \
                       +fuel_min_z & -fuel_max_z
    fuel_cell.fill = fuel_mat

    # 1 cm cladding shell around the fuel
    clad_min_x = openmc.XPlane(-6.0, name="minimum x")
    clad_max_x = openmc.XPlane(6.0, name="maximum x")

    clad_min_y = openmc.YPlane(-6.0, name="minimum y")
    clad_max_y = openmc.YPlane(6.0, name="maximum y")

    clad_min_z = openmc.ZPlane(-6.0, name="minimum z")
    clad_max_z = openmc.ZPlane(6.0, name="maximum z")

    clad_cell = openmc.Cell(name="clad")
    clad_cell.region = (-fuel_min_x | +fuel_max_x |
                        -fuel_min_y | +fuel_max_y |
                        -fuel_min_z | +fuel_max_z) & \
                       (+clad_min_x & -clad_max_x &
                        +clad_min_y & -clad_max_y &
                        +clad_min_z & -clad_max_z)
    clad_cell.fill = zirc_mat

    # enlarge the water region when geometry should extend past the mesh
    if test_opts['external_geom']:
        bounds = (15, 15, 15)
    else:
        bounds = (10, 10, 10)

    water_min_x = openmc.XPlane(x0=-bounds[0],
                                name="minimum x",
                                boundary_type='vacuum')
    water_max_x = openmc.XPlane(x0=bounds[0],
                                name="maximum x",
                                boundary_type='vacuum')

    water_min_y = openmc.YPlane(y0=-bounds[1],
                                name="minimum y",
                                boundary_type='vacuum')
    water_max_y = openmc.YPlane(y0=bounds[1],
                                name="maximum y",
                                boundary_type='vacuum')

    water_min_z = openmc.ZPlane(z0=-bounds[2],
                                name="minimum z",
                                boundary_type='vacuum')
    water_max_z = openmc.ZPlane(z0=bounds[2],
                                name="maximum z",
                                boundary_type='vacuum')

    water_cell = openmc.Cell(name="water")
    water_cell.region = (-clad_min_x | +clad_max_x |
                         -clad_min_y | +clad_max_y |
                         -clad_min_z | +clad_max_z) & \
                        (+water_min_x & -water_max_x &
                         +water_min_y & -water_max_y &
                         +water_min_z & -water_max_z)
    water_cell.fill = water_mat

    # create a containing universe
    geometry = openmc.Geometry([fuel_cell, clad_cell, water_cell])

    ### Tallies ###
    # create meshes and mesh filters
    regular_mesh = openmc.RegularMesh()
    regular_mesh.dimension = (10, 10, 10)
    regular_mesh.lower_left = (-10.0, -10.0, -10.0)
    regular_mesh.upper_right = (10.0, 10.0, 10.0)

    regular_mesh_filter = openmc.MeshFilter(mesh=regular_mesh)

    if test_opts['holes']:
        mesh_filename = "test_mesh_tets_w_holes.e"
    else:
        mesh_filename = "test_mesh_tets.e"

    uscd_mesh = openmc.UnstructuredMesh(mesh_filename, test_opts['library'])
    uscd_filter = openmc.MeshFilter(mesh=uscd_mesh)

    # create tallies
    tallies = openmc.Tallies()

    regular_mesh_tally = openmc.Tally(name="regular mesh tally")
    regular_mesh_tally.filters = [regular_mesh_filter]
    regular_mesh_tally.scores = ['flux']
    regular_mesh_tally.estimator = test_opts['estimator']
    tallies.append(regular_mesh_tally)

    uscd_tally = openmc.Tally(name="unstructured mesh tally")
    uscd_tally.filters = [uscd_filter]
    uscd_tally.scores = ['flux']
    uscd_tally.estimator = test_opts['estimator']
    tallies.append(uscd_tally)

    ### Settings ###
    settings = openmc.Settings()
    settings.run_mode = 'fixed source'
    settings.particles = 1000
    settings.batches = 10

    # source setup: 15 MeV point source at the origin
    r = openmc.stats.Uniform(a=0.0, b=0.0)
    theta = openmc.stats.Discrete(x=[0.0], p=[1.0])
    phi = openmc.stats.Discrete(x=[0.0], p=[1.0])

    space = openmc.stats.SphericalIndependent(r, theta, phi)
    energy = openmc.stats.Discrete(x=[15.e+06], p=[1.0])

    source = openmc.Source(space=space, energy=energy)
    settings.source = source

    model = openmc.model.Model(geometry=geometry,
                               materials=materials,
                               tallies=tallies,
                               settings=settings)

    harness = UnstructuredMeshTest('statepoint.10.h5',
                                   model,
                                   test_opts['inputs_true'],
                                   test_opts['holes'])
    harness.main()
| |
import hashlib
import sys
import pprint
from optparse import OptionParser
from xml.etree import ElementTree
class RepoInvestigatorException(Exception):
    """Base exception raised when a repository record cannot be handled."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)
# XML namespaces in ElementTree "Clark notation" ({uri}localname), used as
# tag prefixes when matching elements parsed from the repository file.
OAI_NAMESPACE = "{http://www.openarchives.org/OAI/2.0/oai_dc/}"
# Simple Dublin Core element namespace (dc:title, dc:creator, ...)
DC_NAMESPACE = "{http://purl.org/dc/elements/1.1/}"
class Record:
    """Base class for a Dublin Core metadata record in an OAI-PMH
    Repository file."""

    def __init__(self, elem, options):
        # elem is an ElementTree <record> element; options come from
        # the OptionParser in main()
        self.elem = elem
        self.options = options

    def get_record_id(self):
        """Return the OAI identifier text of this record.

        Raises RepoInvestigatorException when header/identifier is missing.
        """
        try:
            record_id = self.elem.find("header/identifier").text
            return record_id
        # was a bare "except:"; narrowed to the failure actually expected
        # (find() returning None -> AttributeError on .text) so that
        # KeyboardInterrupt/SystemExit are no longer swallowed
        except AttributeError:
            raise RepoInvestigatorException("Record does not have a valid Record Identifier")

    def get_record_status(self):
        # OAI-PMH marks removed records with status="deleted"; anything
        # without the attribute is treated as active
        return self.elem.find("header").get("status", "active")

    def get_elements(self):
        """Return UTF-8 encoded values of the requested DC element,
        or None when the record has none."""
        out = []
        # elem[1][0] is the oai_dc container inside <metadata>
        elements = self.elem[1][0].findall(DC_NAMESPACE + self.options.element)
        for element in elements:
            if element.text:
                out.append(element.text.encode("utf-8").strip())
        if len(out) == 0:
            out = None
        self.elements = out
        return self.elements

    def get_all_data(self):
        """Return (tag, value) tuples for every populated element,
        with newlines flattened to spaces."""
        out = []
        for i in self.elem[1][0]:
            if i.text:
                out.append((i.tag, i.text.encode("utf-8").strip().replace("\n", " ")))
        return out

    def get_stats(self):
        """Return a {tag: occurrence count} mapping for this record."""
        stats = {}
        for element in self.elem[1][0]:
            stats.setdefault(element.tag, 0)
            stats[element.tag] += 1
        return stats

    def has_element(self):
        """Return True when the requested element has a non-empty value."""
        out = []
        elements = self.elem[1][0].findall(DC_NAMESPACE + self.options.element)
        for element in elements:
            if element.text:
                return True
        return False
def collect_stats(stats_aggregate, stats):
    """Fold one record's per-field counts into the running aggregate.

    Bumps the record counter, and for each field tracks how many records
    contain it (field_count) and its total occurrences (field_count_total).
    """
    stats_aggregate["record_count"] += 1
    field_info = stats_aggregate["field_info"]
    for field, occurrences in stats.items():
        info = field_info.setdefault(field, {"field_count": 0})
        info["field_count"] = info["field_count"] + 1
        info["field_count_total"] = info.get("field_count_total", 0) + occurrences
def create_stats_averages(stats_aggregate):
    """Annotate each field's info dict with two averages and return the
    aggregate.

    field_count_total_average: mean occurrences per record (all records);
    field_count_element_average: mean occurrences per record that has it.
    """
    record_count = stats_aggregate["record_count"]
    for info in stats_aggregate["field_info"].values():
        total = info["field_count_total"]
        info["field_count_total_average"] = float(total) / float(record_count)
        info["field_count_element_average"] = float(total) / float(info["field_count"])
    return stats_aggregate
def calc_completeness(stats_averages):
    """Compute four completeness measures for the repository.

    dc_completeness: mean presence across the 15 simple DC elements;
    collection_completeness: mean over fields present in >10% of records;
    wwww_completeness: mean over who/what/where/when fields;
    average_completeness: mean of the three above.
    """
    completeness = {}
    record_count = stats_averages["record_count"]
    completeness_total = 0
    wwww_total = 0
    collection_total = 0
    collection_field_to_count = 0

    # "WWWW" elements: who, what, where, when
    wwww = [
        "{http://purl.org/dc/elements/1.1/}creator",     # who
        "{http://purl.org/dc/elements/1.1/}title",       # what
        "{http://purl.org/dc/elements/1.1/}identifier",  # where
        "{http://purl.org/dc/elements/1.1/}date"         # when
    ]

    for element in sorted(stats_averages["field_info"]):
        element_completeness_percent = ((stats_averages["field_info"][element]["field_count"]
                                         / float(record_count)) * 100)
        completeness_total += element_completeness_percent

        # gather collection completeness: only fields present in more
        # than 10% of records count toward it
        if element_completeness_percent > 10:
            collection_total += element_completeness_percent
            collection_field_to_count += 1

        # gather wwww completeness
        if element in wwww:
            wwww_total += element_completeness_percent

    # 15 == number of simple Dublin Core elements
    completeness["dc_completeness"] = completeness_total / float(15)
    if collection_field_to_count:
        completeness["collection_completeness"] = collection_total / float(collection_field_to_count)
    else:
        # no field passed the 10% threshold (e.g. empty repository);
        # previously this raised ZeroDivisionError
        completeness["collection_completeness"] = 0
    completeness["wwww_completeness"] = wwww_total / float(len(wwww))
    completeness["average_completeness"] = ((completeness["dc_completeness"] +
                                             completeness["collection_completeness"] +
                                             completeness["wwww_completeness"]) / float(3))
    return completeness
def pretty_print_stats(stats_averages):
    # Render a per-element presence table followed by the four
    # completeness scores.  NOTE: Python 2 print statements throughout.
    record_count = stats_averages["record_count"]
    #get header length
    element_length = 0
    for element in stats_averages["field_info"]:
        if element_length < len(element):
            element_length = len(element)
    print "\n\n"
    for element in sorted(stats_averages["field_info"]):
        percent = (stats_averages["field_info"][element]["field_count"] / float(record_count)) * 100
        # bar of one '=' per 4% (Python 2 integer division)
        percentPrint = "=" * (int(percent) / 4)
        # right-align the element name against the longest tag
        columnOne = " " * (element_length - len(element)) + element
        print "%s: |%-25s| %6s/%s | %3d%% " % (
            columnOne,
            percentPrint,
            stats_averages["field_info"][element]["field_count"],
            record_count,
            percent
        )
    print "\n"
    completeness = calc_completeness(stats_averages)
    for i in ["dc_completeness", "collection_completeness", "wwww_completeness", "average_completeness"]:
        print "%23s %f" % (i, completeness[i])
def main():
usage = "usage: %prog [options] <OAI-PMH Repository File"
stats_aggregate = {
"record_count": 0,
"field_info": {}
}
element_stats_aggregate = {}
parser = OptionParser(usage)
parser.add_option("-e", "--element", dest="element",
help="elemnt to print to screen")
parser.add_option("-i", "--id", action="store_true", dest="id", default=False,
help="prepend meta_id to line")
parser.add_option("-s", "--stats", action="store_true", dest="stats", default=False,
help="only print stats for repository")
parser.add_option("-p", "--present", action="store_true", dest="present", default=False,
help="print if there is value of defined element in record")
parser.add_option("-d", "--dump", action="store_true", dest="dump", default=False,
help="Dump all record data to a tab delimited format")
(options, args) = parser.parse_args()
if len(args) == 0:
print usage
exit()
if options.element is None:
options.stats = True
s = 0
for event, elem in ElementTree.iterparse(args[0]):
if elem.tag == "record":
r = Record(elem, options)
record_id = r.get_record_id()
if options.dump is True:
if r.get_record_status() != "deleted":
record_fields = r.get_all_data()
for field_data in record_fields:
print "%s\t%s\t%s" % (record_id, field_data[0], field_data[1].replace("\t", " "))
elem.clear()
continue
if options.stats is False and options.present is False:
#move along if record is deleted
if r.get_record_status() != "deleted" and r.get_elements() is not None:
for i in r.get_elements():
if options.id:
print "\t".join([record_id, i])
else:
print i
if options.stats is False and options.present is True:
if r.get_record_status() != "deleted":
print "%s %s" % (record_id, r.has_element())
if options.stats is True and options.element is None:
if (s % 1000) == 0 and s != 0:
print "%d records processed" % s
s += 1
if r.get_record_status() != "deleted":
collect_stats(stats_aggregate, r.get_stats())
elem.clear()
if options.stats is True and options.element is None and options.dump is False:
stats_averages = create_stats_averages(stats_aggregate)
pretty_print_stats(stats_averages)
if __name__ == "__main__":
main()
| |
# Copyright 2020 Nexenta by DDN, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for NexentaStor5 iSCSI Cinder volume driver
"""
from unittest import mock
import uuid
from oslo_utils import units
from cinder import context
from cinder import db
from cinder.tests.unit.consistencygroup.fake_cgsnapshot import (
fake_cgsnapshot_obj as fake_cgsnapshot)
from cinder.tests.unit.consistencygroup.fake_consistencygroup import (
fake_consistencyobject_obj as fake_cgroup)
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot
from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nexenta.ns5 import iscsi
from cinder.volume.drivers.nexenta.ns5 import jsonrpc
class TestNexentaISCSIDriver(test.TestCase):
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
@mock.patch.object(jsonrpc, 'NefRequest')
def setUp(self, nef_request, update_lock):
    """Create a fully-populated mock Configuration and driver instance.

    NefRequest and NefProxy.update_lock are patched so that do_setup()
    performs no REST calls.
    """
    super(TestNexentaISCSIDriver, self).setUp()
    self.ctxt = context.get_admin_context()
    self.cfg = mock.Mock(spec=conf.Configuration)
    self.cfg.max_over_subscription_ratio = 20.0
    self.cfg.volume_backend_name = 'nexenta_iscsi'
    self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s'
    self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s'
    self.cfg.nexenta_cache_image_template = 'cache-image-%s'
    self.cfg.nexenta_cache_snapshot_template = 'cache-snapshot-%s'
    self.cfg.nexenta_migration_service_prefix = 'cinder-migration'
    self.cfg.nexenta_migration_throttle = 100
    self.cfg.nexenta_dataset_description = ''
    self.cfg.nexenta_host = '1.1.1.1'
    self.cfg.nexenta_user = 'admin'
    self.cfg.nexenta_password = 'nexenta'
    self.cfg.nexenta_rest_port = 8443
    self.cfg.nexenta_use_https = False
    self.cfg.nexenta_iscsi_target_portal_port = 3260
    self.cfg.nexenta_target_prefix = 'iqn:cinder'
    self.cfg.nexenta_target_group_prefix = 'cinder'
    # self.cfg.nexenta_ns5_blocksize = 32
    self.cfg.nexenta_sparse = True
    self.cfg.nexenta_image_cache = True
    self.cfg.nexenta_lu_writebackcache_disabled = True
    self.cfg.nexenta_dataset_compression = 'on'
    self.cfg.nexenta_dataset_dedup = 'off'
    self.cfg.reserved_percentage = 20
    self.cfg.nexenta_host_group_prefix = 'hg'
    # NOTE: nexenta_volume was previously assigned twice ('cinder' and
    # then 'pool'); the dead first assignment has been removed.
    self.cfg.nexenta_volume = 'pool'
    self.cfg.driver_ssl_cert_verify = False
    self.cfg.nexenta_luns_per_target = 20
    self.cfg.nexenta_blocksize = 8192
    self.cfg.nexenta_iscsi_target_portals = '1.1.1.1:3260,2.2.2.2:3260'
    self.cfg.nexenta_iscsi_target_host_group = 'all'
    self.cfg.nexenta_rest_address = '1.1.1.1'
    self.cfg.nexenta_rest_backoff_factor = 1
    self.cfg.nexenta_rest_retry_count = 3
    self.cfg.nexenta_rest_connect_timeout = 1
    self.cfg.nexenta_rest_read_timeout = 1
    self.cfg.nexenta_volume_group = 'vg'
    self.cfg.safe_get = self.fake_safe_get
    self.drv = iscsi.NexentaISCSIDriver(
        configuration=self.cfg)
    self.drv.db = db
    self.drv.do_setup(self.ctxt)
def fake_safe_get(self, key):
    """Mimic oslo Configuration.safe_get: unknown options yield None."""
    return getattr(self.cfg, key, None)
def fake_uuid4():
    """Deterministic stand-in for uuid.uuid4, used via mock.patch."""
    fixed = '38d18a48-b791-4046-b523-a84aad966310'
    return uuid.UUID(fixed)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefProxy.update_lock')
def test_do_setup(self, update_lock):
    """do_setup() completes and returns None."""
    update_lock.return_value = True
    result = self.drv.do_setup(self.ctxt)
    self.assertIsNone(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefServices.get')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.create')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_check_for_setup_error(self, volume_group_get,
                               volume_group_create,
                               service_get):
    """check_for_setup_error(): existing group, auto-created group, and
    failure when the iscsit service is offline."""
    path = self.drv.root_path
    # NOTE(review): nexenta_ns5_blocksize is never assigned in setUp
    # (the line there is commented out); on a spec'd Mock this yields a
    # Mock, so bs is not a real number -- confirm intended.
    bs = self.cfg.nexenta_ns5_blocksize * units.Ki
    name = 'iscsit'
    state = 'online'
    # case 1: volume group exists and iscsit service is online
    volume_group_get.return_value = {'path': path}
    service_get.return_value = {'name': name, 'state': state}
    self.assertIsNone(self.drv.check_for_setup_error())
    volume_group_get.assert_called_with(path)
    service_get.assert_called_with(name)
    # case 2: volume group missing (ENOENT) -> created on the fly
    volume_group_get.side_effect = jsonrpc.NefException({
        'message': 'Failed to open dataset',
        'code': 'ENOENT'
    })
    volume_group_create.return_value = {}
    self.assertIsNone(self.drv.check_for_setup_error())
    volume_group_get.assert_called_with(path)
    payload = {'path': path, 'volumeBlockSize': bs}
    volume_group_create.assert_called_with(payload)
    service_get.assert_called_with(name)
    # case 3: iscsit offline -> setup error raised
    # NOTE(review): volume_group_get.side_effect from case 2 is still
    # active here, so return_value below is shadowed by the exception.
    state = 'offline'
    volume_group_get.return_value = {'path': path}
    service_get.return_value = {'name': name, 'state': state}
    self.assertRaises(jsonrpc.NefException,
                      self.drv.check_for_setup_error)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.create')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_vendor_properties')
def test_create_volume(self, get_vendor_properties, create_volume):
    """create_volume() posts path, byte size and vendor properties."""
    volume = fake_volume(self.ctxt)
    vendor_spec = {
        'sparseVolume': True,
        'volumeBlockSize': 32768,
        'compressionMode': 'on'
    }
    get_vendor_properties.return_value = vendor_spec
    create_volume.return_value = {}
    self.assertIsNone(self.drv.create_volume(volume))
    expected_payload = {
        'path': self.drv._get_volume_path(volume),
        'volumeSize': volume['size'] * units.Gi
    }
    expected_payload.update(vendor_spec)
    create_volume.assert_called_with(expected_payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.delete')
def test_delete_volume(self, delete_volume):
    """delete_volume() removes the zvol along with its snapshots."""
    volume = fake_volume(self.ctxt)
    self.assertIsNone(self.drv.delete_volume(volume))
    expected_path = self.drv._get_volume_path(volume)
    delete_volume.assert_called_with(expected_path, {'snapshots': True})
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.set')
def test_extend_volume(self, extend_volume):
    """extend_volume() sets the new volumeSize in bytes."""
    volume = fake_volume(self.ctxt)
    new_size_gb = volume['size'] * 2
    self.assertIsNone(self.drv.extend_volume(volume, new_size_gb))
    expected_payload = {'volumeSize': new_size_gb * units.Gi}
    extend_volume.assert_called_with(self.drv._get_volume_path(volume),
                                     expected_payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.delete')
def test_delete_snapshot(self, delete_snapshot):
    """delete_snapshot() requests deferred deletion."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    delete_snapshot.return_value = {}
    self.assertIsNone(self.drv.delete_snapshot(snapshot))
    snapshot_path = self.drv._get_snapshot_path(snapshot)
    delete_snapshot.assert_called_with(snapshot_path, {'defer': True})
def test_snapshot_revert_use_temp_snapshot(self):
    """Driver reverts in place; no temporary snapshot is required."""
    self.assertEqual(False, self.drv.snapshot_revert_use_temp_snapshot())
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.rollback')
def test_revert_to_snapshot(self, rollback_volume):
    """revert_to_snapshot() rolls the zvol back to the named snapshot."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    rollback_volume.return_value = {}
    result = self.drv.revert_to_snapshot(self.ctxt, volume, snapshot)
    self.assertIsNone(result)
    volume_path = self.drv._get_volume_path(volume)
    rollback_volume.assert_called_with(volume_path,
                                       {'snapshot': snapshot['name']})
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.delete_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.create_snapshot')
def test_create_cloned_volume(self, create_snapshot, create_volume,
                              delete_snapshot):
    """create_cloned_volume(): happy path, then failure of the clone
    step and of the temporary-snapshot step (errors propagate)."""
    volume = fake_volume(self.ctxt)
    clone_spec = {'id': fake.VOLUME2_ID}
    clone = fake_volume(self.ctxt, **clone_spec)
    create_snapshot.return_value = {}
    create_volume.return_value = {}
    delete_snapshot.return_value = {}
    self.assertIsNone(self.drv.create_cloned_volume(clone, volume))
    # cloning goes through a temporary origin snapshot of the source
    snapshot = {
        'name': self.drv.origin_snapshot_template % clone['id'],
        'volume_id': volume['id'],
        'volume_name': volume['name'],
        'volume_size': volume['size']
    }
    create_snapshot.assert_called_with(snapshot)
    create_volume.assert_called_with(clone, snapshot)
    # clone creation fails mid-way -> NefException propagates
    create_volume.side_effect = jsonrpc.NefException({
        'message': 'Failed to create volume',
        'code': 'EBUSY'
    })
    self.assertRaises(jsonrpc.NefException,
                      self.drv.create_cloned_volume,
                      clone, volume)
    # temporary snapshot creation fails -> NefException propagates
    create_snapshot.side_effect = jsonrpc.NefException({
        'message': 'Failed to open dataset',
        'code': 'ENOENT'
    })
    self.assertRaises(jsonrpc.NefException,
                      self.drv.create_cloned_volume,
                      clone, volume)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.create')
def test_create_snapshot(self, create_snapshot):
    """create_snapshot() creates the snapshot at the derived path."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    create_snapshot.return_value = {}
    self.assertIsNone(self.drv.create_snapshot(snapshot))
    create_snapshot.assert_called_with(
        {'path': self.drv._get_snapshot_path(snapshot)})
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.extend_volume')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.clone')
def test_create_volume_from_snapshot(self, clone_snapshot,
                                     extend_volume):
    """Cloning a snapshot targets the new volume path and then extends
    the clone to the requested (larger) size."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    # clone is larger than the source, so an extend call is expected
    clone_size = 10
    clone_spec = {
        'id': fake.VOLUME2_ID,
        'size': clone_size
    }
    clone = fake_volume(self.ctxt, **clone_spec)
    snapshot_path = self.drv._get_snapshot_path(snapshot)
    clone_path = self.drv._get_volume_path(clone)
    clone_snapshot.return_value = {}
    extend_volume.return_value = None
    self.assertIsNone(
        self.drv.create_volume_from_snapshot(clone, snapshot)
    )
    clone_payload = {'targetPath': clone_path}
    clone_snapshot.assert_called_with(snapshot_path, clone_payload)
    extend_volume.assert_called_with(clone, clone_size)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._create_target_group')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._create_target')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._target_group_props')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_portals')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_group')
@mock.patch('uuid.uuid4', fake_uuid4)
def test_initialize_connection(self, get_host_group, get_host_portals,
                               get_target_group_props, create_target,
                               create_target_group, list_mappings):
    """initialize_connection() returns multipath iSCSI connection info.

    Mock arguments map to the patch decorators bottom-up; uuid.uuid4 is
    patched with fake_uuid4 directly, so it contributes no argument.
    """
    volume = fake_volume(self.ctxt)
    host_iqn = 'iqn:cinder-client'
    target_iqn = 'iqn:cinder-target'
    connector = {'initiator': host_iqn, 'multipath': True}
    host_group = 'cinder-host-group'
    target_group = 'cinder-target-group'
    target_portals = self.cfg.nexenta_iscsi_target_portals.split(',')
    get_host_group.return_value = host_group
    get_host_portals.return_value = {
        target_iqn: target_portals
    }
    # NEF already reports a LUN mapping for this host/target pair
    list_mappings.return_value = [{
        'id': '309F9B9013CF627A00000000',
        'lun': 0,
        'hostGroup': host_group,
        'targetGroup': target_group
    }]
    get_target_group_props.return_value = {
        target_iqn: target_portals
    }
    create_target.return_value = {}
    create_target_group.return_value = {}
    result = self.drv.initialize_connection(volume, connector)
    # multipath: one LUN and one IQN entry per portal
    expected = {
        'driver_volume_type': 'iscsi',
        'data': {
            'discard': True,
            'target_discovered': False,
            'encrypted': False,
            'qos_specs': None,
            'target_luns': [0] * len(target_portals),
            'access_mode': 'rw',
            'volume_id': volume['id'],
            'target_portals': target_portals,
            'target_iqns': [target_iqn] * len(target_portals)
        }
    }
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_group')
def test_terminate_connection(self, get_host_group,
                              list_mappings, delete_mapping):
    """terminate_connection() removes the host's LUN mapping."""
    volume = fake_volume(self.ctxt)
    connector = {'initiator': 'iqn:test'}
    get_host_group.return_value = 'cinder-host-group'
    list_mappings.return_value = [{
        'id': '309F9B9013CF627A00000000',
        'lun': 0,
        'hostGroup': 'cinder-host-group',
        'targetGroup': 'cinder-target-group'
    }]
    delete_mapping.return_value = {}
    result = self.drv.terminate_connection(volume, connector)
    self.assertEqual({'driver_volume_type': 'iscsi', 'data': {}}, result)
def test_create_export(self):
    """create_export() is a no-op for this driver."""
    volume = fake_volume(self.ctxt)
    result = self.drv.create_export(self.ctxt, volume,
                                    {'initiator': 'iqn:test'})
    self.assertIsNone(result)
def test_ensure_export(self):
    """ensure_export() is a no-op for this driver."""
    volume = fake_volume(self.ctxt)
    result = self.drv.ensure_export(self.ctxt, volume)
    self.assertIsNone(result)
def test_remove_export(self):
    """remove_export() is a no-op for this driver."""
    volume = fake_volume(self.ctxt)
    result = self.drv.remove_export(self.ctxt, volume)
    self.assertIsNone(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_get_volume_stats(self, get_volume_group):
    """get_volume_stats(refresh=True) queries pool usage from NEF."""
    get_volume_group.return_value = {
        'bytesAvailable': 100 * units.Gi,
        'bytesUsed': 75 * units.Gi
    }
    result = self.drv.get_volume_stats(True)
    get_volume_group.assert_called_with(
        self.drv.root_path, {'fields': 'bytesAvailable,bytesUsed'})
    self.assertEqual(self.drv._stats, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_update_volume_stats(self, get_volume_group):
    """_update_volume_stats() publishes the full capabilities dict built
    from pool usage and the driver configuration."""
    available = 8
    used = 2
    get_volume_group.return_value = {
        'bytesAvailable': available * units.Gi,
        'bytesUsed': used * units.Gi
    }
    # location string: DriverClass:host:pool/group
    location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % {
        'driver': self.drv.__class__.__name__,
        'host': self.cfg.nexenta_host,
        'pool': self.cfg.nexenta_volume,
        'group': self.cfg.nexenta_volume_group
    }
    description = '%(product)s %(host)s:%(pool)s/%(group)s' % {
        'product': self.drv.product_name,
        'host': self.cfg.nexenta_host,
        'pool': self.cfg.nexenta_volume,
        'group': self.cfg.nexenta_volume_group
    }
    display_name = 'Capabilities of %(product)s %(proto)s driver' % {
        'product': self.drv.product_name,
        'proto': self.drv.storage_protocol
    }
    max_over_subscription_ratio = self.cfg.max_over_subscription_ratio
    visibility = 'public'
    # scheme mirrors the nexenta_use_https option
    if self.cfg.nexenta_use_https:
        nef_scheme = 'https'
    else:
        nef_scheme = 'http'
    nef_url = self.drv.nef.url()
    expected = {
        'vendor_name': 'Nexenta',
        'description': description,
        'display_name': display_name,
        'driver_version': self.drv.VERSION,
        'storage_protocol': self.drv.storage_protocol,
        'pool_name': self.drv.pool,
        'visibility': visibility,
        'allocated_capacity_gb': used,
        'total_capacity_gb': used + available,
        'total_volumes': 0,
        'free_capacity_gb': available,
        'provisioned_capacity_gb': 0,
        'reserved_percentage': self.cfg.reserved_percentage,
        'max_over_subscription_ratio': max_over_subscription_ratio,
        'thick_provisioning_support': True,
        'thin_provisioning_support': True,
        'sparse_copy_volume': True,
        'online_extend_support': True,
        'QoS_support': False,
        'multiattach': True,
        'consistencygroup_support': True,
        'consistent_group_snapshot_enabled': True,
        'volume_backend_name': self.cfg.volume_backend_name,
        'location_info': location_info,
        'nef_scheme': nef_scheme,
        'nef_hosts': self.cfg.nexenta_rest_address,
        'nef_port': self.cfg.nexenta_rest_port,
        'nef_url': nef_url
    }
    self.assertIsNone(self.drv._update_volume_stats())
    self.assertEqual(expected, self.drv._stats)
def test__get_volume_path(self):
    """Volume path is pool/volume_group/volume_name."""
    volume = fake_volume(self.ctxt)
    expected = '/'.join([self.cfg.nexenta_volume,
                         self.cfg.nexenta_volume_group,
                         volume['name']])
    self.assertEqual(expected, self.drv._get_volume_path(volume))
def test__get_snapshot_path(self):
    """Snapshot path is pool/volume_group/volume_name@snapshot_name."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    expected = '{0}/{1}/{2}@{3}'.format(self.cfg.nexenta_volume,
                                        self.cfg.nexenta_volume_group,
                                        snapshot['volume_name'],
                                        snapshot['name'])
    self.assertEqual(expected, self.drv._get_snapshot_path(snapshot))
def test__get_target_group_name(self):
    """Target group name keeps the suffix of the target IQN."""
    target_iqn = self.cfg.nexenta_target_prefix + '-test'
    result = self.drv._get_target_group_name(target_iqn)
    self.assertEqual(self.cfg.nexenta_target_group_prefix + '-test',
                     result)
def test__get_target_name(self):
    """Target name keeps the suffix of the target group name."""
    target_group = self.cfg.nexenta_target_group_prefix + '-test'
    result = self.drv._get_target_name(target_group)
    self.assertEqual(self.cfg.nexenta_target_prefix + '-test', result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefNetAddresses.list')
def test__get_host_addresses(self, list_addresses):
    """Host addresses are returned without their CIDR suffix."""
    expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
    list_addresses.return_value = [
        {'addressType': 'static', 'address': address + '/24'}
        for address in expected
    ]
    self.assertEqual(expected, self.drv._get_host_addresses())
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_addresses')
def test__get_host_portals(self, list_addresses):
    """Only configured portals whose address exists on the host remain."""
    list_addresses.return_value = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
    result = self.drv._get_host_portals()
    self.assertEqual(['1.1.1.1:3260', '2.2.2.2:3260'], result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargets.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.list')
def test__target_group_props(self, list_target_groups, list_targets):
    """_target_group_props() maps each member IQN of the group to the
    host portals its target listens on."""
    host_portals = ['1.1.1.1:3260', '2.2.2.2:3260']
    target_group = 'cinder-test'
    # the group has a single member target
    list_target_groups.return_value = [{
        'name': target_group,
        'members': [
            'iqn:cinder-test'
        ]
    }]
    # that target listens on both host portals
    list_targets.return_value = [{
        'name': 'iqn:cinder-test',
        'portals': [
            {
                'address': '1.1.1.1',
                'port': 3260
            },
            {
                'address': '2.2.2.2',
                'port': 3260
            }
        ]
    }]
    expected = {'iqn:cinder-test': host_portals}
    result = self.drv._target_group_props(target_group, host_portals)
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.create')
def test__create_target_group(self, create_target_group):
    """_create_target_group() posts the name and member list to NEF."""
    create_target_group.return_value = {}
    self.assertIsNone(self.drv._create_target_group('name',
                                                    ['a', 'b', 'c']))
    create_target_group.assert_called_with(
        {'name': 'name', 'members': ['a', 'b', 'c']})
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.set')
def test__update_target_group(self, update_target_group):
    """_update_target_group() replaces the group's member list."""
    update_target_group.return_value = {}
    self.assertIsNone(self.drv._update_target_group('name',
                                                    ['a', 'b', 'c']))
    update_target_group.assert_called_with('name',
                                           {'members': ['a', 'b', 'c']})
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.delete')
def test__delete_lun_mapping(self, delete_mapping):
    """_delete_lun_mapping() deletes the mapping by id."""
    delete_mapping.return_value = {}
    self.assertIsNone(self.drv._delete_lun_mapping('name'))
    delete_mapping.assert_called_with('name')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargets.create')
def test__create_target(self, create_target):
    """_create_target() converts portal strings to address/port dicts."""
    portals = ['1.1.1.1:3260', '2.2.2.2:3260']
    create_target.return_value = {}
    self.assertIsNone(self.drv._create_target('name', portals))
    expected_portals = []
    for portal in portals:
        address, port = portal.split(':')
        expected_portals.append({'address': address, 'port': int(port)})
    create_target.assert_called_with({'name': 'name',
                                      'portals': expected_portals})
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefHostGroups.list')
def test__get_host_group(self, get_hostgroup):
    """_get_host_group() returns the group containing the member."""
    get_hostgroup.return_value = [
        {'name': 'name1',
         'members': ['member1', 'member2', 'member3']},
        {'name': 'name2',
         'members': ['member4', 'member5', 'member6']}
    ]
    self.assertEqual('name1', self.drv._get_host_group('member1'))
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefHostGroups.create')
def test__create_host_group(self, create_host_group):
    """_create_host_group() posts the name and member list to NEF."""
    create_host_group.return_value = {}
    self.assertIsNone(self.drv._create_host_group('name',
                                                  ['a', 'b', 'c']))
    create_host_group.assert_called_with(
        {'name': 'name', 'members': ['a', 'b', 'c']})
def test__s2d(self):
portals = ['1.1.1.1:3260', '2.2.2.2:3260']
expected = [
{
'address': '1.1.1.1',
'port': 3260
},
{
'address': '2.2.2.2',
'port': 3260
}
]
result = self.drv._s2d(portals)
self.assertEqual(expected, result)
def test__d2s(self):
portals = [
{
'address': '1.1.1.1',
'port': 3260
},
{
'address': '2.2.2.2',
'port': 3260
}
]
expected = ['1.1.1.1:3260', '2.2.2.2:3260']
result = self.drv._d2s(portals)
self.assertEqual(expected, result)
def test_create_consistencygroup(self):
cgroup = fake_cgroup(self.ctxt)
result = self.drv.create_consistencygroup(self.ctxt, cgroup)
expected = {}
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver.delete_volume')
def test_delete_consistencygroup(self, delete_volume):
cgroup = fake_cgroup(self.ctxt)
volume1 = fake_volume(self.ctxt)
volume2_spec = {'id': fake.VOLUME2_ID}
volume2 = fake_volume(self.ctxt, **volume2_spec)
volumes = [volume1, volume2]
delete_volume.return_value = {}
result = self.drv.delete_consistencygroup(self.ctxt,
cgroup,
volumes)
expected = ({}, [])
self.assertEqual(expected, result)
def test_update_consistencygroup(self):
cgroup = fake_cgroup(self.ctxt)
volume1 = fake_volume(self.ctxt)
volume2_spec = {'id': fake.VOLUME2_ID}
volume2 = fake_volume(self.ctxt, **volume2_spec)
volume3_spec = {'id': fake.VOLUME3_ID}
volume3 = fake_volume(self.ctxt, **volume3_spec)
volume4_spec = {'id': fake.VOLUME4_ID}
volume4 = fake_volume(self.ctxt, **volume4_spec)
add_volumes = [volume1, volume2]
remove_volumes = [volume3, volume4]
result = self.drv.update_consistencygroup(self.ctxt,
cgroup,
add_volumes,
remove_volumes)
expected = ({}, [], [])
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.create')
def test_create_cgsnapshot(self, create_snapshot,
rename_snapshot,
delete_snapshot):
cgsnapshot = fake_cgsnapshot(self.ctxt)
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
snapshots = [snapshot]
cgsnapshot_name = (
self.cfg.nexenta_group_snapshot_template % cgsnapshot['id'])
cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name)
snapshot_path = '%s/%s@%s' % (self.drv.root_path,
snapshot['volume_name'],
cgsnapshot_name)
create_snapshot.return_value = {}
rename_snapshot.return_value = {}
delete_snapshot.return_value = {}
result = self.drv.create_cgsnapshot(self.ctxt,
cgsnapshot,
snapshots)
create_payload = {'path': cgsnapshot_path, 'recursive': True}
create_snapshot.assert_called_with(create_payload)
rename_payload = {'newName': snapshot['name']}
rename_snapshot.assert_called_with(snapshot_path, rename_payload)
delete_payload = {'defer': True, 'recursive': True}
delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload)
expected = ({}, [])
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver.delete_snapshot')
def test_delete_cgsnapshot(self, delete_snapshot):
cgsnapshot = fake_cgsnapshot(self.ctxt)
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
snapshots = [snapshot]
delete_snapshot.return_value = {}
result = self.drv.delete_cgsnapshot(self.ctxt,
cgsnapshot,
snapshots)
delete_snapshot.assert_called_with(snapshot)
expected = ({}, [])
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver.create_volume_from_snapshot')
def test_create_consistencygroup_from_src_snapshots(self, create_volume):
cgroup = fake_cgroup(self.ctxt)
cgsnapshot = fake_cgsnapshot(self.ctxt)
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
snapshots = [snapshot]
clone_spec = {'id': fake.VOLUME2_ID}
clone = fake_volume(self.ctxt, **clone_spec)
clones = [clone]
create_volume.return_value = {}
result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup,
clones, cgsnapshot,
snapshots, None,
None)
create_volume.assert_called_with(clone, snapshot)
expected = ({}, [])
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver.create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.create')
def test_create_consistencygroup_from_src_volumes(self,
create_snapshot,
create_volume,
delete_snapshot):
src_cgroup = fake_cgroup(self.ctxt)
dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID}
dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec)
src_volume = fake_volume(self.ctxt)
src_volumes = [src_volume]
dst_volume_spec = {'id': fake.VOLUME2_ID}
dst_volume = fake_volume(self.ctxt, **dst_volume_spec)
dst_volumes = [dst_volume]
create_snapshot.return_value = {}
create_volume.return_value = {}
delete_snapshot.return_value = {}
result = self.drv.create_consistencygroup_from_src(self.ctxt,
dst_cgroup,
dst_volumes,
None, None,
src_cgroup,
src_volumes)
snapshot_name = (
self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id'])
snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name)
create_payload = {'path': snapshot_path, 'recursive': True}
create_snapshot.assert_called_with(create_payload)
snapshot = {
'name': snapshot_name,
'volume_id': src_volume['id'],
'volume_name': src_volume['name'],
'volume_size': src_volume['size']
}
create_volume.assert_called_with(dst_volume, snapshot)
delete_payload = {'defer': True, 'recursive': True}
delete_snapshot.assert_called_with(snapshot_path, delete_payload)
expected = ({}, [])
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefVolumes.list')
def test__get_existing_volume(self, list_volumes):
volume = fake_volume(self.ctxt)
parent = self.drv.root_path
name = volume['name']
size = volume['size']
path = self.drv._get_volume_path(volume)
list_volumes.return_value = [{
'name': name,
'path': path,
'volumeSize': size * units.Gi
}]
result = self.drv._get_existing_volume({'source-name': name})
payload = {
'parent': parent,
'fields': 'name,path,volumeSize',
'name': name
}
list_volumes.assert_called_with(payload)
expected = {
'name': name,
'path': path,
'size': size
}
self.assertEqual(expected, result)
def test__check_already_managed_snapshot(self):
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
result = self.drv._check_already_managed_snapshot(snapshot)
expected = False
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.list')
def test__get_existing_snapshot(self, list_snapshots):
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
name = snapshot['name']
path = self.drv._get_snapshot_path(snapshot)
parent = self.drv._get_volume_path(volume)
list_snapshots.return_value = [{
'name': name,
'path': path
}]
payload = {'source-name': name}
result = self.drv._get_existing_snapshot(snapshot, payload)
payload = {
'parent': parent,
'fields': 'name,path',
'recursive': False,
'name': name
}
list_snapshots.assert_called_with(payload)
expected = {
'name': name,
'path': path,
'volume_name': volume['name'],
'volume_size': volume['size']
}
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefVolumes.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver._get_existing_volume')
def test_manage_existing(self, get_existing_volume,
list_mappings, rename_volume):
existing_volume = fake_volume(self.ctxt)
manage_volume_spec = {'id': fake.VOLUME2_ID}
manage_volume = fake_volume(self.ctxt, **manage_volume_spec)
existing_name = existing_volume['name']
existing_path = self.drv._get_volume_path(existing_volume)
existing_size = existing_volume['size']
manage_path = self.drv._get_volume_path(manage_volume)
get_existing_volume.return_value = {
'name': existing_name,
'path': existing_path,
'size': existing_size
}
list_mappings.return_value = []
payload = {'source-name': existing_name}
self.assertIsNone(self.drv.manage_existing(manage_volume, payload))
get_existing_volume.assert_called_with(payload)
payload = {'volume': existing_path}
list_mappings.assert_called_with(payload)
payload = {'newPath': manage_path}
rename_volume.assert_called_with(existing_path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
'NexentaISCSIDriver._get_existing_volume')
def test_manage_existing_get_size(self, get_volume):
volume = fake_volume(self.ctxt)
name = volume['name']
size = volume['size']
path = self.drv._get_volume_path(volume)
get_volume.return_value = {
'name': name,
'path': path,
'size': size
}
payload = {'source-name': name}
result = self.drv.manage_existing_get_size(volume, payload)
expected = size
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefVolumes.list')
def test_get_manageable_volumes(self, list_volumes):
volume = fake_volume(self.ctxt)
volumes = [volume]
name = volume['name']
size = volume['size']
path = self.drv._get_volume_path(volume)
guid = 12345
parent = self.drv.root_path
list_volumes.return_value = [{
'name': name,
'path': path,
'guid': guid,
'volumeSize': size * units.Gi
}]
result = self.drv.get_manageable_volumes(volumes, None, 1,
0, 'size', 'asc')
payload = {
'parent': parent,
'fields': 'name,guid,path,volumeSize',
'recursive': False
}
list_volumes.assert_called_with(payload)
expected = [{
'cinder_id': volume['id'],
'extra_info': None,
'reason_not_safe': 'Volume already managed',
'reference': {
'source-guid': guid,
'source-name': volume['name']
},
'safe_to_manage': False,
'size': volume['size']
}]
self.assertEqual(expected, result)
def test_unmanage(self):
volume = fake_volume(self.ctxt)
self.assertIsNone(self.drv.unmanage(volume))
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver._get_existing_snapshot')
def test_manage_existing_snapshot(self, get_existing_snapshot,
rename_snapshot):
volume = fake_volume(self.ctxt)
existing_snapshot = fake_snapshot(self.ctxt)
existing_snapshot.volume = volume
manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID}
manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec)
manage_snapshot.volume = volume
existing_name = existing_snapshot['name']
manage_name = manage_snapshot['name']
volume_name = volume['name']
volume_size = volume['size']
existing_path = self.drv._get_snapshot_path(existing_snapshot)
get_existing_snapshot.return_value = {
'name': existing_name,
'path': existing_path,
'volume_name': volume_name,
'volume_size': volume_size
}
rename_snapshot.return_value = {}
payload = {'source-name': existing_name}
self.assertIsNone(
self.drv.manage_existing_snapshot(manage_snapshot, payload)
)
get_existing_snapshot.assert_called_with(manage_snapshot, payload)
payload = {'newName': manage_name}
rename_snapshot.assert_called_with(existing_path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'iscsi.NexentaISCSIDriver._get_existing_snapshot')
def test_manage_existing_snapshot_get_size(self, get_snapshot):
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
snapshot_name = snapshot['name']
volume_name = volume['name']
volume_size = volume['size']
snapshot_path = self.drv._get_snapshot_path(snapshot)
get_snapshot.return_value = {
'name': snapshot_name,
'path': snapshot_path,
'volume_name': volume_name,
'volume_size': volume_size
}
payload = {'source-name': snapshot_name}
result = self.drv.manage_existing_snapshot_get_size(volume, payload)
expected = volume['size']
self.assertEqual(expected, result)
@mock.patch('cinder.objects.VolumeList.get_all_by_host')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSnapshots.list')
def test_get_manageable_snapshots(self, list_snapshots, list_volumes):
volume = fake_volume(self.ctxt)
volumes = [volume]
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
snapshots = [snapshot]
guid = 12345
name = snapshot['name']
path = self.drv._get_snapshot_path(snapshot)
parent = self.drv._get_volume_path(volume)
list_snapshots.return_value = [{
'name': name,
'path': path,
'guid': guid,
'parent': parent,
'hprService': '',
'snaplistId': ''
}]
list_volumes.return_value = volumes
result = self.drv.get_manageable_snapshots(snapshots, None, 1,
0, 'size', 'asc')
payload = {
'parent': self.drv.root_path,
'fields': 'name,guid,path,parent,hprService,snaplistId',
'recursive': True
}
list_snapshots.assert_called_with(payload)
expected = [{
'cinder_id': snapshot['id'],
'extra_info': None,
'reason_not_safe': 'Snapshot already managed',
'source_reference': {
'name': volume['name']
},
'reference': {
'source-guid': guid,
'source-name': snapshot['name']
},
'safe_to_manage': False,
'size': volume['size']
}]
self.assertEqual(expected, result)
def test_unmanage_snapshot(self):
volume = fake_volume(self.ctxt)
snapshot = fake_snapshot(self.ctxt)
snapshot.volume = volume
self.assertIsNone(self.drv.unmanage_snapshot(snapshot))
| |
"""
Bots are a special child typeclasses of
Player that are controlled by the server.
"""
from django.conf import settings
from evennia.players.players import DefaultPlayer
from evennia.scripts.scripts import DefaultScript
from evennia.commands.command import Command
from evennia.commands.cmdset import CmdSet
from evennia.utils import search
_IDLE_TIMEOUT = settings.IDLE_TIMEOUT
_SESSIONS = None
# Bot helper utilities
class BotStarter(DefaultScript):
    """
    Script whose sole purpose is to kick its bot into gear when it is
    initialized and, if an idle timeout is configured, to repeat just
    often enough to keep the bot's sessions from timing out.
    """
    def at_script_creation(self):
        # runs once when the script object is first created
        self.key = "botstarter"
        self.desc = "bot start/keepalive"
        self.persistent = True
        self.db.started = False
        if _IDLE_TIMEOUT > 0:
            # call before idle_timeout triggers
            self.interval = int(max(60, _IDLE_TIMEOUT * 0.90))
            self.start_delay = True
    def at_start(self):
        "Kick bot into gear (only once per server run)"
        if not self.db.started:
            self.player.start()
            self.db.started = True
    def at_repeat(self):
        """
        Called every self.interval seconds to keep connection. We cannot
        use the IDLE command from inside the game since the system will
        not catch it (commands executed from the server side usually
        has no sessions). So we update the idle counter manually here
        instead. This keeps the bot from getting hit by IDLE_TIMEOUT.
        """
        global _SESSIONS
        if not _SESSIONS:
            # delayed import; binds the module-level _SESSIONS cache
            from evennia.server.sessionhandler import SESSIONS as _SESSIONS
        for session in _SESSIONS.sessions_from_player(self.player):
            session.update_session_counters(idle=True)
    def at_server_reload(self):
        """
        If server reloads we don't need to reconnect the protocol
        again, this is handled by the portal reconnect mechanism.
        """
        self.db.started = True
    def at_server_shutdown(self):
        "Make sure we restart the bot after a full shutdown"
        self.db.started = False
class CmdBotListen(Command):
    """
    Absorb input aimed specifically at the bot. A session must prepend
    its data with ``bot_data_in`` for this command to match.
    """
    key = "bot_data_in"
    def func(self):
        "Hand the stripped argument string over to the bot typeclass"
        data = self.args.strip()
        self.obj.execute_cmd(data, sessid=self.sessid)
class BotCmdSet(CmdSet):
    "Command set containing only the bot-listening command"
    key = "botcmdset"
    def at_cmdset_creation(self):
        "Populate the cmdset"
        self.add(CmdBotListen())
# Bot base class
class Bot(DefaultPlayer):
    """
    A Bot will start itself when the server
    starts (it will generally not do so
    on a reload - that will be handled by the
    normal Portal session resync)
    """
    def basetype_setup(self):
        """
        Set up the basic properties for the bot: encoding, locks,
        the bot cmdset and the starter/keepalive script.
        """
        # the text encoding to use.
        self.db.encoding = "utf-8"
        # A basic security setup: only Wizards may examine/edit/delete/boot
        # the bot; nobody may msg it directly
        lockstring = "examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:false()"
        self.locks.add(lockstring)
        # set the basics of being a bot
        self.cmdset.add_default(BotCmdSet)
        script_key = "%s" % self.key
        self.scripts.add(BotStarter, key=script_key)
        # flag marking this player object as a bot
        self.is_bot = True
    def start(self, **kwargs):
        """
        This starts the bot, whatever that may mean.
        Subclasses implement the actual protocol startup.
        """
        pass
    def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
        """
        Evennia -> outgoing protocol
        """
        super(Bot, self).msg(text=text, from_obj=from_obj, sessid=sessid, **kwargs)
    def execute_cmd(self, raw_string, sessid=None):
        """
        Incoming protocol -> Evennia
        """
        # NOTE(review): this routes incoming data through msg() rather
        # than the parent's execute_cmd - confirm this relaying is intended
        super(Bot, self).msg(raw_string, sessid=sessid)
    def at_server_shutdown(self):
        "We need to handle this case manually since the shutdown may be a reset"
        # leftover Python 2 debug print
        print "bots at_server_shutdown called"
        for session in self.get_all_sessions():
            session.sessionhandler.disconnect(session)
# Bot implementations
# IRC
class IRCBot(Bot):
    """
    Bot for relaying between an Evennia channel and an IRC channel.
    """
    def start(self, ev_channel=None, irc_botname=None, irc_channel=None, irc_network=None, irc_port=None):
        """
        Start by telling the portal to start a new session.

        ev_channel - key of the Evennia channel to connect to
        irc_botname - name of bot to connect to irc channel. If not set, use self.key
        irc_channel - name of channel on the form #channelname
        irc_network - url of network, like irc.freenode.net
        irc_port - port number of irc network, like 6667
        """
        global _SESSIONS
        if not _SESSIONS:
            from evennia.server.sessionhandler import SESSIONS as _SESSIONS
        # if keywords are given, store (the BotStarter script
        # will not give any keywords, so this should normally only
        # happen at initialization)
        if irc_botname:
            self.db.irc_botname = irc_botname
        elif not self.db.irc_botname:
            self.db.irc_botname = self.key
        if ev_channel:
            # connect to Evennia channel
            channel = search.channel_search(ev_channel)
            if not channel:
                raise RuntimeError("Evennia Channel '%s' not found." % ev_channel)
            channel = channel[0]
            channel.connect(self)
            self.db.ev_channel = channel
        if irc_channel:
            self.db.irc_channel = irc_channel
        if irc_network:
            self.db.irc_network = irc_network
        if irc_port:
            self.db.irc_port = irc_port
        # instruct the server and portal to create a new session with
        # the stored configuration
        configdict = {"uid": self.dbid,
                      "botname": self.db.irc_botname,
                      "channel": self.db.irc_channel,
                      "network": self.db.irc_network,
                      "port": self.db.irc_port}
        _SESSIONS.start_bot_session("evennia.server.portal.irc.IRCBotFactory", configdict)
    def msg(self, text=None, **kwargs):
        """
        Takes text from connected channel (only) and relays it outwards.
        """
        if not self.ndb.ev_channel and self.db.ev_channel:
            # cache channel lookup
            self.ndb.ev_channel = self.db.ev_channel
        # fix: guard self.ndb.ev_channel before dereferencing .dbid -
        # the original raised AttributeError when no channel was configured
        if ("from_channel" in kwargs and text and self.ndb.ev_channel
                and self.ndb.ev_channel.dbid == kwargs["from_channel"]):
            # don't echo messages that came from the bot itself
            if "from_obj" not in kwargs or kwargs["from_obj"] != [self.id]:
                text = "bot_data_out %s" % text
                super(IRCBot, self).msg(text=text)
    def execute_cmd(self, text=None, sessid=None):
        """
        Take incoming data and send it to connected channel. This is triggered
        by the CmdListen command in the BotCmdSet.
        """
        if not self.ndb.ev_channel and self.db.ev_channel:
            # cache channel lookup
            self.ndb.ev_channel = self.db.ev_channel
        if self.ndb.ev_channel:
            self.ndb.ev_channel.msg(text, senders=self.id)
# RSS
class RSSBot(Bot):
    """
    An RSS relayer. The RSS protocol itself runs a ticker to update its feed at regular
    intervals.
    """
    def start(self, ev_channel=None, rss_url=None, rss_rate=None):
        """
        Start by telling the portal to start a new RSS session

        ev_channel - key of the Evennia channel to connect to
        rss_url - full URL to the RSS feed to subscribe to
        rss_rate - how often for the feedreader to update
        """
        global _SESSIONS
        if not _SESSIONS:
            # delayed import; binds the module-level _SESSIONS cache
            from evennia.server.sessionhandler import SESSIONS as _SESSIONS
        if ev_channel:
            # connect to Evennia channel
            channel = search.channel_search(ev_channel)
            if not channel:
                raise RuntimeError("Evennia Channel '%s' not found." % ev_channel)
            channel = channel[0]
            # NOTE(review): unlike IRCBot/IMC2Bot there is no
            # channel.connect(self) here - confirm whether intended
            self.db.ev_channel = channel
        if rss_url:
            self.db.rss_url = rss_url
        if rss_rate:
            self.db.rss_rate = rss_rate
        # instruct the server and portal to create a new session with
        # the stored configuration
        configdict = {"uid": self.dbid,
                      "url": self.db.rss_url,
                      "rate": self.db.rss_rate}
        _SESSIONS.start_bot_session("evennia.server.portal.rss.RSSBotFactory", configdict)
    def execute_cmd(self, text=None, sessid=None):
        """
        Echo RSS input to connected channel
        """
        # leftover Python 2 debug print
        print "execute_cmd rss:", text
        if not self.ndb.ev_channel and self.db.ev_channel:
            # cache channel lookup
            self.ndb.ev_channel = self.db.ev_channel
        if self.ndb.ev_channel:
            self.ndb.ev_channel.msg(text, senders=self.id)
# IMC2
class IMC2Bot(Bot):
    """
    Bot relaying between an Evennia channel and an IMC2 network.
    """
    def start(self, ev_channel=None, imc2_network=None, imc2_mudname=None,
              imc2_port=None, imc2_client_pwd=None, imc2_server_pwd=None):
        """
        Start by telling the portal to start a new session.

        ev_channel - key of the Evennia channel to connect to
        imc2_network - IMC2 network name
        imc2_mudname - registered mudname (if not given, use settings.SERVERNAME)
        imc2_port - port number of IMC2 network
        imc2_client_pwd - client password registered with IMC2 network
        imc2_server_pwd - server password registered with IMC2 network
        """
        global _SESSIONS
        if not _SESSIONS:
            from evennia.server.sessionhandler import SESSIONS as _SESSIONS
        if ev_channel:
            # connect to Evennia channel
            channel = search.channel_search(ev_channel)
            if not channel:
                raise RuntimeError("Evennia Channel '%s' not found." % ev_channel)
            channel = channel[0]
            channel.connect(self)
            self.db.ev_channel = channel
        if imc2_network:
            self.db.imc2_network = imc2_network
        if imc2_port:
            self.db.imc2_port = imc2_port
        if imc2_mudname:
            self.db.imc2_mudname = imc2_mudname
        elif not self.db.imc2_mudname:
            self.db.imc2_mudname = settings.SERVERNAME
        # storing imc2 passwords in attributes - a possible
        # security issue?
        if imc2_server_pwd:
            self.db.imc2_server_pwd = imc2_server_pwd
        if imc2_client_pwd:
            self.db.imc2_client_pwd = imc2_client_pwd
        # fix: read back the same attribute names the passwords were stored
        # under (the original read self.db.client_pwd/server_pwd, which are
        # never assigned above and would always be unset)
        configdict = {"uid": self.dbid,
                      "mudname": self.db.imc2_mudname,
                      "network": self.db.imc2_network,
                      "port": self.db.imc2_port,
                      "client_pwd": self.db.imc2_client_pwd,
                      "server_pwd": self.db.imc2_server_pwd}
        _SESSIONS.start_bot_session("evennia.server.portal.imc2.IMC2BotFactory", configdict)
    def msg(self, text=None, **kwargs):
        """
        Takes text from connected channel (only) and relays it outwards.
        """
        if not self.ndb.ev_channel and self.db.ev_channel:
            # cache channel lookup
            self.ndb.ev_channel = self.db.ev_channel
        # fix: guard self.ndb.ev_channel before dereferencing .dbid
        if ("from_channel" in kwargs and text and self.ndb.ev_channel
                and self.ndb.ev_channel.dbid == kwargs["from_channel"]):
            if "from_obj" not in kwargs or kwargs["from_obj"] != [self.id]:
                text = "bot_data_out %s" % text
                # fix: the original called self.msg(text=text) here, which
                # recurses into this very method forever - route to parent
                # as IRCBot does
                super(IMC2Bot, self).msg(text=text)
    def execute_cmd(self, text=None, sessid=None):
        """
        Relay incoming data to connected channel.
        """
        if not self.ndb.ev_channel and self.db.ev_channel:
            # cache channel lookup
            self.ndb.ev_channel = self.db.ev_channel
        if self.ndb.ev_channel:
            self.ndb.ev_channel.msg(text, senders=self.id)
| |
'''
Created on 2016/3/31
:author: hubo
'''
from __future__ import print_function, absolute_import, division
from vlcp.utils.dataobject import DataObject, DataObjectSet, updater, DataObjectUpdateEvent,\
multiwaitif, dump, set_new, ReferenceObject, request_context, Relationship
from vlcp.server.module import depend, Module, call_api, ModuleLoadStateChanged,\
api
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.config.config import defaultconfig
from vlcp.event.runnable import RoutineContainer, RoutineException
from uuid import uuid1
from vlcp.server import main
import logging
from functools import partial
class PhysicalNetwork(DataObject):
    """Persistent record of a physical network, keyed by ``id``."""
    _prefix = 'vlcptest.physicalnetwork'
    _indices = ('id',)
class LogicalNetwork(DataObject):
    """Persistent record of a logical network, keyed by ``id``."""
    _prefix = 'vlcptest.logicalnetwork'
    _indices = ('id',)
class PhysicalNetworkMap(DataObject):
    """Bookkeeping companion of a PhysicalNetwork: tracks the logical
    networks, segment allocations and ports living on it."""
    _prefix = 'vlcptest.physicalnetworkmap'
    _indices = ('id',)
    def __init__(self, prefix=None, deleted=False):
        DataObject.__init__(self, prefix=prefix, deleted=deleted)
        # weak references to the logical networks on this physical network
        self.networks = DataObjectSet()
        # segment id (e.g. str(vlanid) or 'native') -> network weak reference
        self.network_allocation = dict()
        # ports attached to this physical network
        self.ports = DataObjectSet()
# one-to-one link: the map shares its 'id' with its PhysicalNetwork
PhysicalNetworkMap._network = Relationship(PhysicalNetworkMap, PhysicalNetwork, ('id', 'id'))
class LogicalNetworkMap(DataObject):
    """Bookkeeping companion of a LogicalNetwork: tracks its ports."""
    _prefix = 'vlcptest.logicalnetworkmap'
    _indices = ('id',)
    def __init__(self, prefix=None, deleted=False):
        DataObject.__init__(self, prefix=prefix, deleted=deleted)
        # ports attached to this logical network
        self.ports = DataObjectSet()
# one-to-one link: the map shares its 'id' with its LogicalNetwork
LogicalNetworkMap._network = Relationship(LogicalNetworkMap, LogicalNetwork, ('id', 'id'))
class PhysicalPort(DataObject):
    """Physical port, keyed by (systemid, bridge, name)."""
    _prefix = 'vlcptest.physicalport'
    _indices = ('systemid', 'bridge', 'name')
class LogicalPort(DataObject):
    """Logical port, keyed by ``id``."""
    _prefix = 'vlcptest.logicalport'
    _indices = ('id',)
class PhysicalNetworkSet(DataObject):
    """Singleton set object listing all physical networks."""
    _prefix = 'vlcptest.physicalnetworkset'
class LogicalNetworkSet(DataObject):
    """Singleton set object listing all logical networks."""
    _prefix = 'vlcptest.logicalnetworkset'
class LogicalPortSet(DataObject):
    """Singleton set object listing all logical ports."""
    _prefix = 'vlcptest.logicalportset'
class PhysicalPortSet(DataObject):
    """Singleton set object listing all physical ports."""
    _prefix = 'vlcptest.physicalportset'
@defaultconfig
@depend(objectdb.ObjectDB)
class TestObjectDB(Module):
    def __init__(self, server):
        """Create the API routine container and register the module APIs."""
        Module.__init__(self, server)
        self.apiroutine = RoutineContainer(self.scheduler)
        self.apiroutine.main = self._main
        self.routines.append(self.apiroutine)
        # monotonically increasing counter used to build objectdb request ids
        self._reqid = 0
        # unique id marking ports owned by this module instance
        self._ownerid = uuid1().hex
        self.createAPI(api(self.createlogicalnetwork, self.apiroutine),
                       api(self.createlogicalnetworks, self.apiroutine),
                       api(self.createphysicalnetwork, self.apiroutine),
                       api(self.createphysicalnetworks, self.apiroutine),
                       api(self.createphysicalport, self.apiroutine),
                       api(self.createphysicalports, self.apiroutine),
                       api(self.createlogicalport, self.apiroutine),
                       api(self.createlogicalports, self.apiroutine),
                       api(self.getlogicalnetworks, self.apiroutine))
        self._logger.setLevel(logging.DEBUG)
async def _monitor(self):
update_event = DataObjectUpdateEvent.createMatcher()
while True:
ev = await update_event
self._logger.info('Database update: %r', ev)
async def _dumpkeys(self, keys):
self._reqid += 1
reqid = ('testobjectdb', self._reqid)
with request_context(reqid, self.apiroutine):
retobjs = await call_api(self.apiroutine, 'objectdb', 'mget', {'keys': keys, 'requestid': reqid})
return [dump(v) for v in retobjs]
    async def _updateport(self, key):
        """
        Try to take ownership of the port stored under *key*, then log
        every update to it until it is deleted or the module unloads.
        """
        # abort the whole watch when this module starts unloading
        unload_matcher = ModuleLoadStateChanged.createMatcher(self.target, ModuleLoadStateChanged.UNLOADING)
        async def updateinner():
            self._reqid += 1
            reqid = ('testobjectdb', self._reqid)
            with request_context(reqid, self.apiroutine):
                portobj = await call_api(self.apiroutine, 'objectdb', 'get', {'key': key, 'requestid': reqid})
                if portobj is not None:
                    @updater
                    def write_status(portobj):
                        # transact updater: claim the port if nobody owns it yet
                        if portobj is None:
                            raise ValueError('Already deleted')
                        if not hasattr(portobj, 'owner'):
                            portobj.owner = self._ownerid
                            portobj.status = 'READY'
                            return [portobj]
                        else:
                            raise ValueError('Already managed')
                    try:
                        await call_api(self.apiroutine, 'objectdb', 'transact', {'keys': [portobj.getkey()], 'updater': write_status})
                    except ValueError:
                        # port vanished or is owned by someone else - give up
                        pass
                    else:
                        # wait until our ownership write is visible locally
                        await portobj.waitif(self.apiroutine, lambda x: x.isdeleted() or hasattr(x, 'owner'))
                        self._logger.info('Port managed: %r', dump(portobj))
                        while True:
                            # block until the next change of this object
                            await portobj.waitif(self.apiroutine, lambda x: True, True)
                            if portobj.isdeleted():
                                self._logger.info('Port deleted: %r', dump(portobj))
                                break
                            else:
                                self._logger.info('Port updated: %r', dump(portobj))
        try:
            await self.apiroutine.withException(updateinner(), unload_matcher)
        except RoutineException:
            # module unloading - stop silently
            pass
    async def _waitforchange(self, key):
        """
        Watch the port-set stored under *key* and spawn _updateport for
        every port that newly appears in the set.
        """
        with request_context('testobjectdb', self.apiroutine):
            setobj = await call_api(self.apiroutine, 'objectdb', 'watch', {'key': key, 'requestid': 'testobjectdb'})
            # wait until the watched object is first retrieved
            await setobj.wait(self.apiroutine)
            oldset = set()
            while True:
                # only ports not seen in the previous round
                for weakref in setobj.set.dataset().difference(oldset):
                    self.apiroutine.subroutine(self._updateport(weakref.getkey()))
                oldset = set(setobj.set.dataset())
                # wake up on the next update of the (still existing) set
                await setobj.waitif(self.apiroutine, lambda x: not x.isdeleted(), True)
async def _main(self):
routines = []
routines.append(self._monitor())
keys = [LogicalPortSet.default_key(), PhysicalPortSet.default_key()]
for k in keys:
routines.append(self._waitforchange(k))
await self.apiroutine.execute_all(routines)
    async def load(self, container):
        """Ensure the four singleton set objects exist, then finish the
        normal module load."""
        @updater
        def initialize(phynetset, lognetset, logportset, phyportset):
            # create any set object that is not yet in the database
            if phynetset is None:
                phynetset = PhysicalNetworkSet()
                phynetset.set = DataObjectSet()
            if lognetset is None:
                lognetset = LogicalNetworkSet()
                lognetset.set = DataObjectSet()
            if logportset is None:
                logportset = LogicalPortSet()
                logportset.set = DataObjectSet()
            if phyportset is None:
                phyportset = PhysicalPortSet()
                phyportset.set = DataObjectSet()
            return [phynetset, lognetset, logportset, phyportset]
        await call_api(container, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key(),
                                                                   LogicalNetworkSet.default_key(),
                                                                   LogicalPortSet.default_key(),
                                                                   PhysicalPortSet.default_key()],
                                                           'updater': initialize})
        await Module.load(self, container)
    async def createphysicalnetwork(self, type = 'vlan', id = None, **kwargs):
        """API: create one physical network; returns the dumped dict."""
        new_network, new_map = self._createphysicalnetwork(type, id, **kwargs)
        @updater
        def create_phy(physet, phynet, phymap):
            # transact updater: store network + map and register in the set
            phynet = set_new(phynet, new_network)
            phymap = set_new(phymap, new_map)
            physet.set.dataset().add(phynet.create_weakreference())
            return [physet, phynet, phymap]
        await call_api(self.apiroutine, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key(),
                                                                         new_network.getkey(),
                                                                         new_map.getkey()],'updater':create_phy})
        return (await self._dumpkeys([new_network.getkey()]))[0]
    async def createphysicalnetworks(self, networks):
        """API: create several physical networks in one transaction;
        returns the dumped dicts in input order."""
        new_networks = [self._createphysicalnetwork(**n) for n in networks]
        @updater
        def create_phys(physet, *phynets):
            # phynets interleaves (network, map) pairs matching `keys` below
            return_nets = [None, None] * len(new_networks)
            for i in range(0, len(new_networks)):
                return_nets[i * 2] = set_new(phynets[i * 2], new_networks[i][0])
                return_nets[i * 2 + 1] = set_new(phynets[i * 2 + 1], new_networks[i][1])
                physet.set.dataset().add(new_networks[i][0].create_weakreference())
            return [physet] + return_nets
        keys = [sn.getkey() for n in new_networks for sn in n]
        await call_api(self.apiroutine, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key()] + keys,'updater':create_phys})
        return await self._dumpkeys([n[0].getkey() for n in new_networks])
def _createlogicalnetwork(self, physicalnetwork, id = None, **kwargs):
if not id:
id = str(uuid1())
new_network = LogicalNetwork.create_instance(id)
for k,v in kwargs.items():
setattr(new_network, k, v)
new_network.physicalnetwork = ReferenceObject(PhysicalNetwork.default_key(physicalnetwork))
new_networkmap = LogicalNetworkMap.create_instance(id)
new_networkmap.network = new_network.create_reference()
return new_network,new_networkmap
async def createlogicalnetworks(self, networks):
    """Create multiple logical networks in one objectdb transaction.

    Each dict in *networks* is passed to ``_createlogicalnetwork``
    (``physicalnetwork``, optional ``id``, extra attributes).  For VLAN-typed
    physical networks a VLAN ID is validated (if given) or allocated (if not);
    for other types the physical network is claimed exclusively.

    :return: dumped values of the created LogicalNetwork objects
    :raise ValueError: if a physical network does not exist, a VLAN ID is
                       invalid / out of range / already taken, or a non-VLAN
                       physical network is already occupied
    """
    new_networks = [self._createlogicalnetwork(**n) for n in networks]
    # Unique physical networks referenced by the new logical networks,
    # plus the keys of their map objects (same order as physical_networks).
    physical_networks = list(set(n[0].physicalnetwork.getkey() for n in new_networks))
    physical_maps = [PhysicalNetworkMap.default_key(PhysicalNetwork._getIndices(k)[1][0]) for k in physical_networks]
    @updater
    def create_logs(logset, *networks):
        # Layout of *networks* mirrors the key list passed to 'transact':
        # (net, map) pairs for the new logical networks, then the physical
        # maps, then the physical networks themselves.
        phy_maps = list(networks[len(new_networks) * 2 : len(new_networks) * 2 + len(physical_networks)])
        phy_nets = list(networks[len(new_networks) * 2 + len(physical_networks):])
        phy_dict = dict(zip(physical_networks, zip(phy_nets, phy_maps)))
        return_nets = [None, None] * len(new_networks)
        for i in range(0, len(new_networks)):
            return_nets[2 * i] = set_new(networks[2 * i], new_networks[i][0])
            return_nets[2 * i + 1] = set_new(networks[2 * i + 1], new_networks[i][1])
        for n in return_nets[::2]:
            phynet, phymap = phy_dict.get(n.physicalnetwork.getkey())
            if phynet is None:
                _, (phyid,) = PhysicalNetwork._getIndices(n.physicalnetwork.getkey())
                raise ValueError('Physical network %r does not exist' % (phyid,))
            else:
                if phynet.type == 'vlan':
                    if hasattr(n, 'vlanid'):
                        n.vlanid = int(n.vlanid)
                        # Valid VLAN IDs are 1-4094 (0 and 4095 are reserved)
                        if n.vlanid <= 0 or n.vlanid >= 4095:
                            raise ValueError('Invalid VLAN ID')
                        # VLAN id is specified
                        if str(n.vlanid) in phymap.network_allocation:
                            raise ValueError('VLAN ID %r is already allocated in physical network %r' % (n.vlanid,phynet.id))
                        else:
                            for start,end in phynet.vlanrange:
                                if start <= n.vlanid <= end:
                                    break
                            else:
                                raise ValueError('VLAN ID %r is not in vlan range of physical network %r' % (n.vlanid,phynet.id))
                        phymap.network_allocation[str(n.vlanid)] = n.create_weakreference()
                    else:
                        # Allocate a new VLAN id: first free id in the ranges
                        for start,end in phynet.vlanrange:
                            for vlanid in range(start, end + 1):
                                if str(vlanid) not in phymap.network_allocation:
                                    break
                            else:
                                continue
                            break
                        else:
                            raise ValueError('Not enough VLAN ID to be allocated in physical network %r' % (phynet.id,))
                        n.vlanid = vlanid
                        phymap.network_allocation[str(vlanid)] = n.create_weakreference()
                else:
                    if phymap.network_allocation:
                        # FIX: the tuple was passed as a second positional
                        # argument (logging-style) instead of %-formatting,
                        # so the message was never interpolated.
                        raise ValueError('Physical network %r is already allocated by another logical network' % (phynet.id,))
                    phymap.network_allocation['native'] = n.create_weakreference()
            phymap.networks.dataset().add(n.create_weakreference())
            logset.set.dataset().add(n.create_weakreference())
        return [logset] + return_nets + phy_maps
    await call_api(self.apiroutine, 'objectdb', 'transact', {'keys': [LogicalNetworkSet.default_key()] +\
                                                            [sn.getkey() for n in new_networks for sn in n] +\
                                                            physical_maps +\
                                                            physical_networks,
                                                            'updater': create_logs})
    return await self._dumpkeys([n[0].getkey() for n in new_networks])
async def createlogicalnetwork(self, physicalnetwork, id = None, **kwargs):
    """Create a single logical network; thin wrapper over the batch API.

    Extra keyword arguments override the explicit values, matching the
    batch-call dict semantics.
    """
    descriptor = dict({'physicalnetwork': physicalnetwork, 'id': id}, **kwargs)
    return (await self.createlogicalnetworks([descriptor]))[0]
def _createphysicalnetwork(self, type = 'vlan', id = None, **kwargs):
    """Build a (PhysicalNetwork, PhysicalNetworkMap) pair in memory.

    Nothing is persisted here; the caller stores the objects in a transaction.

    :param type: 'vlan' (requires a ``vlanrange`` kwarg) — any other value is
                 normalized to 'native'
    :param id: optional explicit id; autogenerated when falsy
    :raise ValueError: if ``vlanrange`` is missing, malformed, reversed,
                       overlapping/disordered, or out of the 1-4094 range
    """
    if not id:
        id = str(uuid1())
    if type == 'vlan':
        if 'vlanrange' not in kwargs:
            raise ValueError('Must specify vlanrange with network type="vlan"')
        vlanrange = kwargs['vlanrange']
        # Validate: ranges must be (start, end) pairs, ascending, disjoint,
        # and within the valid VLAN ID space (1-4094).
        try:
            lastend = 0
            for start, end in vlanrange:
                # FIX: a reversed range (start > end) used to pass validation
                # silently but could never be allocated from.
                if start > end:
                    raise ValueError('VLAN range (%r, %r) is reversed' % (start, end))
                if start <= lastend:
                    raise ValueError('VLAN sequences overlapped or disordered')
                lastend = end
            if lastend >= 4095:
                raise ValueError('VLAN ID out of range')
        except Exception as exc:
            raise ValueError('vlanrange format error: %s' % (str(exc),))
    else:
        type = 'native'
    new_network = PhysicalNetwork.create_instance(id)
    new_network.type = type
    for k,v in kwargs.items():
        setattr(new_network, k, v)
    new_networkmap = PhysicalNetworkMap.create_instance(id)
    new_networkmap.network = new_network.create_reference()
    return (new_network, new_networkmap)
async def createphysicalport(self, physicalnetwork, name, systemid = '%', bridge = '%', **kwargs):
    """Create a single physical port; thin wrapper over the batch API.

    Extra keyword arguments override the explicit values, matching the
    batch-call dict semantics.  '%' acts as a wildcard for systemid/bridge.
    """
    descriptor = dict({'physicalnetwork': physicalnetwork, 'name': name,
                       'systemid': systemid, 'bridge': bridge}, **kwargs)
    return (await self.createphysicalports([descriptor]))[0]
def _createphysicalport(self, physicalnetwork, name, systemid = '%', bridge = '%', **kwargs):
    """Build a PhysicalPort data object in memory (not persisted here)."""
    port = PhysicalPort.create_instance(systemid, bridge, name)
    port.physicalnetwork = ReferenceObject(PhysicalNetwork.default_key(physicalnetwork))
    # Extra attributes are applied last, so a same-named kwarg may override
    # the reference — same precedence as the original implementation.
    for attr, value in kwargs.items():
        setattr(port, attr, value)
    return port
async def createphysicalports(self, ports):
    """Create multiple physical ports with an objectdb 'writewalk'.

    :param ports: sequence of dicts, each passed as keyword arguments to
                  ``_createphysicalport``
    :return: dumped values of the created PhysicalPort objects, in order

    NOTE(review): inside the walker, ``walk`` raising KeyError appears to mean
    "value not retrieved yet" and the corresponding work is skipped — the
    writewalk machinery presumably re-runs the walker once the key is
    available; verify against the objectdb service documentation.
    """
    new_ports = [self._createphysicalport(**p) for p in ports]
    physical_networks = list(set([p.physicalnetwork.getkey() for p in new_ports]))
    physical_maps = [PhysicalNetworkMap.default_key(*PhysicalNetwork._getIndices(k)[1]) for k in physical_networks]
    def _walker(walk, write):
        for p, port in zip(new_ports, ports):
            key = p.getkey()
            try:
                value = walk(key)
            except KeyError:
                pass
            else:
                # Rebuild the port from the original dict on each walker run
                new_port = self._createphysicalport(**port)
                value = set_new(value, new_port)
                try:
                    phynet = walk(new_port.physicalnetwork.getkey())
                except KeyError:
                    pass
                else:
                    # A retrieved-but-None value means the key truly does not exist
                    if phynet is None:
                        _, (phyid,) = PhysicalNetwork._getIndices(p.physicalnetwork.getkey())
                        raise ValueError('Physical network %r does not exist' % (phyid,))
                write(key, value)
                try:
                    phymap = walk(PhysicalNetworkMap._network.leftkey(new_port.physicalnetwork))
                except KeyError:
                    pass
                else:
                    if phymap is not None:
                        # Register the port on its physical network's map
                        phymap.ports.dataset().add(value.create_weakreference())
                        write(phymap.getkey(), phymap)
                try:
                    portset = walk(PhysicalPortSet.default_key())
                except KeyError:
                    pass
                else:
                    # Register the port in the global port set
                    portset.set.dataset().add(value.create_weakreference())
                    write(portset.getkey(), portset)
    await call_api(self.apiroutine, 'objectdb', 'writewalk', {'keys': set([PhysicalPortSet.default_key()] +\
                                                              [p.getkey() for p in new_ports] +\
                                                              physical_maps +\
                                                              physical_networks),
                                                              'walker': _walker})
    return await self._dumpkeys([p.getkey() for p in new_ports])
async def createlogicalport(self, logicalnetwork, id = None, **kwargs):
    """Create a single logical port; thin wrapper over the batch API.

    Extra keyword arguments override the explicit values, matching the
    batch-call dict semantics.
    """
    descriptor = dict({'logicalnetwork': logicalnetwork, 'id': id}, **kwargs)
    return (await self.createlogicalports([descriptor]))[0]
def _createlogicalport(self, logicalnetwork, id = None, **kwargs):
    """Build a LogicalPort data object in memory (not persisted here).

    :param logicalnetwork: id of the owning logical network
    :param id: optional explicit id; autogenerated when falsy
    """
    if not id:
        id = str(uuid1())
    port = LogicalPort.create_instance(id)
    port.logicalnetwork = ReferenceObject(LogicalNetwork.default_key(logicalnetwork))
    # Extra attributes applied last — a same-named kwarg may override the
    # reference, same precedence as the original implementation.
    for attr, value in kwargs.items():
        setattr(port, attr, value)
    return port
async def createlogicalports(self, ports):
    """Create multiple logical ports with an objectdb 'writewalk'.

    :param ports: sequence of dicts, each passed as keyword arguments to
                  ``_createlogicalport``
    :return: dumped values of the created LogicalPort objects, in order

    NOTE(review): as in ``createphysicalports``, a KeyError from ``walk``
    appears to mean "value not retrieved yet" and is deliberately skipped —
    presumably the walker is re-run by the writewalk machinery; confirm.
    """
    new_ports = [self._createlogicalport(**p) for p in ports]
    def _walker(walk, write):
        for p in new_ports:
            key = p.getkey()
            try:
                value = walk(key)
            except KeyError:
                pass
            else:
                value = set_new(value, p)
                try:
                    lognet = walk(value.logicalnetwork.getkey())
                except KeyError:
                    pass
                else:
                    # A retrieved-but-None value means the network does not exist
                    if lognet is None:
                        _, (logid,) = LogicalNetwork._getIndices(value.logicalnetwork.getkey())
                        raise ValueError("Logical network %r does not exist" % (logid,))
                    try:
                        logmap = walk(LogicalNetworkMap._network.leftkey(value.logicalnetwork))
                    except KeyError:
                        pass
                    else:
                        if logmap is not None:
                            # Register the port on its logical network's map
                            logmap.ports.dataset().add(value.create_weakreference())
                            write(key, value)
                            write(logmap.getkey(), logmap)
                try:
                    portset = walk(LogicalPortSet.default_key())
                except KeyError:
                    pass
                else:
                    # Register the port in the global port set
                    portset.set.dataset().add(value.create_weakreference())
                    write(portset.getkey(), portset)
    # Collect every key the walker may touch
    keys = set()
    keys.update(p.getkey() for p in new_ports)
    keys.update(p.logicalnetwork.getkey() for p in new_ports)
    keys.update(LogicalNetworkMap._network.leftkey(p.logicalnetwork)
                for p in new_ports)
    keys.add(LogicalPortSet.default_key())
    await call_api(self.apiroutine, 'objectdb', 'writewalk', {'keys': keys,
                                                              'walker': _walker})
    return await self._dumpkeys([p.getkey() for p in new_ports])
async def getlogicalnetworks(self, id = None, physicalnetwork = None, **kwargs):
    """Query logical networks, optionally filtered by id, physical network,
    and arbitrary attribute equality given as extra keyword arguments.

    :param id: when given, look up exactly this logical network
    :param physicalnetwork: when given (and id is not), list the networks of
                            this physical network
    :return: list of dumped logical network values (possibly empty)
    """
    # Walker over a *set* object: visit each referenced network and save the
    # ones whose attributes match every kwargs filter.
    def set_walker(key, set, walk, save):
        if set is None:
            return
        for o in set.dataset():
            key = o.getkey()
            try:
                net = walk(key)
            except KeyError:
                # Value not retrieved yet; skip (walk presumably re-runs)
                pass
            else:
                for k,v in kwargs.items():
                    if getattr(net, k, None) != v:
                        break
                else:
                    save(key)
    # Adapt set_walker to a walker over an object containing a set,
    # extracted by set_func.
    def walker_func(set_func):
        def walker(key, obj, walk, save):
            if obj is None:
                return
            set_walker(key, set_func(obj), walk, save)
        return walker
    if id is not None:
        # Direct lookup by key; filters still apply
        self._reqid += 1
        reqid = ('testobjectdb', self._reqid)
        with request_context(reqid, self.apiroutine):
            result = await call_api(self.apiroutine, 'objectdb', 'get', {'key' : LogicalNetwork.default_key(id), 'requestid': reqid})
            if result is None:
                return []
            if physicalnetwork is not None and physicalnetwork != result.physicalnetwork.id:
                return []
            for k,v in kwargs.items():
                if getattr(result, k, None) != v:
                    return []
            return [dump(result)]
    elif physicalnetwork is not None:
        # Walk the physical network's map and filter its logical networks
        self._reqid += 1
        reqid = ('testobjectdb', self._reqid)
        pm_key = PhysicalNetworkMap.default_key(physicalnetwork)
        with request_context(reqid, self.apiroutine):
            keys, result = await call_api(self.apiroutine, 'objectdb', 'walk', {'keys': [pm_key],
                                                                                'walkerdict': {pm_key: walker_func(lambda x: x.networks)},
                                                                                'requestid': reqid})
            return [dump(r) for r in result]
    else:
        # Walk the global logical network set
        self._reqid += 1
        reqid = ('testobjectdb', self._reqid)
        ns_key = LogicalNetworkSet.default_key()
        with request_context(reqid, self.apiroutine):
            keys, result = await call_api(self.apiroutine, 'objectdb', 'walk', {'keys': [ns_key],
                                                                                'walkerdict': {ns_key: walker_func(lambda x: x.set)},
                                                                                'requestid': reqid})
            return [dump(r) for r in result]
if __name__ == '__main__':
    # Launch the VLCP server with this module's TestObjectDB service plus the
    # management web API, reading configuration from /etc/vlcp.conf.
    main("/etc/vlcp.conf", ("__main__.TestObjectDB", "vlcp.service.manage.webapi.WebAPI"))
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
  """An estimator for TensorFlow Linear and DNN joined training models.

  Shared base for the classifier and regressor below: builds a wide (linear)
  logit and/or a deep (DNN) logit, sums them, and adds a separately-trained
  centered bias.  Subclasses define the loss and the logits-to-predictions
  mapping.

  Input of `fit`, `train`, and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.

  Parameters:
    model_dir: Directory to save model parameters, graph and etc.
    n_classes: number of target classes. Default is binary classification.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    linear_feature_columns: An iterable containing all the feature columns used
      by linear part of the model. All items in the set should be instances of
      classes derived from `FeatureColumn`.
    linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the linear part of the model. If `None`, will use a FTRL optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      deep part of the model. All items in the set should be instances of
      classes derived from `FeatureColumn`.
    dnn_hidden_units: List of hidden units per layer. All layers are fully
      connected.
    dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
      deep part of the model. If `None`, will use an Adagrad optimizer.
    dnn_activation_fn: Activation function applied to each layer. If `None`,
      will use `tf.nn.relu`.
    dnn_dropout: When not None, the probability we will drop out
      a given coordinate.
    config: RunConfig object to configure the runtime settings.

  Raises:
    ValueError: If both linear_feature_columns and dnn_features_columns are
      empty at the same time.
  """

  def __init__(self,
               model_dir=None,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               config=None):
    super(_DNNLinearCombinedBaseEstimator, self).__init__(model_dir=model_dir,
                                                          config=config)
    self._weight_column_name = weight_column_name
    self._linear_feature_columns = linear_feature_columns
    self._linear_optimizer = linear_optimizer
    self._dnn_feature_columns = dnn_feature_columns
    self._dnn_optimizer = dnn_optimizer
    self._dnn_hidden_units = dnn_hidden_units
    self._dnn_activation_fn = dnn_activation_fn
    # An explicit None falls back to ReLU (matches the documented default)
    if self._dnn_activation_fn is None:
      self._dnn_activation_fn = nn.relu
    self._dnn_dropout = dnn_dropout
    # Graph-collection names used to keep linear / DNN / centered-bias
    # variables separate so each group gets its own optimizer.
    self._dnn_weight_collection = "DNNLinearCombined_dnn"
    self._linear_weight_collection = "DNNLinearCombined_linear"
    self._centered_bias_weight_collection = "centered_bias"

  @property
  def linear_weights_(self):
    """Returns weights per feature of the linear part."""
    all_variables = self.get_variable_names()
    # TODO(ispir): Figure out a better way to retrieve variables for features.
    # for example using feature info / columns.
    values = {}
    for name in all_variables:
      # Matches direct children of the "linear/" scope, excluding the bias
      if (name.startswith("linear/") and name.rfind("/") == 6 and
          name != "linear/bias_weight"):
        values[name] = self.get_variable_value(name)
    # With a single feature, return its weights directly instead of a dict
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values

  @property
  def linear_bias_(self):
    """Returns bias of the linear part (linear bias plus centered bias)."""
    return (self.get_variable_value("linear/bias_weight") +
            self.get_variable_value("centered_bias_weight"))

  @property
  def dnn_weights_(self):
    """Returns weights of deep neural network part."""
    return [self.get_variable_value("hiddenlayer_%d/weights" % i)
            for i, _ in enumerate(self._dnn_hidden_units)] + [
                self.get_variable_value("dnn_logit/weights")]

  @property
  def dnn_bias_(self):
    """Returns bias of deep neural network part."""
    return [self.get_variable_value("hiddenlayer_%d/bias" % i)
            for i, _ in enumerate(self._dnn_hidden_units)] + [
                self.get_variable_value("dnn_logit/bias"),
                self.get_variable_value("centered_bias_weight")]

  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = contrib_variables.get_global_step()
    assert global_step
    logits = self._logits(features, is_training=True)
    # The centered bias is trained by its own Adagrad step, which must run
    # before the main loss is computed on this iteration.
    with ops.control_dependencies([self._centered_bias_step(
        targets, self._get_weight_tensor(features))]):
      loss = self._loss(logits, targets, self._get_weight_tensor(features))
    logging_ops.scalar_summary("loss", loss)
    linear_vars = self._get_linear_vars()
    dnn_vars = self._get_dnn_vars()
    # One joint gradient computation, then split per variable group so each
    # group can be applied with its own optimizer.
    grads = gradients.gradients(loss, dnn_vars + linear_vars)
    dnn_grads = grads[0:len(dnn_vars)]
    linear_grads = grads[len(dnn_vars):]
    train_ops = self._get_linear_training_ops(
        linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads,
                                                                dnn_vars)
    train_step = control_flow_ops.group(*train_ops, name="combined_training_op")
    with ops.control_dependencies([train_step]):
      with ops.get_default_graph().colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op, loss

  def _run_metrics(self, predictions, targets, metrics, weights):
    # Evaluate each metric, passing weights only to metrics that accept them
    result = {}
    targets = math_ops.cast(targets, predictions.dtype)
    for name, metric in six.iteritems(metrics or {}):
      if "weights" in inspect.getargspec(metric)[0]:
        result[name] = metric(predictions, targets, weights=weights)
      else:
        result[name] = metric(predictions, targets)
    return result

  def _get_eval_ops(self, features, targets, metrics=None):
    # Subclass responsibility: loss + metric ops for evaluation
    raise NotImplementedError

  def _get_predict_ops(self, features):
    """See base class."""
    logits = self._logits(features)
    return self._logits_to_predictions(logits, proba=True)

  def _logits_to_predictions(self, logits, proba=False):
    # Subclass responsibility: map logits to class ids / probabilities / values
    raise NotImplementedError

  def _get_feature_ops_from_example(self, examples_batch):
    # Parse serialized tf.Examples using the spec derived from all columns
    column_types = layers.create_feature_spec_for_parsing((
        self._get_linear_feature_columns() or []) + (
            self._get_dnn_feature_columns() or []))
    features = parsing_ops.parse_example(examples_batch, column_types)
    return features

  def _num_label_columns(self):
    # Subclass responsibility: width of the logits/targets
    raise NotImplementedError

  def _get_linear_feature_columns(self):
    # De-duplicated, deterministically ordered columns (or None)
    return sorted(
        set(self._linear_feature_columns),
        key=lambda x: x.key) if self._linear_feature_columns else None

  def _get_dnn_feature_columns(self):
    return sorted(set(
        self._dnn_feature_columns)) if self._dnn_feature_columns else None

  def _dnn_logits(self, features, is_training=False):
    # Deep tower: input layer -> hidden layers (+ optional dropout) -> logits
    net = layers.input_from_feature_columns(
        features,
        self._get_dnn_feature_columns(),
        weight_collections=[self._dnn_weight_collection])
    for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
      net = layers.legacy_fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._dnn_activation_fn,
          weight_collections=[self._dnn_weight_collection],
          bias_collections=[self._dnn_weight_collection],
          name="hiddenlayer_%d" % layer_id)
      # Dropout only during training
      if self._dnn_dropout is not None and is_training:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - self._dnn_dropout))
      self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
    logit = layers.legacy_fully_connected(
        net,
        self._num_label_columns(),
        weight_collections=[self._dnn_weight_collection],
        bias_collections=[self._dnn_weight_collection],
        name="dnn_logit")
    self._add_hidden_layer_summary(logit, "dnn_logit")
    return logit

  def _add_hidden_layer_summary(self, value, tag):
    # TODO(zakaria): Move this code to tf.learn and add test.
    logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
                               nn.zero_fraction(value))
    logging_ops.histogram_summary("%s:activation" % tag, value)

  def _linear_logits(self, features):
    # Wide tower: weighted sum over the linear feature columns
    logits, _, _ = layers.weighted_sum_from_feature_columns(
        columns_to_tensors=features,
        feature_columns=self._get_linear_feature_columns(),
        num_outputs=self._num_label_columns(),
        weight_collections=[self._linear_weight_collection],
        name="linear")
    return logits

  def _get_feature_dict(self, features):
    # Normalize a bare Tensor into the dict form expected downstream
    if isinstance(features, dict):
      return features
    return {"": features}

  def _centered_bias(self):
    # One trainable bias per label column, tracked in its own collection
    centered_bias = variables.Variable(
        array_ops.zeros([self._num_label_columns()]),
        collections=[self._centered_bias_weight_collection,
                     ops.GraphKeys.VARIABLES],
        name="centered_bias_weight")
    logging_ops.scalar_summary(
        ["centered_bias_%d" % cb for cb in range(self._num_label_columns())],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias

  def _centered_bias_step(self, targets, weight_tensor):
    # Train the centered bias alone against the targets
    centered_bias = ops.get_collection(self._centered_bias_weight_collection)
    batch_size = array_ops.shape(targets)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias[0], [batch_size]),
        [batch_size, self._num_label_columns()])
    loss = self._loss(logits, targets, weight_tensor)
    # Learn central bias by an optimizer. 0.1 is a convervative lr for a single
    # variable.
    return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)

  def _logits(self, features, is_training=False):
    # Combined logits: linear + DNN (whichever parts are configured),
    # plus the centered bias.
    if not (self._get_linear_feature_columns() or
            self._get_dnn_feature_columns()):
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "should be defined.")
    features = self._get_feature_dict(features)
    if self._get_linear_feature_columns() and self._get_dnn_feature_columns():
      logits = (self._linear_logits(features) +
                self._dnn_logits(features, is_training=is_training))
    elif self._get_dnn_feature_columns():
      logits = self._dnn_logits(features, is_training=is_training)
    else:
      logits = self._linear_logits(features)
    return nn.bias_add(logits, self._centered_bias())

  def _get_weight_tensor(self, features):
    # Flattened per-example weights, or None when no weight column is set
    if not self._weight_column_name:
      return None
    else:
      return array_ops.reshape(
          math_ops.to_float(features[self._weight_column_name]),
          shape=(-1,))

  def _loss_vec(self, logits, targets):
    # Subclass responsibility: per-example loss vector
    raise NotImplementedError

  def _loss(self, logits, target, weight_tensor):
    # Scalar loss: plain mean, or weighted sum / sum-of-weights
    loss_vec = self._loss_vec(logits, target)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_vec, name="loss")
    else:
      loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
      loss_vec = math_ops.mul(
          loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
      return math_ops.div(
          math_ops.reduce_sum(loss_vec),
          math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
          name="loss")

  def _get_linear_vars(self):
    # Variables of the wide part (empty when it is not configured)
    if self._get_linear_feature_columns():
      return ops.get_collection(self._linear_weight_collection)
    return []

  def _get_linear_training_ops(self, linear_grads, linear_vars):
    if self._get_linear_feature_columns():
      # Default FTRL learning rate scales with the number of linear columns
      self._linear_optimizer = self._get_optimizer(
          self._linear_optimizer,
          default_optimizer="Ftrl",
          default_learning_rate=1. / math.sqrt(len(
              self._get_linear_feature_columns())))
      return [
          self._linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
      ]
    return []

  def _get_dnn_vars(self):
    # Variables of the deep part (empty when it is not configured)
    if self._get_dnn_feature_columns():
      return ops.get_collection(self._dnn_weight_collection)
    return []

  def _get_dnn_training_ops(self, dnn_grads, dnn_vars):
    if self._get_dnn_feature_columns():
      self._dnn_optimizer = self._get_optimizer(self._dnn_optimizer,
                                                default_optimizer="Adagrad",
                                                default_learning_rate=0.05)
      return [self._dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))]
    return []

  def _get_optimizer(self, optimizer, default_optimizer, default_learning_rate):
    # Accepts an optimizer instance, an optimizer-class name string, or None
    if optimizer is None:
      optimizer = default_optimizer
    if isinstance(optimizer, six.string_types):
      optimizer = layers.OPTIMIZER_CLS_NAMES[optimizer](
          learning_rate=default_learning_rate)
    return optimizer
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
  """A classifier for TensorFlow Linear and DNN joined training models.

  Example:
  ```
  installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
  impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)

  installed_x_impression = crossed_column(
      [installed_app_id, impression_app_id])

  installed_emb = embedding_column(installed_app_id, dimension=16,
                                   combiner="sum")
  impression_emb = embedding_column(impression_app_id, dimension=16,
                                    combiner="sum")

  estimator = DNNLinearCombinedClassifier(
      # common settings
      n_classes, weight_column_name,
      # wide settings
      linear_feature_columns=[installed_x_impression],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[installed_emb, impression_emb],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.AdagradOptimizer(...))

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.

  Parameters:
    model_dir: Directory to save model parameters, graph and etc.
    n_classes: number of target classes. Default is binary classification.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    linear_feature_columns: An iterable containing all the feature columns used
      by linear part of the model. All items in the set must be instances of
      classes derived from `FeatureColumn`.
    linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the linear part of the model. If `None`, will use a FTRL optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      deep part of the model. All items in the set must be instances of
      classes derived from `FeatureColumn`.
    dnn_hidden_units: List of hidden units per layer. All layers are fully
      connected.
    dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
      deep part of the model. If `None`, will use an Adagrad optimizer.
    dnn_activation_fn: Activation function applied to each layer. If `None`,
      will use `tf.nn.relu`.
    dnn_dropout: When not None, the probability we will drop out
      a given coordinate.
    config: RunConfig object to configure the runtime settings.

  Raises:
    ValueError: If both linear_feature_columns and dnn_features_columns are
      empty at the same time.
    ValueError: If n_classes < 2.
  """

  def __init__(self,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               config=None):
    # Validate before delegating the shared setup to the base class
    if n_classes < 2:
      raise ValueError("n_classes should be greater than 1. Given: {}".format(
          n_classes))
    self._n_classes = n_classes
    super(DNNLinearCombinedClassifier, self).__init__(
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
        dnn_hidden_units=dnn_hidden_units,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        config=config)

  def predict(self, x=None, input_fn=None, batch_size=None):
    """Returns predicted class ids for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.

    Returns:
      Numpy array of predicted class ids (argmax over class probabilities).
    """
    predictions = super(DNNLinearCombinedClassifier, self).predict(
        x=x, input_fn=input_fn, batch_size=batch_size)
    predictions = np.argmax(predictions, axis=1)
    return predictions

  def predict_proba(self, x=None, input_fn=None, batch_size=None):
    """Returns prediction probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.

    Returns:
      Numpy array of predicted probabilities.
    """
    return super(DNNLinearCombinedClassifier, self).predict(
        x=x, input_fn=input_fn, batch_size=batch_size)

  def _loss_vec(self, logits, target):
    # Binary: sigmoid cross-entropy; multi-class: sparse softmax cross-entropy
    if self._n_classes == 2:
      # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
      if len(target.get_shape()) == 1:
        target = array_ops.expand_dims(target, dim=[1])
      loss_vec = nn.sigmoid_cross_entropy_with_logits(
          logits, math_ops.to_float(target))
    else:
      # Check that we got int32/int64 for classification.
      if (not target.dtype.is_compatible_with(dtypes.int64) and
          not target.dtype.is_compatible_with(dtypes.int32)):
        raise ValueError("Target's dtype should be int32, int64 or compatible. "
                         "Instead got %s." % target.dtype)
      # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
      if len(target.get_shape()) == 2:
        target = array_ops.squeeze(target, squeeze_dims=[1])
      loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
          logits, target)
    return loss_vec

  def _logits_to_predictions(self, logits, proba=False):
    # For binary, expand the single logit to two columns [0, logit] so the
    # same softmax/argmax path works as for multi-class.
    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)

  def _num_label_columns(self):
    # Binary classification uses a single logit column
    return 1 if self._n_classes == 2 else self._n_classes

  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    logits = self._logits(features)
    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets,
        weight_tensor=self._get_weight_tensor(features)))}
    # Adding default metrics
    if metrics is None:
      metrics = {"accuracy": metrics_lib.streaming_accuracy}
    if self._n_classes == 2:
      predictions = math_ops.sigmoid(logits)
      result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)
    if metrics:
      predictions = self._logits_to_predictions(logits, proba=False)
      result.update(self._run_metrics(predictions, targets, metrics,
                                      self._get_weight_tensor(features)))
    return result
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_x_impression = crossed_column(
[installed_app_id, impression_app_id])
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedClassifier(
# common settings
n_classes, weight_column_name,
# wide settings
linear_feature_columns=[installed_x_impression],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[installed_emb, impression_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `RealValuedColumn, a feature with `key=column.name`
whose `value` is a `Tensor`.
Parameters:
model_dir: Directory to save model parameters, graph and etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns used
by linear part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the
deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_activation_fn: Activation function applied to each layer. If None, will
use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
config: RunConfig object to configure the runtime settings.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
def __init__(self,
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
config=None):
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
weight_column_name=weight_column_name,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
config=config)
def predict(self, x=None, input_fn=None, batch_size=None):
"""Returns predictions for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
Returns:
Numpy array of predicted classes or regression values.
"""
return super(DNNLinearCombinedRegressor, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size)
  def _loss_vec(self, logits, target):
    """Returns the per-example squared-error loss tensor for regression."""
    # To prevent broadcasting inside "-".
    if len(target.get_shape()) == 1:
      # Promote a rank-1 target to shape (batch, 1) to match the logits.
      target = array_ops.expand_dims(target, dim=[1])
    # Fail fast if logits and target shapes cannot line up element-wise.
    logits.get_shape().assert_is_compatible_with(target.get_shape())
    return math_ops.square(logits - math_ops.to_float(target))
  def _logits_to_predictions(self, logits, proba=False):
    """Converts logits to regression predictions.

    `proba` is unused here — presumably kept for signature parity with the
    classifier variant; verify against the base class before removing.
    """
    # TODO(ispir): Add target column support.
    if self._targets_info is None or len(self._targets_info.shape) == 1:
      # Single-output regression: drop the trailing size-1 dimension.
      return array_ops.squeeze(logits, squeeze_dims=[1])
    return logits
def _num_label_columns(self):
# TODO(ispir): Add target column support.
if self._targets_info is None or len(self._targets_info.shape) == 1:
return 1
return int(self._targets_info.shape[1])
  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    logits = self._logits(features)
    # Streaming mean of the (optionally weight-column-weighted) loss over
    # all evaluation batches.
    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets,
        weight_tensor=self._get_weight_tensor(features)))}
    # Adding default metrics
    if metrics:
      # User metrics are computed on predictions, not raw logits.
      predictions = self._logits_to_predictions(logits, proba=False)
      result.update(self._run_metrics(predictions, targets, metrics,
                                      self._get_weight_tensor(features)))
    return result
| |
from __future__ import division, print_function, absolute_import
import os
from distutils.version import LooseVersion
import functools
import numpy as np
from numpy.testing import assert_
import pytest
import scipy.special as sc
__all__ = ['with_special_errors', 'assert_tol_equal', 'assert_func_equal',
'FuncData']
#------------------------------------------------------------------------------
# Check if a module is present to be used in tests
#------------------------------------------------------------------------------
class MissingModule(object):
    """Placeholder standing in for an optional module that failed to import."""

    def __init__(self, name):
        # Remember the module name so skip messages can report it.
        self.name = name
def check_version(module, min_ver):
    """Return a pytest mark enforcing a minimum version for `module`.

    If `module` is a `MissingModule` placeholder, the mark skips the test
    unconditionally; otherwise it skips when the installed version is older
    than `min_ver`.
    """
    # isinstance, not `type(...) ==`: works for subclasses and is the
    # idiomatic type check.
    if isinstance(module, MissingModule):
        return pytest.mark.skip(reason="{} is not installed".format(module.name))
    return pytest.mark.skipif(LooseVersion(module.__version__) < LooseVersion(min_ver),
                              reason="{} version >= {} required".format(module.__name__, min_ver))
#------------------------------------------------------------------------------
# Enable convergence and loss of precision warnings -- turn off one by one
#------------------------------------------------------------------------------
def with_special_errors(func):
    """
    Enable special function errors (such as underflow, overflow,
    loss of precision, etc.)
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Run the wrapped test with all scipy.special error categories
        # promoted to raised exceptions.
        with sc.errstate(all='raise'):
            return func(*args, **kwargs)
    return wrapper
#------------------------------------------------------------------------------
# Comparing two values to within a given tolerance
#------------------------------------------------------------------------------
def assert_tol_equal(a, b, rtol=1e-7, atol=0, err_msg='', verbose=True):
    """Assert that `a` and `b` are equal to tolerance ``atol + rtol*abs(b)``.

    Raises AssertionError with a descriptive message on mismatch.
    """
    # The original went through np.testing.utils.assert_array_compare, a
    # private helper that no longer exists in modern NumPy; the public
    # assert_allclose implements the same atol + rtol*abs(b) criterion.
    # equal_nan=False preserves the old behavior (np.allclose's default),
    # where NaN != NaN.
    np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, equal_nan=False,
                               err_msg=str(err_msg), verbose=verbose)
#------------------------------------------------------------------------------
# Comparing function values at many data points at once, with helpful
# error reports
#------------------------------------------------------------------------------
def assert_func_equal(func, results, points, rtol=None, atol=None,
                      param_filter=None, knownfailure=None,
                      vectorized=True, dtype=None, nan_ok=False,
                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
    """Check that `func` matches `results` at the given `points`.

    `results` is either a callable producing reference values or tabulated
    values (one row per point).  The remaining keyword arguments are
    forwarded to `FuncData`; see its docstring for their meanings.
    """
    if hasattr(points, 'next') or hasattr(points, '__next__'):
        # it's a generator: `.next` on Python 2, `.__next__` on Python 3.
        # The original only checked 'next', so Python 3 generators slipped
        # through to np.asarray and produced a 0-d object array.
        points = list(points)
    points = np.asarray(points)
    if points.ndim == 1:
        points = points[:,None]
    nparams = points.shape[1]
    if hasattr(results, '__name__'):
        # function
        data = points
        result_columns = None
        result_func = results
    else:
        # dataset
        data = np.c_[points, results]
        result_columns = list(range(nparams, data.shape[1]))
        result_func = None
    # NOTE(review): `dtype` is accepted but never forwarded to
    # FuncData/check — confirm whether that is intentional.
    fdata = FuncData(func, data, list(range(nparams)),
                     result_columns=result_columns, result_func=result_func,
                     rtol=rtol, atol=atol, param_filter=param_filter,
                     knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized,
                     ignore_inf_sign=ignore_inf_sign,
                     distinguish_nan_and_inf=distinguish_nan_and_inf)
    fdata.check()
class FuncData(object):
    """
    Data set for checking a special function.

    Parameters
    ----------
    func : function
        Function to test
    data : numpy array
        Columnar data to use for testing: parameter columns and,
        optionally, expected-result columns.
    param_columns : int or tuple of ints
        Columns indices in which the parameters to `func` lie.
        Can be imaginary integers to indicate that the parameter
        should be cast to complex.
    result_columns : int or tuple of ints, optional
        Column indices for expected results from `func`.
    result_func : callable, optional
        Function to call to obtain results.
    rtol : float, optional
        Required relative tolerance. Default is 5*eps.
    atol : float, optional
        Required absolute tolerance. Default is 5*tiny.
    param_filter : function, or tuple of functions/Nones, optional
        Filter functions to exclude some parameter ranges.
        If omitted, no filtering is done.
    knownfailure : str, optional
        Known failure error message to raise when the test is run.
        If omitted, no exception is raised.
    dataname : str, optional
        Name of the data set, used only by ``__repr__`` in error reports.
    nan_ok : bool, optional
        If nan is always an accepted result.
    vectorized : bool, optional
        Whether all functions passed in are vectorized.
    ignore_inf_sign : bool, optional
        Whether to ignore signs of infinities.
        (Doesn't matter for complex-valued functions.)
    distinguish_nan_and_inf : bool, optional
        If False, treat numbers which contain nans or infs as
        equal. Sets ignore_inf_sign to be True.
    """

    def __init__(self, func, data, param_columns, result_columns=None,
                 result_func=None, rtol=None, atol=None, param_filter=None,
                 knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True):
        self.func = func
        self.data = data
        self.dataname = dataname
        # Normalize a scalar column index to a 1-tuple.
        if not hasattr(param_columns, '__len__'):
            param_columns = (param_columns,)
        self.param_columns = tuple(param_columns)
        # Exactly one of result_columns / result_func must be supplied.
        if result_columns is not None:
            if not hasattr(result_columns, '__len__'):
                result_columns = (result_columns,)
            self.result_columns = tuple(result_columns)
            if result_func is not None:
                raise ValueError("Only result_func or result_columns should be provided")
        elif result_func is not None:
            self.result_columns = None
        else:
            raise ValueError("Either result_func or result_columns should be provided")
        self.result_func = result_func
        self.rtol = rtol
        self.atol = atol
        if not hasattr(param_filter, '__len__'):
            param_filter = (param_filter,)
        self.param_filter = param_filter
        self.knownfailure = knownfailure
        self.nan_ok = nan_ok
        self.vectorized = vectorized
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        if not self.distinguish_nan_and_inf:
            # If nan and inf are conflated, the sign of inf cannot matter.
            self.ignore_inf_sign = True

    def get_tolerances(self, dtype):
        """Return (rtol, atol), filling defaults from the dtype's eps/tiny."""
        if not np.issubdtype(dtype, np.inexact):
            dtype = np.dtype(float)
        info = np.finfo(dtype)
        rtol, atol = self.rtol, self.atol
        if rtol is None:
            rtol = 5*info.eps
        if atol is None:
            atol = 5*info.tiny
        return rtol, atol

    def check(self, data=None, dtype=None):
        """Check the special function against the data.

        Raises an AssertionError listing the failing points if any output
        disagrees with the expected results beyond tolerance.
        """
        if self.knownfailure:
            pytest.xfail(reason=self.knownfailure)

        if data is None:
            data = self.data

        if dtype is None:
            dtype = data.dtype
        else:
            data = data.astype(dtype)

        rtol, atol = self.get_tolerances(dtype)

        # Apply given filter functions
        if self.param_filter:
            param_mask = np.ones((data.shape[0],), np.bool_)
            for j, filter in zip(self.param_columns, self.param_filter):
                if filter:
                    param_mask &= list(filter(data[:,j]))
            data = data[param_mask]

        # Pick parameters from the correct columns
        params = []
        for j in self.param_columns:
            if np.iscomplexobj(j):
                # An imaginary index means "take column j.imag, cast complex".
                j = int(j.imag)
                params.append(data[:,j].astype(complex))
            else:
                params.append(data[:,j])

        # Helper for evaluating results
        def eval_func_at_params(func, skip_mask=None):
            if self.vectorized:
                got = func(*params)
            else:
                # Evaluate point by point; skip_mask entries become nan.
                got = []
                for j in range(len(params[0])):
                    if skip_mask is not None and skip_mask[j]:
                        got.append(np.nan)
                        continue
                    got.append(func(*tuple([params[i][j] for i in range(len(params))])))
                got = np.asarray(got)
            if not isinstance(got, tuple):
                got = (got,)
            return got

        # Evaluate function to be tested
        got = eval_func_at_params(self.func)

        # Grab the correct results
        if self.result_columns is not None:
            # Correct results passed in with the data
            wanted = tuple([data[:,icol] for icol in self.result_columns])
        else:
            # Function producing correct results passed in
            skip_mask = None
            if self.nan_ok and len(got) == 1:
                # Don't spend time evaluating what doesn't need to be evaluated
                skip_mask = np.isnan(got[0])
            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)

        # Check the validity of each output returned
        assert_(len(got) == len(wanted))

        for output_num, (x, y) in enumerate(zip(got, wanted)):
            if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
                # Sign of infinity is not distinguished in this branch.
                pinf_x = np.isinf(x)
                pinf_y = np.isinf(y)
                minf_x = np.isinf(x)
                minf_y = np.isinf(y)
            else:
                pinf_x = np.isposinf(x)
                pinf_y = np.isposinf(y)
                minf_x = np.isneginf(x)
                minf_y = np.isneginf(y)
            nan_x = np.isnan(x)
            nan_y = np.isnan(y)

            # Suppress fp warnings while forming the difference arrays; the
            # non-finite entries are zeroed out and handled via the masks.
            olderr = np.seterr(all='ignore')
            try:
                abs_y = np.absolute(y)
                abs_y[~np.isfinite(abs_y)] = 0
                diff = np.absolute(x - y)
                diff[~np.isfinite(diff)] = 0

                rdiff = diff / np.absolute(y)
                rdiff[~np.isfinite(rdiff)] = 0
            finally:
                np.seterr(**olderr)

            tol_mask = (diff <= atol + rtol*abs_y)
            pinf_mask = (pinf_x == pinf_y)
            minf_mask = (minf_x == minf_y)
            nan_mask = (nan_x == nan_y)

            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)

            point_count = bad_j.size
            if self.nan_ok:
                # nan anywhere is acceptable: drop those points entirely.
                bad_j &= ~nan_x
                bad_j &= ~nan_y
                point_count -= (nan_x | nan_y).sum()

            if not self.distinguish_nan_and_inf and not self.nan_ok:
                # If nan's are okay we've already covered all these cases
                inf_x = np.isinf(x)
                inf_y = np.isinf(y)
                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
                bad_j &= ~both_nonfinite
                point_count -= both_nonfinite.sum()

            if np.any(bad_j):
                # Some bad results: inform what, where, and how bad
                msg = [""]
                msg.append("Max |adiff|: %g" % diff.max())
                msg.append("Max |rdiff|: %g" % rdiff.max())
                msg.append("Bad results (%d out of %d) for the following points (in output %d):"
                           % (np.sum(bad_j), point_count, output_num,))
                for j in np.where(bad_j)[0]:
                    j = int(j)
                    fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
                    a = " ".join(map(fmt, params))
                    b = " ".join(map(fmt, got))
                    c = " ".join(map(fmt, wanted))
                    d = fmt(rdiff)
                    msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d))
                assert_(False, "\n".join(msg))

    def __repr__(self):
        """Pretty-printing, esp. for Nose output"""
        if np.any(list(map(np.iscomplexobj, self.param_columns))):
            is_complex = " (complex)"
        else:
            is_complex = ""
        if self.dataname:
            return "<Data for %s%s: %s>" % (self.func.__name__, is_complex,
                                            os.path.basename(self.dataname))
        else:
            return "<Data for %s%s>" % (self.func.__name__, is_complex)
| |
(dp0
VPlayStation
p1
(dp2
S'summary_display'
p3
S''
p4
sS'total_runs'
p5
L1L
sS'summary_score'
p6
I27
sS'results'
p7
(dp8
S'score'
p9
(dp10
g9
I27
sS'raw_score'
p11
I27
sS'display'
p12
S'27/100'
p13
ssssS'total_runs'
p14
L11412L
sVChrome
p15
(dp16
S'summary_display'
p17
g4
sS'total_runs'
p18
L1966L
sS'summary_score'
p19
I100
sS'results'
p20
(dp21
S'score'
p22
(dp23
g22
I100
sS'raw_score'
p24
I100
sS'display'
p25
S'100/100'
p26
ssssVStainless
p27
(dp28
S'summary_display'
p29
g4
sS'total_runs'
p30
L5L
sS'summary_score'
p31
I100
sS'results'
p32
(dp33
S'score'
p34
(dp35
g34
I100
sS'raw_score'
p36
I100
sS'display'
p37
S'100/100'
p38
ssssVSeaMonkey
p39
(dp40
S'summary_display'
p41
g4
sS'total_runs'
p42
L38L
sS'summary_score'
p43
I93
sS'results'
p44
(dp45
S'score'
p46
(dp47
g46
I93
sS'raw_score'
p48
I93
sS'display'
p49
S'93/100'
p50
ssssVFirefox (Minefield)
p51
(dp52
S'summary_display'
p53
g4
sS'total_runs'
p54
L210L
sS'summary_score'
p55
I96
sS'results'
p56
(dp57
S'score'
p58
(dp59
g58
I96
sS'raw_score'
p60
I96
sS'display'
p61
S'96/100'
p62
ssssVFirefox (Shiretoko)
p63
(dp64
S'summary_display'
p65
g4
sS'total_runs'
p66
L180L
sS'summary_score'
p67
I93
sS'results'
p68
(dp69
S'score'
p70
(dp71
g70
I93
sS'raw_score'
p72
I93
sS'display'
p73
S'93/100'
p74
ssssVMaxthon
p75
(dp76
S'summary_display'
p77
g4
sS'total_runs'
p78
L16L
sS'summary_score'
p79
I13
sS'results'
p80
(dp81
S'score'
p82
(dp83
g82
I13
sS'raw_score'
p84
I13
sS'display'
p85
S'13/100'
p86
ssssVCamino
p87
(dp88
S'summary_display'
p89
g4
sS'total_runs'
p90
L25L
sS'summary_score'
p91
I53
sS'results'
p92
(dp93
S'score'
p94
(dp95
g94
I53
sS'raw_score'
p96
I53
sS'display'
p97
S'53/100'
p98
ssssVOther
p99
(dp100
S'summary_display'
p101
g4
sS'total_runs'
p102
L58L
sS'summary_score'
p103
I100
sS'results'
p104
(dp105
S'score'
p106
(dp107
g106
I100
sS'raw_score'
p108
I100
sS'display'
p109
S'100/100'
p110
ssssVGranParadiso
p111
(dp112
S'summary_display'
p113
g4
sS'total_runs'
p114
L2L
sS'summary_score'
p115
I72
sS'results'
p116
(dp117
S'score'
p118
(dp119
g118
I72
sS'raw_score'
p120
I72
sS'display'
p121
S'72/100'
p122
ssssVOpera Mini
p123
(dp124
S'summary_display'
p125
g4
sS'total_runs'
p126
L13L
sS'summary_score'
p127
I98
sS'results'
p128
(dp129
S'score'
p130
(dp131
g130
I98
sS'raw_score'
p132
I98
sS'display'
p133
S'98/100'
p134
ssssVAndroid
p135
(dp136
S'summary_display'
p137
g4
sS'total_runs'
p138
L49L
sS'summary_score'
p139
I93
sS'results'
p140
(dp141
S'score'
p142
(dp143
g142
I93
sS'raw_score'
p144
I93
sS'display'
p145
S'93/100'
p146
ssssVUzbl
p147
(dp148
S'summary_display'
p149
g4
sS'total_runs'
p150
L23L
sS'summary_score'
p151
I100
sS'results'
p152
(dp153
S'score'
p154
(dp155
g154
I100
sS'raw_score'
p156
I100
sS'display'
p157
S'100/100'
p158
ssssVVodafone
p159
(dp160
S'summary_display'
p161
g4
sS'total_runs'
p162
L1L
sS'summary_score'
p163
I93
sS'results'
p164
(dp165
S'score'
p166
(dp167
g166
I93
sS'raw_score'
p168
I93
sS'display'
p169
S'93/100'
p170
ssssVVienna
p171
(dp172
S'summary_display'
p173
g4
sS'total_runs'
p174
L2L
sS'summary_score'
p175
I100
sS'results'
p176
(dp177
S'score'
p178
(dp179
g178
I100
sS'raw_score'
p180
I100
sS'display'
p181
S'100/100'
p182
ssssVSleipnir
p183
(dp184
S'summary_display'
p185
g4
sS'total_runs'
p186
L19L
sS'summary_score'
p187
I20
sS'results'
p188
(dp189
S'score'
p190
(dp191
g190
I20
sS'raw_score'
p192
I20
sS'display'
p193
S'20/100'
p194
ssssVNetFront
p195
(dp196
S'summary_display'
p197
g4
sS'total_runs'
p198
L1L
sS'summary_score'
p199
I72
sS'results'
p200
(dp201
S'score'
p202
(dp203
g202
I72
sS'raw_score'
p204
I72
sS'display'
p205
S'72/100'
p206
ssssVNokia
p207
(dp208
S'summary_display'
p209
g4
sS'total_runs'
p210
L7L
sS'summary_score'
p211
I47
sS'results'
p212
(dp213
S'score'
p214
(dp215
g214
I47
sS'raw_score'
p216
I47
sS'display'
p217
S'47/100'
p218
ssssVJasmine
p219
(dp220
S'summary_display'
p221
g4
sS'total_runs'
p222
L3L
sS'summary_score'
p223
I70
sS'results'
p224
(dp225
S'score'
p226
(dp227
g226
I70
sS'raw_score'
p228
I70
sS'display'
p229
S'70/100'
p230
ssssVFluid
p231
(dp232
S'summary_display'
p233
g4
sS'total_runs'
p234
L1L
sS'summary_score'
p235
I100
sS'results'
p236
(dp237
S'score'
p238
(dp239
g238
I100
sS'raw_score'
p240
I100
sS'display'
p241
S'100/100'
p242
ssssVK-Meleon
p243
(dp244
S'summary_display'
p245
g4
sS'total_runs'
p246
L9L
sS'summary_score'
p247
I53
sS'results'
p248
(dp249
S'score'
p250
(dp251
g250
I53
sS'raw_score'
p252
I53
sS'display'
p253
S'53/100'
p254
ssssVChrome Frame (IE 7)
p255
(dp256
S'summary_display'
p257
g4
sS'total_runs'
p258
L5L
sS'summary_score'
p259
I100
sS'results'
p260
(dp261
S'score'
p262
(dp263
g262
I100
sS'raw_score'
p264
I100
sS'display'
p265
S'100/100'
p266
ssssVQtWeb
p267
(dp268
S'summary_display'
p269
g4
sS'total_runs'
p270
L1L
sS'summary_score'
p271
I98
sS'results'
p272
(dp273
S'score'
p274
(dp275
g274
I98
sS'raw_score'
p276
I98
sS'display'
p277
S'98/100'
p278
ssssVSafari
p279
(dp280
S'summary_display'
p281
g4
sS'total_runs'
p282
L1088L
sS'summary_score'
p283
I100
sS'results'
p284
(dp285
S'score'
p286
(dp287
g286
I100
sS'raw_score'
p288
I100
sS'display'
p289
S'100/100'
p290
ssssVGaleon
p291
(dp292
S'summary_display'
p293
g4
sS'total_runs'
p294
L2L
sS'summary_score'
p295
I72
sS'results'
p296
(dp297
S'score'
p298
(dp299
g298
I72
sS'raw_score'
p300
I72
sS'display'
p301
S'72/100'
p302
ssssVNetNewsWire
p303
(dp304
S'summary_display'
p305
g4
sS'total_runs'
p306
L12L
sS'summary_score'
p307
I100
sS'results'
p308
(dp309
S'score'
p310
(dp311
g310
I100
sS'raw_score'
p312
I100
sS'display'
p313
S'100/100'
p314
ssssVMaemo Browser
p315
(dp316
S'summary_display'
p317
g4
sS'total_runs'
p318
L1L
sS'summary_score'
p319
I94
sS'results'
p320
(dp321
S'score'
p322
(dp323
g322
I94
sS'raw_score'
p324
I94
sS'display'
p325
S'94/100'
p326
ssssVPalm Pre
p327
(dp328
S'summary_display'
p329
g4
sS'total_runs'
p330
L5L
sS'summary_score'
p331
I92
sS'results'
p332
(dp333
S'score'
p334
(dp335
g334
I92
sS'raw_score'
p336
I92
sS'display'
p337
S'92/100'
p338
ssssVNetscape
p339
(dp340
S'summary_display'
p341
g4
sS'total_runs'
p342
L5L
sS'summary_score'
p343
I40
sS'results'
p344
(dp345
S'score'
p346
(dp347
g346
I40
sS'raw_score'
p348
I40
sS'display'
p349
S'40/100'
p350
ssssVIceweasel
p351
(dp352
S'summary_display'
p353
g4
sS'total_runs'
p354
L52L
sS'summary_score'
p355
I72
sS'results'
p356
(dp357
S'score'
p358
(dp359
g358
I72
sS'raw_score'
p360
I72
sS'display'
p361
S'72/100'
p362
ssssVKonqueror
p363
(dp364
S'summary_display'
p365
g4
sS'total_runs'
p366
L49L
sS'summary_score'
p367
I89
sS'results'
p368
(dp369
S'score'
p370
(dp371
g370
I89
sS'raw_score'
p372
I89
sS'display'
p373
S'89/100'
p374
ssssVKazehakase
p375
(dp376
S'summary_display'
p377
g4
sS'total_runs'
p378
L1L
sS'summary_score'
p379
I53
sS'results'
p380
(dp381
S'score'
p382
(dp383
g382
I53
sS'raw_score'
p384
I53
sS'display'
p385
S'53/100'
p386
ssssVOpera
p387
(dp388
S'summary_display'
p389
g4
sS'total_runs'
p390
L1383L
sS'summary_score'
p391
I100
sS'results'
p392
(dp393
S'score'
p394
(dp395
g394
I100
sS'raw_score'
p396
I100
sS'display'
p397
S'100/100'
p398
ssssVAvant
p399
(dp400
S'summary_display'
p401
g4
sS'total_runs'
p402
L11L
sS'summary_score'
p403
I12
sS'results'
p404
(dp405
S'score'
p406
(dp407
g406
I12
sS'raw_score'
p408
I12
sS'display'
p409
S'12/100'
p410
ssssViPhone
p411
(dp412
S'summary_display'
p413
g4
sS'total_runs'
p414
L129L
sS'summary_score'
p415
I100
sS'results'
p416
(dp417
S'score'
p418
(dp419
g418
I100
sS'raw_score'
p420
I100
sS'display'
p421
S'100/100'
p422
ssssVMicroB
p423
(dp424
S'summary_display'
p425
g4
sS'total_runs'
p426
L2L
sS'summary_score'
p427
I55
sS'results'
p428
(dp429
S'score'
p430
(dp431
g430
I55
sS'raw_score'
p432
I55
sS'display'
p433
S'55/100'
p434
ssssVEpiphany
p435
(dp436
S'summary_display'
p437
g4
sS'total_runs'
p438
L8L
sS'summary_score'
p439
I72
sS'results'
p440
(dp441
S'score'
p442
(dp443
g442
I72
sS'raw_score'
p444
I72
sS'display'
p445
S'72/100'
p446
ssssVBlackberry
p447
(dp448
S'summary_display'
p449
g4
sS'total_runs'
p450
L1L
sS'summary_score'
p451
I83
sS'results'
p452
(dp453
S'score'
p454
(dp455
g454
I83
sS'raw_score'
p456
I83
sS'display'
p457
S'83/100'
p458
ssssVIron
p459
(dp460
S'summary_display'
p461
g4
sS'total_runs'
p462
L87L
sS'summary_score'
p463
I100
sS'results'
p464
(dp465
S'score'
p466
(dp467
g466
I100
sS'raw_score'
p468
I100
sS'display'
p469
S'100/100'
p470
ssssVShiira
p471
(dp472
S'summary_display'
p473
g4
sS'total_runs'
p474
L1L
sS'summary_score'
p475
I98
sS'results'
p476
(dp477
S'score'
p478
(dp479
g478
I98
sS'raw_score'
p480
I98
sS'display'
p481
S'98/100'
p482
ssssVMidori
p483
(dp484
S'summary_display'
p485
g4
sS'total_runs'
p486
L22L
sS'summary_score'
p487
I100
sS'results'
p488
(dp489
S'score'
p490
(dp491
g490
I100
sS'raw_score'
p492
I100
sS'display'
p493
S'100/100'
p494
ssssVOmniWeb
p495
(dp496
S'summary_display'
p497
g4
sS'total_runs'
p498
L6L
sS'summary_score'
p499
I100
sS'results'
p500
(dp501
S'score'
p502
(dp503
g502
I100
sS'raw_score'
p504
I100
sS'display'
p505
S'100/100'
p506
ssssVIE
p507
(dp508
S'summary_display'
p509
g4
sS'total_runs'
p510
L843L
sS'summary_score'
p511
I20
sS'results'
p512
(dp513
S'score'
p514
(dp515
g514
I20
sS'raw_score'
p516
I20
sS'display'
p517
S'20/100'
p518
ssssVFirefox
p519
(dp520
S'summary_display'
p521
g4
sS'total_runs'
p522
L4902L
sS'summary_score'
p523
I93
sS'results'
p524
(dp525
S'score'
p526
(dp527
g526
I93
sS'raw_score'
p528
I93
sS'display'
p529
S'93/100'
p530
ssssVLunascape
p531
(dp532
S'summary_display'
p533
g4
sS'total_runs'
p534
L27L
sS'summary_score'
p535
I93
sS'results'
p536
(dp537
S'score'
p538
(dp539
g538
I93
sS'raw_score'
p540
I93
sS'display'
p541
S'93/100'
p542
ssssVSwiftfox
p543
(dp544
S'summary_display'
p545
g4
sS'total_runs'
p546
L13L
sS'summary_score'
p547
I93
sS'results'
p548
(dp549
S'score'
p550
(dp551
g550
I93
sS'raw_score'
p552
I93
sS'display'
p553
S'93/100'
p554
ssssVWii
p555
(dp556
S'summary_display'
p557
g4
sS'total_runs'
p558
L1L
sS'summary_score'
p559
I55
sS'results'
p560
(dp561
S'score'
p562
(dp563
g562
I55
sS'raw_score'
p564
I55
sS'display'
p565
S'55/100'
p566
ssssVFennec
p567
(dp568
S'summary_display'
p569
g4
sS'total_runs'
p570
L8L
sS'summary_score'
p571
I94
sS'results'
p572
(dp573
S'score'
p574
(dp575
g574
I94
sS'raw_score'
p576
I94
sS'display'
p577
S'94/100'
p578
ssssVChrome Frame (IE 6)
p579
(dp580
S'summary_display'
p581
g4
sS'total_runs'
p582
L4L
sS'summary_score'
p583
I100
sS'results'
p584
(dp585
S'score'
p586
(dp587
g586
I100
sS'raw_score'
p588
I100
sS'display'
p589
S'100/100'
p590
ssssVFirefox (Namoroka)
p591
(dp592
S'summary_display'
p593
g4
sS'total_runs'
p594
L52L
sS'summary_score'
p595
I94
sS'results'
p596
(dp597
S'score'
p598
(dp599
g598
I94
sS'raw_score'
p600
I94
sS'display'
p601
S'94/100'
p602
ssssVArora
p603
(dp604
S'summary_display'
p605
g4
sS'total_runs'
p606
L34L
sS'summary_score'
p607
I100
sS'results'
p608
(dp609
S'score'
p610
(dp611
g610
I100
sS'raw_score'
p612
I100
sS'display'
p613
S'100/100'
p614
ssssVFlock
p615
(dp616
S'summary_display'
p617
g4
sS'total_runs'
p618
L8L
sS'summary_score'
p619
I71
sS'results'
p620
(dp621
S'score'
p622
(dp623
g622
I71
sS'raw_score'
p624
I71
sS'display'
p625
S'71/100'
p626
ssssViCab
p627
(dp628
S'summary_display'
p629
g4
sS'total_runs'
p630
L2L
sS'summary_score'
p631
I100
sS'results'
p632
(dp633
S'score'
p634
(dp635
g634
I100
sS'raw_score'
p636
I100
sS'display'
p637
S'100/100'
p638
ssssVChrome Frame (IE 8)
p639
(dp640
S'summary_display'
p641
g4
sS'total_runs'
p642
L18L
sS'summary_score'
p643
I100
sS'results'
p644
(dp645
S'score'
p646
(dp647
g646
I100
sS'raw_score'
p648
I100
sS'display'
p649
S'100/100'
p650
ssss.
| |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
from datetime import timedelta
import calendar
import math
import copy
import mysql.connector
import calendar
#database connection
# NOTE(review): database credentials are hardcoded in source — move them to a
# config file or environment variables before sharing this script.
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
# When True, re-calibrate each BC mass with the newer calibration constants.
AD_corr = True
#alter the dates to set limits on data analysis range
start_analysis_at = datetime.strptime('20090501','%Y%m%d')
end_analysis_at = datetime.strptime('20120531','%Y%m%d')
########data dirs
directory_list = [
    'D:/2009/WHI_ECSP2/Binary/',
    'D:/2010/WHI_ECSP2/Binary/',
    'D:/2012/WHI_UBCSP2/Binary/',
]
#tracking odd neg intervals (buffering issue?)
argh = 0  # intervals whose end time precedes their start time
ok = 0    # well-ordered intervals
err_count = 0      # lines where the VED calculation raised
non_err_count = 0  # lines processed successfully
##############initialize binning variables
bins = []
start_size = 70 #VED in nm
end_size = 220 #VED in nm
interval_length = 5 #in nm
#need durations to calc sampled volume later for concs
sampling_duration_BB = 0
sampling_duration_FR = 0
sampling_duration_NPac = 0
sampling_duration_SPac = 0
sampling_duration_Cont = 0
sampling_duration_LRT = 0
sampling_duration_GBPS = 0
sampling_duration_allFT = 0
#create list of size bins
while start_size < end_size:
    bins.append(start_size)
    start_size += interval_length
#create dictionary with size bins as keys
# Each bin start maps to [accumulated mass, accumulated particle count].
binned_data = {}
for bin in bins:
    binned_data[bin] = [0,0]
###create a binning dictionary for each air mass category
rBC_BB_24h_data = copy.deepcopy(binned_data)
rBC_24h_data = copy.deepcopy(binned_data) #does not include BB data
rBC_FT_data_cluster_NPac = copy.deepcopy(binned_data)
rBC_FT_data_cluster_SPac = copy.deepcopy(binned_data)
rBC_FT_data_cluster_Cont = copy.deepcopy(binned_data)
rBC_FT_data_cluster_LRT = copy.deepcopy(binned_data)
rBC_FT_data_cluster_GBPS = copy.deepcopy(binned_data)
rBC_FT_data_all = copy.deepcopy(binned_data)
######get spike times
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/')
file = open('WHI_rBC_record_2009to2013-spike_times.rbcpckl', 'r')
spike_times = pickle.load(file)
file.close()
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes follwing Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
######this helper method allows conversion of BC mass from a value arrived at via an old calibration to a value arrived at via a new calibration
#quadratic eqn: ax^2 + bx + c
def PeakHtFromMass(BC_mass, var_C, var_b, var_a):
    """Invert the quadratic calibration mass = a*x**2 + b*x + C for peak height x.

    Solves a*x**2 + b*x + (C - BC_mass) = 0 and returns the first real root
    below 4000 (the cutoff used by the original code — presumably the SP2
    detector's valid peak-height range; confirm).  Returns np.nan when the
    quadratic has no real root, or when no root lies below the cutoff (the
    original implicitly returned None in that last case, which crashed the
    downstream arithmetic `new_b*pk_ht + new_C`).
    """
    a = var_a
    b = var_b
    c = var_C - BC_mass
    d = b**2 - 4*a*c
    if d < 0:
        # This equation has no real solution.
        return np.nan
    root = math.sqrt(d)
    if d == 0:
        # This equation has one (repeated) solution.
        return (-b + root)/(2*a)
    # Two solutions: prefer x1, fall back to x2, both gated by the cutoff.
    x1 = (-b + root)/(2*a)
    x2 = (-b - root)/(2*a)
    if x1 < 4000:
        return x1
    if x2 < 4000:
        return x2
    return np.nan
def build_distr(distr, BC_VED, BC_mass, sample_factor, bin_width=None):
    """Accumulate one particle into a size-distribution dictionary.

    `distr` maps bin start size (nm) -> [total mass, total count].  The
    particle of diameter `BC_VED` and mass `BC_mass` is added, weighted by
    `sample_factor`, to the bin whose half-open interval
    [start, start + bin_width) contains it.  The (mutated) dict is returned.

    `bin_width` defaults to the module-level `interval_length`, so existing
    call sites are unchanged; pass it explicitly to use the function
    standalone.
    """
    if bin_width is None:
        bin_width = interval_length
    for bin_start in distr:
        low = float(bin_start)
        high = low + bin_width
        if low <= BC_VED < high:
            distr[bin_start][0] += BC_mass * sample_factor
            distr[bin_start][1] += 1 * sample_factor
    return distr
#get BC data
for directory in directory_list:
    os.chdir(directory)
    print directory
    for item in os.listdir('.'):
        # Daily data folders are named YYYYMMDD.
        if os.path.isdir(item) == True and item.startswith('20'):
            folder_date = datetime.strptime(item, '%Y%m%d')
            if folder_date >= start_analysis_at and folder_date <= end_analysis_at:
                # Calibration constants (old and new) per campaign year.
                if folder_date.year == 2009:
                    old_C = 0
                    old_b = 0.012
                    old_a = 0
                    new_C = 0.01244
                    new_b = 0.0172
                if folder_date.year == 2010:
                    old_C = 0.156
                    old_b = 0.00606
                    old_a = 6.3931e-7
                    new_C = -0.32619
                    new_b = 0.01081
                if folder_date.year == 2012:
                    old_C = 0.20699
                    old_b = 0.00246
                    old_a = -1.09254e-7
                    new_C = 0.24826
                    new_b = 0.003043
                os.chdir(item)
                for file in os.listdir('.'):
                    if file.endswith('.ptxt'):
                        print file
                        f = open(file,'r')
                        f.readline()  # skip the header line
                        for line in f:
                            # Tab-separated particle record:
                            # start, end, incand flag, saturation flag, mass.
                            newline = line.split('\t')
                            start_time = float(newline[0])
                            end_time = float(newline[1])
                            incand_flag = float(newline[2])
                            incand_sat_flag = int(newline[3])
                            BC_mass = float(newline[4])
                            BC_mass_old = float(newline[4])
                            # Re-calibration: old calibration -> peak height -> new calibration.
                            if AD_corr == True:
                                if folder_date.year == 2009:
                                    # 2009 old calibration is linear (old_a == 0).
                                    pk_ht = BC_mass/old_b
                                else:
                                    pk_ht = PeakHtFromMass(BC_mass, old_C, old_b, old_a)
                                BC_mass = new_b*pk_ht + new_C
                            try:
                                BC_VED = (((BC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
                            except:
                                #print BC_mass, BC_mass_old, datetime.utcfromtimestamp(end_time), err_count
                                err_count+=1
                                continue
                            non_err_count +=1
                            #this is to account for me running the first few 2012 days and all of 2009 with the new UTC code (the rest are old PST code)
                            timezone = timedelta(hours = 0) #using zero here b/c most files were written with old PST code, have a correction further down for those (2009 early 2012) run with newer UTC code
                            if datetime.strptime('20120401', '%Y%m%d') <= datetime.utcfromtimestamp(start_time) <= datetime.strptime('20120410', '%Y%m%d'):
                                timezone = timedelta(hours = -8)
                            if datetime.utcfromtimestamp(start_time) <= datetime.strptime('20091231', '%Y%m%d'):
                                timezone = timedelta(hours = -8)
                            start_time_obj = datetime.utcfromtimestamp(start_time)+timezone
                            end_time_obj = datetime.utcfromtimestamp(end_time)+timezone
                            #####now have correct UTC times
                            #sample rate changes (particle subsampling factor by period)
                            if end_time_obj < datetime(2012,4,4,19,43,4):
                                sample_factor = 1.0
                            if datetime(2012,4,4,19,43,4) <= end_time_obj < datetime(2012,4,5,13,47,9):
                                sample_factor = 3.0
                            if datetime(2012,4,5,13,47,9) <= end_time_obj < datetime(2012,4,10,3,3,25):
                                sample_factor = 1.0
                            if datetime(2012,4,10,3,3,25) <= end_time_obj < datetime(2012,5,16,6,9,13):
                                sample_factor = 3.0
                            if datetime(2012,5,16,6,9,13) <= end_time_obj < datetime(2012,6,7,18,14,39):
                                sample_factor = 10.0
                            ####
                            #ignore annoying neg intervals
                            if end_time_obj < start_time_obj:
                                argh += 1
                                continue
                            else:
                                ok +=1
                            #ignore spike times (within a 5-minute buffer either side)
                            spike_buffer = 5*60
                            end_timestamp=calendar.timegm(end_time_obj.utctimetuple())
                            cursor.execute('''(SELECT
                                spike_start_UTC,
                                spike_end_UTC
                                FROM whi_spike_times_2009to2012
                                WHERE
                                %s BETWEEN (spike_start_UTC-%s) AND (spike_end_UTC+%s))''',
                                (end_timestamp,spike_buffer,spike_buffer))
                            spike_data = cursor.fetchall()
                            if spike_data != []:
                                continue
                            #if in a BB time, put this data in BB dict
                            if (fire_time1[0] <= end_time_obj <= fire_time1[1]) or (fire_time2[0] <= end_time_obj <= fire_time2[1]):
                                sampling_duration_BB = sampling_duration_BB + end_time - start_time #need duration to calc sampled volume later for concs
                                rBC_BB_24h_data = build_distr(rBC_BB_24h_data,BC_VED,BC_mass,sample_factor)
                                continue #do not go on to put this data into a cluster dictionary or the FR dictionary
                            ####### get cluster number
                            end_timestamp=calendar.timegm(end_time_obj.utctimetuple())
                            cursor.execute('''(SELECT
                                cluster_start_time,
                                cluster_number
                                FROM whi_ft_cluster_times_2009to2012
                                WHERE
                                %s BETWEEN cluster_start_time AND cluster_end_time
                                AND
                                id > %s
                                AND
                                cluster_number IS NOT NULL)''',
                                (end_timestamp,0))
                            cluster = cursor.fetchall()
                            if cluster == []:
                                continue #if we don't have a cluster number it's not in an FT time!!
                            # NOTE(review): the SELECT returns (cluster_start_time, cluster_number),
                            # but [0][0] is used as the cluster number and [0][1] as the start time —
                            # these indices look swapped; verify against the table before trusting
                            # the cluster assignment.
                            cluslist_current_cluster_no = cluster[0][0]
                            cluster_start_time = datetime.utcfromtimestamp(cluster[0][1])
                            #add data to list in cluster dictionaries (1 list per cluster time early night/late night)
                            if ((cluster_start_time) <= end_time_obj < (cluster_start_time+timedelta(hours=6))):
                                #if cluslist_current_cluster_no == 9:
                                #    sampling_duration_GBPS = sampling_duration_GBPS + end_time - start_time #need duration to calc sampled volume later for concs
                                #    sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
                                #
                                #    rBC_FT_data_cluster_GBPS = build_distr(rBC_FT_data_cluster_GBPS,BC_VED,BC_mass,sample_factor)
                                #    rBC_FT_data_all = build_distr(rBC_FT_data_all,BC_VED,BC_mass,sample_factor)
                                if cluslist_current_cluster_no == 4:
                                    sampling_duration_Cont = sampling_duration_Cont + end_time - start_time #need duration to calc sampled volume later for concs
                                    sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
                                    rBC_FT_data_cluster_Cont = build_distr(rBC_FT_data_cluster_Cont,BC_VED,BC_mass,sample_factor)
                                    rBC_FT_data_all = build_distr(rBC_FT_data_all,BC_VED,BC_mass,sample_factor)
                                if cluslist_current_cluster_no in [6,8,9]:
                                    sampling_duration_SPac = sampling_duration_SPac + end_time - start_time #need duration to calc sampled volume later for concs
                                    sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
                                    rBC_FT_data_cluster_SPac = build_distr(rBC_FT_data_cluster_SPac,BC_VED,BC_mass,sample_factor)
                                    rBC_FT_data_all = build_distr(rBC_FT_data_all,BC_VED,BC_mass,sample_factor)
                                if cluslist_current_cluster_no in [2,7]:
                                    sampling_duration_LRT = sampling_duration_LRT + end_time - start_time #need duration to calc sampled volume later for concs
                                    sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
                                    rBC_FT_data_cluster_LRT = build_distr(rBC_FT_data_cluster_LRT,BC_VED,BC_mass,sample_factor)
                                    rBC_FT_data_all = build_distr(rBC_FT_data_all,BC_VED,BC_mass,sample_factor)
                                if cluslist_current_cluster_no in [1,3,5,10]:
                                    sampling_duration_NPac = sampling_duration_NPac + end_time - start_time #need duration to calc sampled volume later for concs
                                    sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
                                    rBC_FT_data_cluster_NPac = build_distr(rBC_FT_data_cluster_NPac,BC_VED,BC_mass,sample_factor)
                                    rBC_FT_data_all = build_distr(rBC_FT_data_all,BC_VED,BC_mass,sample_factor)
                        f.close()
                os.chdir(directory)
print 'neg times', argh, ok, argh*100./(argh+ok)
print err_count, non_err_count, err_count*100./(err_count+non_err_count)
average_flow = 120
total_sampled_volume_BB = sampling_duration_BB*average_flow/60 #flow is in cc/min and sampling duration is in sec, so use min/60sec to get vol in cc
total_sampled_volume_FR = sampling_duration_FR*average_flow/60
total_sampled_volume_NPac = sampling_duration_NPac*average_flow/60
total_sampled_volume_SPac = sampling_duration_SPac*average_flow/60
total_sampled_volume_Cont = sampling_duration_Cont*average_flow/60
total_sampled_volume_LRT = sampling_duration_LRT *average_flow/60
total_sampled_volume_GBPS = sampling_duration_GBPS*average_flow/60
total_sampled_volume_allFT = sampling_duration_allFT*average_flow/60
#v=create lists
rBC_BB_24h_data_l = []
rBC_24h_data_l = []
rBC_FT_data_cluster_NPac_l = []
rBC_FT_data_cluster_SPac_l = []
rBC_FT_data_cluster_Cont_l = []
rBC_FT_data_cluster_LRT_l = []
rBC_FT_data_cluster_GBPS_l = []
rBC_FT_data_all_l = []
#put lists etc in array
binned_data_lists = [
[rBC_BB_24h_data ,rBC_BB_24h_data_l , total_sampled_volume_BB ,'BB'],
[rBC_24h_data ,rBC_24h_data_l , total_sampled_volume_FR ,'FR'],
[rBC_FT_data_cluster_NPac ,rBC_FT_data_cluster_NPac_l , total_sampled_volume_NPac,'NPac'],
[rBC_FT_data_cluster_SPac ,rBC_FT_data_cluster_SPac_l , total_sampled_volume_SPac,'SPac'],
[rBC_FT_data_cluster_Cont ,rBC_FT_data_cluster_Cont_l , total_sampled_volume_Cont,'Cont'],
[rBC_FT_data_cluster_LRT ,rBC_FT_data_cluster_LRT_l , total_sampled_volume_LRT ,'LRT'],
[rBC_FT_data_cluster_GBPS ,rBC_FT_data_cluster_GBPS_l , total_sampled_volume_GBPS,'GBPS'],
[rBC_FT_data_all ,rBC_FT_data_all_l , total_sampled_volume_allFT,'all_FT'],
]
#fiddle with data (sort, normalize, etc)
for line in binned_data_lists:
dict = line[0]
list = line[1]
sampled_vol = line[2]
for bin, value in dict.iteritems():
bin_mass = value[0]
bin_numb = value[1]
try:
bin_mass_conc = bin_mass/sampled_vol #gives mass per cc
bin_numb_conc = bin_numb/sampled_vol #gives number per cc
temp = [bin,bin_mass_conc,bin_numb_conc]
except:
temp = [bin,np.nan,np.nan]
list.append(temp)
list.sort()
for row in list: #normalize
row.append(row[1]) #these 2 lines append teh raw mass and number concs
row.append(row[2])
row[1] = row[1]/(math.log(row[0]+interval_length,10)-math.log(row[0],10)) #d/dlog(VED)
row[2] = row[2]/(math.log(row[0]+interval_length,10)-math.log(row[0],10)) #d/dlog(VED)
row[0] = row[0]+interval_length/2 #correction for our binning code recording bin starts as keys instead of midpoints
# Unpack each category's rows into parallel bin-midpoint / mass / number
# columns for plotting (rows are [midpoint, dM/dlogVED, dN/dlogVED, dM, dN]).
VED_bin_BB = [row[0] for row in rBC_BB_24h_data_l]
mass_BB = [row[1] for row in rBC_BB_24h_data_l]
numb_BB = [row[2] for row in rBC_BB_24h_data_l]
VED_bin_FR = [row[0] for row in rBC_24h_data_l]
mass_FR = [row[1] for row in rBC_24h_data_l]
numb_FR = [row[2] for row in rBC_24h_data_l]
VED_bin_NPac = [row[0] for row in rBC_FT_data_cluster_NPac_l]
mass_NPac = [row[1] for row in rBC_FT_data_cluster_NPac_l]
numb_NPac = [row[2] for row in rBC_FT_data_cluster_NPac_l]
VED_bin_SPac = [row[0] for row in rBC_FT_data_cluster_SPac_l]
mass_SPac = [row[1] for row in rBC_FT_data_cluster_SPac_l]
numb_SPac = [row[2] for row in rBC_FT_data_cluster_SPac_l]
VED_bin_Cont = [row[0] for row in rBC_FT_data_cluster_Cont_l]
mass_Cont = [row[1] for row in rBC_FT_data_cluster_Cont_l]
numb_Cont = [row[2] for row in rBC_FT_data_cluster_Cont_l]
VED_bin_LRT = [row[0] for row in rBC_FT_data_cluster_LRT_l]
mass_LRT = [row[1] for row in rBC_FT_data_cluster_LRT_l]
numb_LRT = [row[2] for row in rBC_FT_data_cluster_LRT_l]
VED_bin_GBPS = [row[0] for row in rBC_FT_data_cluster_GBPS_l]
mass_GBPS = [row[1] for row in rBC_FT_data_cluster_GBPS_l]
numb_GBPS = [row[2] for row in rBC_FT_data_cluster_GBPS_l]
VED_bin_allFT = [row[0] for row in rBC_FT_data_all_l]
mass_allFT = [row[1] for row in rBC_FT_data_all_l]
numb_allFT = [row[2] for row in rBC_FT_data_all_l]
#write final list of interval data to file and pickle
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/size_distrs/from ptxt v2/')
# One tab-separated text file plus one pickle per category.
# NOTE(review): 'list' and 'file' shadow builtins here; the pickle is opened in
# text mode ('w'), which is fine on Python 2 but would need 'wb' on Python 3.
for list in binned_data_lists:
	file = open('AD_corr - size distr - FT - ' + list[3] + 'base 10.txt', 'w')
	file.write('size_bin_midpoint(VEDnm)' + '\t'+ 'dM/dlog(VED)_(ng/cm3)' + '\t'+ 'd#/dlog(VED)_(#/cm3)' + '\t' + 'dM(VED)_(ng/cm3)' + '\t'+ 'd#(VED)_(#/cm3)' + '\n')
	file.write('total sampled volume:' + str(list[2]) + 'cc' + '\n')
	for row in list[1]:
		line = '\t'.join(str(x) for x in row)
		file.write(line + '\n')
	file.close()
	file = open('AD_corr - size distr - FT - ' + list[3] + '.sdbinpickl', 'w')
	pickle.dump(list[1], file)
	file.close()
#plotting
# Mass distributions (dM/dlogVED) for every category, log-scaled diameter axis.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(VED_bin_BB,mass_BB , label = 'BB')
ax1.semilogx(VED_bin_FR,mass_FR , label = 'FR')
ax1.semilogx(VED_bin_NPac,mass_NPac , label = 'NPac')
ax1.semilogx(VED_bin_SPac,mass_SPac , label = 'SPac')
ax1.semilogx(VED_bin_Cont,mass_Cont , label = 'Cont')
ax1.semilogx(VED_bin_LRT,mass_LRT , label = 'LRT')
ax1.semilogx(VED_bin_GBPS,mass_GBPS , label = 'GBPS')
ax1.semilogx(VED_bin_allFT,mass_allFT, label = 'all FT')
plt.xlabel('VED (nm)')
plt.ylabel('dM/dlog(VED)')
plt.legend()
plt.show()
# Number distributions (d#/dlogVED), same categories.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(VED_bin_BB ,numb_BB , label = 'BB')
ax1.semilogx(VED_bin_FR ,numb_FR , label = 'FR')
ax1.semilogx(VED_bin_NPac,numb_NPac , label = 'NPac')
ax1.semilogx(VED_bin_SPac,numb_SPac , label = 'SPac')
ax1.semilogx(VED_bin_Cont,numb_Cont , label = 'Cont')
ax1.semilogx(VED_bin_LRT ,numb_LRT , label = 'LRT')
ax1.semilogx(VED_bin_GBPS,numb_GBPS , label = 'GBPS')
ax1.semilogx(VED_bin_allFT,numb_allFT , label = 'all FT')
plt.xlabel('VED (nm)')
plt.ylabel('d#/dlog(VED)')
plt.legend()
plt.show()
| |
#! /usr/bin/env pypy
from lib import dd as ddlib
import re
import sys
import json
# Lookup tables for the taxon extractor below (Python 2 script).
dict_fossils = {}            # lower-cased taxon name -> taxonomic rank
dict_species_lastword = {}   # last word of multi-word species names -> 1
dict_english = {}            # lower-cased English dictionary words -> 1
dict_locations = {}          # unused here; kept for compatibility with callers
words = {}
pprev = ""
prev = ""
# English dictionary: one word per line.
for l in open(ddlib.BASE_FOLDER + '/dicts/words'):
	dict_english[l.rstrip().lower()] = 1
# PaleoDB taxon list: "<name>\t<rank>" per line; names may carry a
# parenthesized subgenus, e.g. "Genus (Subgenus) species".
for l in open(ddlib.BASE_FOLDER + '/dicts/paleodb_taxons.tsv'):
	try:
		(rawname, rank) = l.rstrip().split('\t')
		ss = rawname.split(' ')
		if len(ss) == 1:
			dict_fossils[ss[0].lower()] = rank
		elif len(ss) == 2:
			if '(' not in rawname:
				dict_fossils[rawname.lower()] = rank
				# BUGFIX: the genus key was stored un-lowercased (ss[0]) while
				# every lookup below lower-cases first, so it could never match.
				dict_fossils[ss[0].lower()] = "genus"
			else:
				for s in ss:
					if '(' in s:
						s = s.replace('(', '').replace(')', '')
					if 'species' in rank and ' ' not in s: continue
					dict_fossils[s.lower()] = rank
		elif len(ss) == 3:
			if '(' not in rawname:
				# BUGFIX: was 'name.lower()' with 'name' undefined; the
				# NameError was swallowed by the except below, silently
				# dropping every plain three-word taxon name.
				dict_fossils[rawname.lower()] = rank
			elif '(' in ss[1] and '(' not in ss[2] and '(' not in ss[0]:
				# "Genus (Subgenus) species": index under both genus and subgenus.
				dict_fossils[(ss[0]+" "+ss[2]).lower()] = rank
				dict_fossils[(ss[1].replace('(', '').replace(')', '')+" "+ss[2]).lower()] = rank
		if 'species' in rank and len(ss) > 1 and len(ss[-1]) > 2:
			dict_species_lastword[ss[-1].lower()] = 1
	except Exception:
		# Skip malformed dictionary lines (e.g. wrong tab-field count).
		continue
# Precompiled patterns for cleaning candidate taxon phrases.  All patterns are
# raw strings so '\.' and '\b' reach the regex engine as escapes (non-raw
# strings only work by accident and warn on modern Pythons).
RE_CF_ATOZ = re.compile(r' cf\. [A-Z]\.')   # qualifiers like " cf. A."
RE_CF = re.compile(r' cf\.')                # bare " cf."
# BUGFIX: the parentheses were unescaped, so they formed a capture group and
# the pattern could never match the literal " ( sic ) " it is meant to strip.
RE_SIC = re.compile(r' \( sic \) ')
RE_AFF1 = re.compile(r'\baff\b')
RE_AFF2 = re.compile(r'\baff\.\b')
RE_INC1 = re.compile(r'\bincertae\b')
RE_INC2 = re.compile(r'\bindet\b')
RE_START_ATOZ = re.compile(r'^[A-Z]')            # token starts with a capital
RE_START_ATOZSTAR_END = re.compile(r'^[A-Z]*$')  # all-capitals (or empty) token
# Taxonomic ranks ordered from most to least specific.
ranks = {"subgenus":3,"genus":4,"subtribe":5,"tribe":6,"subfamily":7,"family":8,"group":9,
"superfamily":10,"infraorder":11,"suborder":12,"order":13,"superorder":14,"infraclass":15,
"subclass":16,"class":17,"superclass":18,"subphylum":19,"phylum":20,"superphylum":21,"subkingdom":22,
"kingdom":23,"superkingdom":24}
# Longest candidate phrase, in words.
MAXPHRASELEN = 7
# Main extraction loop: read one tokenized sentence (JSON) per stdin line,
# scan every phrase up to MAXPHRASELEN words, and emit candidate taxon
# mentions as JSON on stdout (Python 2 print statements).
for _row in sys.stdin:
	row = json.loads(_row)
	docid = row["docid"]
	sentid = row["sentid"]
	wordidxs = row["wordidxs"]
	words = row["words"]
	poses = row["poses"]
	ners = row["ners"]
	lemmas = row["lemmas"]
	dep_paths = row["dep_paths"]
	dep_parents = row["dep_parents"]
	bounding_boxes = row["bounding_boxes"]
	history = {}
	# Longest-match-first: try the widest window ending at each start.
	for start in range(0, len(words)):
		for end in reversed(range(start + 1, min(len(words), start + 1 + MAXPHRASELEN))):
			# NOTE(review): 'history' is never written to, so this guard is
			# always False -- presumably meant to suppress overlapping matches.
			if start in history or end in history: continue
			phrase = " ".join(words[start:end])
			ner = " ".join(ners[start:end])
			lemma = " ".join(lemmas[start:end])
			# Skip phrases immediately followed by "...zone" (stratigraphic zones).
			if end<len(words):
				n_word = words[end]
				if n_word.lower().endswith('zone'):
					continue
			if end<len(words)-1:
				n_word = words[end+1]
				if n_word.lower().endswith('zone'):
					continue
			# Strip "cf." qualifiers and "( sic )" markers before lookup.
			if 'cf.' in phrase or 'sic' in phrase:
				phrase2 = re.sub(RE_CF_ATOZ, '', phrase)
				phrase = phrase2
				phrase2 = re.sub(RE_CF, '', phrase)
				phrase = phrase2
				phrase2 = re.sub(RE_SIC, '', phrase)
				phrase = phrase2
			lphrase = phrase.lower()
			# Record open-nomenclature qualifiers, then drop such phrases anyway.
			genus_reso = None
			if 'aff' in lphrase or 'incertae' in lphrase or 'indet' in lphrase:
				if 'aff' in lphrase:
					genus_reso = 'aff'
				if 'incertae' in lphrase:
					genus_reso = 'incertae'
				if 'indet' in lphrase:
					genus_reso = 'indet'
			if re.search(RE_AFF1, lphrase):
				continue
			if re.search(RE_AFF2, lphrase):
				continue
			if re.search(RE_INC1, lphrase):
				continue
			if re.search(RE_INC2, lphrase):
				continue
			# A rank word directly before the phrase (e.g. "genus Foo").
			prerank = None
			for prew in range(max(0, start-1), start):
				if words[prew].lower() in ranks:
					prerank = words[prew].lower()
			ss = phrase.split(' ')
			lss = len(ss)
			# Capitalization filter: first word must start uppercase (unless a
			# rank word precedes); later words may only be capitalized if they
			# are all-caps abbreviations or sit inside parentheses.
			isvalid = True
			if not prerank and not re.search(RE_START_ATOZ, ss[0]):
				isvalid = False
			for i in range(1, lss):
				if not prerank and re.search(RE_START_ATOZ, ss[i]) and not re.search(RE_START_ATOZSTAR_END, ss[i]):
					isvalid = False
			inpar = False
			for i in range(1, lss):
				if '(' in ss[i]:
					inpar = True
					continue
				if ')' in ss[i]:
					inpar = False
					continue
				if inpar == False and re.search(RE_START_ATOZ, ss[i]) and not re.search(RE_START_ATOZSTAR_END, ss[i]):
					isvalid = False
			if isvalid == False: continue
			if inpar == True: continue
			if len(phrase) < 5: continue
			#if prerank == None and prerank == None and lphrase in dict_locations: continue
			# Entity id and provenance tuple for this mention.
			eid = "TAXON_DOC_" + docid + "_%s_%d_%d" % (sentid, start, end-1)
			prov = [sentid, "%d"%start, "%d"%(end-1), phrase]
			if lphrase in dict_fossils:
				# Known taxon: prefer the explicit preceding rank word if any.
				if prerank != None:
					print json.dumps({"docid":docid, "type":prerank, "eid":eid, "entity": lphrase, "prov":prov, "author_year":"", "is_correct":None})
				else:
					print json.dumps({"docid":docid, "type":dict_fossils[lphrase], "eid":eid, "entity": lphrase, "prov":prov, "author_year":"", "is_correct":None})
			elif ' ' not in lphrase and lphrase.endswith('idae') and lphrase not in dict_english:
				# Unknown single word ending in "-idae": assume a family name.
				if prerank != None:
					print json.dumps({"docid":docid, "type":prerank, "eid":eid, "entity": lphrase, "prov":prov, "author_year":"", "is_correct":None})
				else:
					print json.dumps({"docid":docid, "type":"family", "eid":eid, "entity": lphrase, "prov":prov, "author_year":"", "is_correct":None})
			else:
				# Not in the dictionary: try "Genus species" and subgenus forms.
				ss = phrase.split(' ')
				ssl = lphrase.split(' ')
				if len(ss) == 2:
					if ssl[0] in dict_fossils and 'genus' in dict_fossils[ssl[0]]:
						if ssl[1] in dict_species_lastword and ssl[1] not in dict_english:
							print json.dumps({"docid":docid, "type":"species", "eid":eid, "entity": lphrase, "prov":prov, "author_year":"", "is_correct":None})
				# Split tokens into those inside vs. outside parentheses.
				cleanup = []
				inpars = []
				inpar = False
				for s in ssl:
					if '(' in s:
						inpar = True
						continue
					if ')' in s:
						inpar = False
						continue
					if inpar == False:
						cleanup.append(s)
					else:
						inpars.append(s)
				if len(inpars) == 1 and inpars[0] in dict_fossils and 'genus' in dict_fossils[inpars[0]]:
					# NOTE(review): 'nc' is never defined anywhere in this
					# script -- reaching this branch raises NameError.  It
					# presumably should be len(inpars) or a token count; TODO fix.
					if nc == 1:
						if len(cleanup) == 1 and ss[0] != '(':
							if cleanup[0] in dict_fossils and 'genus' in dict_fossils[cleanup[0]]:
								print json.dumps({"docid":docid, "type":"subgenus", "eid":eid, "entity": " ".join(ssl), "prov":prov, "author_year":"", "is_correct":None})
					if nc == 1 and len(cleanup) == 2:
						if cleanup[0] in dict_fossils and 'genus' in dict_fossils[cleanup[0]]:
							if cleanup[1] in dict_species_lastword and cleanup[1] not in dict_english:
								print json.dumps({"docid":docid, "type":"species", "eid":eid, "entity": " ".join(ssl), "prov":prov, "author_year":"", "is_correct":None})
								# Also emit the bare genus as its own mention.
								eid2 = "TAXON_DOC_" + docid + "_%s_%d_%d" % (sentid, start, start)
								prov2 = [sentid, "%d"%start, "%d"%start, cleanup[0]]
								print json.dumps({"docid":docid, "type":"genus", "eid":eid2, "entity": cleanup[0], "prov":prov2, "author_year":"", "is_correct":None})
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DefaultsList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    def __init__(self, version, assistant_sid):
        """
        Initialize the DefaultsList
        :param Version version: Version that contains the resource
        :param assistant_sid: The SID of the Assistant that is the parent of the resource
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
        """
        super(DefaultsList, self).__init__(version)
        # The parent assistant SID is all that is needed to resolve paths.
        self._solution = dict(assistant_sid=assistant_sid)
    def get(self):
        """
        Constructs a DefaultsContext for this assistant's defaults
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        """
        return DefaultsContext(self._version, assistant_sid=self._solution['assistant_sid'])
    def __call__(self):
        """
        Constructs a DefaultsContext (calling the list is an alias for get())
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        """
        return self.get()
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Autopilot.V1.DefaultsList>'
class DefaultsPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    def __init__(self, version, response, solution):
        """
        Initialize the DefaultsPage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param assistant_sid: The SID of the Assistant that is the parent of the resource
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsPage
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsPage
        """
        super(DefaultsPage, self).__init__(version, response)
        # Path solution is handed down from the list resource unchanged.
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an instance of DefaultsInstance from a raw API payload
        :param dict payload: Payload response from the API
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        return DefaultsInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
        )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Autopilot.V1.DefaultsPage>'
class DefaultsContext(InstanceContext):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    def __init__(self, version, assistant_sid):
        """
        Initialize the DefaultsContext
        :param Version version: Version that contains the resource
        :param assistant_sid: The SID of the Assistant that is the parent of the resource to fetch
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        """
        super(DefaultsContext, self).__init__(version)
        # Build the REST URI from the parent assistant SID.
        self._solution = dict(assistant_sid=assistant_sid)
        self._uri = '/Assistants/{assistant_sid}/Defaults'.format(**self._solution)
    def fetch(self):
        """
        Fetch the DefaultsInstance
        :returns: The fetched DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return DefaultsInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
        )
    def update(self, defaults=values.unset):
        """
        Update the DefaultsInstance
        :param dict defaults: A JSON string that describes the default task links.
        :returns: The updated DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        data = values.of({'Defaults': serialize.object(defaults), })
        payload = self._version.update(method='POST', uri=self._uri, data=data, )
        return DefaultsInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
        )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Autopilot.V1.DefaultsContext {}>'.format(' '.join(details))
class DefaultsInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    def __init__(self, version, payload, assistant_sid):
        """
        Initialize the DefaultsInstance
        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        super(DefaultsInstance, self).__init__(version)
        # Marshal the raw REST payload into plain attribute storage.
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'assistant_sid': payload.get('assistant_sid'),
            'url': payload.get('url'),
            'data': payload.get('data'),
        }
        # The context is created lazily by the _proxy property.
        self._context = None
        self._solution = dict(assistant_sid=assistant_sid)
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context
        :returns: DefaultsContext for this DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsContext
        """
        if self._context is None:
            self._context = DefaultsContext(
                self._version,
                assistant_sid=self._solution['assistant_sid'],
            )
        return self._context
    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def assistant_sid(self):
        """
        :returns: The SID of the Assistant that is the parent of the resource
        :rtype: unicode
        """
        return self._properties['assistant_sid']
    @property
    def url(self):
        """
        :returns: The absolute URL of the Defaults resource
        :rtype: unicode
        """
        return self._properties['url']
    @property
    def data(self):
        """
        :returns: The JSON string that describes the default task links
        :rtype: dict
        """
        return self._properties['data']
    def fetch(self):
        """
        Fetch the DefaultsInstance
        :returns: The fetched DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        return self._proxy.fetch()
    def update(self, defaults=values.unset):
        """
        Update the DefaultsInstance
        :param dict defaults: A JSON string that describes the default task links.
        :returns: The updated DefaultsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
        """
        return self._proxy.update(defaults=defaults, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Autopilot.V1.DefaultsInstance {}>'.format(' '.join(details))
| |
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .normal import Normal
from .gas_recursions import gas_recursion_exponential_orderone, gas_recursion_exponential_ordertwo
from .gas_recursions import gasx_recursion_exponential_orderone, gasx_recursion_exponential_ordertwo
from .gas_recursions import gas_llev_recursion_exponential_orderone, gas_llev_recursion_exponential_ordertwo
from .gas_recursions import gas_llt_recursion_exponential_orderone, gas_llt_recursion_exponential_ordertwo
from .gas_recursions import gas_reg_recursion_exponential_orderone, gas_reg_recursion_exponential_ordertwo
class Exponential(Family):
    """
    Exponential Distribution
    ----
    This class contains methods relating to the Exponential distribution for time series.
    """
    def __init__(self, lmd=1.0, transform=None, **kwargs):
        """
        Parameters
        ----------
        lambda : float
            Rate parameter for the Exponential distribution
        transform : str
            Whether to apply a transformation to the location variable - e.g. 'exp' or 'logit'
        """
        super(Exponential, self).__init__(transform)
        self.lmd0 = lmd  # prior rate parameter, used by pdf()/logpdf() below
        self.covariance_prior = False
        self.gradient_only = kwargs.get('gradient_only', False) # used for GAS Exponential models
        # Select the score used in GAS recursions.  NOTE: for this family the
        # first- and second-order scores are algebraically identical (both
        # return 1 - mean*y), so the choice only affects which path is taken.
        if self.gradient_only is True:
            self.score_function = self.first_order_score
        else:
            self.score_function = self.second_order_score
    def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
        """ Creates approximating Gaussian state space model for Exponential measurement density
        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables
        T, Z, R, Q : np.array
            State space matrices used in KFS algorithm
        h_approx : float
            The variance of the measurement density
        data: np.array
            The univariate time series data
        Returns
        ----------
        H : np.array
            Approximating measurement variance matrix
        mu : np.array
            Approximating measurement constants
        """
        # Constant-variance, zero-mean approximation: beta/T/Z/R/Q are accepted
        # for interface compatibility with other families but are unused here.
        H = np.ones(data.shape[0])*h_approx
        mu = np.zeros(data.shape[0])
        return H, mu
    def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
        """ Creates approximating Gaussian state space model for Exponential measurement density
        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables
        T, Z, R, Q : np.array
            State space matrices used in KFS algorithm
        h_approx : float
            The variance of the measurement density
        data: np.array
            The univariate time series data
        X: np.array
            The regressors
        state_no : int
            Number of states
        Returns
        ----------
        H : np.array
            Approximating measurement variance matrix
        mu : np.array
            Approximating measurement constants
        """
        # Same constant approximation as approximating_model(); the regressor
        # arguments are unused.
        H = np.ones(data.shape[0])*h_approx
        mu = np.zeros(data.shape[0])
        return H, mu
    @staticmethod
    def build_latent_variables():
        """ Builds additional latent variables for this family
        Returns
        ----------
        - A list of lists (each sub-list contains latent variable information)
        """
        # The Exponential family adds no extra latent variables.
        lvs_to_build = []
        return lvs_to_build
    @staticmethod
    def draw_variable(loc, scale, shape, skewness, nsims):
        """ Draws random variables from Exponential distribution
        Parameters
        ----------
        loc : float
            location parameter for the distribution
        scale : float
            scale parameter for the distribution
        shape : float
            tail thickness parameter for the distribution
        skewness : float
            skewness parameter for the distribution
        nsims : int or list
            number of draws to take from the distribution
        Returns
        ----------
        - Random draws from the distribution
        """
        # loc is treated as the rate; np.random.exponential takes the scale (1/rate).
        return np.random.exponential(1.0/loc, nsims)
    @staticmethod
    def first_order_score(y, mean, scale, shape, skewness):
        """ GAS Exponential Update term using gradient only - native Python function
        Parameters
        ----------
        y : float
            datapoint for the time series
        mean : float
            location parameter for the Exponential distribution
        scale : float
            scale parameter for the Exponential distribution
        shape : float
            tail thickness parameter for the Exponential distribution
        skewness : float
            skewness parameter for the Exponential distribution
        Returns
        ----------
        - Score of the Exponential family
        """
        return 1 - (mean*y)
    def logpdf(self, mu):
        """
        Log PDF for Exponential prior
        Parameters
        ----------
        mu : float
            Latent variable for which the prior is being formed over
        Returns
        ----------
        - log(p(mu))
        """
        if self.transform is not None:
            mu = self.transform(mu)
        # NOTE(review): lmd0 is passed positionally as the 'loc' (shift)
        # argument of ss.expon.logpdf, with scale left at 1 -- confirm this is
        # intended rather than scale=1/lmd0 for a rate parameterization.
        return ss.expon.logpdf(mu, self.lmd0)
    @staticmethod
    def markov_blanket(y, mean, scale, shape, skewness):
        """ Markov blanket for the Exponential distribution
        Parameters
        ----------
        y : np.ndarray
            univariate time series
        mean : np.ndarray
            array of location parameters for the Exponential distribution
        scale : float
            scale parameter for the Exponential distribution
        shape : float
            tail thickness parameter for the Exponential distribution
        skewness : float
            skewness parameter for the Exponential distribution
        Returns
        ----------
        - Markov blanket of the Exponential family
        """
        # mean is the rate, so scale = 1/mean for scipy's parameterization.
        return ss.expon.logpdf(x=y, scale=1/mean)
    @staticmethod
    def exponential_link(x):
        # Link function mapping the latent state to a positive rate.
        return 1.0/np.exp(x)
    @staticmethod
    def setup():
        """ Returns the attributes of this family
        Notes
        ----------
        - scale notes whether family has a variance parameter (sigma)
        - shape notes whether family has a tail thickness parameter (nu)
        - skewness notes whether family has a skewness parameter (gamma)
        - mean_transform is a function which transforms the location parameter
        - cythonized notes whether the family has cythonized routines
        Returns
        ----------
        - model name, link function, scale, shape, skewness, mean_transform, cythonized
        """
        name = "Exponential GAS"
        link = Exponential.exponential_link
        scale = False
        shape = False
        skewness = False
        mean_transform = np.log
        cythonized = True
        return name, link, scale, shape, skewness, mean_transform, cythonized
    @staticmethod
    def neg_loglikelihood(y, mean, scale, shape, skewness):
        """ Negative loglikelihood function
        Parameters
        ----------
        y : np.ndarray
            univariate time series
        mean : np.ndarray
            array of location parameters for the Exponential distribution
        scale : float
            scale parameter for the Exponential distribution
        shape : float
            tail thickness parameter for the Exponential distribution
        skewness : float
            skewness parameter for the Exponential distribution
        Returns
        ----------
        - Negative loglikelihood of the Exponential family
        """
        return -np.sum(ss.expon.logpdf(x=y, scale=1/mean))
    def pdf(self, mu):
        """
        PDF for Exponential prior
        Parameters
        ----------
        mu : float
            Latent variable for which the prior is being formed over
        Returns
        ----------
        - p(mu)
        """
        if self.transform is not None:
            mu = self.transform(mu)
        # NOTE(review): as in logpdf(), lmd0 is used as scipy's 'loc' argument.
        return ss.expon.pdf(mu, self.lmd0)
    @staticmethod
    def reg_score_function(X, y, mean, scale, shape, skewness):
        """ GAS Exponential Regression Update term using gradient only - native Python function
        Parameters
        ----------
        X : float
            datapoint for the right hand side variable
        y : float
            datapoint for the time series
        mean : float
            location parameter for the Exponential distribution
        scale : float
            scale parameter for the Exponential distribution
        shape : float
            tail thickness parameter for the Exponential distribution
        skewness : float
            skewness parameter for the Exponential distribution
        Returns
        ----------
        - Score of the Exponential family
        """
        return X*(1.0 - mean*y)
    @staticmethod
    def second_order_score(y, mean, scale, shape, skewness):
        """ GAS Exponential Update term potentially using second-order information - native Python function
        Parameters
        ----------
        y : float
            datapoint for the time series
        mean : float
            location parameter for the Exponential distribution
        scale : float
            scale parameter for the Exponential distribution
        shape : float
            tail thickness parameter for the Exponential distribution
        skewness : float
            skewness parameter for the Exponential distribution
        Returns
        ----------
        - Adjusted score of the Exponential family
        """
        # Identical to first_order_score for this family (no extra adjustment).
        return 1 - (mean*y)
    # Optional Cythonized recursions below for GAS Exponential models
    @staticmethod
    def gradient_recursion():
        """ GAS Exponential Model Recursion - gradient only
        Returns
        ----------
        - Recursion function for GAS Exponential model - gradient only
        """
        return gas_recursion_exponential_orderone
    @staticmethod
    def newton_recursion():
        """ GAS Exponential Model Recursion - adjusted score
        Returns
        ----------
        - Recursion function for GAS Exponential model - adjusted score
        """
        return gas_recursion_exponential_ordertwo
    @staticmethod
    def gradientx_recursion():
        """ GASX Exponential Model Recursion - gradient only
        Returns
        ----------
        - Recursion function for GASX Exponential model - gradient only
        """
        return gasx_recursion_exponential_orderone
    @staticmethod
    def newtonx_recursion():
        """ GASX Exponential Model Recursion - adjusted score
        Returns
        ----------
        - Recursion function for GASX Exponential model - adjusted score
        """
        return gasx_recursion_exponential_ordertwo
    @staticmethod
    def gradientllev_recursion():
        """ GAS Local Level Exponential Model Recursion - gradient only
        Returns
        ----------
        - Recursion function for GAS Local Level Exponential model - gradient only
        """
        return gas_llev_recursion_exponential_orderone
    @staticmethod
    def newtonllev_recursion():
        """ GAS Local Level Exponential Model Recursion - adjusted score
        Returns
        ----------
        - Recursion function for GAS Local Level Exponential model - adjusted score
        """
        return gas_llev_recursion_exponential_ordertwo
    @staticmethod
    def gradientllt_recursion():
        """ GAS Local Linear Trend Exponential Model Recursion - gradient only
        Returns
        ----------
        - Recursion function for GAS Local Linear Trend Exponential model - gradient only
        """
        return gas_llt_recursion_exponential_orderone
    @staticmethod
    def newtonllt_recursion():
        """ GAS Local Linear Trend Exponential Model Recursion - adjusted score
        Returns
        ----------
        - Recursion function for GAS Local Linear Trend Exponential model - adjusted score
        """
        return gas_llt_recursion_exponential_ordertwo
    @staticmethod
    def gradientreg_recursion():
        """ GAS Dynamic Regression Exponential Model Recursion - gradient only
        Returns
        ----------
        - Recursion function for GAS Dynamic Regression Exponential model - gradient only
        """
        return gas_reg_recursion_exponential_orderone
    @staticmethod
    def newtonreg_recursion():
        """ GAS Dynamic Regression Exponential Model Recursion - adjusted score
        Returns
        ----------
        - Recursion function for GAS Dynamic Regression Exponential model - adjusted score
        """
        return gas_reg_recursion_exponential_ordertwo
| |
# Todd Miller jmiller@stsci.edu
from __future__ import division, print_function
import os, sys, math
import os.path
import Tkinter as Tk, FileDialog
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
# Module-level aliases into matplotlib's global configuration/reporting.
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# Map matplotlib cursor constants to Tk cursor names.
cursord = {
    cursors.MOVE: "fleur",
    cursors.HAND: "hand2",
    cursors.POINTER: "arrow",
    cursors.SELECT_REGION: "tcross",
    }
def round(x):
    """Round *x* to the nearest integer, with exact halves rounding up
    (toward +infinity), via floor(x + 0.5)."""
    shifted = x + 0.5
    return int(math.floor(shifted))
def raise_msg_to_str(msg):
    """msg is a return arg from a raise.  Join with new lines"""
    if is_string_like(msg):
        return msg
    # Non-string payload (e.g. a tuple of args): stringify and join.
    return '\n'.join(map(str, msg))
def error_msg_tkpaint(msg, parent=None):
    """Pop up a Tk error dialog showing *msg* (``parent`` is accepted but unused)."""
    # Imported lazily so the dialog module is only loaded when needed.
    from tkMessageBox import showerror
    showerror("matplotlib", msg)
def draw_if_interactive():
    """Redraw the active figure, but only when pyplot is in interactive mode."""
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.show()
class Show(ShowBase):
    """Display all figures and run the Tk event loop."""
    def mainloop(self):
        # Blocks until every Tk window has been closed.
        Tk.mainloop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    # Honor a caller-supplied Figure subclass via the 'FigureClass' kwarg.
    fig_class = kwargs.pop('FigureClass', Figure)
    fig = fig_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # Keep the FocusManager alive for the duration of window creation so
    # focus is restored when this function returns.
    _focus = windowing.FocusManager()
    window = Tk.Tk()
    window.withdraw()
    if Tk.TkVersion >= 8.5:
        # put a mpl icon on the window rather than the default tk icon. Tkinter
        # doesn't allow colour icons on linux systems, but tk >=8.5 has a iconphoto
        # command which we call directly. Source:
        # http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
        icon_path = os.path.join(rcParams['datapath'], 'images', 'matplotlib.gif')
        icon = Tk.PhotoImage(file=icon_path)
        try:
            window.tk.call('wm', 'iconphoto', window._w, icon)
        except (SystemExit, KeyboardInterrupt):
            # re-raise exit type Exceptions
            raise
        except:
            # the icon is cosmetic: log the failure, but carry on
            verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
    canvas = FigureCanvasTkAgg(figure, master=window)
    manager = FigureManagerTkAgg(canvas, num, window)
    if matplotlib.is_interactive():
        manager.show()
    return manager
class TimerTk(TimerBase):
    '''
    Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.

    Attributes:
    * interval: The time between timer events in milliseconds. Default
        is 1000 ms.
    * single_shot: Boolean flag indicating whether this timer should
        operate as single shot (run once and then stop). Defaults to False.
    * callbacks: Stores list of (func, args) tuples that will be called
        upon timer events. This list can be manipulated directly, or the
        functions add_callback and remove_callback can be used.
    '''
    def __init__(self, parent, *args, **kwargs):
        TimerBase.__init__(self, *args, **kwargs)
        # parent: any Tk widget; its after()/after_cancel() drive the timer
        self.parent = parent
        # id returned by Tk's after(); None while the timer is stopped
        self._timer = None
    def _timer_start(self):
        # cancel any pending event first so at most one after() is queued
        self._timer_stop()
        self._timer = self.parent.after(self._interval, self._on_timer)
    def _timer_stop(self):
        if self._timer is not None:
            self.parent.after_cancel(self._timer)
        self._timer = None
    def _on_timer(self):
        # fire the registered callbacks via the base class
        TimerBase._on_timer(self)
        # Tk after() is only a single shot, so we need to add code here to
        # reset the timer if we're not operating in single shot mode.
        if not self._single and len(self.callbacks) > 0:
            self._timer = self.parent.after(self._interval, self._on_timer)
        else:
            self._timer = None
class FigureCanvasTkAgg(FigureCanvasAgg):
    """Tk widget hosting an Agg-rendered matplotlib figure.

    Renders via :class:`FigureCanvasAgg` and blits the result into a
    ``Tk.PhotoImage`` placed on a ``Tk.Canvas``; translates Tk GUI events
    into matplotlib canvas events.
    """
    # Tk keysym_num -> matplotlib key name (modifiers, arrows, function
    # keys, keypad keys).
    keyvald = {65507 : 'control',
               65505 : 'shift',
               65513 : 'alt',
               65515 : 'super',
               65508 : 'control',
               65506 : 'shift',
               65514 : 'alt',
               65361 : 'left',
               65362 : 'up',
               65363 : 'right',
               65364 : 'down',
               65307 : 'escape',
               65470 : 'f1',
               65471 : 'f2',
               65472 : 'f3',
               65473 : 'f4',
               65474 : 'f5',
               65475 : 'f6',
               65476 : 'f7',
               65477 : 'f8',
               65478 : 'f9',
               65479 : 'f10',
               65480 : 'f11',
               65481 : 'f12',
               65300 : 'scroll_lock',
               65299 : 'break',
               65288 : 'backspace',
               65293 : 'enter',
               65379 : 'insert',
               65535 : 'delete',
               65360 : 'home',
               65367 : 'end',
               65365 : 'pageup',
               65366 : 'pagedown',
               65438 : '0',
               65436 : '1',
               65433 : '2',
               65435 : '3',
               65430 : '4',
               65437 : '5',
               65432 : '6',
               65429 : '7',
               65431 : '8',
               65434 : '9',
               65451 : '+',
               65453 : '-',
               65450 : '*',
               65455 : '/',
               65439 : 'dec',
               65421 : 'enter',
               }
    _keycode_lookup = {
        262145: 'control',
        524320: 'alt',
        524352: 'alt',
        1048584: 'super',
        1048592: 'super',
        131074: 'shift',
        131076: 'shift',
    }
    """_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
    keys on apple keyboards."""
    def __init__(self, figure, master=None, resize_callback=None):
        FigureCanvasAgg.__init__(self, figure)
        self._idle = True
        self._idle_callback = None
        t1,t2,w,h = self.figure.bbox.bounds
        w, h = int(w), int(h)
        self._tkcanvas = Tk.Canvas(
            master=master, width=w, height=h, borderwidth=4)
        # PhotoImage that Agg output is blitted into; the canvas shows it
        # as a single image item centered in the widget.
        self._tkphoto = Tk.PhotoImage(
            master=self._tkcanvas, width=w, height=h)
        self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
        self._resize_callback = resize_callback
        self._tkcanvas.bind("<Configure>", self.resize)
        self._tkcanvas.bind("<Key>", self.key_press)
        self._tkcanvas.bind("<Motion>", self.motion_notify_event)
        self._tkcanvas.bind("<KeyRelease>", self.key_release)
        for name in "<Button-1>", "<Button-2>", "<Button-3>":
            self._tkcanvas.bind(name, self.button_press_event)
        for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
            self._tkcanvas.bind(name, self.button_dblclick_event)
        for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
            self._tkcanvas.bind(name, self.button_release_event)
        # Mouse wheel on Linux generates button 4/5 events
        for name in "<Button-4>", "<Button-5>":
            self._tkcanvas.bind(name, self.scroll_event)
        # Mouse wheel for windows goes to the window with the focus.
        # Since the canvas won't usually have the focus, bind the
        # event to the window containing the canvas instead.
        # See http://wiki.tcl.tk/3893 (mousewheel) for details
        root = self._tkcanvas.winfo_toplevel()
        root.bind("<MouseWheel>", self.scroll_event_windows)
        # Can't get destroy events by binding to _tkcanvas. Therefore, bind
        # to the window and filter.
        def filter_destroy(evt):
            if evt.widget is self._tkcanvas:
                self.close_event()
        root.bind("<Destroy>", filter_destroy)
        self._master = master
        self._tkcanvas.focus_set()
    def resize(self, event):
        # <Configure> handler: resize the figure and backing image to the
        # widget's new pixel size.
        width, height = event.width, event.height
        if self._resize_callback is not None:
            self._resize_callback(event)
        # compute desired figure size in inches
        dpival = self.figure.dpi
        winch = width/dpival
        hinch = height/dpival
        self.figure.set_size_inches(winch, hinch)
        # replace the photo image with one matching the new size
        self._tkcanvas.delete(self._tkphoto)
        self._tkphoto = Tk.PhotoImage(
            master=self._tkcanvas, width=int(width), height=int(height))
        self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
        self.resize_event()
        self.show()
        # a resizing will in general move the pointer position
        # relative to the canvas, so process it as a motion notify
        # event. An intended side effect of this call is to allow
        # window raises (which trigger a resize) to get the cursor
        # position to the mpl event framework so key presses which are
        # over the axes will work w/o clicks or explicit motion
        self._update_pointer_position(event)
    def _update_pointer_position(self, guiEvent=None):
        """
        Figure out if we are inside the canvas or not and update the
        canvas enter/leave events
        """
        # if the pointer if over the canvas, set the lastx and lasty
        # attrs of the canvas so it can process event w/o mouse click
        # or move
        # the window's upper, left coords in screen coords
        xw = self._tkcanvas.winfo_rootx()
        yw = self._tkcanvas.winfo_rooty()
        # the pointer's location in screen coords
        xp, yp = self._tkcanvas.winfo_pointerxy()
        # not figure out the canvas coordinates of the pointer
        xc = xp - xw
        yc = yp - yw
        # flip top/bottom
        yc = self.figure.bbox.height - yc
        # JDH: this method was written originally to get the pointer
        # location to the backend lastx and lasty attrs so that events
        # like KeyEvent can be handled without mouse events. Eg, if
        # the cursor is already above the axes, then key presses like
        # 'g' should toggle the grid. In order for this to work in
        # backend_bases, the canvas needs to know _lastx and _lasty.
        # There are three ways to get this info the canvas:
        #
        # 1) set it explicity
        #
        # 2) call enter/leave events explicity. The downside of this
        #    in the impl below is that enter could be repeatedly
        #    triggered if thes mouse is over the axes and one is
        #    resizing with the keyboard. This is not entirely bad,
        #    because the mouse position relative to the canvas is
        #    changing, but it may be surprising to get repeated entries
        #    without leaves
        #
        # 3) process it as a motion notify event. This also has pros
        #    and cons. The mouse is moving relative to the window, but
        #    this may surpise an event handler writer who is getting
        #    motion_notify_events even if the mouse has not moved
        # here are the three scenarios
        # NOTE(review): the if 1/elif 0 structure is deliberate -- it keeps
        # the alternative (currently disabled) implementations in the code.
        if 1:
            # just manually set it
            self._lastx, self._lasty = xc, yc
        elif 0:
            # alternate implementation: process it as a motion
            FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
        elif 0:
            # alternate implementation -- process enter/leave events
            # instead of motion/notify
            if self.figure.bbox.contains(xc, yc):
                self.enter_notify_event(guiEvent, xy=(xc,yc))
            else:
                self.leave_notify_event(guiEvent)
    def draw(self):
        # render with Agg, then blit the full buffer into the Tk image
        FigureCanvasAgg.draw(self)
        tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
        self._master.update_idletasks()
    def blit(self, bbox=None):
        # blit only the region described by *bbox* (whole buffer when None)
        tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
        self._master.update_idletasks()
    # for this backend, showing the canvas is the same as drawing it
    show = draw
    def draw_idle(self):
        'update drawing area only if idle'
        # schedule at most one idle-time redraw; _idle is the gate flag
        d = self._idle
        self._idle = False
        def idle_draw(*args):
            self.draw()
            self._idle = True
        if d:
            self._idle_callback = self._tkcanvas.after_idle(idle_draw)
    def get_tk_widget(self):
        """returns the Tk widget used to implement FigureCanvasTkAgg.
        Although the initial implementation uses a Tk canvas,  this routine
        is intended to hide that fact.
        """
        return self._tkcanvas
    def motion_notify_event(self, event):
        x = event.x
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y
        FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
    def button_press_event(self, event, dblclick=False):
        x = event.x
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y
        num = getattr(event, 'num', None)
        if sys.platform=='darwin':
            # 2 and 3 were reversed on the OSX platform I
            # tested under tkagg
            if   num==2: num=3
            elif num==3: num=2
        FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
    def button_dblclick_event(self,event):
        # double clicks are just presses with the dblclick flag set
        self.button_press_event(event,dblclick=True)
    def button_release_event(self, event):
        x = event.x
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y
        num = getattr(event, 'num', None)
        if sys.platform=='darwin':
            # 2 and 3 were reversed on the OSX platform I
            # tested under tkagg
            if   num==2: num=3
            elif num==3: num=2
        FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
    def scroll_event(self, event):
        # X11 mouse wheel: button 4 scrolls up, button 5 scrolls down
        x = event.x
        y = self.figure.bbox.height - event.y
        num = getattr(event, 'num', None)
        if   num==4: step = +1
        elif num==5: step = -1
        else:        step =  0
        FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
    def scroll_event_windows(self, event):
        """MouseWheel event processor"""
        # need to find the window that contains the mouse
        w = event.widget.winfo_containing(event.x_root, event.y_root)
        if w == self._tkcanvas:
            x = event.x_root - w.winfo_rootx()
            y = event.y_root - w.winfo_rooty()
            y = self.figure.bbox.height - y
            # Windows reports wheel motion in multiples of 120 per notch
            step = event.delta/120.
            FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
    def _get_key(self, event):
        # Translate a Tk key event into a matplotlib key string, or None
        # for keys we don't map.
        val = event.keysym_num
        if val in self.keyvald:
            key = self.keyvald[val]
        elif val == 0 and sys.platform == 'darwin' and \
                                        event.keycode in self._keycode_lookup:
            key = self._keycode_lookup[event.keycode]
        elif val < 256:
            key = chr(val)
        else:
            key = None
        # add modifier keys to the key string. Bit details originate from
        # http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
        # BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
        # BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
        # BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
        # In general, the modifier key is excluded from the modifier flag,
        # however this is not the case on "darwin", so double check that
        # we aren't adding repeat modifier flags to a modifier key.
        modifiers = [(6, 'super', 'super'),
                     (3, 'alt', 'alt'),
                     (2, 'ctrl', 'control'),
                     ]
        if sys.platform == 'darwin':
            modifiers = [(3, 'super', 'super'),
                         (4, 'alt', 'alt'),
                         (2, 'ctrl', 'control'),
                         ]
        if key is not None:
            # note, shift is not added to the keys as this is already accounted for
            for bitmask, prefix, key_name in modifiers:
                if event.state & (1 << bitmask) and key_name not in key:
                    key = '{0}+{1}'.format(prefix, key)
        return key
    def key_press(self, event):
        key = self._get_key(event)
        FigureCanvasBase.key_press_event(self, key, guiEvent=event)
    def key_release(self, event):
        key = self._get_key(event)
        FigureCanvasBase.key_release_event(self, key, guiEvent=event)
    def new_timer(self, *args, **kwargs):
        """
        Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
        This is useful for getting periodic events through the backend's native
        event loop. Implemented only for backends with GUIs.
        optional arguments:
        *interval*
          Timer interval in milliseconds
        *callbacks*
          Sequence of (func, args, kwargs) where func(*args, **kwargs) will
          be executed by the timer every *interval*.
        """
        return TimerTk(self._tkcanvas, *args, **kwargs)
    def flush_events(self):
        # process all pending Tk events
        self._master.update()
    def start_event_loop(self,timeout):
        FigureCanvasBase.start_event_loop_default(self,timeout)
    start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
    def stop_event_loop(self):
        FigureCanvasBase.stop_event_loop_default(self)
    stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
    """
    Public attributes

    canvas      : The FigureCanvas instance
    num         : The Figure number
    toolbar     : The tk.Toolbar
    window      : The tk.Window
    """
    def __init__(self, canvas, num, window):
        FigureManagerBase.__init__(self, canvas, num)
        self.window = window
        # keep the window hidden until show() is called
        self.window.withdraw()
        self.set_window_title("Figure %d" % num)
        self.canvas = canvas
        self._num = num
        _, _, w, h = canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        self.window.minsize(int(w*3/4), int(h*3/4))
        # choose the toolbar implementation from the rcParams setting
        if matplotlib.rcParams['toolbar'] == 'classic':
            self.toolbar = NavigationToolbar(canvas, self.window)
        elif matplotlib.rcParams['toolbar'] == 'toolbar2':
            self.toolbar = NavigationToolbar2TkAgg(canvas, self.window)
        else:
            self.toolbar = None
        if self.toolbar is not None:
            self.toolbar.update()
        self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        self._shown = False

        def notify_axes_change(fig):
            'this will be called whenever the current axes is changed'
            # fixed: identity comparison with None (was ``!= None``)
            if self.toolbar is not None:
                self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)

    def resize(self, width, height=None):
        # before 09-12-22, the resize method takes a single *event*
        # parameter. On the other hand, the resize method of other
        # FigureManager class takes *width* and *height* parameter,
        # which is used to change the size of the window. For the
        # Figure.set_size_inches with forward=True work with Tk
        # backend, I changed the function signature but tried to keep
        # it backward compatible. -JJL

        # when a single parameter is given, consider it as a event
        if height is None:
            width = width.width
        else:
            self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))

        # fixed: guard against self.toolbar being None (rcParams['toolbar']
        # set to neither 'classic' nor 'toolbar2'), which previously raised
        # AttributeError here.
        if self.toolbar is not None:
            self.toolbar.configure(width=width)

    def show(self):
        """
        this function doesn't segfault but causes the
        PyEval_RestoreThread: NULL state bug on win32
        """
        _focus = windowing.FocusManager()
        if not self._shown:
            def destroy(*args):
                self.window = None
                Gcf.destroy(self._num)
            self.canvas._tkcanvas.bind("<Destroy>", destroy)
            self.window.deiconify()
            # anim.py requires this
            self.window.update()
        else:
            self.canvas.draw_idle()
        self._shown = True

    def destroy(self, *args):
        """Tear down the window, cancelling any pending idle redraw."""
        if self.window is not None:
            #self.toolbar.destroy()
            if self.canvas._idle_callback:
                self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
            self.window.destroy()
        # quit the mainloop when the last figure window goes away
        if Gcf.get_num_fig_managers() == 0:
            if self.window is not None:
                self.window.quit()
        self.window = None

    def get_window_title(self):
        """Return the title text of the figure window."""
        return self.window.wm_title()

    def set_window_title(self, title):
        """Set the title text of the figure window."""
        self.window.wm_title(title)

    def full_screen_toggle(self):
        """Toggle the window between fullscreen and normal state."""
        is_fullscreen = bool(self.window.attributes('-fullscreen'))
        self.window.attributes('-fullscreen', not is_fullscreen)
class AxisMenu:
    """Tk "Axes" menubutton with one checkbutton per axes; used by the
    classic toolbar to select which axes pan/zoom operate on."""
    def __init__(self, master, naxes):
        # master: the owning toolbar; must provide set_active(indices)
        self._master = master
        self._naxes = naxes
        self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
        self._mbar.pack(side=Tk.LEFT)
        self._mbutton = Tk.Menubutton(
            master=self._mbar, text="Axes", underline=0)
        self._mbutton.pack(side=Tk.LEFT, padx="2m")
        self._mbutton.menu = Tk.Menu(self._mbutton)
        self._mbutton.menu.add_command(
            label="Select All", command=self.select_all)
        self._mbutton.menu.add_command(
            label="Invert All", command=self.invert_all)
        # one IntVar per axes (1 = selected), and the matching menu entries
        self._axis_var = []
        self._checkbutton = []
        for i in range(naxes):
            self._axis_var.append(Tk.IntVar())
            self._axis_var[i].set(1)
            self._checkbutton.append(self._mbutton.menu.add_checkbutton(
                label = "Axis %d" % (i+1),
                variable=self._axis_var[i],
                command=self.set_active))
        # start with every axes selected
        self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
        self._mbutton['menu'] = self._mbutton.menu
        self._mbar.tk_menuBar(self._mbutton)
        self.set_active()
    def adjust(self, naxes):
        # grow or shrink the checkbutton list to match the new axes count
        if self._naxes < naxes:
            for i in range(self._naxes, naxes):
                self._axis_var.append(Tk.IntVar())
                self._axis_var[i].set(1)
                self._checkbutton.append( self._mbutton.menu.add_checkbutton(
                    label = "Axis %d" % (i+1),
                    variable=self._axis_var[i],
                    command=self.set_active))
        elif self._naxes > naxes:
            # delete from the end so the stored menu entries stay valid
            for i in range(self._naxes-1, naxes-1, -1):
                del self._axis_var[i]
                self._mbutton.menu.forget(self._checkbutton[i])
                del self._checkbutton[i]
        self._naxes = naxes
        self.set_active()
    def get_indices(self):
        # indices of the currently checked axes
        a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
        return a
    def set_active(self):
        # push the current selection to the owning toolbar
        self._master.set_active(self.get_indices())
    def invert_all(self):
        for a in self._axis_var:
            a.set(not a.get())
        self.set_active()
    def select_all(self):
        for a in self._axis_var:
            a.set(1)
        self.set_active()
class NavigationToolbar(Tk.Frame):
    """
    Public attributes

    canvas   - the FigureCanvas  (gtk.DrawingArea)
    win   - the gtk.Window
    """
    def _Button(self, text, file, command):
        # helper: build an image button from mpl's data/images directory
        file = os.path.join(rcParams['datapath'], 'images', file)
        im = Tk.PhotoImage(master=self, file=file)
        b = Tk.Button(
            master=self, text=text, padx=2, pady=2, image=im, command=command)
        # keep a reference so the PhotoImage isn't garbage collected
        b._ntimage = im
        b.pack(side=Tk.LEFT)
        return b
    def __init__(self, canvas, window):
        self.canvas = canvas
        self.window = window
        xmin, xmax = canvas.figure.bbox.intervalx
        height, width = 50, xmax-xmin
        Tk.Frame.__init__(self, master=self.window,
                          width=int(width), height=int(height),
                          borderwidth=2)
        self.update()  # Make axes menu
        # pan/zoom/save buttons; each command is bound via a default-arg
        # lambda so the direction is captured at definition time
        self.bLeft = self._Button(
            text="Left", file="stock_left",
            command=lambda x=-1: self.panx(x))
        self.bRight = self._Button(
            text="Right", file="stock_right",
            command=lambda x=1: self.panx(x))
        self.bZoomInX = self._Button(
            text="ZoomInX",file="stock_zoom-in",
            command=lambda x=1: self.zoomx(x))
        self.bZoomOutX = self._Button(
            text="ZoomOutX", file="stock_zoom-out",
            command=lambda x=-1: self.zoomx(x))
        self.bUp = self._Button(
            text="Up", file="stock_up",
            command=lambda y=1: self.pany(y))
        self.bDown = self._Button(
            text="Down", file="stock_down",
            command=lambda y=-1: self.pany(y))
        self.bZoomInY = self._Button(
            text="ZoomInY", file="stock_zoom-in",
            command=lambda y=1: self.zoomy(y))
        self.bZoomOutY = self._Button(
            text="ZoomOutY",file="stock_zoom-out",
            command=lambda y=-1: self.zoomy(y))
        self.bSave = self._Button(
            text="Save", file="stock_save_as",
            command=self.save_figure)
        self.pack(side=Tk.BOTTOM, fill=Tk.X)
    def set_active(self, ind):
        # remember which axes the pan/zoom buttons act on
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]
    def panx(self, direction):
        # pan all active axes horizontally; direction is -1 or +1
        for a in self._active:
            a.xaxis.pan(direction)
        self.canvas.draw()
    def pany(self, direction):
        for a in self._active:
            a.yaxis.pan(direction)
        self.canvas.draw()
    def zoomx(self, direction):
        # zoom all active axes horizontally; direction is -1 or +1
        for a in self._active:
            a.xaxis.zoom(direction)
        self.canvas.draw()
    def zoomy(self, direction):
        for a in self._active:
            a.yaxis.zoom(direction)
        self.canvas.draw()
    def save_figure(self, *args):
        # legacy FileDialog-based save dialog, remembering the last dir
        fs = FileDialog.SaveFileDialog(master=self.window,
                                       title='Save the figure')
        try:
            self.lastDir
        except AttributeError:
            self.lastDir = os.curdir
        fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
        if fname is None: # Cancel
            return
        self.lastDir = os.path.dirname(fname)
        try:
            self.canvas.print_figure(fname)
        except IOError as msg:
            # NOTE(review): joining over the exception instance relies on
            # Python 2 EnvironmentError iteration semantics -- verify
            err = '\n'.join(map(str, msg))
            msg = 'Failed to save %s: Error msg was\n\n%s' % (
                fname, err)
            error_msg_tkpaint(msg)
    def update(self):
        # refresh the cached axes list and the axes-selection menu
        _focus = windowing.FocusManager()
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        if not hasattr(self, "omenu"):
            self.set_active(range(naxes))
            self.omenu = AxisMenu(master=self, naxes=naxes)
        else:
            self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
    """
    Public attributes

    canvas   - the FigureCanvas  (gtk.DrawingArea)
    win   - the gtk.Window
    """
    def __init__(self, canvas, window):
        self.canvas = canvas
        self.window = window
        self._idle = True
        #Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
        # NavigationToolbar2.__init__ calls self._init_toolbar(), which
        # performs the Tk.Frame initialization.
        NavigationToolbar2.__init__(self, canvas)

    def destroy(self, *args):
        """Destroy the toolbar frame and drop the message StringVar."""
        del self.message
        Tk.Frame.destroy(self, *args)

    def set_message(self, s):
        """Display *s* in the toolbar's message area."""
        self.message.set(s)

    def draw_rubberband(self, event, x0, y0, x1, y1):
        """Draw (replacing any previous) zoom rectangle from (x0,y0) to (x1,y1)."""
        height = self.canvas.figure.bbox.height
        # the Tk canvas y axis is inverted relative to figure coordinates
        y0 = height-y0
        y1 = height-y1
        try: self.lastrect
        except AttributeError: pass
        else: self.canvas._tkcanvas.delete(self.lastrect)
        self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
        #self.canvas.draw()

    def release(self, event):
        """Remove the rubberband rectangle, if one exists."""
        try: self.lastrect
        except AttributeError: pass
        else:
            self.canvas._tkcanvas.delete(self.lastrect)
            del self.lastrect

    def set_cursor(self, cursor):
        """Set the window cursor from a matplotlib cursor constant."""
        self.window.configure(cursor=cursord[cursor])

    def _Button(self, text, file, command, extension='.ppm'):
        # helper: build a toolbar button with icon *file* from mpl's image dir
        img_file = os.path.join(rcParams['datapath'], 'images', file + extension)
        im = Tk.PhotoImage(master=self, file=img_file)
        b = Tk.Button(
            master=self, text=text, padx=2, pady=2, image=im, command=command)
        # keep a reference so the PhotoImage isn't garbage collected
        b._ntimage = im
        b.pack(side=Tk.LEFT)
        return b

    def _init_toolbar(self):
        """Build the toolbar widgets (invoked by NavigationToolbar2.__init__)."""
        xmin, xmax = self.canvas.figure.bbox.intervalx
        height, width = 50, xmax-xmin
        Tk.Frame.__init__(self, master=self.window,
                          width=int(width), height=int(height),
                          borderwidth=2)

        self.update()  # Make axes menu

        for text, tooltip_text, image_file, callback in self.toolitems:
            if text is None:
                # spacer, unhandled in Tk
                pass
            else:
                button = self._Button(text=text, file=image_file,
                                      command=getattr(self, callback))
                if tooltip_text is not None:
                    ToolTip.createToolTip(button, tooltip_text)

        self.message = Tk.StringVar(master=self)
        self._message_label = Tk.Label(master=self, textvariable=self.message)
        self._message_label.pack(side=Tk.RIGHT)
        self.pack(side=Tk.BOTTOM, fill=Tk.X)

    def configure_subplots(self):
        """Open a separate Tk window hosting the subplot-parameter tool."""
        toolfig = Figure(figsize=(6,3))
        window = Tk.Tk()
        canvas = FigureCanvasTkAgg(toolfig, master=window)
        toolfig.subplots_adjust(top=0.9)
        tool = SubplotTool(self.canvas.figure, toolfig)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

    def save_figure(self, *args):
        """Prompt for a filename with a Tk save dialog and save the figure."""
        from tkFileDialog import asksaveasfilename
        from tkMessageBox import showerror
        filetypes = self.canvas.get_supported_filetypes().copy()
        default_filetype = self.canvas.get_default_filetype()

        # Tk doesn't provide a way to choose a default filetype,
        # so we just have to put it first
        default_filetype_name = filetypes[default_filetype]
        del filetypes[default_filetype]

        # fixed: use sorted() rather than calling list.sort() on the result
        # of dict.items() -- equivalent behaviour, and also valid on
        # Python 3 where items() returns a view with no sort() method.
        sorted_filetypes = sorted(filetypes.items())
        sorted_filetypes.insert(0, (default_filetype, default_filetype_name))

        tk_filetypes = [
            (name, '*.%s' % ext) for (ext, name) in sorted_filetypes]

        # adding a default extension seems to break the
        # asksaveasfilename dialog when you choose various save types
        # from the dropdown.  Passing in the empty string seems to
        # work - JDH
        #defaultextension = self.canvas.get_default_filetype()
        defaultextension = ''
        fname = asksaveasfilename(
            master=self.window,
            title='Save the figure',
            filetypes = tk_filetypes,
            defaultextension = defaultextension,
            initialfile=self.canvas.get_default_filename(),
            )

        if fname == "" or fname == ():
            # dialog cancelled
            return
        else:
            try:
                # This method will handle the delegation to the correct type
                self.canvas.print_figure(fname)
            except Exception as e:
                showerror("Error saving file", str(e))

    def set_active(self, ind):
        # remember which axes subsequent operations act on
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]

    def update(self):
        """Refresh the toolbar's cached axes list after figure changes."""
        _focus = windowing.FocusManager()
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        #if not hasattr(self, "omenu"):
        #    self.set_active(range(naxes))
        #    self.omenu = AxisMenu(master=self, naxes=naxes)
        #else:
        #    self.omenu.adjust(naxes)
        NavigationToolbar2.update(self)

    def dynamic_update(self):
        'update drawing area only if idle'
        # legacy method; new method is canvas.draw_idle
        self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
class ToolTip(object):
    """
    Tooltip recipe from
    http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
    """
    @staticmethod
    def createToolTip(widget, text):
        """Attach a tooltip showing *text* to *widget* on pointer enter/leave."""
        tip = ToolTip(widget)
        widget.bind('<Enter>', lambda event: tip.showtip(text))
        widget.bind('<Leave>', lambda event: tip.hidetip())

    def __init__(self, widget):
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0

    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        if self.tipwindow or not self.text:
            return
        # position the tip slightly right of, and level with, the widget
        bx, by, _, _ = self.widget.bbox("insert")
        x = bx + self.widget.winfo_rootx() + 27
        y = by + self.widget.winfo_rooty()
        self.tipwindow = win = Tk.Toplevel(self.widget)
        win.wm_overrideredirect(1)
        win.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS
            win.tk.call("::tk::unsupported::MacWindowStyle",
                        "style", win._w,
                        "help", "noActivates")
        except Tk.TclError:
            pass
        label = Tk.Label(win, text=self.text, justify=Tk.LEFT,
                         background="#ffffe0", relief=Tk.SOLID, borderwidth=1,
                         )
        label.pack(ipadx=1)

    def hidetip(self):
        """Destroy the tooltip window, if one is currently showing."""
        win = self.tipwindow
        self.tipwindow = None
        if win:
            win.destroy()
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import steps
# Recipe-side configuration for the chromium.linux waterfall.  'settings'
# applies master-wide; each key under 'builders' describes one bot
# (compile-only 'builder', test-only 'tester', or 'builder_tester').
SPEC = {
  'settings': {
    'build_gs_bucket': 'chromium-linux-archive',
    # WARNING: src-side runtest.py is only tested with chromium CQ builders.
    # Usage not covered by chromium CQ is not supported and can break
    # without notice.
    'src_side_runtest_py': True,
  },
  'builders': {
    'Linux Builder': {
      'chromium_config': 'chromium',
      'chromium_apply_config': [
        'mb',
        'ninja_confirm_noop',
        'archive_gpu_tests',
        'chrome_with_codecs'
      ],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_BITS': 64,
      },
      'bot_type': 'builder',
      'compile_targets': [
        'chromium_swarm_tests',
      ],
      'testing': {
        'platform': 'linux',
      },
      'use_isolate': True,
      'enable_swarming': True,
    },
    'Linux Tests': {
      'chromium_config': 'chromium',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_BITS': 64,
      },
      'bot_type': 'tester',
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'parent_buildername': 'Linux Builder',
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
    },
    'Linux Builder (dbg)(32)': {
      'chromium_config': 'chromium',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
      },
      'bot_type': 'builder',
      'compile_targets': [
        'google_apis_unittests',
        'sync_integration_tests',
      ],
      'testing': {
        'platform': 'linux',
      },
      'use_isolate': True,
      'enable_swarming': True,
      # Temporary hack because the binaries are too large to be isolated.
      'GYP_DEFINES': {
        'fastbuild': 2,
      },
    },
    'Linux Tests (dbg)(1)(32)': {
      'chromium_config': 'chromium',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
      },
      'bot_type': 'tester',
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'parent_buildername': 'Linux Builder (dbg)(32)',
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
    },
    'Linux Builder (dbg)': {
      'chromium_config': 'chromium',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 64,
      },
      'bot_type': 'builder',
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
      'use_isolate': True,
    },
    'Linux Tests (dbg)(1)': {
      'chromium_config': 'chromium',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 64,
      },
      'bot_type': 'tester',
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'parent_buildername': 'Linux Builder (dbg)',
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
    },
    'Android GN': {
      'chromium_config': 'android',
      'chromium_apply_config': ['mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_PLATFORM': 'android',
        'TARGET_ARCH': 'arm',
      },
      'android_config': 'main_builder',
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'tests': [
        steps.AndroidInstrumentationTest('AndroidWebViewTest'),
        steps.AndroidInstrumentationTest('ChromePublicTest'),
        steps.AndroidInstrumentationTest('ChromeSyncShellTest'),
        steps.AndroidInstrumentationTest('ContentShellTest'),
        steps.AndroidInstrumentationTest('MojoTest'),
        steps.GTestTest(
            'breakpad_unittests',
            override_compile_targets=['breakpad_unittests_deps'],
            android_isolate_path='breakpad/breakpad_unittests.isolate'),
        steps.GTestTest(
            'sandbox_linux_unittests',
            override_compile_targets=['sandbox_linux_unittests_deps']),
        steps.AndroidJunitTest('base_junit_tests'),
        steps.AndroidJunitTest('chrome_junit_tests'),
        steps.AndroidJunitTest('components_invalidation_impl_junit_tests'),
        steps.AndroidJunitTest('components_policy_junit_tests'),
        steps.AndroidJunitTest('content_junit_tests'),
        steps.AndroidJunitTest('junit_unit_tests'),
        steps.AndroidJunitTest('net_junit_tests'),
        steps.AndroidJunitTest('ui_junit_tests'),
      ],
      'testing': {
        'platform': 'linux',
      },
    },
    'Android Arm64 Builder (dbg)': {
      'chromium_config': 'android',
      'chromium_apply_config': ['chrome_with_codecs', 'mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 64,
        'TARGET_PLATFORM': 'android',
      },
      'android_config': 'arm64_builder',
      'compile_targets': [
        'android_builder_tests'
      ],
      'testing': {
        'platform': 'linux',
      },
    },
    'Android Builder (dbg)': {
      'chromium_config': 'android',
      'chromium_apply_config': ['chrome_with_codecs', 'mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'android_config': 'main_builder',
      'bot_type': 'builder',
      'compile_targets': [
        'cronet_test_instrumentation_apk',
        'system_webview_apk',
      ],
      'testing': {
        'platform': 'linux',
      },
      'use_isolate': True,
      'enable_swarming': True,
    },
    'Android GN (dbg)': {
      'chromium_config': 'android',
      'chromium_apply_config': ['mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_PLATFORM': 'android',
        'TARGET_ARCH': 'arm',
      },
      'android_config': 'main_builder',
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'testing': {
        'platform': 'linux',
      },
    },
    'Android Tests (dbg)': {
      'chromium_config': 'android',
      'chromium_apply_config': ['chrome_with_codecs', 'mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'bot_type': 'tester',
      'parent_buildername': 'Android Builder (dbg)',
      'android_config': 'main_builder',
      'root_devices': True,
      'tests': [
        steps.AndroidJunitTest('base_junit_tests'),
        steps.AndroidJunitTest('chrome_junit_tests'),
        steps.AndroidJunitTest('components_junit_tests'),
        steps.AndroidJunitTest('content_junit_tests'),
        steps.AndroidJunitTest('junit_unit_tests'),
        steps.AndroidJunitTest('net_junit_tests'),
        steps.AndroidJunitTest('ui_junit_tests'),
      ],
      'test_generators': [
        steps.generate_gtest,
        steps.generate_instrumentation_test,
        steps.generate_isolated_script,
        steps.generate_script,
      ],
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
    },
    'Android Builder': {
      'chromium_config': 'android',
      'chromium_apply_config': ['chrome_with_codecs', 'mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'android_config': 'main_builder',
      'bot_type': 'builder',
      'compile_targets': [
        'system_webview_apk',
      ],
      'testing': {
        'platform': 'linux',
      },
      'use_isolate': True,
      'enable_swarming': True,
    },
    'Android Tests': {
      'chromium_config': 'android',
      'chromium_apply_config': ['mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'bot_type': 'tester',
      'parent_buildername': 'Android Builder',
      'android_config': 'main_builder',
      'root_devices': True,
      'tests': [
        steps.AndroidJunitTest('base_junit_tests'),
        steps.AndroidJunitTest('chrome_junit_tests'),
        steps.AndroidJunitTest('components_junit_tests'),
        steps.AndroidJunitTest('content_junit_tests'),
        steps.AndroidJunitTest('junit_unit_tests'),
        steps.AndroidJunitTest('net_junit_tests'),
        steps.AndroidJunitTest('ui_junit_tests'),
      ],
      'test_generators': [
        steps.generate_gtest,
        steps.generate_instrumentation_test,
        steps.generate_isolated_script,
        steps.generate_script,
      ],
      'testing': {
        'platform': 'linux',
      },
      'enable_swarming': True,
    },
    'Android Clang Builder (dbg)': {
      'chromium_config': 'android_clang',
      'chromium_apply_config': ['chrome_with_codecs', 'errorprone', 'mb'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'android_config': 'clang_builder',
      'bot_type': 'builder_tester',
      'compile_targets': [
        'android_builder_tests',
        'system_webview_apk',
      ],
      'testing': {
        'platform': 'linux',
      },
    },
    'Cast Linux': {
      'chromium_config': 'cast_linux',
      'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
      'gclient_config': 'chromium',
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Release',
        'TARGET_BITS': 64,
      },
      'compile_targets': [
        'cast_shell',
      ],
      'test_generators': [
        steps.generate_gtest,
        steps.generate_script,
        steps.generate_isolated_script,
      ],
      'testing': {
        'platform': 'linux',
      },
    },
    'Cast Android (dbg)': {
      'chromium_config': 'android',
      'chromium_apply_config': ['chrome_with_codecs'],
      'gclient_config': 'chromium',
      'gclient_apply_config': ['android'],
      'compile_targets': [
        'cast_shell_apk',
      ],
      'chromium_config_kwargs': {
        'BUILD_CONFIG': 'Debug',
        'TARGET_BITS': 32,
        'TARGET_PLATFORM': 'android',
      },
      'android_config': 'cast_builder',
      'testing': {
        'platform': 'linux',
      },
    },
  },
}
| |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
vizdtestdir = sys.path[0]
import urllib2
import xmltodict
import json
import requests
import socket
from lxml import etree
from opserver.introspect_util import *
from opserver_results import *
from opserver.opserver_util import OpServerUtils
class VerificationOpsSrv (IntrospectUtilBase):
    """REST client helper for verifying a Contrail OpServer (analytics API).

    Builds on ``IntrospectUtilBase`` (which supplies ``dict_get`` plus the
    ``_ip``/``_port`` attributes) with convenience wrappers for the
    endpoints under ``analytics/``.

    NOTE(review): this module uses Python 2 ``print`` statements throughout.
    """
    def __init__(self, ip, port=8081):
        # 8081 is the default OpServer REST port.
        super(VerificationOpsSrv, self).__init__(ip, port)
    def get_ops_vm(self, vm='default-virtual-machine'):
        """Return the virtual-machine UVE for *vm*, wrapped in OpVMResult."""
        vm_dict = self.dict_get('analytics/uves/virtual-machine/' + vm)
        return OpVMResult(vm_dict)
    def get_ops_vn(self, vn='default-virtual-network'):
        """Return the virtual-network UVE for *vn*, or None on any error.

        Errors are printed (best-effort semantics), not raised.
        """
        res = None
        try:
            vn_dict = self.dict_get('analytics/uves/virtual-network/' + vn)
            res = OpVNResult(vn_dict)
        except Exception as e:
            print e
        finally:
            return res
    def get_ops_collector(self, col=None):
        """Return the analytics-node UVE for *col* (defaults to this host).

        Returns None and prints the error on failure.
        """
        if (col is None):
            col = socket.gethostname()
        res = None
        try:
            #import pdb; pdb.set_trace()
            col_dict = self.dict_get('analytics/uves/analytics-node/' + col)
            res = OpCollectorResult(col_dict)
        except Exception as e:
            print e
        finally:
            return res
    def send_tracebuffer_req(self, src, mod, instance, buf_name):
        """Ask the collector to flush trace buffer *buf_name* of a generator."""
        return self.dict_get('analytics/send-tracebuffer/%s/%s/%s/%s' \
            % (src, mod, instance, buf_name))
    def get_table_column_values(self, table, col_name):
        """List the distinct values of *col_name* in query table *table*."""
        return self.dict_get('analytics/table/%s/column-values/%s' \
            % (table, col_name))
    def uve_query(self, query):
        """Raw UVE GET; *query* is the path suffix after ``analytics/uves/``."""
        return self.dict_get('analytics/uves/%s' % query)
    def post_uve_request(self, table, json_body):
        """POST a UVE query for *table*; return parsed JSON, or None on error."""
        url = 'http://%s:%s/analytics/uves/%s' % (self._ip, str(self._port), table)
        try:
            res = OpServerUtils.post_url_http(url, json_body, sync=True)
            res = json.loads(res)
        except Exception as e:
            print 'Error: POST uve request: %s' % str(e)
            return None
        else:
            return res
    # end post_uve_request
    def get_alarms(self, query):
        """GET alarms; *query* is the path suffix after ``analytics/alarms/``."""
        return self.dict_get('analytics/alarms/%s' % query)
    # end get_alarms
    def post_alarm_request(self, table, json_body):
        """POST an alarm query for *table*; return parsed JSON, or None on error."""
        url = 'http://%s:%s/analytics/alarms/%s' % (self._ip, str(self._port), table)
        try:
            res = OpServerUtils.post_url_http(url, json_body, sync=True)
            res = json.loads(res)
        except Exception as e:
            print 'Error: POST alarm request: %s' % str(e)
            return None
        else:
            return res
    # end post_alarm_request
    def get_redis_uve_info(self):
        """Fetch redis UVE state via the Sandesh introspect XML interface."""
        path = 'Snh_RedisUVERequest'
        xpath = '/RedisUVEResponse/redis_uve_info'
        # XmlDrv makes dict_get return an etree instead of parsed JSON.
        p = self.dict_get(path, XmlDrv)
        return EtreeToDict(xpath).get_all_entry(p)
    def post_query_json(self, json_str, sync=True):
        '''
        this module is to support raw query given in json format

        With sync=True the parsed 'value' list is returned directly; with
        sync=False the query id is extracted from the response href and the
        result is polled (30s timeout). Returns None on error.
        '''
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(self._ip, str(self._port))
            print flows_url
            print "query is: ", json_str
            res = []
            resp = OpServerUtils.post_url_http(flows_url, json_str, sync)
            if sync:
                if resp is not None:
                    res = json.loads(resp)
                    res = res['value']
            else:
                if resp is not None:
                    resp = json.loads(resp)
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(self._ip, str(self._port), qid, 30)
                    for item in result:
                        res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
    def post_purge_query_json(self, json_str, sync=True):
        '''
        this module is to support raw purge query given in json format

        Returns the 'status' field of the response, or None on error.
        '''
        res = None
        try:
            purge_request_url = \
                OpServerUtils.opserver_database_purge_query_url(
                    self._ip, str(self._port))
            print purge_request_url
            print "query is: ", json_str
            resp = OpServerUtils.post_url_http(
                purge_request_url, json_str, sync)
            if resp is not None:
                res = json.loads(resp)
                res = res['status']
        except Exception as e:
            print str(e)
        finally:
            return res
    def post_query(self, table, start_time=None, end_time=None,
                   select_fields=None, where_clause=None,
                   sort_fields=None, sort=None, limit=None,
                   filter=None, sync=True, dir=None):
        """Build a query dict from the arguments and POST it.

        Same sync/async result handling as ``post_query_json``; returns the
        result list, or None on error.
        """
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time,
                select_fields,
                where_clause,
                sort_fields, sort, limit, filter, dir)
            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(
                flows_url, json.dumps(query_dict), sync)
            if sync:
                if resp is not None:
                    res = json.loads(resp)
                    res = res['value']
            else:
                if resp is not None:
                    resp = json.loads(resp)
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(
                        self._ip, str(self._port), qid, 30)
                    for item in result:
                        res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
if __name__ == '__main__':
    # Ad-hoc smoke test against a local OpServer. Each call is followed by an
    # inert triple-quoted literal showing the expected shape of its output.
    vns = VerificationOpsSrv('127.0.0.1')
    vn = vns.get_ops_vn(vn='abc-corp:vn02')
    print "*** Verify VN Cfg ***"
    print vn.get_attr('Config', 'attached_policies', 'abc-default-policy')
    '''
    [{u'vnp_major': u'10', u'vnp_name': u'abc-default-policy',
      u'vnp_minor': u'50'}]
    '''
    print vn.get_attr('Config', 'connected_networks')
    '''
    [u'abc-corp:vn04']
    '''
    print vn.get_attr('Config', 'total_interfaces')
    '''
    10
    '''
    print vn.get_attr('Config', 'total_acl_rules')
    '''
    60
    '''
    print "*** Verify VN Agt ***"
    print vn.get_attr('Agent', 'total_acl_rules')
    '''
    55
    '''
    print vn.get_attr('Agent', 'in_tpkts')
    '''
    240
    '''
    print vn.get_attr('Agent', 'in_stats', 'abc-corp:map-reduce-02')
    '''
    [{u'bytes': u'7200', u'other_vn': u'abc-corp:map-reduce-02',
      u'tpkts': u'60'}]
    '''
    vm = vns.get_ops_vm(vm='abc-corp:vm-web-fe01')
    print "*** Verify VM Cfg ***"
    print vm.get_attr('Config', 'vrouter')
    '''
    rack01-host04
    '''
    print vm.get_attr('Config', 'attached_groups')
    '''
    [u'abc-grp01']
    '''
    print vm.get_attr('Config', 'interface_list', 'abc-corp:vn-fe')
    '''
    [{u'virtual_network': u'abc-corp:vn-fe', u'ip_address': u'10.1.1.2',
      u'floating_ips': [u'67.1.1.2', u'67.1.1.3']}]
    '''
    print "*** Verify VM Agt ***"
    print vm.get_attr('Agent', 'vrouter')
    '''
    rack01-host04
    '''
    print vm.get_attr('Agent', 'attached_groups')
    '''
    [u'abc-grp01']
    '''
    print vm.get_attr('Agent', 'interface_list')
    '''
    [{u'in_bytes': u'1000', u'out_bytes': u'10000',
      u'floating_ips': [u'67.1.1.2', u'67.1.1.3'],
      u'out_pkts': u'20', u'virtual_network': u'abc-corp:vn-fe',
      u'in_pkts': u'5', u'ip_address': u'10.1.1.2'}]
    '''
    col = vns.get_ops_collector()
    print col.get_attr('Analytics', 'generator_infos')
    '''
    [{u'gen_attr': {u'http_port': u'8089', u'in_clear': u'false',
                    u'pid': u'57160', u'connects': u'1', u'clears': u'1',
                    u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net',
      u'msgtype_stats': {u'SandeshStats':
                         [{u'bytes': u'1363005',
                           u'messages': u'431',
                           u'message_type': u'CollectorInfo'}]},
      u'module_id': u'Collector'},
     {u'gen_attr': {u'http_port': u'0', u'in_clear': u'false',
                    u'pid': u'0', u'connects': u'1', u'clears': u'0',
                    u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net', u'msgtype_stats': {},
      u'module_id': u'OpServer'},
     {u'gen_attr': {u'http_port': u'8091', u'in_clear': u'false',
                    u'pid': u'57200', u'connects': u'2', u'clears': u'2',
                    u'resets': u'1'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net',
      u'msgtype_stats': {u'SandeshStats': [{u'bytes': u'16771',
                                            u'messages': u'66',
                                            u'message_type': u'QELog'},
                                           {u'bytes': u'12912',
                                            u'messages': u'32',
                                            u'message_type': u'QEQueryLog'}]},
      u'module_id': u'QueryEngine'}]
    '''
    # Filtered lookup: restrict generator_infos to matching attribute pairs.
    print col.get_attr('Analytics', 'generator_infos',
                       [('module_id', 'OpServer'),
                        ('source', "sa-nc-mfg-30.static.jnpr.net")])
    '''
    [{u'gen_attr': {u'http_port': u'0', u'in_clear': u'false', u'pid': u'0',
                    u'connects': u'1', u'clears': u'0', u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net', u'msgtype_stats': {},
      u'module_id': u'OpServer'}]
    '''
    print col.get_attr('Analytics', 'cpu_info')
    '''
    {u'num_cpu': u'4', u'cpu_share': u'0.00833056',
     u'meminfo': {u'virt': u'2559582208', u'peakvirt': u'2559582208',
                  u'res': u'2805760'}}
    '''
| |
import braintree
import warnings
from braintree.resource import Resource
from braintree.address import Address
from braintree.configuration import Configuration
from braintree.transparent_redirect import TransparentRedirect
from braintree.credit_card_verification import CreditCardVerification
class CreditCard(Resource):
    """
    A class representing Braintree CreditCard objects.

    An example of creating a credit card with all available fields::

        result = braintree.CreditCard.create({
            "cardholder_name": "John Doe",
            "cvv": "123",
            "expiration_date": "12/2012",
            "number": "4111111111111111",
            "token": "my_token",
            "billing_address": {
                "first_name": "John",
                "last_name": "Doe",
                "company": "Braintree",
                "street_address": "111 First Street",
                "extended_address": "Unit 1",
                "locality": "Chicago",
                "postal_code": "60606",
                "region": "IL",
                "country_name": "United States of America"
            },
            "options": {
                "verify_card": True
            }
        })

        print(result.credit_card.token)
        print(result.credit_card.masked_number)

    For more information on CreditCards, see https://developers.braintreepayments.com/ios+python/reference/request/credit-card/create
    """

    class CardType(object):
        """
        Constants representing the type of the credit card. Available types are:

        * braintree.CreditCard.CardType.AmEx
        * braintree.CreditCard.CardType.CarteBlanche
        * braintree.CreditCard.CardType.ChinaUnionPay
        * braintree.CreditCard.CardType.DinersClubInternational
        * braintree.CreditCard.CardType.Discover
        * braintree.CreditCard.CardType.JCB
        * braintree.CreditCard.CardType.Laser
        * braintree.CreditCard.CardType.Maestro
        * braintree.CreditCard.CardType.MasterCard
        * braintree.CreditCard.CardType.Solo
        * braintree.CreditCard.CardType.Switch
        * braintree.CreditCard.CardType.Visa
        * braintree.CreditCard.CardType.Unknown
        """
        AmEx = "American Express"
        CarteBlanche = "Carte Blanche"
        ChinaUnionPay = "China UnionPay"
        DinersClubInternational = "Diners Club"
        Discover = "Discover"
        JCB = "JCB"
        Laser = "Laser"
        Maestro = "Maestro"
        MasterCard = "MasterCard"
        Solo = "Solo"
        Switch = "Switch"
        Visa = "Visa"
        Unknown = "Unknown"

    class CustomerLocation(object):
        """
        Constants representing the issuer location of the credit card. Available locations are:

        * braintree.CreditCard.CustomerLocation.International
        * braintree.CreditCard.CustomerLocation.US
        """
        International = "international"
        US = "us"

    class CardTypeIndicator(object):
        """
        Constants representing the three states for the card type indicator attributes

        * braintree.CreditCard.CardTypeIndicator.Yes
        * braintree.CreditCard.CardTypeIndicator.No
        * braintree.CreditCard.CardTypeIndicator.Unknown
        """
        Yes = "Yes"
        No = "No"
        Unknown = "Unknown"

    # Each indicator attribute shares the same Yes/No/Unknown constants.
    Commercial = DurbinRegulated = Debit = Healthcare = \
            CountryOfIssuance = IssuingBank = Payroll = Prepaid = CardTypeIndicator

    @staticmethod
    def confirm_transparent_redirect(query_string):
        """
        Confirms a transparent redirect request. It expects the query string from the
        redirect request. The query string should _not_ include the leading "?" character. ::

            result = braintree.CreditCard.confirm_transparent_redirect_request("foo=bar&id=12345")
        """
        warnings.warn("Please use TransparentRedirect.confirm instead", DeprecationWarning)
        return Configuration.gateway().credit_card.confirm_transparent_redirect(query_string)

    @staticmethod
    def create(params=None):
        """
        Create a CreditCard.

        A number and expiration_date are required. ::

            result = braintree.CreditCard.create({
                "number": "4111111111111111",
                "expiration_date": "12/2012"
            })
        """
        # None default instead of a mutable {} so a single shared dict is
        # never reused (and possibly mutated) across calls.
        if params is None:
            params = {}
        return Configuration.gateway().credit_card.create(params)

    @staticmethod
    def update(credit_card_token, params=None):
        """
        Update an existing CreditCard

        By credit_card_id. The params are similar to create::

            result = braintree.CreditCard.update("my_credit_card_id", {
                "cardholder_name": "John Doe"
            })
        """
        # See create(): avoid the shared-mutable-default-argument pitfall.
        if params is None:
            params = {}
        return Configuration.gateway().credit_card.update(credit_card_token, params)

    @staticmethod
    def delete(credit_card_token):
        """
        Delete a credit card

        Given a credit_card_id::

            result = braintree.CreditCard.delete("my_credit_card_id")
        """
        return Configuration.gateway().credit_card.delete(credit_card_token)

    @staticmethod
    def expired():
        """ Return a collection of expired credit cards. """
        return Configuration.gateway().credit_card.expired()

    @staticmethod
    def expiring_between(start_date, end_date):
        """ Return a collection of credit cards expiring between the given dates. """
        return Configuration.gateway().credit_card.expiring_between(start_date, end_date)

    @staticmethod
    def find(credit_card_token):
        """
        Find a credit card, given a credit_card_id. This does not return
        a result object. This will raise a :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>` if the provided
        credit_card_id is not found. ::

            credit_card = braintree.CreditCard.find("my_credit_card_token")
        """
        return Configuration.gateway().credit_card.find(credit_card_token)

    @staticmethod
    def forward(credit_card_token, receiving_merchant_id):
        """
        Create a nonce for a credit card in your Braintree vault that can be used by another Braintree merchant.

        A credit card token and a receiving merchant ID are required::

            result = braintree.CreditCard.forward(
                credit_card.token,
                "another_merchant_public_id"
            )
        """
        return Configuration.gateway().credit_card.forward(credit_card_token, receiving_merchant_id)

    @staticmethod
    def from_nonce(nonce):
        """
        Convert a payment method nonce into a CreditCard. This does not return
        a result object. This will raise a :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>` if the provided
        credit_card_id is not found. ::

            credit_card = braintree.CreditCard.from_nonce("my_payment_method_nonce")
        """
        return Configuration.gateway().credit_card.from_nonce(nonce)

    @staticmethod
    def create_signature():
        """Return the permitted parameter signature for create requests."""
        return CreditCard.signature("create")

    @staticmethod
    def update_signature():
        """Return the permitted parameter signature for update requests."""
        return CreditCard.signature("update")

    @staticmethod
    def signature(type):
        """
        Return the permitted parameter signature for the given request *type*.

        *type* must be one of "create", "update" or "update_via_customer";
        any other value raises AttributeError.
        """
        billing_address_params = [
            "company", "country_code_alpha2", "country_code_alpha3", "country_code_numeric", "country_name",
            "extended_address", "first_name", "last_name", "locality", "postal_code", "region", "street_address"
        ]
        options = ["make_default", "verification_merchant_account_id", "verify_card", "verification_amount", "venmo_sdk_session"]

        signature = [
            "billing_address_id", "cardholder_name", "cvv", "expiration_date", "expiration_month", "expiration_year",
            "device_session_id", "fraud_merchant_id", "number", "token", "venmo_sdk_payment_method_code", "device_data",
            "payment_method_nonce",
            {"billing_address": billing_address_params},
            {"options": options}
        ]

        if type == "create":
            signature.append("customer_id")
            options.append("fail_on_duplicate_payment_method")
        elif type == "update":
            billing_address_params.append({"options": ["update_existing"]})
        elif type == "update_via_customer":
            options.append("update_existing_token")
            billing_address_params.append({"options": ["update_existing"]})
        else:
            # Include the offending value so the caller can see what was wrong.
            raise AttributeError("Invalid signature type: " + repr(type))
        return signature

    @staticmethod
    def transparent_redirect_create_url():
        """
        Returns the url to use for creating CreditCards through transparent redirect.
        """
        warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
        return Configuration.gateway().credit_card.transparent_redirect_create_url()

    @staticmethod
    def tr_data_for_create(tr_data, redirect_url):
        """
        Builds tr_data for CreditCard creation.
        """
        return Configuration.gateway().credit_card.tr_data_for_create(tr_data, redirect_url)

    @staticmethod
    def tr_data_for_update(tr_data, redirect_url):
        """
        Builds tr_data for CreditCard updating.
        """
        return Configuration.gateway().credit_card.tr_data_for_update(tr_data, redirect_url)

    @staticmethod
    def transparent_redirect_update_url():
        """
        Returns the url to be used for updating CreditCards through transparent redirect.
        """
        warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
        return Configuration.gateway().credit_card.transparent_redirect_update_url()

    def __init__(self, gateway, attributes):
        """Wrap a gateway response dict in a CreditCard resource."""
        Resource.__init__(self, gateway, attributes)
        # NOTE(review): assumes Resource.__init__ set an ``expired`` attribute
        # from the response; otherwise this aliases the expired() staticmethod.
        self.is_expired = self.expired
        if "billing_address" in attributes:
            # Resource.__init__ stored the raw dict; re-wrap it as an Address.
            self.billing_address = Address(gateway, self.billing_address)
        else:
            self.billing_address = None
        if "subscriptions" in attributes:
            self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
        if "verifications" in attributes:
            # Keep only the most recent verification (newest created_at first).
            sorted_verifications = sorted(attributes["verifications"], key=lambda verification: verification["created_at"], reverse=True)
            if len(sorted_verifications) > 0:
                self.verification = CreditCardVerification(gateway, sorted_verifications[0])

    @property
    def expiration_date(self):
        """Return the expiration as "MM/YYYY"."""
        return self.expiration_month + "/" + self.expiration_year

    @property
    def masked_number(self):
        """
        Returns the masked number of the CreditCard.
        """
        return self.bin + "******" + self.last_4
| |
# MyLibrary.py
import sys, time, random, math, pygame
from pygame.locals import *
def print_text(font, x, y, text, color=(255, 255, 255)):
    """Render *text* with *font* and blit it onto the active display at (x, y)."""
    rendered = font.render(text, True, color)
    # Look the display surface up on every call so the helper keeps working
    # after being moved into this shared library module.
    pygame.display.get_surface().blit(rendered, (x, y))
# MySprite class extends pygame.sprite.Sprite
class MySprite(pygame.sprite.Sprite):
    """Animated sprite backed by a sprite sheet ("master image").

    The sheet is divided into equal-sized frames laid out in ``columns``
    columns; ``update`` advances the current frame on a millisecond timer
    and carves the matching sub-surface out of the master image.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)  # extend the base Sprite class
        self.master_image = None  # full sprite sheet surface
        self.frame = 0            # current frame index
        self.old_frame = -1       # last frame actually rendered into self.image
        self.frame_width = 1
        self.frame_height = 1
        self.first_frame = 0
        self.last_frame = 0
        self.columns = 1
        self.last_time = 0        # timestamp (ms) of the last frame advance
        self.direction = 0
        self.velocity = Point(0.0, 0.0)

    # X property
    def _getx(self):
        return self.rect.x
    def _setx(self, value):
        self.rect.x = value
    X = property(_getx, _setx)

    # Y property
    def _gety(self):
        return self.rect.y
    def _sety(self, value):
        self.rect.y = value
    Y = property(_gety, _sety)

    # position property
    def _getpos(self):
        return self.rect.topleft
    def _setpos(self, pos):
        self.rect.topleft = pos
    position = property(_getpos, _setpos)

    def load(self, filename, width=0, height=0, columns=1):
        """Load an image file and use it as the sprite sheet."""
        self.master_image = pygame.image.load(filename).convert_alpha()
        self.set_image(self.master_image, width, height, columns)

    def set_image(self, image, width=0, height=0, columns=1):
        """Attach *image* as the sprite sheet.

        With width/height left at 0 the whole image is treated as a single
        frame; otherwise it is a grid of width x height cells.
        """
        self.master_image = image
        if width == 0 and height == 0:
            self.frame_width = image.get_width()
            self.frame_height = image.get_height()
        else:
            self.frame_width = width
            self.frame_height = height
        rect = self.master_image.get_rect()
        # Bug fix: compute the frame count from the resolved frame size.
        # The original divided by the raw width/height arguments, which
        # raised ZeroDivisionError whenever the defaults (0, 0) were used
        # for a single-frame image (as Food and SnakeSegment do).
        self.last_frame = ((rect.width // self.frame_width) *
                           (rect.height // self.frame_height) - 1)
        self.rect = Rect(0, 0, self.frame_width, self.frame_height)
        self.columns = columns

    def update(self, current_time, rate=30):
        """Advance the animation; *rate* is the ms delay between frames."""
        if self.last_frame > self.first_frame:
            # update animation frame number
            if current_time > self.last_time + rate:
                self.frame += 1
                if self.frame > self.last_frame:
                    self.frame = self.first_frame
                self.last_time = current_time
        else:
            self.frame = self.first_frame
        # build current frame only if it changed
        if self.frame != self.old_frame:
            frame_x = (self.frame % self.columns) * self.frame_width
            frame_y = (self.frame // self.columns) * self.frame_height
            rect = Rect(frame_x, frame_y, self.frame_width, self.frame_height)
            self.image = self.master_image.subsurface(rect)
            self.old_frame = self.frame

    def __str__(self):
        return str(self.frame) + "," + str(self.first_frame) + \
            "," + str(self.last_frame) + "," + str(self.frame_width) + \
            "," + str(self.frame_height) + "," + str(self.columns) + \
            "," + str(self.rect)
# Point class
class Point(object):
    """A simple mutable 2-D coordinate pair with x/y accessors."""

    def __init__(self, x, y):
        self.__x = x
        self.__y = y

    # X property
    def getx(self):
        return self.__x

    def setx(self, x):
        self.__x = x

    x = property(getx, setx)

    # Y property
    def gety(self):
        return self.__y

    def sety(self, y):
        self.__y = y

    y = property(gety, sety)

    def __str__(self):
        # Render both coordinates rounded to whole numbers, e.g. "{X:3,Y:4}".
        return "{{X:{:.0f},Y:{:.0f}}}".format(self.__x, self.__y)
class Food(MySprite):
    """Yellow food pellet dropped at a random cell of the 24x18 board grid."""
    def __init__(self):
        MySprite.__init__(self)
        # Draw a yellow circle on a transparent 32x32 surface.
        image = pygame.Surface((32, 32)).convert_alpha()
        image.fill((255, 255, 255, 0))
        pygame.draw.circle(image, (250, 250, 50), (16, 16), 16, 0)
        self.set_image(image)
        # Prime self.image/self.rect so the sprite can be drawn immediately.
        MySprite.update(self, 0, 30)
        # Snap to a random 32-pixel grid cell (columns 0-23, rows 0-17).
        self.X = random.randint(0, 23) * 32
        self.Y = random.randint(0, 17) * 32
class SnakeSegment(MySprite):
    """One 32x32 circular body segment of the snake (default body green)."""
    def __init__(self, color=(20, 200, 20)):
        MySprite.__init__(self)
        # Draw the segment circle on a transparent 32x32 surface.
        image = pygame.Surface((32, 32)).convert_alpha()
        image.fill((255, 255, 255, 0))
        pygame.draw.circle(image, color, (16, 16), 16, 0)
        self.set_image(image)
        # Prime self.image/self.rect so the sprite can be drawn immediately.
        MySprite.update(self, 0, 30)
class Snake():
    """The player snake: a list of SnakeSegment sprites, head first."""
    def __init__(self):
        # Start moving left with a brighter-green head and two body segments.
        self.velocity = Point(-1, 0)
        self.old_time = 0
        head = SnakeSegment((50, 250, 50))
        head.X = 12 * 32
        head.Y = 9 * 32
        self.segments = list()
        self.segments.append(head)
        self.add_segment()
        self.add_segment()
    def update(self, ticks):
        # step_time (ms per step) is a module global set by the main script.
        global step_time
        if ticks > self.old_time + step_time:
            self.old_time = ticks
            # Each body segment moves into the cell of the one ahead of it...
            for n in range(len(self.segments) - 1, 0, -1):
                self.segments[n].X = self.segments[n - 1].X
                self.segments[n].Y = self.segments[n - 1].Y
            # ...then the head advances one 32-pixel cell.
            self.segments[0].X += self.velocity.x * 32
            self.segments[0].Y += self.velocity.y * 32
    def draw(self, surface):
        """Blit every segment onto *surface*."""
        for segment in self.segments:
            surface.blit(segment.image, (segment.X, segment.Y))
    def add_segment(self):
        """Append a new segment one cell behind the current tail."""
        last = len(self.segments) - 1
        segment = SnakeSegment()
        start = Point(0, 0)
        # Offset opposite to the direction of travel.
        # NOTE(review): this uses the head's current velocity, not the tail's
        # direction, so the new segment can overlap right after a turn.
        if self.velocity.x < 0:
            start.x = 32
        elif self.velocity.x > 0:
            start.x = -32
        if self.velocity.y < 0:
            start.y = 32
        elif self.velocity.y > 0:
            start.y = -32
        segment.X = self.segments[last].X + start.x
        segment.Y = self.segments[last].Y + start.y
        self.segments.append(segment)
def get_current_direction():
    """Infer the head's travel direction from the first body segment.

    Returns "right"/"left"/"down"/"up", or (implicitly) None when the head
    and the first segment are not in horizontally/vertically adjacent grid
    cells.
    """
    global head_x, head_y
    # Grid-cell coordinates of the segment directly behind the head.
    first_segment_x = snake.segments[1].X // 32
    first_segment_y = snake.segments[1].Y // 32
    if head_x - 1 == first_segment_x:
        return "right"
    elif head_x + 1 == first_segment_x:
        return "left"
    elif head_y - 1 == first_segment_y:
        return "down"
    elif head_y + 1 == first_segment_y:
        return "up"
def get_food_direction():
    """Return the direction that moves the head toward the food.

    Horizontal movement is preferred; vertical is only chosen once the head
    shares the food's column. Returns (implicitly) None when the head is
    exactly on the food cell.
    """
    global head_x, head_y
    food = Point(0, 0)
    # food_group holds a single pellet; grab its grid-cell position.
    for obj in food_group:
        food = Point(obj.X // 32, obj.Y // 32)
    if head_x < food.x:
        return "right"
    elif head_x > food.x:
        return "left"
    elif head_x == food.x:
        if head_y < food.y:
            return "down"
        elif head_y > food.y:
            return "up"
def auto_move():
    """Steer the snake one step toward the food, refusing 180-degree turns."""
    direction = get_current_direction()
    food_dir = get_food_direction()
    # Heading that would reverse each direction; a snake can't turn back on
    # itself, so the food direction is ignored when it is the exact opposite
    # of the current heading.
    opposite = {"left": "right", "right": "left", "up": "down", "down": "up"}
    if food_dir is not None and direction != opposite[food_dir]:
        direction = food_dir
    # Translate the chosen heading into a unit velocity vector.
    headings = {
        "up": (0, -1),
        "down": (0, 1),
        "left": (-1, 0),
        "right": (1, 0),
    }
    if direction in headings:
        vx, vy = headings[direction]
        snake.velocity = Point(vx, vy)
def game_init():
    """Create the window, font, timer, snake and the first food pellet."""
    global screen, back_buffer, font, timer, snake, food_group
    pygame.init()
    # Window is a 24x18 grid of 32-pixel cells.
    screen = pygame.display.set_mode((24 * 32, 18 * 32))
    pygame.display.set_caption("Snake Game")
    font = pygame.font.Font(None, 30)
    timer = pygame.time.Clock()
    # Off-screen surface everything is drawn to before a single final blit.
    back_buffer = pygame.Surface((screen.get_rect().width, screen.get_rect().height))
    snake = Snake()
    # NOTE(review): this blue circle surface is built but never stored or
    # drawn anywhere below — it looks like dead code; confirm and remove.
    image = pygame.Surface((60, 60)).convert_alpha()
    image.fill((255, 255, 255, 0))
    pygame.draw.circle(image, (80, 80, 220, 70), (30, 30), 30, 0)
    pygame.draw.circle(image, (80, 80, 250, 255), (30, 30), 30, 4)
    food_group = pygame.sprite.Group()
    food = Food()
    food_group.add(food)
if __name__ == '__main__':
    # NOTE(review): ``global`` at module level is a no-op — these names are
    # already module globals. Kept as written; it only documents intent.
    global screen, back_buffer, font, timer, snake, food_group, head_x, head_y, step_time
    game_init()
    game_over = False
    last_time = 0
    auto_play = False
    step_time = 400  # ms between snake steps; lowered while auto-play is on
    while True:
        timer.tick(30)  # cap the loop at 30 fps
        ticks = pygame.time.get_ticks()
        for event in pygame.event.get():
            if event.type == QUIT: sys.exit()
        keys = pygame.key.get_pressed()
        if keys[K_ESCAPE]:
            sys.exit()
        elif keys[K_UP] or keys[K_w]:
            snake.velocity = Point(0, -1)
        elif keys[K_DOWN] or keys[K_s]:
            snake.velocity = Point(0, 1)
        elif keys[K_LEFT] or keys[K_a]:
            snake.velocity = Point(-1, 0)
        elif keys[K_RIGHT] or keys[K_d]:
            snake.velocity = Point(1, 0)
        elif keys[K_SPACE]:
            # Space toggles auto-play; the snake also speeds up while it's on.
            if auto_play:
                auto_play = False
                step_time = 400
            else:
                auto_play = True
                step_time = 100
        if not game_over:
            snake.update(ticks)
            food_group.update(ticks)
            # Eating: the pellet is killed (dokill2=True), replaced, and the
            # snake grows by one segment.
            hit_list = pygame.sprite.groupcollide(snake.segments, food_group, False, True)
            if len(hit_list) > 0:
                food_group.add(Food())
                snake.add_segment()
            # Head colliding with any body segment ends the game.
            for n in range(1, len(snake.segments)):
                if pygame.sprite.collide_rect(snake.segments[0], snake.segments[n]):
                    game_over = True
            head_x = snake.segments[0].X // 32
            head_y = snake.segments[0].Y // 32
            # NOTE(review): valid cells are x 0..23 and y 0..17, so these
            # bounds let the head sit one full cell off-screen before the
            # game ends — confirm whether > 23 / > 17 was intended.
            if head_x < 0 or head_x > 24 or head_y < 0 or head_y > 18:
                game_over = True
            if auto_play: auto_move()
        back_buffer.fill((20, 50, 20))
        snake.draw(back_buffer)
        food_group.draw(back_buffer)
        screen.blit(back_buffer, (0, 0))
        if not game_over:
            print_text(font, 0, 0, "Length " + str(len(snake.segments)))
            print_text(font, 0, 20, "Position " + str(snake.segments[0].X // 32) + "," + str(snake.segments[0].Y // 32))
        else:
            print_text(font, 0, 0, "G A M E _ O V E R")
        if auto_play:
            print_text(font, 700, 0, "AUTO")
        pygame.display.update()
| |
import time
class Waypoint:
VIA = 1
STOP = 2
class Router:
    """Abstract base class for a routing-service client.

    Subclasses implement ``raw_query`` (hit the service) and
    ``format_output`` (convert the raw response); ``route`` drives both and
    applies optional rate limiting between queries.
    """

    def __init__(self, name=None, rate_limit_dt=0):
        # Just a simple identifier; fall back to the subclass-provided
        # ``default_name`` when none is given.
        # Bug fix: the original only assigned ``self.name`` inside the
        # ``name is None`` branch, so an explicitly passed name was dropped.
        self.name = self.default_name if name is None else name
        # The min time delta in seconds between queries
        self._rate_limit_dt = rate_limit_dt
        # The time of the last query, None if it hasn't been hit yet
        self._last_query = None

    def raw_query(self, waypoints, **kwargs):
        """Query the backing service; must be overridden by subclasses."""
        # Bug fix: the original *returned* the exception instance rather
        # than raising it, silently handing callers an unraised error.
        raise NotImplementedError()

    def rate_limit_wait(self):
        """
        Sleep if rate limiting is required based on current time and last
        query.
        """
        if self._rate_limit_dt and self._last_query is not None:
            dt = time.time() - self._last_query
            wait = self._rate_limit_dt - dt
            if wait > 0:
                time.sleep(wait)

    def format_output(self, data):
        """Convert the raw service response; must be overridden by subclasses."""
        # Bug fix: raise rather than return the exception (see raw_query).
        raise NotImplementedError()

    def route(self, arg, destination=None, waypoints=None, raw=False, **kwargs):
        """
        Query a route.

        route(locations): points can be
            - a sequence of locations
            - a Shapely LineString
        route(origin, destination, waypoints=None)
            - origin and destination are a single destination
            - waypoints are the points to be inserted between the
              origin and destination

        If waypoints is specified, destination must also be specified

        Each location can be:
            - string (will be geocoded by the routing provider. Not all
              providers accept this as input)
            - (longitude, latitude) sequence (tuple, list, numpy array, etc.)
            - Shapely Point with x as longitude, y as latitude

        Additional parameters
        ---------------------
        raw : bool, default False
            Return the raw json dict response from the service

        Returns
        -------
        list of Route objects
        If raw is True, returns the json dict instead of converting to Route
        objects

        Examples
        --------
        mq = directions.Mapquest(key)
        routes = mq.route('1 magazine st. cambridge, ma',
                          'south station boston, ma')
        routes = mq.route('1 magazine st. cambridge, ma',
                          'south station boston, ma',
                          waypoints=['700 commonwealth ave. boston, ma'])

        # Uses each point in the line as a waypoint. There is a limit to the
        # number of waypoints for each service. Consult the docs.
        line = LineString(...)
        routes = mq.route(line)

        # Feel free to mix different location types
        routes = mq.route(line.coords[0], 'south station boston, ma',
                          waypoints=[(-71.103972, 42.349324)])
        """
        points = _parse_points(arg, destination, waypoints)
        if len(points) < 2:
            raise ValueError('You must specify at least 2 points')

        self.rate_limit_wait()
        data = self.raw_query(points, **kwargs)
        self._last_query = time.time()
        if raw:
            return data
        return self.format_output(data)
def _parse_points(arg, destination=None, waypoints=None):
    """Assemble the full, ordered location list for a routing query.

    With only *arg* given it is treated as the complete sequence of
    locations; otherwise *arg* is the origin, followed by any *waypoints*,
    ending at *destination*. The combined list is normalized via
    ``_waypoints``.
    """
    if destination is None:
        # arg is the whole sequence of locations; waypoints are meaningless.
        if waypoints is not None:
            raise ValueError('Cannot specify waypoints without destination')
        raw = arg
    elif waypoints is None:
        raw = [arg, destination]
    else:
        raw = [arg] + waypoints + [destination]
    return _waypoints(raw)
def _waypoints(waypoints):
if hasattr(waypoints, 'coords'):
waypoints = waypoints.coords
points = []
for wp in waypoints:
if isinstance(wp, basestring):
p = wp
elif hasattr(wp, 'coords'):
coords = wp.coords
if len(coords) != 1:
raise ValueError('Non-point like object used in waypoints')
p = coords[0]
elif len(wp) == 2:
p = wp
else:
raise ValueError('Non 2-tuple used in waypoints')
points.append(p)
return points
class Route:
    """A single route returned by a routing service."""

    def __init__(self, coords, distance, duration, formatedTime, maneuvers=None, **kwargs):
        """
        Simple class to represent a single returned route

        Parameters
        ----------
        coords : sequence of (lon, lat) coordinates
        distance : length in meters of the route
        duration : estimated duration of the route in seconds
        formatedTime : human-readable duration string
        maneuvers : optional list of Maneuver objects along the route
        kwargs : additional properties when converting to geojson
        """
        self.coords = coords
        self.distance = distance
        self.duration = duration
        self.formatedTime = formatedTime
        self.properties = kwargs.copy()
        if maneuvers is None:
            maneuvers = []
        self.maneuvers = maneuvers

    @property
    def __geo_interface__(self):
        """Represent the route itself as a GeoJSON LineString Feature."""
        geom = {'type': 'LineString',
                'coordinates': self.coords}
        properties = self.properties.copy()
        properties.update({'index': 0,
                           'distance': self.distance,
                           'duration': self.duration,
                           'formatedTime': self.formatedTime})
        f = {'type': 'Feature',
             'geometry': geom,
             'properties': properties}
        return f

    def geojson(self, include_maneuvers=True):
        """Return the route (optionally with maneuvers) as a FeatureCollection."""
        if include_maneuvers:
            features = [self] + self.maneuvers
        else:
            features = [self]
        properties = self.properties.copy()
        properties.update({'distance': self.distance,
                           'duration': self.duration,
                           'formatedTime': self.formatedTime})
        return {'type': 'FeatureCollection',
                'properties': properties,
                'features': [f.__geo_interface__ for f in features]}

    @classmethod
    def from_geojson(cls, data):
        """
        Return a Route from a GeoJSON dictionary, as returned by Route.geojson()
        """
        properties = data['properties']
        distance = properties.pop('distance')
        duration = properties.pop('duration')
        # Bug fix: formatedTime must be popped and forwarded explicitly.
        # The original left it inside *properties* and passed the maneuver
        # list into the formatedTime positional slot of __init__.
        formatedTime = properties.pop('formatedTime', None)
        maneuvers = []
        coords = None  # stays None if the data contains no LineString feature
        for feature in data['features']:
            geom = feature['geometry']
            if geom['type'] == 'LineString':
                coords = geom['coordinates']
            else:
                maneuvers.append(Maneuver.from_geojson(feature))
        return cls(coords, distance, duration, formatedTime, maneuvers, **properties)
class Maneuver:
    """A single instruction point along a route.

    Todo: Add some remaining fields like maneuver text, type, etc.
    """

    def __init__(self, coords, **kwargs):
        self.coords = coords
        self.properties = kwargs.copy()

    @property
    def __geo_interface__(self):
        """Represent the maneuver as a GeoJSON Point Feature."""
        return {
            'type': 'Feature',
            'geometry': {'type': 'Point', 'coordinates': self.coords},
            'properties': self.properties,
        }

    @classmethod
    def from_geojson(cls, data):
        """
        Return a Maneuver from a GeoJSON dictionary
        """
        return cls(data['geometry']['coordinates'], **data['properties'])
| |
""" Contains the ModeController, Mode, and ModeTimers parent classes"""
# modes.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
from collections import namedtuple
from mpf.system.timing import Timing, Timer
from mpf.system.tasks import DelayManager
from mpf.system.config import Config
# NOTE: the namedtuple ``verbose`` flag only toggled debug printing of the
# generated class source (it defaulted to False) and was removed entirely in
# Python 3.9; omitting it yields an identical class on every version.
RemoteMethod = namedtuple('RemoteMethod', 'method config_section kwargs')
"""RemoteMethod is used by other modules that want to register a method to
be called on mode_start or mode_stop.
"""
# todo
# override player var
# override event strings
class ModeController(object):
    """Parent class for the Mode Controller. There is one instance of this in
    MPF and it's responsible for loading, unloading, and managing all game
    modes.

    Args:
        machine: The main MachineController instance.
    """

    def __init__(self, machine):
        self.machine = machine
        self.log = logging.getLogger('ModeController')

        self.queue = None  # ball ending event queue

        self.active_modes = list()
        self.mode_stop_count = 0

        # The following two lists hold namedtuples of any remote components
        # that need to be notified when a mode object is created and/or
        # started.
        self.loader_methods = list()
        self.start_methods = list()

        if 'modes' in self.machine.config:
            self.machine.events.add_handler('init_phase_4',
                                            self._load_modes)

        self.machine.events.add_handler('ball_ending', self._ball_ending,
                                        priority=0)

    def _load_modes(self):
        # Loads the modes from the 'modes' section of the machine
        # configuration file.
        for mode in self.machine.config['modes']:
            self.machine.game_modes.append(self._load_mode(mode))

    def _load_mode(self, mode_string):
        """Loads a mode, reads in its config, and creates the Mode object.

        Args:
            mode_string: String name of the mode you're loading. This is the
                name of the mode's folder in your game's machine_files/modes
                folder.

        Returns:
            The newly-created Mode (or Mode subclass) instance.
        """
        self.log.info('Processing mode: %s', mode_string)

        mode_path = os.path.join(self.machine.machine_path,
            self.machine.config['mpf']['paths']['modes'], mode_string)
        mode_config_file = os.path.join(self.machine.machine_path,
            self.machine.config['mpf']['paths']['modes'], mode_string, 'config',
            mode_string + '.yaml')
        config = Config.load_config_yaml(yaml_file=mode_config_file)

        if 'code' in config['mode']:
            # Custom mode code: import 'modes.<name>.code.<module>' and
            # instantiate the class named after the dot in the 'code' entry.
            import_str = ('modes.' + mode_string + '.code.' +
                          config['mode']['code'].split('.')[0])
            i = __import__(import_str, fromlist=[''])
            mode_object = getattr(i, config['mode']['code'].split('.')[1])(
                self.machine, config, mode_string, mode_path)
        else:
            # No custom code specified, so use the standard Mode base class.
            mode_object = Mode(self.machine, config, mode_string, mode_path)

        return mode_object

    def _ball_ending(self, queue):
        # Stops all the active modes when the ball ends. Holds the
        # ball_ending queue event open until every mode reports stopped.
        if not self.active_modes:
            # Plain 'return' (None) instead of the original 'return()' which
            # returned an empty tuple for no reason.
            return

        self.queue = queue
        self.queue.wait()
        self.mode_stop_count = 0

        for mode in self.active_modes:
            self.mode_stop_count += 1
            mode.stop(callback=self._mode_stopped_callback)

    def _mode_stopped_callback(self):
        # Called once per mode as it finishes stopping; releases the held
        # ball_ending queue once the last mode has reported in.
        self.mode_stop_count -= 1

        if not self.mode_stop_count:
            self.queue.clear()

    def register_load_method(self, load_method, config_section_name=None,
                             **kwargs):
        """Used by system components, plugins, etc. to register themselves
        with the Mode Controller for anything they want a mode to do when
        it's registered.

        Args:
            load_method: The method that will be called when this mode code
                loads.
            config_section_name: An optional string for the section of the
                configuration file that will be passed to the load_method
                when it's called.
            **kwargs: Any additional keyword arguments specified will be
                passed to the load_method.

        Note that these methods will be called once, when the mode code is
        first initialized.
        """
        self.loader_methods.append(RemoteMethod(method=load_method,
            config_section=config_section_name, kwargs=kwargs))

    def register_start_method(self, start_method, config_section_name=None,
                              **kwargs):
        """Used by system components, plugins, etc. to register themselves
        with the Mode Controller for anything they want a mode to do when it
        starts.

        Args:
            start_method: The method that will be called when this mode
                starts.
            config_section_name: An optional string for the section of the
                configuration file that will be passed to the start_method
                when it's called.
            **kwargs: Any additional keyword arguments specified will be
                passed to the start_method.

        Note that these methods will be called every single time this mode is
        started.
        """
        self.start_methods.append(RemoteMethod(method=start_method,
            config_section=config_section_name, kwargs=kwargs))

    def _active_change(self, mode, active):
        # Called by a Mode when it goes active or inactive; maintains the
        # priority-sorted list of active modes.
        if active:
            self.active_modes.append(mode)
        else:
            self.active_modes.remove(mode)

        # Sort the active mode list by priority, highest first.
        self.active_modes.sort(key=lambda x: x.priority, reverse=True)

        self.dump()

    def dump(self):
        """Dumps the current status of the running modes to the log file."""
        self.log.info('================ ACTIVE GAME MODES ===================')

        for mode in self.active_modes:
            if mode.active:
                self.log.info('%s : %s', mode.name, mode.priority)

        self.log.info('======================================================')
class Mode(object):
    """Parent class for in-game mode code.

    Args:
        machine: The main MachineController instance.
        config: Dict of this mode's configuration (from its config file).
        name: String name of this mode.
        path: Full path to this mode's folder.
    """

    def __init__(self, machine, config, name, path):
        self.machine = machine
        self.config = config
        self.name = name.lower()
        self.path = path

        self.log = logging.getLogger('Mode.' + name)

        self.priority = 0
        self._active = False
        self.stop_methods = list()  # filled with start_methods' return values
        self.timers = dict()
        self.start_callback = None
        self.stop_callback = None
        self.event_handlers = set()

        # Reference to the current player object; set when the mode starts.
        self.player = None

        if 'mode' in self.config:
            self.configure_mode_settings(config['mode'])

        # Register this mode's assets with each of the machine's asset
        # managers, replacing the raw config sections with the registered
        # asset dicts.
        for asset_manager in self.machine.asset_managers.values():

            config_data = self.config.get(asset_manager.config_section, dict())

            self.config[asset_manager.config_section] = (
                asset_manager.register_assets(config=config_data,
                                              mode_path=self.path))

        # Call registered remote loader methods
        for item in self.machine.modes.loader_methods:
            if (item.config_section and
                    item.config_section in self.config and
                    self.config[item.config_section]):
                item.method(config=self.config[item.config_section],
                            mode_path=self.path,
                            **item.kwargs)
            elif not item.config_section:
                item.method(config=self.config, mode_path=self.path,
                            **item.kwargs)

        self.mode_init()

    @property
    def active(self):
        # True while this mode is running (between _started and _stopped).
        return self._active

    @active.setter
    def active(self, active):
        # Notify the mode controller whenever the active state flips.
        if self._active != active:
            self._active = active
            self.machine.modes._active_change(self, self._active)

    def configure_mode_settings(self, config):
        """Processes this mode's configuration settings from a config
        dictionary.

        Args:
            config: The localized 'mode' section of this mode's config dict.
                Normalized in place (priority forced to int, event lists
                forced to lists) and stored back as self.config['mode'].
        """
        if not ('priority' in config and type(config['priority']) is int):
            config['priority'] = 0

        if 'start_events' in config:
            config['start_events'] = Config.string_to_list(
                config['start_events'])
        else:
            config['start_events'] = list()

        if 'stop_events' in config:
            config['stop_events'] = Config.string_to_list(
                config['stop_events'])
        else:
            config['stop_events'] = list()

        # Register mode start events. ('start_events' is guaranteed to be a
        # list at this point, so no existence re-check is needed.)
        for event in config['start_events']:
            self.machine.events.add_handler(event, self.start)

        self.config['mode'] = config

    def start(self, priority=None, callback=None, **kwargs):
        """Starts this mode.

        Args:
            priority: Integer value of what you want this mode to run at. If
                you don't specify one, it will use the "Mode: priority"
                setting from this mode's configuration file.
            callback: Optional method which is called once the mode has
                finished starting.
            **kwargs: Catch-all since this mode might start from events with
                who-knows-what keyword arguments.

        Warning: You can safely call this method, but do not override it in
        your mode code. If you want to write your own mode code by
        subclassing Mode, put whatever code you want to run when this mode
        starts in the mode_start method which will be called automatically.
        """
        if self.active or not self.machine.game.player:
            # Only start the mode if it's not already started and there's an
            # active player.
            self.log.debug('Mode Start Aborted')
            return

        self.player = self.machine.game.player

        if type(priority) is int:
            self.priority = priority
        else:
            self.priority = self.config['mode']['priority']

        self.log.info('Mode Starting. Priority: %s', self.priority)

        # Register mode stop events; these are removed automatically when
        # this mode stops.
        if 'stop_events' in self.config['mode']:
            for event in self.config['mode']['stop_events']:
                self.add_mode_event_handler(event, self.stop)

        self.start_callback = callback

        if 'timers' in self.config:
            self._setup_timers()

        self.machine.events.post_queue(event='mode_' + self.name + '_starting',
                                       callback=self._started)

    def _started(self):
        # Called after the mode_<name>_starting queue event has finished.
        self.log.info('Mode Started. Priority: %s', self.priority)

        self.active = True

        # Call registered remote start methods and remember what they return
        # so it can be called again at stop time.
        for item in self.machine.modes.start_methods:
            if item.config_section in self.config:
                self.stop_methods.append(
                    item.method(config=self.config[item.config_section],
                                priority=self.priority,
                                mode=self,
                                **item.kwargs))

        self._start_timers()

        self.machine.events.post('mode_' + self.name + '_started',
                                 callback=self._mode_started_callback)

    def _mode_started_callback(self, **kwargs):
        # Called after the mode_<name>_started event has finished.
        self.mode_start()

        if self.start_callback:
            self.start_callback()

    def stop(self, callback=None, **kwargs):
        """Stops this mode.

        Args:
            callback: Optional method which is called once the mode has
                finished stopping.
            **kwargs: Catch-all since this mode might stop from events with
                who-knows-what keyword arguments.

        Warning: You can safely call this method, but do not override it in
        your mode code. If you want to write your own mode code by
        subclassing Mode, put whatever code you want to run when this mode
        stops in the mode_stop method which will be called automatically.
        """
        if not self.active:
            return

        self.log.debug('Mode Stopping.')

        self._remove_mode_event_handlers()

        self.stop_callback = callback

        self._kill_timers()

        # todo: should we also remove handlers that this mode's code added
        # directly to the event manager, or only the ones registered via
        # add_mode_event_handler()?

        self.machine.events.post_queue(event='mode_' + self.name + '_stopping',
                                       callback=self._stopped)

    def _stopped(self):
        # Called after the mode_<name>_stopping queue event has finished.
        self.log.debug('Mode Stopped.')

        self.priority = 0
        self.active = False

        for item in self.stop_methods:
            # Each entry is expected to be a (method, argument) pair returned
            # by a registered start method; entries that aren't callable
            # pairs (e.g. None) are skipped via the TypeError catch.
            try:
                item[0](item[1])
            except TypeError:
                pass

        self.stop_methods = list()

        self.machine.events.post('mode_' + self.name + '_stopped',
                                 callback=self._mode_stopped_callback)

    def _mode_stopped_callback(self, **kwargs):
        # Called after the mode_<name>_stopped event has finished.
        self.mode_stop()

        if self.stop_callback:
            self.stop_callback()

        self.player = None

    def add_mode_event_handler(self, event, handler, priority=1, **kwargs):
        """Registers an event handler which is automatically removed when
        this mode stops.

        This method is similar to the Event Manager's add_handler() method,
        except this method automatically unregisters the handlers when the
        mode ends.

        Args:
            event: String name of the event you're adding a handler for.
                Since events are text strings, they don't have to be
                pre-defined.
            handler: The method that will be called when the event is fired.
            priority: An arbitrary integer value that defines what order the
                handlers will be called in. The default is 1, so if you have
                a handler that you want to be called first, add it here with
                a priority of 2. (Or 3 or 10 or 100000.) The numbers don't
                matter. They're called from highest to lowest. (i.e. priority
                100 is called before priority 1.)
            **kwargs: Any additional keyword/argument pairs entered here will
                be attached to the handler and called whenever that handler
                is called. Note these are in addition to kwargs that could be
                passed as part of the event post. If there's a conflict, the
                event-level ones will win.

        Returns:
            A GUID reference to the handler which you can use to later remove
            the handler via ``remove_handler_by_key``. Though you don't need
            to remove the handler since the whole point of this method is
            they're automatically removed when the mode stops.

        Note that if you do add a handler via this method and then remove it
        manually, that's ok too.
        """
        key = self.machine.events.add_handler(event, handler, priority,
                                              **kwargs)

        self.event_handlers.add(key)

        return key

    def _remove_mode_event_handlers(self):
        # Removes all handlers that were registered via
        # add_mode_event_handler().
        for key in self.event_handlers:
            self.machine.events.remove_handler_by_key(key)
        self.event_handlers = set()

    def _setup_timers(self):
        # Creates this mode's timers from its (localized) 'timers' config.
        # items() instead of the Python-2-only iteritems(); behavior is the
        # same on Python 2.
        for timer, settings in self.config['timers'].items():
            self.timers[timer] = ModeTimer(machine=self.machine, mode=self,
                                           name=timer, config=settings)

        # Returned so callers can use the teardown method directly.
        return self._kill_timers

    def _start_timers(self):
        # Starts any timers configured with start_running (their 'running'
        # flag is pre-set by ModeTimer.__init__).
        for timer in self.timers.values():
            if timer.running:
                timer.start()

    def _kill_timers(self):
        # Stops all of this mode's timers and removes their control events.
        for timer in self.timers.values():
            timer.kill()

        self.timers = dict()

    def mode_init(self):
        """User-overrideable method which will be called when this mode
        initializes as part of the MPF boot process.
        """
        pass

    def mode_start(self):
        """User-overrideable method which will be called whenever this mode
        starts (i.e. whenever it becomes active).
        """
        pass

    def mode_stop(self):
        """User-overrideable method which will be called whenever this mode
        stops (i.e. whenever it becomes inactive).
        """
        pass
class ModeTimer(object):
    """Parent class for a mode timer.

    Args:
        machine: The main MPF MachineController object.
        mode: The parent mode object that this timer belongs to.
        name: The string name of this timer.
        config: A Python dictionary which contains the configuration settings
            for this timer.
    """

    def __init__(self, machine, mode, name, config):
        self.machine = machine
        self.mode = mode
        self.name = name
        self.config = config

        # The timer's current value lives in a player variable named
        # '<mode>_<name>_tick' so other components can read it.
        self.tick_var = self.mode.name + '_' + self.name + '_tick'
        self.mode.player[self.tick_var] = 0

        self.running = False
        self.start_value = 0
        self.restart_on_complete = False
        self._ticks = 0
        self.end_value = 0
        self.max_value = None
        self.direction = 'up'
        self.tick_secs = 1
        self.timer = None
        self.bcp = False
        self.event_keys = set()
        self.delay = DelayManager()

        if 'start_value' in self.config:
            self.start_value = self.config['start_value']
        else:
            self.start_value = 0

        if 'start_running' in self.config and self.config['start_running']:
            self.running = True

        if 'end_value' in self.config:
            self.end_value = self.config['end_value']

        if 'control_events' in self.config and self.config['control_events']:
            # Normalize a single control_events dict to a one-element list.
            if type(self.config['control_events']) is dict:
                self.config['control_events'] = [self.config['control_events']]
        else:
            self.config['control_events'] = list()

        if 'direction' in self.config and self.config['direction'] == 'down':
            self.direction = 'down'

        if 'tick_interval' in self.config:
            self.tick_secs = Timing.string_to_secs(self.config['tick_interval'])

        if 'max_value' in self.config:
            self.max_value = self.config['max_value']

        if ('restart_on_complete' in self.config and
                self.config['restart_on_complete']):
            self.restart_on_complete = True

        if 'bcp' in self.config and self.config['bcp']:
            self.bcp = True

        self.mode.player[self.tick_var] = self.start_value

        self._setup_control_events(self.config['control_events'])

    def _setup_control_events(self, event_list):
        # Registers an event handler for each 'control_events' entry so that
        # game events can start/stop/adjust this timer.
        for entry in event_list:
            # Reset both per entry. (The original code initialized kwargs
            # once outside the loop, so a 'start'/'stop' entry following an
            # 'add'/'jump' entry was registered with the previous entry's
            # stale kwargs.)
            handler = None
            kwargs = None

            if entry['action'] == 'add':
                handler = self.add_time
                kwargs = {'timer_value': entry['value']}

            elif entry['action'] == 'subtract':
                handler = self.subtract_time
                kwargs = {'timer_value': entry['value']}

            elif entry['action'] == 'jump':
                handler = self.set_current_time
                kwargs = {'timer_value': entry['value']}

            elif entry['action'] == 'start':
                handler = self.start

            elif entry['action'] == 'stop':
                handler = self.stop

            elif entry['action'] == 'pause':
                handler = self.pause
                kwargs = {'timer_value': entry['value']}

            elif entry['action'] == 'set_tick_interval':
                handler = self.set_tick_interval
                kwargs = {'timer_value': entry['value']}

            elif entry['action'] == 'change_tick_interval':
                handler = self.change_tick_interval
                kwargs = {'change': entry['value']}

            if not handler:
                # Unknown action; skip rather than registering a stale or
                # undefined handler.
                continue

            if kwargs:
                self.event_keys.add(self.machine.events.add_handler(
                    entry['event'], handler, **kwargs))
            else:
                self.event_keys.add(self.machine.events.add_handler(
                    entry['event'], handler))

    def _remove_control_events(self):
        # Unregisters everything _setup_control_events() added.
        for key in self.event_keys:
            self.machine.events.remove_handler_by_key(key)

    def reset(self):
        """Resets this timer's current value back to its start value."""
        self._ticks = self.start_value
        # Also reset the player variable the timer actually counts with.
        # (Previously only the unused self._ticks attribute was reset, so
        # restart_on_complete restarted the timer at its end value.)
        self.set_current_time(self.start_value)

    def start(self, **kwargs):
        """Starts this timer based on the starting value that's already been
        configured. Use set_current_time() if you want to set the starting
        time value.

        Args:
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.running = True
        self.delay.remove('pause')  # cancel a pending auto-resume
        self._create_system_timer()

        self.machine.events.post('timer_' + self.name + '_started',
                                 ticks=self.mode.player[self.tick_var])

        if self.bcp:
            self.machine.bcp.send('timer', name=self.name, action='started',
                                  ticks=self.mode.player[self.tick_var])

    def stop(self, **kwargs):
        """Stops the timer and posts the 'timer_<name>_stopped' event.

        Args:
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.delay.remove('pause')
        self.running = False
        self._remove_system_timer()

        self.machine.events.post('timer_' + self.name + '_stopped',
                                 ticks=self.mode.player[self.tick_var])

        if self.bcp:
            self.machine.bcp.send('timer', name=self.name, action='stopped',
                                  ticks=self.mode.player[self.tick_var])

    def pause(self, timer_value=0, **kwargs):
        """Pauses the timer and posts the 'timer_<name>_paused' event.

        Args:
            timer_value: How many seconds you want to pause the timer for.
                Note that this pause time is real-world seconds and does not
                take into consideration this timer's tick interval.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.running = False

        pause_secs = timer_value

        self._remove_system_timer()
        self.machine.events.post('timer_' + self.name + '_paused',
                                 ticks=self.mode.player[self.tick_var])
        if self.bcp:
            self.machine.bcp.send('timer', name=self.name, action='paused',
                                  ticks=self.mode.player[self.tick_var])

        if pause_secs > 0:
            # Automatically resume after the requested real-time delay.
            self.delay.add('pause', pause_secs, self.start)

    def timer_complete(self):
        """Automatically called when this timer completes. Posts the
        'timer_<name>_complete' event. Can be manually called to mark this
        timer as complete.
        """
        self.stop()

        if self.bcp:  # must be before the event post in case it stops the mode
            self.machine.bcp.send('timer', name=self.name, action='complete',
                                  ticks=self.mode.player[self.tick_var])

        self.machine.events.post('timer_' + self.name + '_complete',
                                 ticks=self.mode.player[self.tick_var])

        if self.restart_on_complete:
            self.reset()
            self.start()

    def _timer_tick(self):
        # Automatically called by the system timer each tick.
        if not self.running:
            self._remove_system_timer()
            return

        if self.direction == 'down':
            self.mode.player[self.tick_var] -= 1
        else:
            self.mode.player[self.tick_var] += 1

        if not self._check_for_done():
            self.machine.events.post('timer_' + self.name + '_tick',
                                     ticks=self.mode.player[self.tick_var])

            if self.bcp:
                self.machine.bcp.send('timer', name=self.name, action='tick',
                                      ticks=self.mode.player[self.tick_var])

    def add_time(self, timer_value, **kwargs):
        """Adds ticks to this timer.

        Args:
            timer_value: The number of ticks you want to add to this timer's
                current value.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        old_value = self.mode.player[self.tick_var]
        new_value = old_value + timer_value

        if self.max_value and new_value > self.max_value:
            new_value = self.max_value

        self.mode.player[self.tick_var] = new_value

        # Report the number of ticks actually added, which may be fewer than
        # requested if the timer hit max_value. (The original computed
        # new_value - timer_value, which reported the wrong number.)
        ticks_added = new_value - old_value

        self.machine.events.post('timer_' + self.name + '_time_added',
                                 ticks=self.mode.player[self.tick_var],
                                 ticks_added=ticks_added)

        if self.bcp:
            self.machine.bcp.send('timer', name=self.name, action='time_added',
                                  ticks=self.mode.player[self.tick_var],
                                  ticks_added=ticks_added)

        self._check_for_done()

    def subtract_time(self, timer_value, **kwargs):
        """Subtracts ticks from this timer.

        Args:
            timer_value: The number of ticks you want to subtract from this
                timer's current value.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        ticks_subtracted = timer_value

        self.mode.player[self.tick_var] -= ticks_subtracted

        self.machine.events.post('timer_' + self.name + '_time_subtracted',
                                 ticks=self.mode.player[self.tick_var],
                                 ticks_subtracted=ticks_subtracted)

        if self.bcp:
            self.machine.bcp.send('timer', name=self.name,
                                  action='time_subtracted',
                                  ticks=self.mode.player[self.tick_var],
                                  ticks_subtracted=ticks_subtracted)

        self._check_for_done()

    def _check_for_done(self):
        # Checks to see if this timer is done. Automatically called anytime
        # the timer's value changes. Returns True (after firing completion)
        # if the timer has reached its end value.
        if (self.direction == 'up' and
                self.mode.player[self.tick_var] >= self.end_value):
            self.timer_complete()
            return True
        # The down-direction branch must be guarded by direction; without
        # the guard a count-up timer still below its end value completed
        # immediately.
        elif (self.direction == 'down' and
                self.mode.player[self.tick_var] <= self.end_value):
            self.timer_complete()
            return True

        return False

    def _create_system_timer(self):
        # Creates the system timer which drives this mode timer's tick
        # method.
        self._remove_system_timer()
        self.timer = Timer(callback=self._timer_tick, frequency=self.tick_secs)
        self.machine.timing.add(self.timer)

    def _remove_system_timer(self):
        # Removes the system timer associated with this mode timer.
        if self.timer:
            self.machine.timing.remove(self.timer)
            self.timer = None

    def change_tick_interval(self, change=0.0, **kwargs):
        """Changes the interval for each "tick" of this timer.

        Args:
            change: Float or int multiplier applied to this timer's current
                tick interval (e.g. 0.5 halves the interval, 2 doubles it).
                To set an absolute value, use the set_tick_interval() method.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.tick_secs *= change
        self._create_system_timer()

    def set_tick_interval(self, timer_value, **kwargs):
        """Sets the number of seconds between ticks for this timer. This is
        an absolute setting. To apply a change to the current value, use the
        change_tick_interval() method.

        Args:
            timer_value: The new number of seconds between each tick of this
                timer. This value should always be positive.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.tick_secs = abs(timer_value)
        self._create_system_timer()

    def set_current_time(self, timer_value, **kwargs):
        """Sets the current amount of time of this timer. This value is
        expressed in "ticks" since the interval per tick can be something
        other than 1 second).

        Args:
            timer_value: Integer of the current value you want this timer to
                be.
            **kwargs: Not used in this method. Only exists since this method
                is often registered as an event handler which may contain
                additional keyword arguments.
        """
        self.mode.player[self.tick_var] = int(timer_value)

        if self.max_value and self.mode.player[self.tick_var] > self.max_value:
            self.mode.player[self.tick_var] = self.max_value

    def kill(self):
        """Stops this timer and also removes all the control events."""
        self.stop()
        self._remove_control_events()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Modified by Benoit Boissinot:
# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
# Modified by Dirkjan Ochtman:
# - import md5 function from a local util module
# Modified by Martin Geisler:
# - moved md5 function from local util module to this module
# Modified by Augie Fackler:
# - add safesend method and use it to prevent broken pipe errors
# on large POST requests
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urllib2.build_opener(keepalive_handler)
>>> urllib2.install_opener(opener)
>>>
>>> fo = urllib2.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
status - the return status (i.e. 404)
reason - english translation of status (i.e. 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
import errno
import httplib
import socket
import thread
import urllib2
# Module-level debug hook; assign a logging-like object to enable tracing
# (the handler classes call DEBUG.info()/DEBUG.error() when it is set).
DEBUG = None

import sys
# See the module docstring: before Python 2.4 the HTTP handler itself had to
# treat non-200 statuses as errors, so default HANDLE_ERRORS on for old
# Pythons and off (defer to HTTPErrorProcessor) on 2.4+.
if sys.version_info < (2, 4):
    HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
class ConnectionManager(object):
    """
    The connection manager must be able to:
      * keep track of all existing connections, grouped by host
      * hand out an idle ("ready") connection, marking it busy
      * forget connections when they are closed

    Structural changes to the pool are serialized through a single lock;
    set_ready() is deliberately lock-free (see module notes on threads).
    """

    def __init__(self):
        self._lock = thread.allocate_lock()
        self._hostmap = {}   # map hosts to a list of connections
        self._connmap = {}   # map connections to host
        self._readymap = {}  # map connection to ready state

    def add(self, host, connection, ready):
        """Register *connection* for *host* with the given ready state."""
        self._lock.acquire()
        try:
            self._hostmap.setdefault(host, []).append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready
        finally:
            self._lock.release()

    def remove(self, connection):
        """Forget *connection* entirely; unknown connections are ignored."""
        self._lock.acquire()
        try:
            host = self._connmap.pop(connection, None)
            if host is not None:
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                # Drop the host entry once its last connection is gone.
                if not self._hostmap[host]:
                    del self._hostmap[host]
        finally:
            self._lock.release()

    def set_ready(self, connection, ready):
        """Set the ready flag of a known connection.

        Unknown connections are ignored. (The original
        ``try/except KeyError`` was dead code -- dict item assignment never
        raises KeyError -- so connections that had already been removed were
        silently re-added to the ready map.)
        """
        if connection in self._readymap:
            self._readymap[connection] = ready

    def get_ready_conn(self, host):
        """Return a ready connection to *host*, marking it busy, or None."""
        conn = None
        self._lock.acquire()
        try:
            for c in self._hostmap.get(host, []):
                if self._readymap[c]:
                    self._readymap[c] = 0  # mark busy before handing out
                    conn = c
                    break
        finally:
            self._lock.release()
        return conn

    def get_all(self, host=None):
        """Return a copied connection list for *host*, or a copied
        host->connections dict when *host* is None."""
        if host:
            return list(self._hostmap.get(host, []))
        else:
            return dict(self._hostmap)
class KeepAliveHandler(object):
def __init__(self):
self._cm = ConnectionManager()
#### Connection Management
def open_connections(self):
"""return a list of connected hosts and the number of connections
to each. [('foo.com:80', 2), ('bar.org', 1)]"""
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
def close_connection(self, host):
"""close connection(s) to <host>
host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
no error occurs if there is no connection to that host."""
for h in self._cm.get_all(host):
self._cm.remove(h)
h.close()
def close_all(self):
"""close all open connections"""
for host, conns in self._cm.get_all().iteritems():
for h in conns:
self._cm.remove(h)
h.close()
def _request_closed(self, request, host, connection):
"""tells us that this request is now closed and that the
connection is ready for another request"""
self._cm.set_ready(connection, 1)
def _remove_connection(self, host, connection, close=0):
if close:
connection.close()
self._cm.remove(connection)
#### Transaction Execution
def http_open(self, req):
return self.do_open(HTTPConnection, req)
def do_open(self, http_class, req):
host = req.get_host()
if not host:
raise urllib2.URLError('no host given')
try:
h = self._cm.get_ready_conn(host)
while h:
r = self._reuse_connection(h, req, host)
# if this response is non-None, then it worked and we're
# done. Break out, skipping the else block.
if r:
break
# connection is bad - possibly closed by server
# discard it and ask for the next free connection
h.close()
self._cm.remove(h)
h = self._cm.get_ready_conn(host)
else:
# no (working) free connections were found. Create a new one.
h = http_class(host)
if DEBUG:
DEBUG.info("creating new connection to %s (%d)",
host, id(h))
self._cm.add(host, h, 0)
self._start_transaction(h, req)
r = h.getresponse()
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
# if not a persistent connection, don't try to reuse it
if r.will_close:
self._cm.remove(h)
if DEBUG:
DEBUG.info("STATUS: %s, %s", r.status, r.reason)
r._handler = self
r._host = host
r._url = req.get_full_url()
r._connection = h
r.code = r.status
r.headers = r.msg
r.msg = r.reason
if r.status == 200 or not HANDLE_ERRORS:
return r
else:
return self.parent.error('http', req, r,
r.status, r.msg, r.headers)
def _reuse_connection(self, h, req, host):
"""start the transaction with a re-used connection
return a response object (r) upon success or None on failure.
This DOES not close or remove bad connections in cases where
it returns. However, if an unexpected exception occurs, it
will close and remove the connection before re-raising.
"""
try:
self._start_transaction(h, req)
r = h.getresponse()
# note: just because we got something back doesn't mean it
# worked. We'll check the version below, too.
except (socket.error, httplib.HTTPException):
r = None
except: # re-raises
# adding this block just in case we've missed
# something we will still raise the exception, but
# lets try and close the connection and remove it
# first. We previously got into a nasty loop
# where an exception was uncaught, and so the
# connection stayed open. On the next try, the
# same exception was raised, etc. The trade-off is
# that it's now possible this call will raise
# a DIFFERENT exception
if DEBUG:
DEBUG.error("unexpected exception - closing "
"connection to %s (%d)", host, id(h))
self._cm.remove(h)
h.close()
raise
if r is None or r.version == 9:
# httplib falls back to assuming HTTP 0.9 if it gets a
# bad header back. This is most likely to happen if
# the socket has been closed by the server since we
# last used the connection.
if DEBUG:
DEBUG.info("failed to re-use connection to %s (%d)",
host, id(h))
r = None
else:
if DEBUG:
DEBUG.info("re-using connection to %s (%d)", host, id(h))
return r
    def _start_transaction(self, h, req):
        """Send the request line, headers and (optional) body on *h*.

        What follows mostly reimplements HTTPConnection.request()
        except it adds self.parent.addheaders in the mix.
        """
        headers = req.headers.copy()
        if sys.version_info >= (2, 4):
            headers.update(req.unredirected_hdrs)
        headers.update(self.parent.addheaders)
        # lower-case the names so the duplicate checks below are reliable
        headers = dict((n.lower(), v) for n, v in headers.items())
        skipheaders = {}
        # suppress httplib's automatic Host/Accept-Encoding when the
        # caller supplied their own versions of those headers
        for n in ('host', 'accept-encoding'):
            if n in headers:
                skipheaders['skip_' + n.replace('-', '_')] = 1
        try:
            if req.has_data():
                data = req.get_data()
                h.putrequest('POST', req.get_selector(), **skipheaders)
                # provide sane defaults for a form POST unless overridden
                if 'content-type' not in headers:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                if 'content-length' not in headers:
                    h.putheader('Content-length', '%d' % len(data))
            else:
                h.putrequest('GET', req.get_selector(), **skipheaders)
        except (socket.error), err:
            raise urllib2.URLError(err)
        for k, v in headers.items():
            h.putheader(k, v)
        h.endheaders()
        if req.has_data():
            h.send(data)
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
    # Concrete urllib2 handler: combines the keep-alive machinery with
    # the stock HTTP handler; all behaviour comes from the bases.
    pass
class HTTPResponse(httplib.HTTPResponse):
    # we need to subclass HTTPResponse in order to
    # 1) add readline() and readlines() methods
    # 2) add close_connection() methods
    # 3) add info() and geturl() methods
    # in order to add readline(), read must be modified to deal with a
    # buffer.  example: readline must read a buffer and then spit back
    # one line at a time.  The only real alternative is to read one
    # BYTE at a time (ick).  Once something has been read, it can't be
    # put back (ok, maybe it can, but that's even uglier than this),
    # so if you THEN do a normal read, you must first take stuff from
    # the buffer.
    # the read method wraps the original to accommodate buffering,
    # although read() never adds to the buffer.
    # Both readline and readlines have been stolen with almost no
    # modification from socket.py
    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        # NOTE(review): the third positional argument here lands in the
        # parent's ``strict`` slot, so ``method`` is effectively dropped
        # and ``strict`` is never forwarded -- confirm against upstream
        # keepalive.py before relying on HEAD handling.
        httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        self.fileno = sock.fileno
        self.code = None
        self._rbuf = ''           # look-ahead buffer used by readline()
        self._rbufsize = 8096     # how much to pull per buffered read
        self._handler = None  # inserted by the handler later
        self._host = None  # (same)
        self._url = None  # (same)
        self._connection = None  # (same)
    # keep a handle on the parent's un-buffered read so the wrapper
    # below can delegate the actual socket work
    _raw_read = httplib.HTTPResponse.read
    def close(self):
        # Close the file object and notify the handler that this request
        # is finished (so the connection can return to the pool).
        if self.fp:
            self.fp.close()
            self.fp = None
            if self._handler:
                self._handler._request_closed(self, self._host,
                                              self._connection)
    def close_connection(self):
        # Forcibly evict the underlying connection from the pool, then close.
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()
    def info(self):
        # urllib2 file-object API: the response headers.
        return self.headers
    def geturl(self):
        # urllib2 file-object API: the URL that was fetched.
        return self._url
    def read(self, amt=None):
        """Read up to *amt* bytes, draining the readline() buffer first."""
        # the _rbuf test is only in this first if for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                # buffer satisfies part of the request; read the rest below
                amt -= L
            else:
                # buffer alone satisfies the request
                s = self._rbuf[:amt]
                self._rbuf = self._rbuf[amt:]
                return s
        s = self._rbuf + self._raw_read(amt)
        self._rbuf = ''
        return s
    # stolen from Python SVN #68532 to fix issue1088
    def _read_chunked(self, amt):
        """Decode a chunked transfer-encoding body (up to *amt* bytes)."""
        chunk_left = self.chunk_left
        value = ''
        # XXX This accumulates chunks by repeated string concatenation,
        # which is not efficient as the number or size of chunks gets big.
        while True:
            if chunk_left is None:
                # at a chunk boundary: parse the next chunk-size line
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i]  # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronization is
                    # probably lost
                    self.close()
                    raise httplib.IncompleteRead(value)
                if chunk_left == 0:
                    # size 0 marks the last chunk
                    break
            if amt is None:
                value += self._safe_read(chunk_left)
            elif amt < chunk_left:
                # caller wants less than this chunk holds; leave remainder
                value += self._safe_read(amt)
                self.chunk_left = chunk_left - amt
                return value
            elif amt == chunk_left:
                value += self._safe_read(amt)
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return value
            else:
                value += self._safe_read(chunk_left)
                amt -= chunk_left
            # we read the whole chunk, get another
            self._safe_read(2)  # toss the CRLF at the end of the chunk
            chunk_left = None
        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break
        # we read everything; close the "file"
        self.close()
        return value
    def readline(self, limit=-1):
        """Read one line (up to *limit* bytes) via the internal buffer."""
        i = self._rbuf.find('\n')
        # fill the buffer until it holds a newline or *limit* bytes
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new:
                break
            i = new.find('\n')
            if i >= 0:
                i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0:
            i = len(self._rbuf)
        else:
            i = i + 1
        if 0 <= limit < len(self._rbuf):
            i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data
    def readlines(self, sizehint = 0):
        """Read lines until EOF (or roughly *sizehint* bytes) as a list."""
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
def safesend(self, str):
    """Send `str' to the server.
    Shamelessly ripped off from httplib to patch a bad behavior.
    """
    # _broken_pipe_resp is an attribute we set in this function
    # if the socket is closed while we're sending data but
    # the server sent us a response before hanging up.
    # In that case, we want to pretend to send the rest of the
    # outgoing data, and then let the user use getresponse()
    # (which we wrap) to get this last response before
    # opening a new socket.
    if getattr(self, '_broken_pipe_resp', None) is not None:
        return
    if self.sock is None:
        if self.auto_open:
            self.connect()
        else:
            raise httplib.NotConnected
    # send the data to the server. if we get a broken pipe, then close
    # the socket. we want to reconnect when somebody tries to send again.
    #
    # NOTE: we DO propagate the error, though, because we cannot simply
    #       ignore the error... the caller will know if they can retry.
    if self.debuglevel > 0:
        print "send:", repr(str)
    try:
        blocksize = 8192
        # file-like objects are streamed in blocks; plain strings are
        # sent in one sendall() call
        read = getattr(str, 'read', None)
        if read is not None:
            if self.debuglevel > 0:
                print "sending a read()able"
            data = read(blocksize)
            while data:
                self.sock.sendall(data)
                data = read(blocksize)
        else:
            self.sock.sendall(str)
    except socket.error, v:
        reraise = True
        if v[0] == errno.EPIPE:  # Broken pipe
            if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                # the server may have answered before hanging up; stash
                # that response for the wrapped getresponse() to return
                self._broken_pipe_resp = None
                self._broken_pipe_resp = self.getresponse()
                reraise = False
            self.close()
        if reraise:
            raise
def wrapgetresponse(cls):
    """Wraps getresponse in cls with a broken-pipe sane version.
    """
    def safegetresponse(self):
        # If safesend() stashed a response when the pipe broke, the
        # socket is already closed -- hand the stashed response back.
        # Otherwise fall through to the normal response path.
        cached = getattr(self, '_broken_pipe_resp', None)
        if cached is None:
            return cls.getresponse(self)
        return cached
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
class HTTPConnection(httplib.HTTPConnection):
    # use the modified response class
    response_class = HTTPResponse
    # broken-pipe-tolerant replacements defined above
    send = safesend
    getresponse = wrapgetresponse(httplib.HTTPConnection)
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
    """Fetch *url* with HANDLE_ERRORS both off and on, printing the
    status/reason each time, then report and close open connections."""
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            fo.read()
            fo.close()
            try:
                status, reason = fo.status, fo.reason
            except AttributeError:
                # plain urllib2 file objects don't carry status/reason
                status, reason = None, None
        except IOError, e:
            print " EXCEPTION: %s" % e
            raise
        else:
            print " status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
def md5(s):
    """Return a new md5 hash object initialised with *s*.

    Lazily resolves the hash constructor (``hashlib.md5``, falling back
    to the old ``md5`` module on ancient Pythons) on first call, then
    rebinds the module-level ``md5`` name to a factory.  The factory
    also exposes the legacy ``.new()`` constructor because the helpers
    below (e.g. continuity()) call ``md5.new(data)`` -- rebinding
    directly to ``hashlib.md5`` would break them with AttributeError,
    since ``hashlib.md5`` has no ``new`` attribute.
    """
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5

    def _factory(data=b''):
        return _md5(data)
    # legacy old-``md5``-module alias: md5.new(...) == md5(...)
    _factory.new = _factory
    global md5
    md5 = _factory
    return _factory(s)
def continuity(url):
    """Fetch *url* three ways (plain urllib2, keepalive read(), keepalive
    readline()) and print an md5 digest of each body so the outputs can
    be compared for corruption."""
    format = '%25s: %s'
    # first fetch the file with the normal http handler
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    # NOTE(review): md5.new(...) relies on the md5() helper above having
    # been called/rebound first; as written, calling this directly may
    # raise AttributeError -- verify before use.
    m = md5.new(foo)
    print format % ('normal urllib', m.hexdigest())
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive read', m.hexdigest())
    # finally, rebuild the body line by line via readline()
    fo = urllib2.urlopen(url)
    foo = ''
    while True:
        f = fo.readline()
        if f:
            foo = foo + f
        else: break
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive readline', m.hexdigest())
def comp(N, url):
    """Time *N* fetches of *url* with the normal handlers and again with
    the keepalive handler, printing both times and the speed-up factor."""
    print ' making %i connections to:\n %s' % (N, url)
    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print ' TIME: %.3f s' % t1
    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print ' TIME: %.3f s' % t2
    print ' improvement factor: %.2f' % (t1 / t2)
def fetch(N, url, delay=0):
    """Fetch *url* N times (sleeping *delay* seconds between requests
    when given) and return the total elapsed wall-clock time in seconds.

    Warns on stdout if any fetch returned a different body length than
    the first one.
    """
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urllib2.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime
    # sanity check: every read should be the same length as the first
    j = 0
    for i in lens[1:]:
        j = j + 1
        if not i == lens[0]:
            print "WARNING: inconsistent length on read %i: %i" % (j, i)
    return diff
def test_timeout(url):
    """Fetch *url*, wait 20 seconds for the server to drop the idle
    connection, fetch again, and print whether both bodies match."""
    global DEBUG
    dbbackup = DEBUG
    class FakeLogger(object):
        # minimal logger: routes every level to stdout so the handler's
        # reconnect messages are visible during this test
        def debug(self, msg, *args):
            print msg % args
        info = warning = error = debug
    DEBUG = FakeLogger()
    print " fetching the file to establish a connection"
    fo = urllib2.urlopen(url)
    data1 = fo.read()
    fo.close()
    i = 20
    print " waiting %i seconds for the server to close the connection" % i
    while i > 0:
        # countdown display, overwriting the same line
        sys.stdout.write('\r %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    sys.stderr.write('\r')
    print " fetching the file a second time"
    fo = urllib2.urlopen(url)
    data2 = fo.read()
    fo.close()
    if data1 == data2:
        print ' data are identical'
    else:
        print ' ERROR: DATA DIFFER'
    # restore whatever logger was installed before
    DEBUG = dbbackup
def test(url, N=10):
    """Run the whole manual test suite against *url*: error handling,
    continuity, speed comparison (N fetches) and dropped-connection
    recovery."""
    print "checking error handler (do this on a non-200)"
    try: error_handler(url)
    except IOError:
        print "exiting - exception will prevent further tests"
        sys.exit()
    print
    print "performing continuity test (making sure stuff isn't corrupted)"
    continuity(url)
    print
    print "performing speed comparison"
    comp(N, url)
    print
    print "performing dropped-connection check"
    test_timeout(url)
if __name__ == '__main__':
    # CLI entry point: keepalive test driver, usage: <script> <N> <url>
    import time
    import sys
    try:
        N = int(sys.argv[1])
        url = sys.argv[2]
    except (IndexError, ValueError):
        # missing or non-integer argument: print usage and exit quietly
        print "%s <integer> <url>" % sys.argv[0]
    else:
        test(url, N)
| |
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# An alternate Python Minecraft library for the Rasperry-Pi
# Copyright (c) 2013-2015 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
# Py2/Py3 compatibility shim: with unicode_literals in effect, type('')
# is the text type on both versions, so ``str`` below always means text.
str = type('')
import pytest
import math
from .conftest import fp_equal, fp_vectors_equal
from picraft import Vector, vector_range, line, lines, O, X, Y, Z
from picraft.vector import rmod, rdiv, sign
from picraft.compat import range
def test_vector_init():
    """Constructor defaults to the origin; accepts positional and keyword axes."""
    assert Vector() == (0, 0, 0)
    assert Vector(1) == (1, 0, 0)
    assert Vector(1, 2, 3) == (1, 2, 3)
    assert Vector(z=3, y=2, x=1) == (1, 2, 3)
def test_vector_from_string():
    """from_string parses 'x,y,z' (spaces tolerated); rejects other forms."""
    assert Vector.from_string('1,2,3') == Vector(1, 2, 3)
    assert Vector.from_string('1, 2, 3') == Vector(1, 2, 3)
    with pytest.raises(ValueError):
        Vector.from_string('1')
    with pytest.raises(ValueError):
        Vector.from_string('(1,2,3)')
def test_vector_str():
    """str() renders a vector as bare comma-separated ordinates."""
    assert str(Vector()) == '0,0,0'
    assert str(Vector(1, 2, 3)) == '1,2,3'
    assert str(Vector(0.1, 0.2, 0.3)) == '0.1,0.2,0.3'
def test_vector_add():
    """Elementwise addition; scalars broadcast (both left and right operand)."""
    assert Vector() + Vector(1, 2, 3) == Vector(1,2, 3)
    assert Vector() + 1 == Vector(1, 1, 1)
    assert 1 + Vector() == Vector(1, 1, 1)
def test_vector_sub():
    """Elementwise subtraction; right-hand scalar broadcasts, left does not."""
    assert Vector(1, 2, 3) - Vector(1, 1, 1) == Vector(0, 1, 2)
    assert Vector(1, 2, 3) - 1 == Vector(0, 1, 2)
    with pytest.raises(TypeError):
        1 - Vector()
def test_vector_mul():
    """Elementwise multiplication; scalars broadcast on either side."""
    assert Vector(1, 2, 3) * Vector(3, 3, 3) == Vector(3, 6, 9)
    assert Vector(1, 2, 3) * 3 == Vector(3, 6, 9)
    assert 3 * Vector(1, 2, 3) == Vector(3, 6, 9)
def test_vector_truediv():
    """True division is elementwise; dividing a scalar by a vector is an error."""
    assert fp_vectors_equal(Vector(1, 2, 3) / Vector(2, 2, 2), Vector(0.5, 1, 1.5))
    assert fp_vectors_equal(Vector(1, 2, 3) / 2, Vector(0.5, 1, 1.5))
    with pytest.raises(TypeError):
        2 / Vector(1, 2, 3)
def test_vector_floordiv():
    """Floor division is elementwise; scalar // vector is an error."""
    assert Vector(1, 2, 3) // Vector(2, 2, 2) == Vector(0, 1, 1)
    assert Vector(1, 2, 3) // 2 == Vector(0, 1, 1)
    with pytest.raises(TypeError):
        2 // Vector(1, 2, 3)
def test_vector_mod():
    """Modulo is elementwise; scalar % vector is an error."""
    assert Vector(1, 3, 9) % Vector(3, 3, 3) == Vector(1, 0, 0)
    assert Vector(2, 3, 4) % 2 == Vector(0, 1, 0)
    with pytest.raises(TypeError):
        2 % Vector(1, 1, 1)
def test_vector_pow():
    """Exponentiation is elementwise (vector or scalar exponent) and the
    three-argument pow() form applies an elementwise modulus; raising a
    scalar to a vector power is an error."""
    assert Vector(1, 2, 3) ** Vector(2, 2, 2) == Vector(1, 4, 9)
    assert pow(Vector(1, 2, 3), Vector(2, 2, 2), Vector(5, 5, 5)) == Vector(1, 4, 4)
    assert Vector(1, 2, 3) ** 2 == Vector(1, 4, 9)
    assert pow(Vector(1, 2, 3), 2, 5) == Vector(1, 4, 4)
    with pytest.raises(TypeError):
        2 ** Vector(1, 2, 3)
def test_vector_lshift():
    """Left shift is elementwise; scalar << vector is an error."""
    assert Vector(1, 2, 3) << Vector(1, 2, 3) == Vector(2, 8, 24)
    assert Vector(1, 2, 3) << 1 == Vector(2, 4, 6)
    with pytest.raises(TypeError):
        2 << Vector(1, 2, 3)
def test_vector_rshift():
    """Right shift is elementwise; scalar >> vector is an error."""
    assert Vector(2, 8, 16) >> Vector(0, 1, 2) == Vector(2, 4, 4)
    assert Vector(2, 8, 16) >> 1 == Vector(1, 4, 8)
    with pytest.raises(TypeError):
        2 >> Vector(1, 1, 1)
def test_vector_and():
    """Bitwise AND is elementwise; scalars broadcast."""
    assert Vector(1, 2, 3) & Vector(1, 1, 1) == Vector(1, 0, 1)
    assert Vector(4, 4, 4) & Vector(0x7fff, 0, 0) == Vector(4, 0, 0)
    assert Vector(1, 2, 3) & 1 == Vector(1, 0, 1)
def test_vector_xor():
    """Bitwise XOR is elementwise; scalars broadcast."""
    assert Vector(1, 2, 3) ^ Vector(1, 1, 1) == Vector(0, 3, 2)
    assert Vector(4, 4, 4) ^ Vector(4, 4, 4) == Vector(0, 0, 0)
    assert Vector(1, 2, 3) ^ 1 == Vector(0, 3, 2)
def test_vector_or():
    """Bitwise OR is elementwise; scalars broadcast."""
    assert Vector(1, 2, 3) | Vector(1, 1, 1) == Vector(1, 3, 3)
    assert Vector(4, 4, 4) | Vector(1, 2, 3) == Vector(5, 6, 7)
    assert Vector(1, 2, 3) | 1 == Vector(1, 3, 3)
def test_vector_neg():
    """Unary minus negates every ordinate."""
    assert -Vector(1, 1, 1) == Vector(-1, -1, -1)
def test_vector_pos():
    """Unary plus is the identity."""
    assert +Vector(1, 1, 1) == Vector(1, 1, 1)
def test_vector_abs():
    """abs() takes the absolute value of each ordinate."""
    assert abs(Vector(-1, 1, -2)) == Vector(1, 1, 2)
    assert abs(Vector(0, 1, 2)) == Vector(0, 1, 2)
def test_vector_bool():
    """Only the zero vector is falsy; any non-zero ordinate makes it truthy."""
    assert Vector(1, 0, 0)
    assert Vector(0, 1, 0)
    assert Vector(0, 0, 1)
    assert not Vector()
def test_vector_dot():
    """dot() computes the scalar (dot) product."""
    assert Vector(1, 1, 1).dot(Vector()) == 0
    assert Vector(1, 2, 3).dot(Vector(1, 1, 1)) == 6
    assert Vector(1, 2, 3).dot(Vector(0, 0, 2)) == 6
def test_vector_cross():
    """cross() computes the vector (cross) product; parallel vectors give zero."""
    assert Vector(x=1).cross(Vector(x=1)) == Vector()
    assert Vector(y=1).cross(Vector(y=-1)) == Vector()
    assert Vector(z=1).cross(Vector(z=1)) == Vector()
    assert Vector(x=1).cross(Vector(y=1)) == Vector(z=1)
    assert Vector(x=1).cross(Vector(y=-1)) == Vector(z=-1)
def test_vector_distance_to():
    """distance_to() returns the Euclidean distance between two points."""
    assert Vector().distance_to(Vector()) == 0.0
    assert Vector(x=1).distance_to(Vector(x=2)) == 1.0
    assert Vector(x=1).distance_to(Vector(2, 1, 0)) == 2 ** 0.5
def test_vector_magnitude():
    """magnitude is the Euclidean length of the vector."""
    assert Vector().magnitude == 0
    assert Vector(2, 4, 4).magnitude == 6
def test_vector_unit():
    """unit scales to length 1; the zero vector's unit is the zero vector."""
    assert Vector(x=1).unit == Vector(1, 0, 0)
    assert Vector().unit == Vector()
    assert Vector(2, 4, 4).unit == Vector(1/3, 2/3, 2/3)
def test_vector_replace():
    """replace() returns a copy with the given ordinates substituted."""
    assert Vector().replace(x=1) == Vector(1, 0, 0)
    assert Vector(1, 2, 3).replace() == Vector(1, 2, 3)
    assert Vector(1, 2, 3).replace(2, 4, 6) == Vector(2, 4, 6)
    assert Vector(1, 2, 3).replace(z=-1) == Vector(1, 2, -1)
def test_vector_trunc():
    """math.trunc() truncates each ordinate towards zero."""
    assert math.trunc(Vector(1, 2, 3)) == Vector(1, 2, 3)
    assert math.trunc(Vector(1.1, 2.5, -1.1)) == Vector(1, 2, -1)
    assert math.trunc(Vector(1.9, 0.0, -1.9)) == Vector(1, 0, -1)
def test_vector_floor():
    """floor() rounds each ordinate towards negative infinity."""
    assert Vector(1, 2, 3).floor() == Vector(1, 2, 3)
    assert Vector(1.1, 2.5, -1.1).floor() == Vector(1, 2, -2)
    assert Vector(1.9, 0.0, -1.9).floor() == Vector(1, 0, -2)
def test_vector_ceil():
    """ceil() rounds each ordinate towards positive infinity."""
    assert Vector(1, 2, 3).ceil() == Vector(1, 2, 3)
    assert Vector(1.1, 2.5, -1.1).ceil() == Vector(2, 3, -1)
    assert Vector(1.9, 0.0, -1.9).ceil() == Vector(2, 0, -1)
def test_vector_round():
    """round() rounds each ordinate, honouring a positive or negative ndigits."""
    assert fp_vectors_equal(Vector(1, 2, 3).round(), Vector(1, 2, 3))
    assert fp_vectors_equal(Vector(1.1, 3.5, -1.1).round(), Vector(1, 4, -1))
    assert fp_vectors_equal(Vector(1.9, 0.0, -1.9).round(), Vector(2, 0, -2))
    assert fp_vectors_equal(Vector(1.9, 0.0, -1.9).round(1), Vector(1.9, 0.0, -1.9))
    assert fp_vectors_equal(Vector(1.9, 0.0, -1.9).round(-1), Vector(0, 0, 0))
def test_vector_angle_between():
    """angle_between() returns the angle in degrees."""
    assert fp_equal(X.angle_between(Y), 90.0)
    assert fp_equal(Y.angle_between(Z), 90.0)
    assert fp_equal((X + Y).angle_between(X), 45.0)
def test_vector_project():
    """project() returns the scalar projection onto the given vector."""
    assert fp_equal(X.project(X), 1.0)
    assert fp_equal(X.project(Y), 0.0)
    assert fp_equal(X.project(Z), 0.0)
    assert fp_equal(Vector(1, 2, 3).project(2 * Y), 2.0)
    assert fp_equal(Vector(3, 4, 5).project(Vector(3, 4, 0)), 5.0)
def test_vector_rotate():
    """rotate() turns by degrees about an axis, optionally about an origin."""
    assert fp_vectors_equal(X.rotate(90, about=X), X)
    assert fp_vectors_equal(X.rotate(90, about=Y), -Z)
    assert fp_vectors_equal(X.rotate(90, about=Z), Y)
    assert fp_vectors_equal(X.rotate(90, about=-X), X)
    assert fp_vectors_equal(X.rotate(90, about=-Y), Z)
    assert fp_vectors_equal(X.rotate(90, about=-Z), -Y)
    assert fp_vectors_equal(X.rotate(180, about=X + Y), Y)
    assert fp_vectors_equal(O.rotate(180, about=Y, origin=Z), 2*Z)
def test_vector_range_init():
    """vector_range defaults start/step and rejects non-integer vectors."""
    v = vector_range(Vector() + 2)
    assert v.start == Vector()
    assert v.stop == Vector(2, 2, 2)
    assert v.step == Vector(1, 1, 1)
    with pytest.raises(TypeError):
        vector_range(Vector(0.0, 0.0, 0.0), step=Vector(0.5, 0.5, 1.0))
def test_vector_range_start():
    """start is the first argument when two are given, else the origin."""
    assert vector_range(Vector() + 1).start == Vector()
    assert vector_range(Vector(), Vector() + 1).start == Vector()
    assert vector_range(Vector(1, 0, 1), Vector() + 1).start == Vector(1, 0, 1)
def test_vector_range_stop():
    """stop is the sole argument in the one-argument form."""
    assert vector_range(Vector() + 1).stop == Vector(1, 1, 1)
    assert vector_range(Vector(1, 1, 0)).stop == Vector(1, 1, 0)
    assert vector_range(Vector(1, 0, 1), Vector() + 1).stop == Vector(1, 1, 1)
def test_vector_range_step():
    """step may be supplied explicitly; a zero component is rejected."""
    assert vector_range(Vector() + 1, step=Vector() + 2).step == Vector(2, 2, 2)
    assert vector_range(Vector(), Vector() + 1, Vector() + 1).step == Vector(1, 1, 1)
    with pytest.raises(ValueError):
        vector_range(Vector() + 1, step=Vector(1, 1, 0))
def test_vector_range_order():
    """order defaults to 'zxy' and must be a permutation of the axis names."""
    assert vector_range(Vector() + 1).order == 'zxy'
    assert vector_range(Vector() + 1, order='xyz').order == 'xyz'
    with pytest.raises(ValueError):
        vector_range(Vector() + 2, order='abc')
def test_vector_range_index():
    """index() locates a vector, honouring slicing and iteration order."""
    assert vector_range(Vector() + 2).index(Vector()) == 0
    assert vector_range(Vector() + 2)[:Vector() - 1].index(Vector()) == 0
    assert vector_range(Vector() + 2).index(Vector(1, 1, 1)) == 7
    assert vector_range(Vector() + 2)[Vector(z=1):].index(Vector(1, 1, 1)) == 3
    assert vector_range(Vector() + 2, order='xyz').index(Vector(1, 0, 0)) == 1
    assert vector_range(Vector() + 2, order='zxy').index(Vector(1, 0, 0)) == 2
    assert vector_range(Vector() + 2, order='zyx').index(Vector(1, 0, 0)) == 4
    with pytest.raises(ValueError):
        vector_range(Vector() + 2).index(Vector(2, 2, 2))
def test_vector_range_contains():
    """Membership includes start but excludes stop, regardless of order."""
    assert Vector() in vector_range(Vector() + 2)
    assert Vector(1, 1, 1) in vector_range(Vector() + 2)
    assert Vector(x=1) in vector_range(Vector() + 2, order='xyz')
    assert Vector() + 2 not in vector_range(Vector() + 2)
def test_vector_range_count():
    """count() is 1 for members and 0 for non-members."""
    assert vector_range(Vector() + 2).count(Vector()) == 1
    assert vector_range(Vector() + 2).count(Vector(1, 1, 1)) == 1
    assert vector_range(Vector() + 2, order='xyz').count(Vector(1, 0, 0)) == 1
    assert vector_range(Vector() + 2).count(Vector(2, 2, 2)) == 0
def test_vector_range_len():
    """len() is the product of the per-axis extents."""
    assert len(vector_range(Vector())) == 0
    assert len(vector_range(Vector() + 1)) == 1
    assert len(vector_range(Vector() + 2)) == 8
    assert len(vector_range(Vector() + 3)) == 27
def test_vector_range_ordering():
    """Ranges compare lexicographically by their iteration sequence and
    compare equal to equivalent lists/iterators of vectors."""
    assert vector_range(Vector() + 2, order='xyz') == vector_range(Vector(2, 2, 2), order='xyz')
    assert vector_range(Vector() + 2, order='xyz') == list(vector_range(Vector(2, 2, 2), order='xyz'))
    assert vector_range(Vector() + 2, order='zxy') <= vector_range(Vector() + 2, order='xyz')
    assert vector_range(Vector() + 2, order='zxy') <= vector_range(Vector() + 2, order='zxy')
    assert vector_range(Vector() + 2, order='zxy') < vector_range(Vector() + 2, order='xyz')
    assert not (vector_range(Vector() + 2, order='zxy') < vector_range(Vector() + 2, order='zxy'))
    assert vector_range(Vector() + 2, order='xyz') > vector_range(Vector() + 2, order='zxy')
    assert vector_range(Vector() + 2, order='xyz') >= vector_range(Vector() + 2, order='zxy')
    assert vector_range(Vector() + 2, order='xyz') != vector_range(Vector() + 2, order='zxy')
    assert vector_range(Vector() + 2, order='xyz') == [
        Vector(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0), Vector(1, 1, 0),
        Vector(0, 0, 1), Vector(1, 0, 1), Vector(0, 1, 1), Vector(1, 1, 1),
    ]
    assert vector_range(Vector() + 2, order='zxy') == [
        Vector(0, 0, 0), Vector(0, 0, 1), Vector(1, 0, 0), Vector(1, 0, 1),
        Vector(0, 1, 0), Vector(0, 1, 1), Vector(1, 1, 0), Vector(1, 1, 1),
    ]
    assert vector_range(Vector() + 2, order='zyx') == [
        Vector(0, 0, 0), Vector(0, 0, 1), Vector(0, 1, 0), Vector(0, 1, 1),
        Vector(1, 0, 0), Vector(1, 0, 1), Vector(1, 1, 0), Vector(1, 1, 1),
    ]
    assert iter(vector_range(Vector() + 2, order='zxy')) < vector_range(Vector() + 2, order='xyz')
    assert iter(vector_range(Vector() + 2, order='zxy')) <= vector_range(Vector() + 2, order='xyz')
    assert iter(vector_range(Vector() + 2, order='xyz')) > vector_range(Vector() + 2, order='zxy')
    assert iter(vector_range(Vector() + 2, order='xyz')) >= vector_range(Vector() + 2, order='zxy')
    assert iter(vector_range(Vector() + 2, order='xyz')) != vector_range(Vector() + 2, order='zxy')
def test_vector_range_reversed():
    """reversed() matches reversing the materialised list or a negative step."""
    assert list(reversed(vector_range(Vector() + 2))) == list(reversed(list(vector_range(Vector() + 2))))
    assert list(reversed(vector_range(Vector() + 2, order='xyz'))) == vector_range(Vector() + 1, Vector() - 1, Vector() - 1, order='xyz')
    assert list(reversed(vector_range(Vector() + 2, order='xyz'))) == vector_range(Vector() + 2, order='xyz')[::Vector() - 1]
def test_vector_range_bool():
    """A range is truthy iff it contains at least one vector."""
    assert not vector_range(Vector())
    assert vector_range(Vector() + 1)
    assert not vector_range(Vector() + 1)[Vector() + 1:]
def test_vector_range_getitem():
    """Indexing accepts ints (positive/negative) and Vector keys; out-of-range
    indices raise IndexError."""
    v = vector_range(Vector() + 2, order='xyz')
    assert v[0] == Vector()
    assert v[1] == Vector(1, 0, 0)
    assert v[7] == Vector(1, 1, 1)
    assert v[7] == v[-1]
    assert v[6] == v[-2]
    assert v[0] == v[-8]
    assert v[Vector()] == Vector()
    assert v[Vector(1, 0, 0)] == Vector(1, 0, 0)
    assert v[Vector() - 1] == Vector(1, 1, 1)
    with pytest.raises(IndexError):
        v[8]
    with pytest.raises(IndexError):
        v[-9]
    with pytest.raises(IndexError):
        v[Vector(2, 1, 1)]
def test_vector_getslice():
    """Slicing requires Vector bounds (int slices and zero-component steps
    are rejected); None components keep the existing bound."""
    v = vector_range(Vector() + 2, order='xyz')
    assert v[Vector(1, 0, 0):] == vector_range(Vector(x=1), Vector() + 2, order='xyz')
    assert v[:Vector(1, None, None)] == vector_range(Vector(1, 2, 2), order='xyz')
    with pytest.raises(ValueError):
        v[1:]
    with pytest.raises(ValueError):
        v[::Vector(1, 1, 0)]
def test_vector_coverage():
    # Miscellaneous tests purely for the sake of coverage
    assert rmod(3, 3, range(10)) == set()
    assert rmod(3, 2, []) == set()
    with pytest.raises(ValueError):
        rmod(0, 1, range(10))
    with pytest.raises(ValueError):
        rdiv(0, 1)
def test_vector_sign():
    """sign() maps scalars to -1/0/1 and vectors to a per-ordinate sign vector."""
    assert sign(10) == 1
    assert sign(0) == 0
    assert sign(-5) == -1
    assert sign(X) == X
    assert sign(O) == O
    assert sign(Vector(3, 0, -3)) == X - Z
def test_vector_line():
    """line() yields the voxels along a straight line, endpoints included."""
    assert list(line(O, O)) == [O]
    assert list(line(O, 4*X)) == [O, X, 2*X, 3*X, 4*X]
    assert list(line(O, Vector(3, 1, 0))) == [O, X, 2*X + Y, 3*X + Y]
    assert list(line(O, Vector(1, 2, 3))) == [O, Y + Z, Vector(1, 1, 2), Vector(1, 2, 3)]
def test_vector_lines():
    """lines() chains line() over successive points; closed=True (the default
    shown here) links the last point back to the first."""
    assert list(lines([O, 4*X, Vector(1, 2, 3)])) == [
        O, X, 2*X, 3*X, 4*X, Vector(3, 1, 1), Vector(2, 1, 2),
        Vector(1, 2, 3), Vector(1, 1, 2), Vector(0, 1, 1), O]
    assert list(lines([O, 4*X, Vector(1, 2, 3)], closed=False)) == [
        O, X, 2*X, 3*X, 4*X, Vector(3, 1, 1), Vector(2, 1, 2),
        Vector(1, 2, 3)]
| |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ceilometer HACKING file compliance testing
Built on top of pep8.py
"""
import imp
import inspect
import logging
import os
import re
import subprocess
import sys
import tokenize
import traceback
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
#N9xx other
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate',
'ceilometer.storage.sqlalchemy.session',
'ceilometer.storage.sqlalchemy.models']
# Paste is missing a __init__ in top level directory
START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
_missingImport = set([])
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
def excluded(self, filename):
    """Check if options.exclude contains a pattern that matches filename."""
    base_name = os.path.basename(filename)
    matches_full = pep8.filename_match(filename, self.options.exclude,
                                       default=False)
    matches_base = pep8.filename_match(base_name, self.options.exclude,
                                       default=False)
    # Excluded when either the full path or the bare name matches.
    return any((matches_full, matches_base))
def input_dir(self, dirname):
    """Check all files in this directory and all subdirectories."""
    dirname = dirname.rstrip('/')
    if self.excluded(dirname):
        return 0
    counters = self.options.report.counters
    verbose = self.options.verbose
    filepatterns = self.options.filename
    runner = self.runner
    for root, dirs, files in os.walk(dirname):
        if verbose:
            print('directory ' + root)
        counters['directories'] += 1
        # prune excluded subdirectories in place so os.walk skips them
        for subdir in sorted(dirs):
            if self.excluded(os.path.join(root, subdir)):
                dirs.remove(subdir)
        for filename in sorted(files):
            # contain a pattern that matches?
            if ((pep8.filename_match(filename, filepatterns) and
                 not self.excluded(filename))):
                runner(os.path.join(root, filename))
def is_import_exception(mod):
    """Return True when *mod* is exempt from the import checks.

    A module is exempt if it appears verbatim in IMPORT_EXCEPTIONS or
    lives anywhere under one of those packages.
    """
    if mod in IMPORT_EXCEPTIONS:
        return True
    return any(mod.startswith(prefix + '.') for prefix in IMPORT_EXCEPTIONS)
def import_normalize(line):
    """Normalize a single-module ``from`` import to ``import`` form.

    Converts "from x import y" to "import x.y" and "from x import y as z"
    to "import x.y as z" (the original dropped the alias, contradicting
    its own comment).  Lines that don't match (wildcards, multi-imports,
    __future__, anything else) are returned unchanged.  The explicit
    length guard also prevents an IndexError on short lines that merely
    contain the substring "import".
    """
    split_line = line.split()
    if (line.startswith("from ") and "," not in line and
            len(split_line) >= 4 and split_line[2] == "import" and
            split_line[3] != "*" and split_line[1] != "__future__" and
            (len(split_line) == 4 or
             (len(split_line) == 6 and split_line[4] == "as"))):
        if len(split_line) == 6:
            # preserve the alias: "from x import y as z" -> "import x.y as z"
            return "import %s.%s as %s" % (split_line[1], split_line[3],
                                           split_line[5])
        return "import %s.%s" % (split_line[1], split_line[3])
    return line
def nova_todo_format(physical_line, tokens):
    """Check for 'TODO()'.
    nova HACKING guide recommendation for TODO:
    Include your name with TODOs as in "#TODO(termie)"
    Okay: #TODO(sdague)
    N101: #TODO fail
    N101: #TODO (jogo) fail
    """
    # TODO(sdague): TODO check shouldn't fail inside of space
    todo_pos = physical_line.find('TODO')
    named_pos = physical_line.find('TODO(')
    comment_pos = physical_line.find('#')  # make sure it's a comment
    if todo_pos == named_pos:
        # either no TODO at all, or it is already the TODO(name) form
        return
    if comment_pos < 0 or comment_pos >= todo_pos:
        # the TODO is not inside a comment
        return
    if len(tokens) != 0:
        return
    return todo_pos, "N101: Use TODO(NAME)"
def nova_except_format(logical_line):
    r"""Check for 'except:'.
    nova HACKING guide recommends not using except:
    Do not write "except:", use "except Exception:" at the very least
    Okay: except Exception:
    N201: except:
    """
    bare_except = logical_line.startswith("except:")
    if bare_except:
        yield 6, "N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
    r"""Check for 'assertRaises(Exception'.
    nova HACKING guide recommends not using assertRaises(Exception...):
    Do not use overly broad Exception type
    Okay: self.assertRaises(NovaException)
    N202: self.assertRaises(Exception)
    """
    if not logical_line.startswith("self.assertRaises(Exception"):
        return
    yield 1, "N202: assertRaises Exception too broad"
# Cache of "is this a real module?" verdicts for nova_import_rules,
# pre-seeded with everything already imported plus the built-in module
# names (all of which are known-good modules).
modules_cache = dict((mod, True) for mod in tuple(sys.modules.keys())
                     + sys.builtin_module_names)

# Matches old-style relative imports such as "from . import x" / "from .x".
# Raw string: '\s' in a plain literal is an invalid escape sequence on
# modern Python (SyntaxWarning/-error); the pattern itself is unchanged.
RE_RELATIVE_IMPORT = re.compile(r'^from\s*[.]')
def nova_import_rules(logical_line):
    r"""Check for imports.
    nova HACKING guide recommends one import per line:
    Do not import more than one module per line
    Examples:
    Okay: from nova.compute import api
    N301: from nova.compute import api, utils
    Imports should usually be on separate lines.
    nova HACKING guide recommends importing only modules:
    Do not import objects, only modules
    Examples:
    Okay: from os import path
    Okay: from os import path as p
    Okay: from os import (path as p)
    Okay: import os.path
    Okay: from nova.compute import rpcapi
    N302: from os.path import dirname as dirname2
    N302: from os.path import (dirname as dirname2)
    N303: from os.path import *
    N304: from .compute import rpcapi
    """
    #NOTE(afazekas): An old style relative import example will not be able to
    # pass the doctest, since the relativity depends on the file's locality
    def is_module_for_sure(mod, search_path=sys.path):
        # Walk the dotted path with imp.find_module, descending one
        # package level at a time; True if every segment resolves.
        mod = mod.replace('(', '')  # Ignore parentheses
        try:
            mod_name = mod
            while '.' in mod_name:
                pack_name, _sep, mod_name = mod.partition('.')
                f, p, d = imp.find_module(pack_name, search_path)
                search_path = [p]
            imp.find_module(mod_name, search_path)
        except ImportError:
            try:
                # NOTE(vish): handle namespace modules
                module = __import__(mod)
            except ImportError, exc:
                # NOTE(vish): the import error might be due
                # to a missing dependency
                missing = str(exc).split()[-1]
                if (missing != mod.split('.')[-1] or
                        "cannot import" in str(exc)):
                    # missing *dependency*, not a bad import: record it
                    # and give the line the benefit of the doubt
                    _missingImport.add(missing)
                    return True
                return False
            except Exception, exc:
                # NOTE(jogo) don't stack trace if unexpected import error,
                # log and continue.
                traceback.print_exc()
                return False
        return True
    def is_module(mod):
        """Checks for non module imports."""
        # memoised front-end for is_module_for_sure
        if mod in modules_cache:
            return modules_cache[mod]
        res = is_module_for_sure(mod)
        modules_cache[mod] = res
        return res
    # location of the file currently being checked, used for the
    # relative-import heuristic at the bottom
    current_path = os.path.dirname(pep8.current_file)
    current_mod = os.path.basename(pep8.current_file)
    if current_mod[-3:] == ".py":
        current_mod = current_mod[:-3]
    split_line = logical_line.split()
    split_line_len = len(split_line)
    if (split_line[0] in ('import', 'from') and split_line_len > 1 and
            not is_import_exception(split_line[1])):
        pos = logical_line.find(',')
        if pos != -1:
            if split_line[0] == 'from':
                yield pos, "N301: one import per line"
            return  # ',' is not supported by the N302 checker yet
        pos = logical_line.find('*')
        if pos != -1:
            yield pos, "N303: No wildcard (*) import."
            return
        # token counts 2/4/6 correspond to "import x", "from x import y"
        # and "from x import y as z" respectively
        if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
            if 'from' == split_line[0] and split_line_len > 3:
                mod = '.'.join((split_line[1], split_line[3]))
                if is_import_exception(mod):
                    return
                if RE_RELATIVE_IMPORT.search(logical_line):
                    yield logical_line.find('.'), ("N304: No "
                        "relative imports. '%s' is a relative import"
                        % logical_line)
                    return
                if not is_module(mod):
                    yield 0, ("N302: import only modules."
                              "'%s' does not import a module" % logical_line)
                return
            #NOTE(afazekas): import searches first in the package
            # The import keyword just imports modules
            # The guestfs module now imports guestfs
            mod = split_line[1]
            if (current_mod != mod and
                    not is_module(mod) and
                    is_module_for_sure(mod, [current_path])):
                yield 0, ("N304: No relative imports."
                          " '%s' is a relative import"
                          % logical_line)
    #TODO(jogo): import template: N305
def nova_import_alphabetical(logical_line, blank_lines, previous_logical,
                             indent_level, previous_indent_level):
    r"""Check for imports in alphabetical order.

    nova HACKING guide recommendation for imports:
    imports in human alphabetical order

    Okay: import os\nimport sys\n\nimport nova\nfrom nova import test
    N306: import sys\nimport os
    """
    # Only adjacent imports (no blank line between them) at the same
    # indentation level are comparable; a blank line starts a new group.
    if blank_lines >= 1 or indent_level != previous_indent_level:
        return
    # Lower-case both lines so capitalization doesn't dictate the order.
    current = import_normalize(logical_line.strip()).lower().split()
    previous = import_normalize(previous_logical.strip()).lower().split()
    # "import x" (2 words) or normalized "import x as y" (4 words) only.
    valid_lengths = (2, 4)
    if len(current) not in valid_lengths or len(previous) not in valid_lengths:
        return
    if current[0] != "import" or previous[0] != "import":
        return
    if current[1] < previous[1]:
        yield (0, "N306: imports not in alphabetical order (%s, %s)"
               % (previous[1], current[1]))
def nova_import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt

    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.

    N307
    """
    # Only files under nova/virt are checked; fake.py test doubles are exempt.
    checked_file = "nova/virt" in filename and not filename.endswith("fake.py")
    if checked_file and logical_line.startswith("from nova import db"):
        yield (0, "N307: nova.db import not allowed in nova/virt/*")
def is_docstring(physical_line, previous_logical):
    """Return True if found docstring

    'A docstring is a string literal that occurs as the first statement in a
    module, function, class,'
    http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
    """
    line = physical_line.lstrip()
    # Position of the first triple-quote opener on the line (-1 if none).
    start = max([line.find(i) for i in START_DOCSTRING_TRIPLE])
    # Whether the line ends with a triple-quote (ignoring trailing newline).
    end = max([line[-4:-1] == i for i in END_DOCSTRING_TRIPLE])
    if (previous_logical.startswith("def ") or
            previous_logical.startswith("class ")):
        # Bug fix: `start is 0` compared identity, relying on CPython's
        # small-int interning; use a value comparison instead.
        if start == 0:
            return True
    else:
        # Handle multi line comments
        return end and start in (-1, len(line) - 4)
def nova_docstring_start_space(physical_line, previous_logical):
    r"""Check for docstring not start with space.

    nova HACKING guide recommendation for docstring:
    Docstring should not start with space

    Okay: def foo():\n '''This is good.'''
    Okay: def foo():\n a = ''' This is not a docstring.'''
    Okay: def foo():\n pass\n ''' This is not.'''
    N401: def foo():\n ''' This is not.'''
    """
    # short circuit so that we don't fail on our own fail test
    # when running under external pep8
    if "N401: def foo()" in physical_line:
        return
    # it's important that we determine this is actually a docstring,
    # and not a doc block used somewhere after the first line of a
    # function def
    if not is_docstring(physical_line, previous_logical):
        return
    start = max(physical_line.find(marker)
                for marker in START_DOCSTRING_TRIPLE)
    if physical_line[start + 3] == ' ':
        return (start, "N401: docstring should not start with a space")
def nova_docstring_one_line(physical_line, previous_logical):
    r"""Check one line docstring end.

    nova HACKING guide recommendation for one line docstring:
    A one line docstring looks like this and ends in punctuation.

    Okay: def foo():\n '''This is good.'''
    Okay: def foo():\n '''This is good too!'''
    Okay: def foo():\n '''How about this?'''
    Okay: def foo():\n a = '''This is not a docstring'''
    Okay: def foo():\n pass\n '''This is not a docstring'''
    Okay: class Foo:\n pass\n '''This is not a docstring'''
    N402: def foo():\n '''This is not'''
    N402: def foo():\n '''Bad punctuation,'''
    N402: class Foo:\n '''Bad punctuation,'''
    """
    #TODO(jogo) make this apply to multi line docstrings as well
    line = physical_line.lstrip()
    if not is_docstring(physical_line, previous_logical):
        return
    start = max(line.find(marker) for marker in START_DOCSTRING_TRIPLE)
    closed = max(line[-4:-1] == marker for marker in END_DOCSTRING_TRIPLE)
    # One-liner: opens and closes on the same physical line, non-empty body.
    if start != -1 and closed and len(line) > start + 4:
        # line[-5] is the last character before the closing triple quote.
        if line[-5] not in ('.', '?', '!'):
            return start, "N402: one line docstring needs punctuation."
def nova_docstring_multiline_end(physical_line, previous_logical, tokens):
    r"""Check multi line docstring end.

    nova HACKING guide recommendation for docstring:
    Docstring should end on a new line

    Okay: '''foobar\nfoo\nbar\n'''
    Okay: def foo():\n '''foobar\nfoo\nbar\n'''
    Okay: class Foo:\n '''foobar\nfoo\nbar\n'''
    Okay: def foo():\n a = '''not\na\ndocstring'''
    Okay: def foo():\n pass\n'''foobar\nfoo\nbar\n d'''
    N403: def foo():\n '''foobar\nfoo\nbar\ndocstring'''
    N403: class Foo:\n '''foobar\nfoo\nbar\ndocstring'''\n\n
    """
    # Any OP token means this line is an expression, not a docstring.
    op_tokens = [tok for tok, _, _, _, _ in tokens if tok == tokenize.OP]
    if not tokens or op_tokens:
        return
    if not is_docstring(physical_line, previous_logical):
        return
    pos = max(physical_line.find(marker) for marker in END_DOCSTRING_TRIPLE)
    # The closing line should consist of the triple quote alone.
    if physical_line.strip() not in START_DOCSTRING_TRIPLE:
        return (pos, "N403: multi line docstring end on new line")
def nova_docstring_multiline_start(physical_line, previous_logical, tokens):
    r"""Check multi line docstring start with summary.

    nova HACKING guide recommendation for docstring:
    Docstring should start with A multi line docstring has a one-line summary

    Okay: '''foobar\nfoo\nbar\n'''
    Okay: def foo():\n a = '''\nnot\na docstring\n'''
    N404: def foo():\n'''\nfoo\nbar\n'''\n\n
    """
    if not is_docstring(physical_line, previous_logical):
        return
    pos = max(physical_line.find(marker) for marker in START_DOCSTRING_TRIPLE)
    # At the start of a docstring len(tokens) == 0; a bare opener means the
    # triple quote is the only thing on the line (no summary follows it).
    if len(tokens) == 0 and pos != -1 and len(physical_line) == pos + 4:
        if physical_line.strip() in START_DOCSTRING_TRIPLE:
            return (pos, "N404: multi line docstring "
                    "should start with a summary")
def nova_no_cr(physical_line):
    r"""Check that we only use newlines not carriage returns.

    Okay: import os\nimport sys
    # pep8 doesn't yet replace \r in strings, will work on an
    # upstream fix
    N901 import os\r\nimport sys
    """
    cr_index = physical_line.find('\r')
    if cr_index == -1:
        return
    # Only flag a \r sitting immediately before the trailing \n (a
    # Windows-style line ending), not a \r embedded in the line.
    if cr_index == len(physical_line) - 2:
        return (cr_index,
                "N901: Windows style line endings not allowed in code")
# Matches printf-style conversion specifiers so localization strings can be
# checked for positional placeholders.  The escape-bearing fragments are raw
# strings to avoid invalid-escape-sequence deprecation warnings; the compiled
# pattern is unchanged.
FORMAT_RE = re.compile("%(?:"
                       "%|"            # Ignore plain percents
                       r"(\(\w+\))?"   # mapping key
                       r"([#0 +-]?"    # flag
                       r"(?:\d+|\*)?"  # width
                       r"(?:\.\d+)?"   # precision
                       "[hlL]?"        # length mod
                       r"\w))")        # type
class LocalizationError(Exception):
    """Raised by the i18n token checker when a localization call is bad."""
def check_i18n():
    """Generator that checks token stream for localization errors.

    Expects tokens to be ``send``ed one by one.
    Raises LocalizationError if some error is found.
    """
    while True:
        try:
            token_type, text, _, _, line = yield
        except GeneratorExit:
            return

        # A bare `_` name (outside the definition of `_` itself) marks a
        # potential localization call.
        if (token_type == tokenize.NAME and text == "_" and
                not line.startswith('def _(msg):')):

            # Skip newline tokens between `_` and the next real token.
            while True:
                token_type, text, start, _, _ = yield
                if token_type != tokenize.NL:
                    break
            if token_type != tokenize.OP or text != "(":
                continue  # not a localization call

            format_string = ''
            # Concatenate adjacent string literals inside the call.
            while True:
                token_type, text, start, _, _ = yield
                if token_type == tokenize.STRING:
                    # NOTE(review): eval() reconstitutes the literal's
                    # value; the tokenizer guarantees `text` is a string
                    # literal, so no arbitrary expression is evaluated.
                    format_string += eval(text)
                elif token_type == tokenize.NL:
                    pass
                else:
                    break

            if not format_string:
                raise LocalizationError(start,
                    "N701: Empty localization string")
            if token_type != tokenize.OP:
                raise LocalizationError(start,
                    "N701: Invalid localization call")
            # Anything other than a plain closing paren after the string(s)
            # means formatting or concatenation happened inside _( ).
            if text != ")":
                if text == "%":
                    raise LocalizationError(start,
                        "N702: Formatting operation should be outside"
                        " of localization method call")
                elif text == "+":
                    raise LocalizationError(start,
                        "N702: Use bare string concatenation instead"
                        " of +")
                else:
                    raise LocalizationError(start,
                        "N702: Argument to _ must be just a string")

            format_specs = FORMAT_RE.findall(format_string)
            positional_specs = [(key, spec) for key, spec in format_specs
                                if not key and spec]
            # not spec means %%, key means %(smth)s
            if len(positional_specs) > 1:
                raise LocalizationError(start,
                    "N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
    r"""Check localization in line.

    Okay: _("This is fine")
    Okay: _("This is also fine %s")
    N701: _('')
    N702: _("Bob" + " foo")
    N702: _("Bob %s" % foo)
    # N703 check is not quite right, disabled by removing colon
    N703 _("%s %s" % (foo, bar))
    """
    # TODO(sdague) actually get these tests working
    gen = check_i18n()
    next(gen)  # advance the coroutine to its first yield
    try:
        # Bug fix: feed tokens with an explicit loop.  The previous
        # map(gen.send, tokens) is lazy under Python 3 and never actually
        # sent anything (identical behavior under Python 2, where map is
        # eager).
        for token in tokens:
            gen.send(token)
        gen.close()
    except LocalizationError as e:
        yield e.args
#TODO(jogo) Dict and list objects
def nova_is_not(logical_line):
    r"""Check localization in line.

    Okay: if x is not y
    N901: if not X is Y
    N901: if not X.B is Y
    """
    words = logical_line.split()
    # Only the exact five-word shape "if not <expr> is <expr>" is flagged.
    if len(words) != 5:
        return
    if words[0] == 'if' and words[1] == 'not' and words[3] == 'is':
        yield (logical_line.find('not'), "N901: Use the 'is not' "
               "operator for when testing for unequal identities")
def nova_not_in(logical_line):
    r"""Check localization in line.

    Okay: if x not in y
    Okay: if not (X in Y or X is Z)
    Okay: if not (X in Y)
    N902: if not X in Y
    N902: if not X.B in Y
    """
    words = logical_line.split()
    # Only the exact five-word shape "if not <expr> in <expr>" is flagged;
    # a parenthesized expression after `not` is a deliberate negation.
    if len(words) != 5:
        return
    if (words[0] == 'if' and words[1] == 'not' and words[3] == 'in' and
            not words[2].startswith('(')):
        yield (logical_line.find('not'), "N902: Use the 'not in' "
               "operator for collection membership evaluation")
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
    """Monkey patch in nova guidelines.

    Look for functions that start with nova_ and have arguments
    and add them to pep8 module
    Assumes you know how to write pep8.py checks
    """
    for name, function in globals().items():
        if not inspect.isfunction(function):
            continue
        args = inspect.getargspec(function)[0]
        # Bug fix: the prefix must match this module's nova_* checks; the
        # previous "ceilometer" prefix (a copy/paste leftover) matched no
        # function here, so no check was ever registered.
        if args and name.startswith("nova_"):
            # setattr assigns the same object without building an exec
            # string.
            setattr(pep8, name, function)
def once_git_check_commit_title():
    """Check git commit messages.

    nova HACKING recommends not referencing a bug or blueprint in first line,
    it should provide an accurate description of the change

    N801
    N802 Title limited to 72 chars

    Returns True if either title check failed.
    """
    #Get title of most recent commit
    subp = subprocess.Popen(['git', 'log', '--no-merges', '--pretty=%s', '-1'],
                            stdout=subprocess.PIPE)
    title = subp.communicate()[0]
    if subp.returncode:
        raise Exception("git log failed with code %s" % subp.returncode)
    #From https://github.com/openstack/openstack-ci-puppet
    # /blob/master/modules/gerrit/manifests/init.pp#L74
    #Changeid|bug|blueprint
    # Raw strings keep the same regex while avoiding invalid-escape
    # deprecation warnings (note '\\-' in a plain string equals r'\-').
    git_keywords = (r'(I[0-9a-f]{8,40})|'
                    r'([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
                    r'([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\-]+)')
    GIT_REGEX = re.compile(git_keywords)

    error = False
    # NOTE(review): under Python 3 `title` is bytes, so searching it with a
    # str pattern would raise -- this module targets Python 2; confirm
    # before porting.
    #NOTE(jogo) if match regex but over 3 words, acceptable title
    if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
        print ("N801: git commit title ('%s') should provide an accurate "
               "description of the change, not just a reference to a bug "
               "or blueprint" % title.strip())
        error = True
    # HACKING.rst recommends commit titles 50 chars or less, but enforces
    # a 72 character limit
    if len(title.decode('utf-8')) > 72:
        print ("N802: git commit title ('%s') should be under 50 chars"
               % title.strip())
        error = True
    return error
imports_on_separate_lines_N301_compliant = r"""
Imports should usually be on separate lines.
Okay: import os\nimport sys
E401: import sys, os
N301: from subprocess import Popen, PIPE
Okay: from myclas import MyClass
Okay: from foo.bar.yourclass import YourClass
Okay: import myclass
Okay: import foo.bar.yourclass
"""
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#Run once tests (not per line)
once_error = once_git_check_commit_title()
#NOVA error codes start with an N
pep8.SELFTEST_REGEX = re.compile(r'(Okay|[EWN]\d{3}):\s(.*)')
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8.StyleGuide.excluded = excluded
pep8.StyleGuide.input_dir = input_dir
# we need to kill this doctring otherwise the self tests fail
pep8.imports_on_separate_lines.__doc__ = \
imports_on_separate_lines_N301_compliant
try:
pep8._main()
sys.exit(once_error)
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
  """A session represents a mapping of expressions to concrete values.

  The bindings typically include required placeholders, but may be any
  intermediate expression as well.
  """
  def __init__(self, bindings=None):
    # Copy so later memoization never mutates the caller's mapping.
    self._bindings = dict(bindings or {})

  def evaluate(self, expr):  # type: (Expression) -> Any
    """Return the value of expr, computing and memoizing it if unbound."""
    try:
      return self._bindings[expr]
    except KeyError:
      value = expr.evaluate_at(self)
      self._bindings[expr] = value
      return value

  def lookup(self, expr):  # type: (Expression) -> Any
    """Return the already-bound value of expr (KeyError if absent)."""
    return self._bindings[expr]
class PartitioningSession(Session):
  """An extension of Session that enforces actual partitioning of inputs.

  Each expression is evaluated multiple times for various supported
  partitionings determined by its `requires_partition_by` specification. For
  each tested partitioning, the input is partitioned and the expression is
  evaluated on each partition separately, as if this were actually executed in
  a parallel manner.

  For each input partitioning, the results are verified to be partitioned
  appropriately according to the expression's `preserves_partition_by`
  specification.

  For testing only.
  """
  def __init__(self, bindings=None):
    # Inherited behavior; see Session.
    super(PartitioningSession, self).__init__(bindings)

  def evaluate(self, expr):
    import pandas as pd
    import collections

    def is_scalar(expr):
      # Anything that is not a pandas DataFrame/Series counts as a scalar
      # and is never partitioned.
      return not isinstance(expr.proxy(), pd.core.generic.NDFrame)

    if expr not in self._bindings:
      if is_scalar(expr) or not expr.args():
        # Leaves and scalars are evaluated normally (no partitioning).
        result = super(PartitioningSession, self).evaluate(expr)
      else:
        scaler_args = [arg for arg in expr.args() if is_scalar(arg)]

        def evaluate_with(input_partitioning):
          # One sub-Session per partition key; scalar args are bound in
          # every partition's session.
          parts = collections.defaultdict(
              lambda: Session({arg: self.evaluate(arg)
                               for arg in scaler_args}))
          for arg in expr.args():
            if not is_scalar(arg):
              input = self.evaluate(arg)
              for key, part in input_partitioning.test_partition_fn(input):
                parts[key]._bindings[arg] = part
          if not parts:
            parts[None]  # Create at least one entry.

          results = []
          for session in parts.values():
            # Skip partitions where every frame-like argument is empty.
            if any(len(session.lookup(arg)) for arg in expr.args()
                   if not is_scalar(arg)):
              results.append(session.evaluate(expr))

          expected_output_partitioning = output_partitioning(
              expr, input_partitioning)

          if not expected_output_partitioning.check(results):
            raise AssertionError(
                f"""Expression does not preserve partitioning!
                Expression: {expr}
                Requires: {expr.requires_partition_by()}
                Preserves: {expr.preserves_partition_by()}
                Input partitioning: {input_partitioning}
                Expected output partitioning: {expected_output_partitioning}
                """)

          if results:
            return pd.concat(results)
          else:
            # Choose any single session.
            return next(iter(parts.values())).evaluate(expr)

        # Store random state so it can be re-used for each execution, in case
        # the expression is part of a test that relies on the random seed.
        random_state = random.getstate()

        result = None
        # Run with all supported partitionings s.t. the smallest
        # subpartitioning is used last. This way the final result is computed
        # with the most challenging partitioning. Avoids heisenbugs where
        # sometimes the result is computed trivially with Singleton
        # partitioning and passes.
        for input_partitioning in sorted(set([expr.requires_partition_by(),
                                              partitionings.Arbitrary(),
                                              partitionings.Index(),
                                              partitionings.Singleton()])):
          if not expr.requires_partition_by().is_subpartitioning_of(
              input_partitioning):
            continue

          random.setstate(random_state)

          result = evaluate_with(input_partitioning)
        assert result is not None
        self._bindings[expr] = result
    return self._bindings[expr]
# The return type of an Expression (the type of the value it evaluates to,
# typically a pandas DataFrame/Series or a scalar).
T = TypeVar('T')
def output_partitioning(expr, input_partitioning):
  """ Return the expected output partitioning for `expr` when it's input is
  partitioned by `input_partitioning`.

  For internal use only; No backward compatibility guarantees """
  assert expr.requires_partition_by().is_subpartitioning_of(input_partitioning)

  preserved = expr.preserves_partition_by()
  if not preserved.is_subpartitioning_of(input_partitioning):
    # The input is partitioned more coarsely than what this expression can
    # preserve, so nothing can be assumed about the output.
    return partitionings.Arbitrary()
  # Preserved: the output keeps the finer of input and preserved bound.
  return min(input_partitioning, preserved)
class Expression(object):
  """An expression is an operation bound to a set of arguments.

  An expression represents a deferred tree of operations, which can be
  evaluated at a specific bindings of root expressions to values.

  requires_partition_by indicates the upper bound of a set of partitionings
  that are acceptable inputs to this expression. The expression should be able
  to produce the correct result when given input(s) partitioned by its
  requires_partition_by attribute, or by any partitioning that is _not_
  a subpartitioning of it.

  preserves_partition_by indicates the upper bound of a set of partitionings
  that can be preserved by this expression. When the input(s) to this
  expression are partitioned by preserves_partition_by, or by any partitioning
  that is _not_ a subpartitioning of it, this expression should produce
  output(s) partitioned by the same partitioning.

  However, if the partitioning of an expression's input is a subpartitioning
  of the partitioning that it preserves, the output is presumed to have no
  particular partitioning (i.e. Arbitrary()).

  Partitionings are ordered:

    Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()

  For example, an "element-wise operation" has no partitioning requirement
  (requires_partition_by = Arbitrary(), so every partitioning in the chain is
  an acceptable input) and preserves any partitioning given to it
  (preserves_partition_by = Arbitrary(), so every partitioning in the chain is
  preserved).

  As a more interesting example, consider an expression with
  requires_partition_by = Index() and preserves_partition_by = Singleton().
  Any non-Arbitrary partitioning is an acceptable input for this expression.
  However, unless the inputs are Singleton-partitioned, the expression makes
  no guarantees about the partitioning of the output.
  """
  def __init__(
      self,
      name,  # type: str
      proxy,  # type: T
      _id=None  # type: Optional[str]
  ):
    self._name = name
    self._proxy = proxy
    # Store for preservation through pickling.
    self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))

  def proxy(self):  # type: () -> T
    """Return the proxy object used for construction-time type checking."""
    return self._proxy

  def __hash__(self):
    # Identity is determined entirely by _id so that pickled/unpickled
    # copies of the same expression compare (and hash) equal.
    return hash(self._id)

  def __eq__(self, other):
    # Bug fix: comparing against a non-Expression previously raised
    # AttributeError; returning NotImplemented lets `==` fall back to
    # the default (False) for foreign types.
    if not isinstance(other, Expression):
      return NotImplemented
    return self._id == other._id

  def __repr__(self):
    return '%s[%s]' % (self.__class__.__name__, self._id)

  def placeholders(self):
    """Returns all the placeholders that self depends on."""
    raise NotImplementedError(type(self))

  def evaluate_at(self, session):  # type: (Session) -> T
    """Returns the result of self with the bindings given in session."""
    raise NotImplementedError(type(self))

  def requires_partition_by(self):  # type: () -> partitionings.Partitioning
    """Returns the partitioning, if any, required to evaluate this expression.

    Returns partitionings.Arbitrary() if no particular partitioning is
    required.
    """
    raise NotImplementedError(type(self))

  def preserves_partition_by(self):  # type: () -> partitionings.Partitioning
    """Returns the partitioning, if any, preserved by this expression.

    This gives an upper bound on the partitioning of its output. The actual
    partitioning of the output may be less strict (e.g. if the input was
    less partitioned).
    """
    raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
  """An expression whose value must be explicitly bound in the session."""
  def __init__(
      self,  # type: PlaceholderExpression
      proxy,  # type: T
      reference=None,  # type: Any
  ):
    """Initialize a placeholder expression.

    Args:
      proxy: A proxy object with the type expected to be bound to this
        expression. Used for type checking at pipeline construction time.
    """
    super(PlaceholderExpression, self).__init__('placeholder', proxy)
    self._reference = reference

  def placeholders(self):
    # A placeholder depends on exactly itself.
    return frozenset((self, ))

  def args(self):
    # Placeholders are leaves: they have no argument expressions.
    return ()

  def evaluate_at(self, session):
    # Never computed; the value must have been bound up front.
    return session.lookup(self)

  def requires_partition_by(self):
    return partitionings.Arbitrary()

  def preserves_partition_by(self):
    return partitionings.Index()
class ConstantExpression(Expression):
  """An expression whose value is known at pipeline construction time."""
  def __init__(
      self,  # type: ConstantExpression
      value,  # type: T
      proxy=None  # type: Optional[T]
  ):
    """Initialize a constant expression.

    Args:
      value: The constant value to be produced by this expression.
      proxy: (Optional) a proxy object with same type as `value` to use for
        rapid type checking at pipeline construction time. If not provided,
        `value` will be used directly.
    """
    # When no explicit proxy is given, the value serves as its own proxy.
    super(ConstantExpression, self).__init__(
        'constant', value if proxy is None else proxy)
    self._value = value

  def placeholders(self):
    # Constants depend on nothing.
    return frozenset()

  def args(self):
    return ()

  def evaluate_at(self, session):
    return self._value

  def requires_partition_by(self):
    return partitionings.Arbitrary()

  def preserves_partition_by(self):
    return partitionings.Arbitrary()
class ComputedExpression(Expression):
  """An expression whose value must be computed at pipeline execution time."""
  def __init__(
      self,  # type: ComputedExpression
      name,  # type: str
      func,  # type: Callable[...,T]
      args,  # type: Iterable[Expression]
      proxy=None,  # type: Optional[T]
      _id=None,  # type: Optional[str]
      requires_partition_by=partitionings.Index(),  # type: partitionings.Partitioning
      preserves_partition_by=partitionings.Singleton(),  # type: partitionings.Partitioning
  ):
    """Initialize a computed expression.

    Args:
      name: The name of this expression.
      func: The function that will be used to compute the value of this
        expression. Should accept arguments of the types returned when
        evaluating the `args` expressions.
      args: The list of expressions that will be used to produce inputs to
        `func`.
      proxy: (Optional) a proxy object with same type as the objects that this
        ComputedExpression will produce at execution time. If not provided, a
        proxy will be generated using `func` and the proxies of `args`.
      _id: (Optional) a string to uniquely identify this expression.
      requires_partition_by: The required (common) partitioning of the args.
      preserves_partition_by: The level of partitioning preserved.
    """
    # Singleton-partitioned expressions are non-parallelizable and must be
    # explicitly opted into via allow_non_parallel_operations().
    if (isinstance(requires_partition_by, partitionings.Singleton) and
        not _get_allow_non_parallel()):
      reason = requires_partition_by.reason or (
          f"Encountered non-parallelizable form of {name!r}.")

      raise NonParallelOperation(
          f"{reason}\n"
          "Consider using an allow_non_parallel_operations block if you're "
          "sure you want to do this. See "
          "https://s.apache.org/dataframe-non-parallel-operations for more "
          "information.")
    args = tuple(args)
    if proxy is None:
      # Derive the proxy by applying func to the argument proxies.
      proxy = func(*(arg.proxy() for arg in args))
    super(ComputedExpression, self).__init__(name, proxy, _id)
    self._func = func
    self._args = args
    self._requires_partition_by = requires_partition_by
    self._preserves_partition_by = preserves_partition_by

  def placeholders(self):
    # Union of the placeholders of every argument expression.
    result = frozenset()
    for arg in self.args():
      result |= arg.placeholders()
    return result

  def args(self):
    return self._args

  def evaluate_at(self, session):
    return self._func(*(session.evaluate(arg) for arg in self._args))

  def requires_partition_by(self):
    return self._requires_partition_by

  def preserves_partition_by(self):
    return self._preserves_partition_by
def elementwise_expression(name, func, args):
  """Convenience constructor for an elementwise ComputedExpression.

  Elementwise operations need no particular input partitioning and keep
  whatever partitioning their inputs had.
  """
  arbitrary = partitionings.Arbitrary
  return ComputedExpression(
      name,
      func,
      args,
      requires_partition_by=arbitrary(),
      preserves_partition_by=arbitrary())
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
if allow is None:
yield
else:
old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow
yield
_ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
  """Raised when a non-parallelizable expression is built outside an
  allow_non_parallel_operations() block."""
  def __init__(self, msg):
    # Bug fix: the original passed `self` as an extra positional argument
    # to Exception.__init__, polluting .args (and str()) with the
    # exception instance itself.
    super(NonParallelOperation, self).__init__(msg)
    self.msg = msg
| |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""
A state machine for using TLS Lite with asynchronous I/O.
"""
class AsyncStateMachine:
    """
    This is an abstract class that's used to integrate TLS Lite with
    asyncore and Twisted.

    This class signals wantsReadsEvent() and wantsWriteEvent().  When
    the underlying socket has become readable or writeable, the event
    should be passed to this class by calling inReadEvent() or
    inWriteEvent().  This class will then try to read or write through
    the socket, and will update its state appropriately.

    This class will forward higher-level events to its subclass.  For
    example, when a complete TLS record has been received,
    outReadEvent() will be called with the decrypted data.

    Subclasses are expected to provide self.tlsConnection before using
    the read/write/handshake operations.
    """

    def __init__(self):
        self._clear()

    def _clear(self):
        #These store the various asynchronous operations (i.e.
        #generators).  Only one of them, at most, is ever active at a
        #time.
        self.handshaker = None
        self.closer = None
        self.reader = None
        self.writer = None

        #This stores the result from the last call to the
        #currently active operation.  If 0 it indicates that the
        #operation wants to read, if 1 it indicates that the
        #operation wants to write.  If None, there is no active
        #operation.
        self.result = None

    def _checkAssert(self, maxActive=1):
        #This checks that only one operation, at most, is
        #active, and that self.result is set appropriately.
        activeOps = 0
        if self.handshaker:
            activeOps += 1
        if self.closer:
            activeOps += 1
        if self.reader:
            activeOps += 1
        if self.writer:
            activeOps += 1

        # Use identity comparison with None instead of ==.
        if self.result is None:
            if activeOps != 0:
                raise AssertionError()
        elif self.result in (0, 1):
            if activeOps != 1:
                raise AssertionError()
        else:
            raise AssertionError()
        if activeOps > maxActive:
            raise AssertionError()

    def wantsReadEvent(self):
        """If the state machine wants to read.

        If an operation is active, this returns whether or not the
        operation wants to read from the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to read.
        """
        if self.result is not None:
            return self.result == 0
        return None

    def wantsWriteEvent(self):
        """If the state machine wants to write.

        If an operation is active, this returns whether or not the
        operation wants to write to the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to write.
        """
        if self.result is not None:
            return self.result == 1
        return None

    def outConnectEvent(self):
        """Called when a handshake operation completes.

        May be overridden in subclass.
        """
        pass

    def outCloseEvent(self):
        """Called when a close operation completes.

        May be overridden in subclass.
        """
        pass

    def outReadEvent(self, readBuffer):
        """Called when a read operation completes.

        May be overridden in subclass."""
        pass

    def outWriteEvent(self):
        """Called when a write operation completes.

        May be overridden in subclass."""
        pass

    def inReadEvent(self):
        """Tell the state machine it can read from the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                # No operation in flight: start a fresh read.
                self.reader = self.tlsConnection.readAsync(16384)
                self._doReadOp()
        except:
            # Bare except intentionally re-raises after resetting state.
            self._clear()
            raise

    def inWriteEvent(self):
        """Tell the state machine it can write to the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                self.outWriteEvent()
        except:
            self._clear()
            raise

    def _doHandshakeOp(self):
        try:
            # next() works with both Python 2.6+ and Python 3 generators,
            # unlike the removed gen.next() method.
            self.result = next(self.handshaker)
        except StopIteration:
            self.handshaker = None
            self.result = None
            self.outConnectEvent()

    def _doCloseOp(self):
        try:
            self.result = next(self.closer)
        except StopIteration:
            self.closer = None
            self.result = None
            self.outCloseEvent()

    def _doReadOp(self):
        self.result = next(self.reader)
        # Anything other than the want-read/want-write sentinels is the
        # decrypted data itself.
        if self.result not in (0, 1):
            readBuffer = self.result
            self.reader = None
            self.result = None
            self.outReadEvent(readBuffer)

    def _doWriteOp(self):
        try:
            self.result = next(self.writer)
        except StopIteration:
            self.writer = None
            self.result = None

    def setHandshakeOp(self, handshaker):
        """Start a handshake operation.

        @type handshaker: generator
        @param handshaker: A generator created by using one of the
        asynchronous handshake functions (i.e. handshakeServerAsync, or
        handshakeClientxxx(..., async=True).
        """
        try:
            self._checkAssert(0)
            self.handshaker = handshaker
            self._doHandshakeOp()
        except:
            self._clear()
            raise

    def setServerHandshakeOp(self, **args):
        """Start a handshake operation.

        The arguments passed to this function will be forwarded to
        L{tlslite.tlsconnection.TLSConnection.handshakeServerAsync}.
        """
        handshaker = self.tlsConnection.handshakeServerAsync(**args)
        self.setHandshakeOp(handshaker)

    def setCloseOp(self):
        """Start a close operation.
        """
        try:
            self._checkAssert(0)
            self.closer = self.tlsConnection.closeAsync()
            self._doCloseOp()
        except:
            self._clear()
            raise

    def setWriteOp(self, writeBuffer):
        """Start a write operation.

        @type writeBuffer: str
        @param writeBuffer: The string to transmit.
        """
        try:
            self._checkAssert(0)
            self.writer = self.tlsConnection.writeAsync(writeBuffer)
            self._doWriteOp()
        except:
            self._clear()
            raise
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from future.builtins import zip
import numpy as np
from skbio.tree import TreeNode
from skbio.util._decorator import experimental
def _walk_clades(trees, weights):
    """Collect weighted clade observations across all the trees.

    Parameters
    ----------
    trees : list of TreeNode
        The trees to walk
    weights : np.array
        Tree weights

    Returns
    -------
    list of tuple
        ``(frozenset, float)`` pairs of clades and their weighted
        observation counts, sorted by clade size (tip count) such that the
        largest clade is at index 0.
    defaultdict(float)
        The edge lengths, keyed by frozenset of the clade, and valued by
        the weighted average length over the trees the clade was observed
        in.  A clade whose length was ever ``None`` is recorded as ``None``.
    """
    observed = defaultdict(float)
    lengths = defaultdict(float)
    weight_total = weights.sum()

    def _tip_names(node):
        # Tips contribute their own name; internal nodes contribute nothing
        # directly (cache_attr unions the cached sets up the tree).
        return [node.name] if node.is_tip() else []

    for tree, weight in zip(trees, weights):
        tree.cache_attr(_tip_names, 'tip_names', frozenset)
        for node in tree.postorder():
            clade = node.tip_names
            observed[clade] += weight
            if node.length is None:
                # A missing branch length marks the whole clade's length
                # as unknown.
                lengths[clade] = None
            else:
                lengths[clade] += (node.length * weight) / weight_total

    # Largest clades first, so downstream filtering sees supersets before
    # their subsets.
    by_size = sorted(observed.items(), key=lambda item: len(item[0]),
                     reverse=True)
    return by_size, lengths
def _filter_clades(clade_counts, cutoff_threshold):
"""Filter clades that not well supported or are contradicted
Parameters
----------
clade_counts : list of tuple
Where the first element in each tuple is the frozenset of the clade,
and the second element is the support value. It is expected that this
list is sorted by descending order by support.
cutoff_threshold : float
The minimum weighted observation count that a clade must have to be
considered supported.
Returns
-------
dict
A dict of the accepted clades, keyed by the frozenset of the clade and
valued by the support value.
"""
accepted_clades = {}
for clade, count in clade_counts:
conflict = False
if count <= cutoff_threshold:
continue
if len(clade) > 1:
# check the current clade against all the accepted clades to see if
# it conflicts. A conflict is defined as:
# 1. the clades are not disjoint
# 2. neither clade is a subset of the other
for accepted_clade in accepted_clades:
intersect = clade.intersection(accepted_clade)
subset = clade.issubset(accepted_clade)
superset = clade.issuperset(accepted_clade)
if intersect and not (subset or superset):
conflict = True
if conflict is False:
accepted_clades[clade] = count
return accepted_clades
def _build_trees(clade_counts, edge_lengths, support_attr):
    """Construct the trees with support

    Parameters
    ----------
    clade_counts : dict
        Keyed by the frozenset of the clade and valued by the support
    edge_lengths : dict
        Keyed by the frozenset of the clade and valued by the weighted length
    support_attr : str
        The name of the attribute to hold the support value

    Returns
    -------
    list of TreeNode
        A list of the constructed trees
    """
    nodes = {}
    # Queue of (size, clade); sorting + pop(0) processes smallest clades
    # first, so children exist in `nodes` before their ancestors are built.
    queue = [(len(clade), clade) for clade in clade_counts]
    while queue:
        # The values within the queue are updated on each iteration, so it
        # doesn't look like an insertion sort will make sense unfortunately
        queue.sort()
        (clade_size, clade) = queue.pop(0)
        new_queue = []
        # search for ancestors of clade
        for (_, ancestor) in queue:
            if clade.issubset(ancestor):
                # update ancestor such that, in the following example:
                # ancestor == {1, 2, 3, 4}
                # clade == {2, 3}
                # new_ancestor == {1, {2, 3}, 4}
                # i.e. the processed clade collapses into a single nested
                # frozenset element of its ancestor.
                new_ancestor = (ancestor - clade) | frozenset([clade])
                # update references for counts and lengths (mutates the
                # caller-supplied dicts in place)
                clade_counts[new_ancestor] = clade_counts.pop(ancestor)
                edge_lengths[new_ancestor] = edge_lengths.pop(ancestor)
                ancestor = new_ancestor
            new_queue.append((len(ancestor), ancestor))
        # if the clade is a tip, then we have a name
        if clade_size == 1:
            name = list(clade)[0]
        else:
            name = None
        # the clade will not be in nodes if it is a tip
        children = [nodes.pop(c) for c in clade if c in nodes]
        length = edge_lengths[clade]
        node = TreeNode(children=children, length=length, name=name)
        setattr(node, support_attr, clade_counts[clade])
        nodes[clade] = node
        queue = new_queue
    # Whatever is left unpopped has no ancestor: the roots of the forest.
    return list(nodes.values())
@experimental(as_of="0.4.0")
def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
    r"""Determines consensus trees from a list of rooted trees

    Parameters
    ----------
    trees : list of TreeNode
        The trees to operate on
    weights : list or np.array of {int, float}, optional
        If provided, the list must be in index order with `trees`. Each tree
        will receive the corresponding weight. If omitted, all trees will be
        equally weighted.
    cutoff : float, 0.0 <= cutoff <= 1.0
        Any clade that has <= cutoff support will be dropped. If cutoff is
        < 0.5, then it is possible that ties will result. If so, ties are
        broken arbitrarily depending on list sort order.
    support_attr : str
        The attribute to be decorated onto the resulting trees that contain
        the consensus support.

    Returns
    -------
    list of TreeNode
        Multiple trees can be returned in the case of two or more disjoint
        sets of tips represented on input.

    Notes
    -----
    This code was adapted from PyCogent's majority consensus code originally
    written by Matthew Wakefield. The method is based off the original
    description of consensus trees in [1]_. An additional description can be
    found in the Phylip manual [2]_. This method does not support majority
    rule extended.

    Support is computed as a weighted average of the tree weights in which
    the clade was observed in. For instance, if {A, B, C} was observed in 5
    trees all with a weight of 1, its support would then be 5.

    References
    ----------
    .. [1] Margush T, McMorris FR. (1981) "Consensus n-trees." Bulletin for
       Mathematical Biology 43(2) 239-44.
    .. [2] http://evolution.genetics.washington.edu/phylip/doc/consense.html

    Examples
    --------
    Computing the majority consensus, using the example from the Phylip
    manual with the exception that we are computing majority rule and not
    majority rule extended.

    >>> from skbio.tree import TreeNode
    >>> from io import StringIO
    >>> trees = [
    ... TreeNode.read(StringIO("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
    ... TreeNode.read(StringIO("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
    ... TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
    >>> consensus = majority_rule(trees, cutoff=0.5)[0]
    >>> for node in sorted(consensus.non_tips(),
    ...                    key=lambda k: k.count(tips=True)):
    ...     support_value = node.support
    ...     names = ' '.join(sorted(n.name for n in node.tips()))
    ...     print("Tips: %s, support: %s" % (names, support_value))
    Tips: F I, support: 9.0
    Tips: D H J, support: 6.0
    Tips: C D H J, support: 6.0
    Tips: C D F G H I J, support: 6.0
    Tips: C D E F G H I J, support: 9.0
    Tips: B C D E F G H I J, support: 9.0

    In the next example, multiple trees will be returned which can happen if
    clades are not well supported across the trees. In addition, this can
    arise if not all tips are present across all trees.

    >>> trees = [
    ...     TreeNode.read(StringIO("((a,b),(c,d),(e,f));")),
    ...     TreeNode.read(StringIO("(a,(c,d),b,(e,f));")),
    ...     TreeNode.read(StringIO("((c,d),(e,f),b);")),
    ...     TreeNode.read(StringIO("(a,(c,d),(e,f));"))]
    >>> consensus_trees = majority_rule(trees)
    >>> len(consensus_trees)
    4

    """
    # Unweighted input means every tree counts once.
    weights = (np.ones(len(trees), dtype=float)
               if weights is None else np.asarray(weights))
    if len(weights) != len(trees):
        raise ValueError("Number of weights and trees differ.")

    # A clade must be seen in strictly more than this much total weight.
    cutoff_threshold = cutoff * weights.sum()

    clade_counts, edge_lengths = _walk_clades(trees, weights)
    supported = _filter_clades(clade_counts, cutoff_threshold)
    return _build_trees(supported, edge_lengths, support_attr)
| |
"""
Copyright 2010 Greg L. Turnquist, All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from springpython.config import PythonConfig
from springpython.config import Object
from springpython.context import ApplicationContext
from springpython.security import AuthenticationException
from springpython.security.context import SecurityContext
from springpython.security.context import SecurityContextHolder
from springpython.security.providers import AuthenticationManager
from springpython.security.providers import UsernamePasswordAuthenticationToken
from springpython.security.providers.dao import DaoAuthenticationProvider
from springpython.security.providers.encoding import PlaintextPasswordEncoder
from springpython.security.providers.encoding import Md5PasswordEncoder
from springpython.security.providers.encoding import ShaPasswordEncoder
from springpython.security.userdetails import InMemoryUserDetailsService
from springpython.security.userdetails import UserDetailsService
from springpython.security.userdetails.dao import DatabaseUserDetailsService
from springpython.security.vote import AffirmativeBased
from springpython.security.vote import RoleVoter
from springpython.security.web import AuthenticationProcessingFilter
from springpython.security.web import AuthenticationProcessingFilterEntryPoint
from springpython.security.cherrypy3 import *
from springpython.security.web import ExceptionTranslationFilter
from springpython.security.web import FilterSecurityInterceptor
from springpython.security.web import HttpSessionContextIntegrationFilter
from springpython.security.web import SimpleAccessDeniedHandler
from view import *
from controller import *
class Twitter_cloneConfiguration(PythonConfig):
    """Spring Python application context for the twitter clone.

    Wires the message store and web objects together with the security
    stack: HTTP-session integration, form-based authentication against an
    in-memory (SHA-hashed) user set, and role-based authorization.
    """
    def __init__(self):
        super(Twitter_cloneConfiguration, self).__init__()

    @Object
    def database(self):
        """The shared message store backing the application."""
        return MessageStore()

    @Object
    def receiver(self):
        """Receiver that feeds incoming messages into the message store."""
        return Receiver(self.database())

    @Object
    def securityFilterChain(self):
        """This is the main entry point for security chain. It works from top to bottom, until it finds a match,
        based on the URI of the request, deciding what chain of filters to apply."""
        return CP3FilterChainProxy(filterInvocationDefinitionSource =
            [
            ("/images.*", []),
            ("/html.*",   []),
            ("/login.*",  ["httpSessionContextIntegrationFilter"]),
            ("/.*",       ["httpSessionContextIntegrationFilter",
                           "exceptionTranslationFilter",
                           "authenticationProcessingFilter",
                           "filterSecurityInterceptor"])
            ])

    @Object
    def root(self):
        """This is the main object defined for the web application."""
        form = Twitter_cloneView(poller=self.receiver(), message_store=self.database())
        form.filter = self.authenticationProcessingFilter()
        form.hashedUserDetailsServiceList = [self.shaUserDetailsService()]
        form.authenticationManager = self.authenticationManager()
        form.redirectStrategy = self.redirectStrategy()
        form.httpContextFilter = self.httpSessionContextIntegrationFilter()
        return form

    @Object
    def userDetailsService(self):
        """This user details service uses a pre-built, in-memory user set for demonstration purposes only.
        Do NOT use in a production system!!!"""
        service = InMemoryUserDetailsService()
        service.user_dict = {"alice": ("password1", ["ROLE_ANY"], True), "bob": ("password2", ["ROLE_ANY"], True)}
        return service

    @Object
    def shaEncoder(self):
        """This password encoder uses the SHA hashing algorithm."""
        return ShaPasswordEncoder()

    @Object
    def shaUserDetailsService(self):
        """This wrapper around a user details service will publish an unhashed user details service with hashed passwords,
        allowing a demo set of users be stored in unhashed format. Do NOT use this for production systems!!!"""
        service = PreencodingUserDetailsService()
        service.wrappedUserDetailsService = self.userDetailsService()
        service.encoder = self.shaEncoder()
        return service

    @Object
    def shaAuthenticationProvider(self):
        """This authentication provider takes a user details service and links it with a password encoder, to hash
        passwords before comparing with the user details service."""
        provider = DaoAuthenticationProvider()
        provider.user_details_service = self.shaUserDetailsService()
        provider.password_encoder = self.shaEncoder()
        return provider

    @Object
    def authenticationManager(self):
        """This authentication manager contains the list of authentication providers used to confirm a user's identity."""
        authManager = AuthenticationManager()
        authManager.auth_providers = [self.shaAuthenticationProvider()]
        return authManager

    @Object
    def accessDecisionManager(self):
        """This AccessDecisionManager decides based on what ROLE_xxx the current user has."""
        adm = AffirmativeBased()
        adm.allow_if_all_abstain = False
        adm.access_decision_voters = [RoleVoter()]
        return adm

    @Object
    def cherrypySessionStrategy(self):
        """This is the concrete mechanism used to activate HttpSession data."""
        return CP3SessionStrategy()

    @Object
    def redirectStrategy(self):
        """This is the concrete mechanism used by several components to redirect the browser."""
        return CP3RedirectStrategy()

    @Object
    def httpSessionContextIntegrationFilter(self):
        """This filter is used to move SecurityContext to/from the HttpSession of the web requests."""
        # Local renamed from `filter` to avoid shadowing the builtin.
        session_filter = HttpSessionContextIntegrationFilter()
        session_filter.sessionStrategy = self.cherrypySessionStrategy()
        return session_filter

    @Object
    def authenticationProcessingFilter(self):
        """This defines the filter for confirming a user's identity."""
        auth_filter = AuthenticationProcessingFilter()
        auth_filter.auth_manager = self.authenticationManager()
        auth_filter.alwaysReauthenticate = False
        return auth_filter

    @Object
    def filterSecurityInterceptor(self):
        """This is the collection of rules used to determine if logged in users have permission to access a page. It
        works top to bottom, until it finds a URI pattern match."""
        interceptor = FilterSecurityInterceptor()
        interceptor.auth_manager = self.authenticationManager()
        interceptor.access_decision_mgr = self.accessDecisionManager()
        interceptor.sessionStrategy = self.cherrypySessionStrategy()
        interceptor.obj_def_source = [
            ("/.*", ["ROLE_ANY"])
        ]
        return interceptor

    @Object
    def authenticationProcessingFilterEntryPoint(self):
        """This entry point defines where to redirect users not already logged into the system."""
        entry_point = AuthenticationProcessingFilterEntryPoint()
        entry_point.loginFormUrl = "/login"
        entry_point.redirectStrategy = self.redirectStrategy()
        return entry_point

    @Object
    def accessDeniedHandler(self):
        """This handler defines the location and mechanism used to get there, when processing a security exception."""
        handler = SimpleAccessDeniedHandler()
        handler.errorPage = "/accessDenied"
        handler.redirectStrategy = self.redirectStrategy()
        return handler

    @Object
    def exceptionTranslationFilter(self):
        """This filter allows re-routing to an Access Denied page in the event of a security exception."""
        translation_filter = ExceptionTranslationFilter()
        translation_filter.authenticationEntryPoint = self.authenticationProcessingFilterEntryPoint()
        translation_filter.accessDeniedHandler = self.accessDeniedHandler()
        return translation_filter

    # NOTE: a superseded, commented-out `filterChainProxy` definition that
    # duplicated securityFilterChain was removed; securityFilterChain above
    # is the live chain definition.
class PreencodingUserDetailsService(UserDetailsService):
    """
    Wraps another user details service and hashes each user's password with
    the configured encoder just before authentication occurs.  This lets a
    demo user set be declared with plain-text passwords; it exists purely
    to demonstrate password encoders being plugged in.
    """
    def __init__(self, wrappedUserDetailsService = None, encoder = None):
        UserDetailsService.__init__(self)
        self.wrappedUserDetailsService = wrappedUserDetailsService
        self.encoder = encoder
        self.logger = logging.getLogger("twitter_clone.app_context.PreencodingUserDetailsService")

    def load_user(self, username):
        # Fetch the plain-text user record, then replace its password with
        # the encoded form so downstream comparison sees a hash.
        user = self.wrappedUserDetailsService.load_user(username)
        hashed = self.encoder.encodePassword(user.password, None)
        user.password = hashed
        self.logger.debug("Pre-converting %s's password to hashed format of %s, before authentication happens." % (username, user.password))
        return user

    def __str__(self):
        return "%s %s" % (self.encoder, self.wrappedUserDetailsService)
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from collections import defaultdict
import json
import os
import docker
from dockerfile_parse import DockerfileParser
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.util import ImageName
from atomic_reactor.plugin import (PreBuildPlugin, PrePublishPlugin, PostBuildPlugin, ExitPlugin,
AutoRebuildCanceledException, PluginFailedException,
BuildStepPlugin, InappropriateBuildStepError)
import atomic_reactor.plugin
from atomic_reactor.plugins.build_docker_api import DockerApiPlugin
import atomic_reactor.inner
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE, SOURCE
from tests.docker_mock import mock_docker
from tests.util import requires_internet, is_string_type
import inspect
import signal
from atomic_reactor.inner import BuildResults, BuildResultsEncoder, BuildResultsJSONDecoder
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.constants import INSPECT_ROOTFS, INSPECT_ROOTFS_LAYERS
# Attributes of BuildResults that the JSON encoder/decoder round-trips.
BUILD_RESULTS_ATTRS = ['build_logs',
                       'built_img_inspect',
                       'built_img_info',
                       'base_img_info',
                       'base_plugins_output',
                       'built_img_plugins_output']
# Canned buildstep results shared by the workflow tests below.
DUMMY_BUILD_RESULT = BuildResult(image_id="image_id")
DUMMY_FAILED_BUILD_RESULT = BuildResult(fail_reason='it happens')
DUMMY_REMOTE_BUILD_RESULT = BuildResult.make_remote_image_result()
def test_build_results_encoder():
    """BuildResults serializes to JSON via BuildResultsEncoder."""
    results = BuildResults()
    expected_data = {attr: attr for attr in BUILD_RESULTS_ATTRS}
    for attr, value in expected_data.items():
        setattr(results, attr, value)
    round_tripped = json.loads(json.dumps(results, cls=BuildResultsEncoder))
    assert round_tripped == expected_data
def test_build_results_decoder():
    """JSON data decodes back into a BuildResults via the custom decoder."""
    data = {attr: attr for attr in BUILD_RESULTS_ATTRS}
    expected_results = BuildResults()
    for attr, value in data.items():
        setattr(expected_results, attr, value)
    results = json.loads(json.dumps(data), cls=BuildResultsJSONDecoder)
    # build_logs is excluded from the comparison; presumably it is not
    # round-tripped by the decoder -- confirm against BuildResultsJSONDecoder.
    for attr in set(BUILD_RESULTS_ATTRS) - set(['build_logs']):
        assert getattr(results, attr) == getattr(expected_results, attr)
class MockDocker(object):
    """Minimal docker-client stand-in exposing a fixed four-layer history."""

    def history(self, name):
        # Layers are listed newest-first, matching `docker history` output.
        layers = [("sha256:layer1-newest", 1),
                  ("sha256:layer2", 2),
                  ("sha256:layer3", 3),
                  ("sha256:layer4-oldest", 4)]
        return [{'Size': size, 'Id': layer_id} for layer_id, size in layers]
class MockDockerTasker(object):
    """Stand-in for DockerTasker, backed by a MockDocker client."""

    def __init__(self):
        self.d = MockDocker()

    def inspect_image(self, name):
        # The tests need no image metadata; an empty inspect suffices.
        return {}

    def build_image_from_path(self):
        return True
class MockDockerTaskerBaseImage(MockDockerTasker):
    """Tasker whose inspect_image always raises NotFound, mimicking a base
    image that is not present locally."""
    def inspect_image(self, name):
        raise docker.errors.NotFound(message='foo', response='bar', explanation='baz')
class X(object):
    """Bare attribute holder; used by MockInsideBuilder.source to fake a
    build-source object."""
    pass
class MockInsideBuilder(object):
    """Fake InsideBuilder with a canned tasker, image ids and inspect data."""

    def __init__(self, failed=False, is_base_image=False):
        self.failed = failed
        self.image_id = 'asd'
        self.image = 'image'
        self.df_path = 'some'
        self.df_dir = 'some'
        # Base-image builds get a tasker whose inspect_image raises NotFound.
        if is_base_image:
            self.tasker = MockDockerTaskerBaseImage()
            self.base_image = ImageName(namespace='koji', repo='image-build')
        else:
            self.tasker = MockDockerTasker()
            self.base_image = ImageName(repo='Fedora', tag='22')

        def simplegen(x, y):
            # Mimic a streaming build: a single log line, then completion.
            yield "some"

        flexmock(self.tasker, build_image_from_path=simplegen)

    @property
    def source(self):
        src = X()
        src.dockerfile_path = '/'
        src.path = '/tmp'
        return src

    def pull_base_image(self, source_registry, insecure=False):
        pass

    def get_built_image_info(self):
        return {'Id': 'some'}

    def inspect_built_image(self):
        diff_ids = ['sha256:diff_id1-oldest',
                    'sha256:diff_id2',
                    'sha256:diff_id3',
                    'sha256:diff_id4-newest']
        return {INSPECT_ROOTFS: {INSPECT_ROOTFS_LAYERS: diff_ids}}

    def ensure_not_built(self):
        pass
class RaisesMixIn(object):
    """
    Mix-in class for plugins that should raise exceptions.
    """

    is_allowed_to_fail = False

    def __init__(self, tasker, workflow, *args, **kwargs):
        super(RaisesMixIn, self).__init__(tasker, workflow, *args, **kwargs)

    def run(self):
        # Failing unconditionally lets tests assert the build aborts.
        raise RuntimeError
class PreRaises(RaisesMixIn, PreBuildPlugin):
    """
    This plugin must run and cause the build to abort.
    """
    key = 'pre_raises'  # name used to reference this plugin in configuration
class BuildStepRaises(RaisesMixIn, BuildStepPlugin):
    """
    This plugin must run and cause the build to abort.
    """
    key = 'buildstep_raises'  # name used to reference this plugin in configuration
class PostRaises(RaisesMixIn, PostBuildPlugin):
    """
    This plugin must run and cause the build to abort.
    """
    key = 'post_raises'  # name used to reference this plugin in configuration
class PrePubRaises(RaisesMixIn, PrePublishPlugin):
    """
    This plugin must run and cause the build to abort.
    """
    key = 'prepub_raises'  # name used to reference this plugin in configuration
class WatchedMixIn(object):
    """
    Mix-in class for plugins we want to watch.
    """

    def __init__(self, tasker, workflow, watcher, *args, **kwargs):
        # Peel off the watcher; everything else goes up the MRO unchanged.
        super(WatchedMixIn, self).__init__(tasker, workflow, *args, **kwargs)
        self.watcher = watcher

    def run(self):
        self.watcher.call()
class WatchedBuildStep(object):
    """
    class for buildstep plugins we want to watch.
    """

    def __init__(self, tasker, workflow, watcher, *args, **kwargs):
        super(WatchedBuildStep, self).__init__(tasker, workflow, *args, **kwargs)
        self.watcher = watcher

    def run(self):
        # Record the invocation, then hand back a canned successful result.
        self.watcher.call()
        return DUMMY_BUILD_RESULT
class PreWatched(WatchedMixIn, PreBuildPlugin):
    """
    A PreBuild plugin we can watch.
    """
    key = 'pre_watched'  # name used to reference this plugin in configuration
class PrePubWatched(WatchedMixIn, PrePublishPlugin):
    """
    A PrePublish plugin we can watch.
    """
    key = 'prepub_watched'  # name used to reference this plugin in configuration
class BuildStepWatched(WatchedBuildStep, BuildStepPlugin):
    """
    A BuildStep plugin we can watch.
    """
    key = 'buildstep_watched'  # name used to reference this plugin in configuration
class PostWatched(WatchedMixIn, PostBuildPlugin):
    """
    A PostBuild plugin we can watch.
    """
    key = 'post_watched'  # name used to reference this plugin in configuration
class ExitWatched(WatchedMixIn, ExitPlugin):
    """
    An Exit plugin we can watch.
    """
    key = 'exit_watched'  # name used to reference this plugin in configuration
class ExitRaises(RaisesMixIn, ExitPlugin):
    """
    An Exit plugin that should raise an exception.
    """
    key = 'exit_raises'  # name used to reference this plugin in configuration
class ExitRaisesAllowed(RaisesMixIn, ExitPlugin):
    """
    An Exit plugin that should raise an exception.
    """
    is_allowed_to_fail = True  # its failure must not fail the build
    key = 'exit_raises_allowed'
class ExitCompat(WatchedMixIn, ExitPlugin):
    """
    An Exit plugin called as a Post-build plugin.
    """
    key = 'store_logs_to_file'  # legacy post-build name, for compat tests
class Watcher(object):
    """Records whether call() happened; can optionally raise when called."""

    def __init__(self, raise_exc=False):
        self.called = False
        self.raise_exc = raise_exc

    def call(self):
        # Mark the call first so was_called() is true even if we raise.
        self.called = True
        if not self.raise_exc:
            return
        raise Exception

    def was_called(self):
        return self.called
class WatcherWithSignal(Watcher):
    """Watcher that additionally delivers a signal to this process when
    called (presumably to exercise signal handling during builds --
    confirm with callers)."""
    def __init__(self, signal=None):
        super(WatcherWithSignal, self).__init__()
        self.signal = signal

    def call(self):
        super(WatcherWithSignal, self).call()
        if self.signal:
            # Deliver the configured signal to our own process.
            os.kill(os.getpid(), self.signal)
def test_workflow():
    """
    Test normal workflow.

    Every plugin phase gets a watched plugin; after the build each watcher
    must have been invoked and the base image inspect must be the canned {}.
    """
    # Replace the Dockerfile content and the builder with inert fakes.
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    # One watcher per plugin phase.
    watch_pre = Watcher()
    watch_prepub = Watcher()
    watch_buildstep = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=[{'name': 'pre_watched',
                                                      'args': {
                                                          'watcher': watch_pre
                                                      }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep
                                                       }}],
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()
    # Each phase must have run its plugin exactly as configured.
    assert watch_pre.was_called()
    assert watch_prepub.was_called()
    assert watch_buildstep.was_called()
    assert watch_post.was_called()
    assert watch_exit.was_called()
    # MockDockerTasker.inspect_image returns {} for the base image.
    assert workflow.base_image_inspect == {}
def test_workflow_base_images():
    """
    Test workflow for base images

    Same as test_workflow, but the builder simulates a base-image build
    whose tasker raises NotFound on inspect, so base_image_inspect must
    not be populated.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    # is_base_image=True wires in MockDockerTaskerBaseImage.
    fake_builder = MockInsideBuilder(is_base_image=True)
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_pre = Watcher()
    watch_prepub = Watcher()
    watch_buildstep = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=[{'name': 'pre_watched',
                                                      'args': {
                                                          'watcher': watch_pre
                                                      }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep
                                                       }}],
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()
    assert watch_pre.was_called()
    assert watch_prepub.was_called()
    assert watch_buildstep.was_called()
    assert watch_post.was_called()
    assert watch_exit.was_called()
    # The base image could not be inspected, so the attribute access raises.
    with pytest.raises(KeyError):
        assert workflow.base_image_inspect
class FakeLogger(object):
    """Drop-in logger replacement that records each call's args per level."""

    def __init__(self):
        self.debugs = []
        self.infos = []
        self.warnings = []
        self.errors = []
        self.exc = []

    def log(self, logs, args):
        # Common sink: append the raw argument tuple to the level's list.
        logs.append(args)

    def debug(self, *args):
        self.log(self.debugs, args)

    def info(self, *args):
        self.log(self.infos, args)

    def warning(self, *args):
        self.log(self.warnings, args)

    def error(self, *args):
        self.log(self.errors, args)

    def exception(self, *args):
        self.log(self.exc, args)
def test_workflow_compat(request):
    """
    Some of our plugins have changed from being run post-build to
    being run at exit. Let's test what happens when we try running an
    exit plugin as a post-build plugin.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    watch_buildstep = Watcher()
    # Swap in a recording logger; the finalizer restores the real one so
    # other tests are unaffected.
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger
    # 'store_logs_to_file' is an Exit plugin (ExitCompat) configured here
    # in the postbuild phase on purpose.
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   postbuild_plugins=[{'name': 'store_logs_to_file',
                                                       'args': {
                                                           'watcher': watch_exit
                                                       }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep
                                                       }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()
    assert watch_exit.was_called()
    assert len(fake_logger.errors) == 0  # This is explicitly allowed now
class Pre(PreBuildPlugin):
    """
    This plugin does nothing. It's only used for configuration testing.
    """
    key = 'pre'  # name used to reference this plugin in configuration
class BuildStep(BuildStepPlugin):
    """
    This plugin does nothing. It's only used for configuration testing.
    """
    key = 'buildstep'

    def run(self):
        # Presumably signals this buildstep cannot handle the build so the
        # workflow falls through to another one -- confirm in plugin.py.
        raise InappropriateBuildStepError
class Post(PostBuildPlugin):
    """
    This plugin does nothing. It's only used for configuration testing.
    """
    key = 'post'  # name used to reference this plugin in configuration
class PrePub(PrePublishPlugin):
    """
    This plugin does nothing. It's only used for configuration testing.
    """
    key = 'prepub'  # name used to reference this plugin in configuration
class Exit(ExitPlugin):
    """
    This plugin does nothing. It's only used for configuration testing.
    """
    key = 'exit'  # name used to reference this plugin in configuration
@pytest.mark.parametrize(('plugins', 'should_fail', 'should_log'), [
# No 'name' key, prebuild
({
'prebuild_plugins': [{'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, buildstep
({
'buildstep_plugins': [{'args': {}},
{'name': 'buildstep_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, postbuild
({
'postbuild_plugins': [{'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, prepub
({
'prepublish_plugins': [{'args': {}},
{'name': 'prepub_watched',
'args': {
'watcher': Watcher(),
},
}]},
True, # is fatal
True, # logs error
),
# No 'name' key, exit
({
'exit_plugins': [{'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]
},
False, # not fatal
True, # logs error
),
# No 'args' key, prebuild
({'prebuild_plugins': [{'name': 'pre'},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No 'args' key, buildstep
({'buildstep_plugins': [{'name': 'buildstep'},
{'name': 'buildstep_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No 'args' key, postbuild
({'postbuild_plugins': [{'name': 'post'},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal,
False, # no error logged
),
# No 'args' key, prepub
({'prepublish_plugins': [{'name': 'prepub'},
{'name': 'prepub_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal,
False, # no error logged
),
# No 'args' key, exit
({'exit_plugins': [{'name': 'exit'},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No such plugin, prebuild
({'prebuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, buildstep
({'buildstep_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'buildstep_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # is fatal
False, # logs error
),
# No such plugin, postbuild
({'postbuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, prepub
({'prepublish_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'prepub_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, exit
({'exit_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
True, # logs error
),
# No such plugin, prebuild, not required
({'prebuild_plugins': [{'name': 'no plugin',
'args': {},
'required': False},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # does not log error
),
# No such plugin, buildstep, not required
({'buildstep_plugins': [{'name': 'no plugin',
'args': {},
'required': False},
{'name': 'buildstep_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # does not log error
),
# No such plugin, postbuild, not required
({'postbuild_plugins': [{'name': 'no plugin',
'args': {},
'required': False},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # does not log error
),
# No such plugin, prepub, not required
({'prepublish_plugins': [{'name': 'no plugin',
'args': {},
'required': False},
{'name': 'prepub_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # does not log error
),
# No such plugin, exit, not required
({'exit_plugins': [{'name': 'no plugin',
'args': {},
'required': False},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # does not log error
),
])
def test_plugin_errors(request, plugins, should_fail, should_log):
    """
    Try bad plugin configuration.

    Runs a workflow with the parametrized (broken) plugin configuration and
    checks, per the parametrized expectations, whether the build fails with
    PluginFailedException and whether an error is logged.
    """
    flexmock(DockerfileParser, content='df_content')
    flexmock(DockerApiPlugin).should_receive('run').and_return(DUMMY_BUILD_RESULT)
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    # Capture log output from atomic_reactor.plugin; restore the real
    # logger when the test finishes (even on failure).
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   plugin_files=[this_file],
                                   **plugins)

    # Find the 'watcher' parameter
    watchers = [conf.get('args', {}).get('watcher')
                for plugin in plugins.values()
                for conf in plugin]
    watcher = [x for x in watchers if x][0]

    if should_fail:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()

        # The watched plugin must not have run when the build is fatal.
        assert not watcher.was_called()

        # Failures are recorded as plugin-name -> reason string mappings.
        assert workflow.plugins_errors
        assert all([is_string_type(plugin)
                    for plugin in workflow.plugins_errors])
        assert all([is_string_type(reason)
                    for reason in workflow.plugins_errors.values()])
    else:
        workflow.build_docker_image()

        assert watcher.was_called()
        assert not workflow.plugins_errors

    if should_log:
        assert len(fake_logger.errors) > 0
    else:
        assert len(fake_logger.errors) == 0
class StopAutorebuildPlugin(PreBuildPlugin):
    """Pre-build plugin whose run() always cancels an autorebuild."""

    key = 'stopstopstop'

    def run(self):
        """Raise AutoRebuildCanceledException unconditionally."""
        cancel_exc = AutoRebuildCanceledException(self.key, 'message')
        raise cancel_exc
def test_autorebuild_stop_prevents_build():
    """
    test that a plugin that raises AutoRebuildCanceledException results in actually skipped build
    """
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    # Watchers record whether each later phase actually ran.
    watch_prepub = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()

    # 'stopstopstop' (StopAutorebuildPlugin) raises
    # AutoRebuildCanceledException during the prebuild phase.
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=[{'name': 'stopstopstop',
                                                      'args': {
                                                      }}],
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])

    with pytest.raises(AutoRebuildCanceledException):
        workflow.build_docker_image()

    # Build phases after prebuild are skipped, but exit plugins still run.
    assert not watch_prepub.was_called()
    assert not watch_post.was_called()
    assert watch_exit.was_called()
    assert workflow.autorebuild_canceled is True
    assert not workflow.build_canceled
@pytest.mark.parametrize('fail_at', ['pre_raises',
                                     'buildstep_raises',
                                     'prepub_raises',
                                     'post_raises',
                                     'exit_raises',
                                     'exit_raises_allowed'])
def test_workflow_plugin_error(fail_at):
    """
    This is a test for what happens when plugins fail.

    When a prebuild or postbuild plugin fails, and doesn't have
    is_allowed_to_fail=True set, the whole build should fail.
    However, all the exit plugins should run.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    # One watcher per phase records whether that phase completed.
    watch_pre = Watcher()
    watch_prepub = Watcher()
    watch_buildstep = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()

    prebuild_plugins = [{'name': 'pre_watched',
                         'args': {
                             'watcher': watch_pre,
                         }}]
    buildstep_plugins = [{'name': 'buildstep_watched',
                          'args': {
                              'watcher': watch_buildstep,
                          }}]
    prepublish_plugins = [{'name': 'prepub_watched',
                           'args': {
                               'watcher': watch_prepub,
                           }}]
    postbuild_plugins = [{'name': 'post_watched',
                          'args': {
                              'watcher': watch_post
                          }}]
    exit_plugins = [{'name': 'exit_watched',
                     'args': {
                         'watcher': watch_exit
                     }}]

    # Insert a failing plugin into one of the build phases
    if fail_at == 'pre_raises':
        prebuild_plugins.insert(0, {'name': fail_at, 'args': {}})
    elif fail_at == 'buildstep_raises':
        buildstep_plugins.insert(0, {'name': fail_at, 'args': {}})
    elif fail_at == 'prepub_raises':
        prepublish_plugins.insert(0, {'name': fail_at, 'args': {}})
    elif fail_at == 'post_raises':
        postbuild_plugins.insert(0, {'name': fail_at, 'args': {}})
    elif fail_at == 'exit_raises' or fail_at == 'exit_raises_allowed':
        exit_plugins.insert(0, {'name': fail_at, 'args': {}})
    else:
        # Typo in the parameter list?
        assert False

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=prebuild_plugins,
                                   buildstep_plugins=buildstep_plugins,
                                   prepublish_plugins=prepublish_plugins,
                                   postbuild_plugins=postbuild_plugins,
                                   exit_plugins=exit_plugins,
                                   plugin_files=[this_file])

    # Most failures cause the build process to abort. Unless, it's
    # an exit plugin that's explicitly allowed to fail.
    if fail_at == 'exit_raises_allowed':
        workflow.build_docker_image()
        assert not workflow.plugins_errors
    else:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()

        # The failing plugin's key must be recorded in the error map.
        assert fail_at in workflow.plugins_errors

    # The pre-build phase should only complete if there were no
    # earlier plugin failures.
    assert watch_pre.was_called() == (fail_at != 'pre_raises')

    # The buildstep phase should only complete if there were no
    # earlier plugin failures.
    assert watch_buildstep.was_called() == (fail_at not in ('pre_raises',
                                                            'buildstep_raises'))

    # The prepublish phase should only complete if there were no
    # earlier plugin failures.
    assert watch_prepub.was_called() == (fail_at not in ('pre_raises',
                                                         'prepub_raises',
                                                         'buildstep_raises'))

    # The post-build phase should only complete if there were no
    # earlier plugin failures.
    assert watch_post.was_called() == (fail_at not in ('pre_raises',
                                                       'prepub_raises',
                                                       'buildstep_raises',
                                                       'post_raises'))

    # But all exit plugins should run, even if one of them also raises
    # an exception.
    assert watch_exit.was_called()
def test_workflow_docker_build_error():
    """
    This is a test for what happens when the docker build fails.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    # failed=True makes the mocked builder's build step fail; the buildstep
    # watcher is also configured to raise when called.
    fake_builder = MockInsideBuilder(failed=True)
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_pre = Watcher()
    watch_buildstep = Watcher(raise_exc=True)
    watch_prepub = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=[{'name': 'pre_watched',
                                                      'args': {
                                                          'watcher': watch_pre
                                                      }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep,
                                                       }}],
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])

    with pytest.raises(Exception):
        workflow.build_docker_image()

    # No subsequent build phases should have run except 'exit'
    assert watch_pre.was_called()
    assert watch_buildstep.was_called()
    assert not watch_prepub.was_called()
    assert not watch_post.was_called()
    assert watch_exit.was_called()
class ExitUsesSource(ExitWatched):
    """Exit plugin that checks the source tree is still present when it runs."""

    key = 'uses_source'

    def run(self):
        """Assert the build file path still exists, then record the call."""
        build_file_path = self.workflow.source.get_build_file_path()[0]
        assert os.path.exists(build_file_path)
        WatchedMixIn.run(self)
@requires_internet
def test_source_not_removed_for_exit_plugins():
    """
    The source tree must still be available when exit plugins run:
    the 'uses_source' exit plugin (ExitUsesSource) asserts that the
    build file path still exists at that point.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    watch_buildstep = Watcher()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image',
                                   exit_plugins=[{'name': 'uses_source',
                                                  'args': {
                                                      'watcher': watch_exit,
                                                  }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep,
                                                       }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()

    # Make sure that the plugin was actually run
    assert watch_exit.was_called()
class ValueMixIn(object):
    """Plugin mix-in whose run() yields a value derived from the plugin key."""

    def __init__(self, tasker, workflow, *args, **kwargs):
        super(ValueMixIn, self).__init__(tasker, workflow, *args, **kwargs)

    def run(self):
        """Return '<key>_result' so the stored result is easy to recognize."""
        return self.key + '_result'
class ValueBuildStep(object):
    """Build step mix-in whose run() returns DUMMY_BUILD_RESULT."""

    def __init__(self, tasker, workflow, *args, **kwargs):
        super(ValueBuildStep, self).__init__(tasker, workflow, *args, **kwargs)

    def run(self):
        # DUMMY_BUILD_RESULT is defined elsewhere in this module;
        # presumably a successful BuildResult -- see its definition.
        return DUMMY_BUILD_RESULT
class ValueFailedBuildStep(object):
    """Build step mix-in whose run() returns DUMMY_FAILED_BUILD_RESULT."""

    def run(self):
        return DUMMY_FAILED_BUILD_RESULT
class ValueRemoteBuildStep(object):
    """Build step mix-in whose run() returns DUMMY_REMOTE_BUILD_RESULT."""

    def run(self):
        return DUMMY_REMOTE_BUILD_RESULT
class PreBuildResult(ValueMixIn, PreBuildPlugin):
    """
    Pre build plugin that returns a result ('pre_build_value_result') when run.
    """
    key = 'pre_build_value'
class BuildStepResult(ValueBuildStep, BuildStepPlugin):
    """
    Build step plugin that returns a (successful) result when run.
    """
    key = 'buildstep_value'
class BuildStepFailedResult(ValueFailedBuildStep, BuildStepPlugin):
    """
    Build step plugin that returns a failed result when run.
    """
    key = 'buildstep_failed_value'
class BuildStepRemoteResult(ValueRemoteBuildStep, BuildStepPlugin):
    """
    Build step plugin that returns a remote result when run.
    """
    # (The original docstring said "failed result" -- a copy/paste slip;
    # this mixes in ValueRemoteBuildStep, which returns DUMMY_REMOTE_BUILD_RESULT.)
    key = 'buildstep_remote_value'
class PostBuildResult(ValueMixIn, PostBuildPlugin):
    """
    Post build plugin that returns a result ('post_build_value_result') when run.
    """
    key = 'post_build_value'
class PrePublishResult(ValueMixIn, PrePublishPlugin):
    """
    Pre publish plugin that returns a result ('pre_publish_value_result') when run.
    """
    key = 'pre_publish_value'
class ExitResult(ValueMixIn, ExitPlugin):
    """
    Exit plugin that returns a result ('exit_value_result') when run.
    """
    key = 'exit_value'
@pytest.mark.parametrize(['buildstep_plugin', 'buildstep_raises'], [
    ['buildstep_value', False],
    ['buildstep_remote_value', False],
    ['buildstep_failed_value', True],
])
def test_workflow_plugin_results(buildstep_plugin, buildstep_raises):
    """
    Verifies the results of plugins in different phases
    are stored properly.

    It also verifies failed and remote BuildResult is handled properly.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    # Each phase runs a single value-returning plugin (see the *Result
    # classes above); run() returns '<key>_result' or a BuildResult.
    prebuild_plugins = [{'name': 'pre_build_value'}]
    buildstep_plugins = [{'name': buildstep_plugin}]
    postbuild_plugins = [{'name': 'post_build_value'}]
    prepublish_plugins = [{'name': 'pre_publish_value'}]
    exit_plugins = [{'name': 'exit_value'}]

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=prebuild_plugins,
                                   buildstep_plugins=buildstep_plugins,
                                   prepublish_plugins=prepublish_plugins,
                                   postbuild_plugins=postbuild_plugins,
                                   exit_plugins=exit_plugins,
                                   plugin_files=[this_file])

    if buildstep_raises:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()
    else:
        workflow.build_docker_image()

    assert workflow.prebuild_results == {'pre_build_value': 'pre_build_value_result'}
    assert isinstance(workflow.buildstep_result[buildstep_plugin], BuildResult)
    if buildstep_raises:
        # A failed build step aborts before prepublish/postbuild run.
        assert workflow.postbuild_results == {}
        assert workflow.prepub_results == {}
    else:
        assert workflow.postbuild_results == {'post_build_value': 'post_build_value_result'}
        assert workflow.prepub_results == {'pre_publish_value': 'pre_publish_value_result'}
    assert workflow.exit_results == {'exit_value': 'exit_value_result'}
@pytest.mark.parametrize('fail_at', ['pre', 'prepub', 'buildstep', 'post', 'exit'])
def test_cancel_build(request, fail_at):
    """
    Verifies that exit plugins are executed when the build is canceled
    """
    # Make the phase we're testing send us SIGTERM
    phase_signal = defaultdict(lambda: None)
    phase_signal[fail_at] = signal.SIGTERM
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    # Each watcher sends its configured signal when the phase runs;
    # only the phase under test gets SIGTERM, the others get None.
    watch_pre = WatcherWithSignal(phase_signal['pre'])
    watch_prepub = WatcherWithSignal(phase_signal['prepub'])
    watch_buildstep = WatcherWithSignal(phase_signal['buildstep'])
    watch_post = WatcherWithSignal(phase_signal['post'])
    watch_exit = WatcherWithSignal(phase_signal['exit'])

    # Capture log output; restore the real logger when the test finishes.
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prebuild_plugins=[{'name': 'pre_watched',
                                                      'args': {
                                                          'watcher': watch_pre
                                                      }}],
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep
                                                       }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])

    if fail_at == 'buildstep':
        # Cancellation during the build step surfaces as a plugin failure
        # and is logged at error level.
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()
        assert workflow.build_canceled
        assert ("plugin '%s_watched' raised an exception:" % fail_at +
                " BuildCanceledException('Build was canceled',)",) in fake_logger.errors
    else:
        workflow.build_docker_image()
        if fail_at != 'exit':
            # Cancellation in other phases is logged as a warning.
            assert workflow.build_canceled
            assert ("plugin '%s_watched' raised an exception:" % fail_at +
                    " BuildCanceledException('Build was canceled',)",) in fake_logger.warnings
        else:
            assert not workflow.build_canceled

    # Exit plugins always run; earlier phases only run when no prior
    # phase canceled the build.
    assert watch_exit.was_called()
    assert watch_pre.was_called()

    if fail_at not in ['pre', 'buildstep']:
        assert watch_prepub.was_called()

    if fail_at not in ['pre', 'prepub', 'buildstep']:
        assert watch_post.was_called()
@pytest.mark.parametrize('has_version', [True, False])
def test_show_version(request, has_version):
    """
    Test atomic-reactor print version of osbs-client used to build the build json
    if available
    """
    VERSION = "1.0"
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_buildstep = Watcher()

    # Capture debug log output from atomic_reactor.inner; restore the
    # real logger when the test finishes.
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.inner.logger

    def restore_logger():
        atomic_reactor.inner.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.inner.logger = fake_logger

    params = {
        'prebuild_plugins': [],
        'buildstep_plugins': [{'name': 'buildstep_watched',
                               'args': {'watcher': watch_buildstep}}],
        'prepublish_plugins': [],
        'postbuild_plugins': [],
        'exit_plugins': [],
        'plugin_files': [this_file],
    }
    if has_version:
        params['client_version'] = VERSION

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image', **params)
    workflow.build_docker_image()

    # The version message is logged only when client_version was supplied.
    expected_log_message = ("build json was built by osbs-client %s", VERSION)
    assert (expected_log_message in fake_logger.debugs) == has_version
def test_add_pulp_registry():
    """Exercise PushConf.add_pulp_registry de-duplication and validation."""
    push_conf = atomic_reactor.inner.PushConf()
    push_conf.add_pulp_registry("example.com", "http://example.com", False)
    assert push_conf.pulp_registries[0].name == "example.com"
    assert push_conf.pulp_registries[0].uri == "http://example.com"
    assert not push_conf.pulp_registries[0].server_side_sync

    # Re-adding the same registry flips server_side_sync to True...
    push_conf.add_pulp_registry("example.com", "http://example.com", True)
    assert push_conf.pulp_registries[0].server_side_sync
    # ...and it remains True even when re-added with False (sticky flag).
    push_conf.add_pulp_registry("example.com", "http://example.com", False)
    assert push_conf.pulp_registries[0].server_side_sync

    # A new registry without a URI is rejected.
    with pytest.raises(RuntimeError):
        push_conf.add_pulp_registry("example2.com", None, False)
    # A conflicting URI for an already-known name is rejected.
    with pytest.raises(RuntimeError):
        push_conf.add_pulp_registry("example.com", "http://example2.com", False)

    # A genuinely new registry is appended rather than merged.
    push_conf.add_pulp_registry("registry2.example.com", "http://registry2.example.com", True)
    assert len(push_conf.pulp_registries) == 2
def test_layer_sizes():
    """The workflow records per-layer sizes after a successful build."""
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    watch_buildstep = Watcher()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image',
                                   exit_plugins=[{'name': 'uses_source',
                                                  'args': {
                                                      'watcher': watch_exit,
                                                  }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep,
                                                       }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()

    # Expected layer sizes, oldest first -- presumably produced by the
    # mocked docker client set up in mock_docker(); verify against that
    # fixture if the values change.
    expected = [
        {'diff_id': u'sha256:diff_id1-oldest', 'size': 4},
        {'diff_id': u'sha256:diff_id2', 'size': 3},
        {'diff_id': u'sha256:diff_id3', 'size': 2},
        {'diff_id': u'sha256:diff_id4-newest', 'size': 1}
    ]
    assert workflow.layer_sizes == expected
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class TestGatherTree(test.TestCase):
  """Tests the gather_tree function."""

  def test_gather_tree(self):
    # (max_time = 3, batch_size = 2, beam_width = 3)

    # create (batch_size, max_time, beam_width) matrix and transpose it
    predicted_ids = np.array(
        [[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
        dtype=np.int32).transpose([1, 0, 2])
    parent_ids = np.array(
        [[[0, 0, 0], [0, 1, 1], [2, 1, 2]], [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
        dtype=np.int32).transpose([1, 0, 2])

    # max_sequence_lengths is shaped (batch_size = 2)
    max_sequence_lengths = [3, 3]

    expected_result = np.array([[[2, 2, 2], [6, 5, 6], [7, 8, 9]],
                                [[2, 4, 4], [7, 6, 6],
                                 [8, 9, 10]]]).transpose([1, 0, 2])

    res = beam_search_ops.gather_tree(
        predicted_ids,
        parent_ids,
        max_sequence_lengths=max_sequence_lengths,
        end_token=11)

    with self.test_session() as sess:
      res_ = sess.run(res)

    self.assertAllEqual(expected_result, res_)

  def _test_gather_tree_from_array(self,
                                   depth_ndims=0,
                                   merged_batch_beam=False):
    # Helper: build (max_time, batch, beam) data plus parent_ids and check
    # gather_tree_from_array reorders the beams as expected, optionally with
    # extra depth dims and/or batch*beam merged into a single axis.
    array = np.array(
        [[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0]],
         [[2, 3, 4], [5, 6, 7], [8, 9, 10], [11, 12, 0]]]).transpose([1, 0, 2])
    parent_ids = np.array(
        [[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]],
         [[0, 0, 0], [1, 1, 0], [2, 0, 1], [0, 1, 0]]]).transpose([1, 0, 2])
    expected_array = np.array(
        [[[2, 2, 2], [6, 5, 6], [7, 8, 9], [0, 0, 0]],
         [[2, 3, 2], [7, 5, 7], [8, 9, 8], [11, 12, 0]]]).transpose([1, 0, 2])
    sequence_length = [[3, 3, 3], [4, 4, 3]]

    array = ops.convert_to_tensor(
        array, dtype=dtypes.float32)
    parent_ids = ops.convert_to_tensor(
        parent_ids, dtype=dtypes.int32)
    expected_array = ops.convert_to_tensor(
        expected_array, dtype=dtypes.float32)

    max_time = array_ops.shape(array)[0]
    batch_size = array_ops.shape(array)[1]
    beam_width = array_ops.shape(array)[2]

    def _tile_in_depth(tensor):
      # Generate higher rank tensors by concatenating tensor and tensor + 1.
      for _ in range(depth_ndims):
        tensor = array_ops.stack([tensor, tensor + 1], -1)
      return tensor

    if merged_batch_beam:
      # Collapse (batch, beam) into a single dimension.
      array = array_ops.reshape(
          array, [max_time, batch_size * beam_width])
      expected_array = array_ops.reshape(
          expected_array, [max_time, batch_size * beam_width])

    if depth_ndims > 0:
      array = _tile_in_depth(array)
      expected_array = _tile_in_depth(expected_array)

    sorted_array = beam_search_decoder.gather_tree_from_array(
        array, parent_ids, sequence_length)

    with self.test_session() as sess:
      sorted_array = sess.run(sorted_array)
      expected_array = sess.run(expected_array)
      self.assertAllEqual(expected_array, sorted_array)

  def test_gather_tree_from_array_scalar(self):
    self._test_gather_tree_from_array()

  def test_gather_tree_from_array_1d(self):
    self._test_gather_tree_from_array(depth_ndims=1)

  def test_gather_tree_from_array_1d_with_merged_batch_beam(self):
    self._test_gather_tree_from_array(depth_ndims=1, merged_batch_beam=True)

  def test_gather_tree_from_array_2d(self):
    self._test_gather_tree_from_array(depth_ndims=2)
class TestArrayShapeChecks(test.TestCase):
  """Tests the dynamic batch/beam shape check used by the decoder."""

  def _test_array_shape_dynamic_checks(self, static_shape, dynamic_shape,
                                       batch_size, beam_width, is_valid=True):
    # Feed a tensor whose static shape hides the batch/beam dims behind
    # None, then run the dynamic check op and expect success or failure.
    t = array_ops.placeholder_with_default(
        np.random.randn(*static_shape).astype(np.float32),
        shape=dynamic_shape)

    batch_size = array_ops.constant(batch_size)
    check_op = beam_search_decoder._check_batch_beam(t, batch_size, beam_width)  # pylint: disable=protected-access

    with self.test_session() as sess:
      if is_valid:
        sess.run(check_op)
      else:
        with self.assertRaises(errors.InvalidArgumentError):
          sess.run(check_op)

  def test_array_shape_dynamic_checks(self):
    # dims (batch, beam) separate: valid
    self._test_array_shape_dynamic_checks(
        (8, 4, 5, 10), (None, None, 5, 10), 4, 5, is_valid=True)
    # batch * beam merged into one dim (4 * 5 == 20): valid
    self._test_array_shape_dynamic_checks(
        (8, 20, 10), (None, None, 10), 4, 5, is_valid=True)
    # 21 != 4 * 5: invalid
    self._test_array_shape_dynamic_checks(
        (8, 21, 10), (None, None, 10), 4, 5, is_valid=False)
    # beam dim 6 != 5: invalid
    self._test_array_shape_dynamic_checks(
        (8, 4, 6, 10), (None, None, None, 10), 4, 5, is_valid=False)
    # rank too small: invalid
    self._test_array_shape_dynamic_checks(
        (8, 4), (None, None), 4, 5, is_valid=False)
class TestEosMasking(test.TestCase):
  """Tests EOS masking used in beam search."""

  def test_eos_masking(self):
    probs = constant_op.constant([
        [[-.2, -.2, -.2, -.2, -.2], [-.3, -.3, -.3, 3, 0], [5, 6, 0, 0, 0]],
        [[-.2, -.2, -.2, -.2, 0], [-.3, -.3, -.1, 3, 0], [5, 6, 3, 0, 0]],
    ])

    eos_token = 0
    # Beams flagged True are already finished and must be masked.
    previously_finished = np.array([[0, 1, 0], [0, 1, 1]], dtype=bool)
    masked = beam_search_decoder._mask_probs(probs, eos_token,
                                             previously_finished)

    with self.test_session() as sess:
      probs = sess.run(probs)
      masked = sess.run(masked)

      # Unfinished beams are left untouched.
      self.assertAllEqual(probs[0][0], masked[0][0])
      self.assertAllEqual(probs[0][2], masked[0][2])
      self.assertAllEqual(probs[1][0], masked[1][0])

      # Finished beams get probability 0 at the EOS token...
      self.assertEqual(masked[0][1][0], 0)
      self.assertEqual(masked[1][1][0], 0)
      self.assertEqual(masked[1][2][0], 0)

      # ...and the float32 minimum (effectively -inf) everywhere else.
      for i in range(1, 5):
        self.assertAllClose(masked[0][1][i], np.finfo('float32').min)
        self.assertAllClose(masked[1][1][i], np.finfo('float32').min)
        self.assertAllClose(masked[1][2][i], np.finfo('float32').min)
class TestBeamStep(test.TestCase):
  """Tests a single step of beam search."""

  def setUp(self):
    super(TestBeamStep, self).setUp()
    self.batch_size = 2
    self.beam_width = 3
    self.vocab_size = 5
    self.end_token = 0
    self.length_penalty_weight = 0.6

  def test_step(self):
    # All beams start unfinished with equal log-probs and length 2.
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=constant_op.constant(
            2, shape=[self.batch_size, self.beam_width], dtype=dtypes.int64),
        finished=array_ops.zeros(
            [self.batch_size, self.beam_width], dtype=dtypes.bool))

    # Near-uniform logits with a few spiked entries that determine which
    # (beam, token) pairs win the step.
    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 2.7
    logits_[1, 2, 2] = 10.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertAllEqual(outputs_.predicted_ids, [[3, 3, 2], [2, 2, 1]])
    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [2, 1, 0]])
    self.assertAllEqual(next_state_.lengths, [[3, 3, 3], [3, 3, 3]])
    self.assertAllEqual(next_state_.finished,
                        [[False, False, False], [False, False, False]])

    # Expected log-probs: previous beam log-prob (gathered via parent id)
    # plus the chosen token's log-prob.
    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[2, 1, 0]])  # 0 --> 1
    expected_log_probs[0][0] += log_probs_[0, 1, 3]
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 2, 2]
    expected_log_probs[1][1] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)

  def test_step_with_eos(self):
    # Beam (0, 1) and beam (1, 2) are already finished.
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=ops.convert_to_tensor(
            [[2, 1, 2], [2, 2, 1]], dtype=dtypes.int64),
        finished=ops.convert_to_tensor(
            [[False, True, False], [False, False, True]], dtype=dtypes.bool))

    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 5.7  # why does this not work when it's 2.7?
    logits_[1, 2, 2] = 1.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    # Finished beams emit end_token (0) and do not grow in length.
    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [1, 2, 0]])
    self.assertAllEqual(outputs_.predicted_ids, [[0, 3, 2], [2, 0, 1]])
    self.assertAllEqual(next_state_.lengths, [[1, 3, 3], [3, 1, 3]])
    self.assertAllEqual(next_state_.finished,
                        [[True, False, False], [False, True, False]])

    # Finished beams keep their old log-prob; others add the token's.
    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[1, 2, 0]])
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)
class TestLargeBeamStep(test.TestCase):
  """Tests large beam step.

  Tests a single step of beam search in such case that beam size is larger than
  vocabulary size.
  """

  def setUp(self):
    super(TestLargeBeamStep, self).setUp()
    self.batch_size = 2
    self.beam_width = 8
    self.vocab_size = 5
    self.end_token = 0
    self.length_penalty_weight = 0.6

  def test_step(self):

    def get_probs():
      """this simulates the initialize method in BeamSearchDecoder."""
      # Only beam 0 starts with log-prob 0; all other beams start at
      # -inf so they cannot win the expansion.
      log_prob_mask = array_ops.one_hot(
          array_ops.zeros([self.batch_size], dtype=dtypes.int32),
          depth=self.beam_width,
          on_value=True,
          off_value=False,
          dtype=dtypes.bool)

      log_prob_zeros = array_ops.zeros(
          [self.batch_size, self.beam_width], dtype=dtypes.float32)
      log_prob_neg_inf = array_ops.ones(
          [self.batch_size, self.beam_width], dtype=dtypes.float32) * -np.Inf

      log_probs = array_ops.where(log_prob_mask, log_prob_zeros,
                                  log_prob_neg_inf)
      return log_probs

    log_probs = get_probs()
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])

    # pylint: disable=invalid-name
    # Beam 0 is unfinished with length 2; all other beams are marked
    # finished with length 0 (they are placeholders).
    _finished = array_ops.one_hot(
        array_ops.zeros([self.batch_size], dtype=dtypes.int32),
        depth=self.beam_width,
        on_value=False,
        off_value=True,
        dtype=dtypes.bool)
    _lengths = np.zeros([self.batch_size, self.beam_width], dtype=np.int64)
    _lengths[:, 0] = 2
    _lengths = constant_op.constant(_lengths, dtype=dtypes.int64)

    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=log_probs,
        lengths=_lengths,
        finished=_finished)

    # Near-uniform logits with a few spiked entries.
    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 2.7
    logits_[1, 2, 2] = 10.0
    logits_[1, 2, 3] = 0.2
    logits = constant_op.constant(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight)

    with self.test_session() as sess:
      outputs_, next_state_, _, _ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertEqual(outputs_.predicted_ids[0, 0], 3)
    self.assertEqual(outputs_.predicted_ids[0, 1], 2)
    self.assertEqual(outputs_.predicted_ids[1, 0], 1)
    neg_inf = -np.Inf
    # With beam_width (8) > vocab_size (5), only 5 beams can carry real
    # hypotheses; the trailing 3 stay at -inf log-prob and length 0.
    self.assertAllEqual(
        next_state_.log_probs[:, -3:],
        [[neg_inf, neg_inf, neg_inf], [neg_inf, neg_inf, neg_inf]])
    self.assertEqual((next_state_.log_probs[:, :-3] > neg_inf).all(), True)
    self.assertEqual((next_state_.lengths[:, :-3] > 0).all(), True)
    self.assertAllEqual(next_state_.lengths[:, -3:], [[0, 0, 0], [0, 0, 0]])
class BeamSearchDecoderTest(test.TestCase):
def _testDynamicDecodeRNN(self, time_major, has_attention,
with_alignment_history=False):
encoder_sequence_length = np.array([3, 2, 3, 1, 1])
decoder_sequence_length = np.array([2, 0, 1, 2, 3])
batch_size = 5
decoder_max_time = 4
input_depth = 7
cell_depth = 9
attention_depth = 6
vocab_size = 20
end_token = vocab_size - 1
start_token = 0
embedding_dim = 50
max_out = max(decoder_sequence_length)
output_layer = layers_core.Dense(vocab_size, use_bias=True, activation=None)
beam_width = 3
with self.test_session() as sess:
batch_size_tensor = constant_op.constant(batch_size)
embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)
cell = rnn_cell.LSTMCell(cell_depth)
initial_state = cell.zero_state(batch_size, dtypes.float32)
if has_attention:
inputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, decoder_max_time, input_depth).astype(
np.float32),
shape=(None, None, input_depth))
tiled_inputs = beam_search_decoder.tile_batch(
inputs, multiplier=beam_width)
tiled_sequence_length = beam_search_decoder.tile_batch(
encoder_sequence_length, multiplier=beam_width)
attention_mechanism = attention_wrapper.BahdanauAttention(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
initial_state = beam_search_decoder.tile_batch(
initial_state, multiplier=beam_width)
cell = attention_wrapper.AttentionWrapper(
cell=cell,
attention_mechanism=attention_mechanism,
attention_layer_size=attention_depth,
alignment_history=with_alignment_history)
cell_state = cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size_tensor * beam_width)
if has_attention:
cell_state = cell_state.clone(cell_state=initial_state)
bsd = beam_search_decoder.BeamSearchDecoder(
cell=cell,
embedding=embedding,
start_tokens=array_ops.fill([batch_size_tensor], start_token),
end_token=end_token,
initial_state=cell_state,
beam_width=beam_width,
output_layer=output_layer,
length_penalty_weight=0.0)
final_outputs, final_state, final_sequence_lengths = (
decoder.dynamic_decode(
bsd, output_time_major=time_major, maximum_iterations=max_out))
def _t(shape):
if time_major:
return (shape[1], shape[0]) + shape[2:]
return shape
self.assertTrue(
isinstance(final_outputs,
beam_search_decoder.FinalBeamSearchDecoderOutput))
self.assertTrue(
isinstance(final_state, beam_search_decoder.BeamSearchDecoderState))
beam_search_decoder_output = final_outputs.beam_search_decoder_output
self.assertEqual(
_t((batch_size, None, beam_width)),
tuple(beam_search_decoder_output.scores.get_shape().as_list()))
self.assertEqual(
_t((batch_size, None, beam_width)),
tuple(final_outputs.predicted_ids.get_shape().as_list()))
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
'final_outputs': final_outputs,
'final_state': final_state,
'final_sequence_lengths': final_sequence_lengths
})
max_sequence_length = np.max(sess_results['final_sequence_lengths'])
# A smoke test
self.assertEqual(
_t((batch_size, max_sequence_length, beam_width)),
sess_results['final_outputs'].beam_search_decoder_output.scores.shape)
self.assertEqual(
_t((batch_size, max_sequence_length, beam_width)), sess_results[
'final_outputs'].beam_search_decoder_output.predicted_ids.shape)
  def testDynamicDecodeRNNBatchMajorNoAttention(self):
    """Smoke test: batch-major beam-search dynamic decode, no attention."""
    self._testDynamicDecodeRNN(time_major=False, has_attention=False)
  def testDynamicDecodeRNNBatchMajorYesAttention(self):
    """Smoke test: batch-major beam-search dynamic decode with attention."""
    self._testDynamicDecodeRNN(time_major=False, has_attention=True)
  def testDynamicDecodeRNNBatchMajorYesAttentionWithAlignmentHistory(self):
    """Smoke test: batch-major decode, attention + alignment history kept."""
    self._testDynamicDecodeRNN(
        time_major=False,
        has_attention=True,
        with_alignment_history=True)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| |
from collections import namedtuple
import numpy as np
import uuid
from . import config, log, Parser
from .hnumpy import StorageNumpy
from .IStorage import IStorage
from .tools import get_istorage_attrs, build_remotely, storage_id_from_name, basic_types, \
valid_types
class StorageObj(IStorage):
    # Ordered argument names used to (de)serialize the construction state of a
    # StorageObj so it can be rebuilt on a remote worker (see _build_args).
    args_names = ["name", "tokens", "storage_id", "class_name", "built_remotely"]
    args = namedtuple('StorageObjArgs', args_names)
    # Prepared statement, shared by all instances, used to register an object
    # in the hecuba.istorage metadata table.
    _prepared_store_meta = config.session.prepare('INSERT INTO hecuba.istorage'
                                                  '(storage_id, class_name, name, tokens) '
                                                  ' VALUES (?,?,?,?)')
    # NOTE(review): this string literal appears AFTER the class attributes, so
    # it is a no-op expression, not the class docstring. Subclasses are the
    # ones expected to carry a docstring (parsed by _parse_comments).
    """
    This class is where information will be stored in Hecuba.
    The information can be in memory, stored in a python dictionary or local variables, or saved in a
    DB(Cassandra), depending on if it's persistent or not.
    """
    @staticmethod
    def _store_meta(storage_args):
        """
        Saves the information of the object in the istorage table.
        Args:
            storage_args (object): contains all data needed to restore the object from the workers
        Raises:
            Exception: re-raised after logging if the INSERT fails.
        """
        log.debug("StorageObj: storing media %s", storage_args)
        try:
            config.session.execute(StorageObj._prepared_store_meta,
                                   [storage_args.storage_id,
                                    storage_args.class_name,
                                    storage_args.name,
                                    storage_args.tokens])
        except Exception as ex:
            # NOTE(review): message says "StorageDict" although this is
            # StorageObj — looks copy-pasted; left as-is (runtime text).
            log.warn("Error creating the StorageDict metadata: %s, %s", str(storage_args), ex)
            raise ex
    @classmethod
    def _parse_comments(cls, comments):
        """Parse a (subclass) docstring into the persistent-attribute spec.

        Args:
            comments (str): docstring containing ``ClassField`` declarations.
        Returns:
            The attribute spec produced by Parser (used elsewhere as a dict
            of attribute name -> type/column description).
        """
        parser = Parser("ClassField")
        return parser._parse_comments(comments)
    def __init__(self, name=None, storage_id=None, *args, **kwargs):
        """
        Creates a new storageobj.
        Args:
            name (string): the name of the Cassandra Keyspace + table where information can be found
            tokens (list of tuples): token ranges assigned to the new StorageObj
            storage_id (string): an unique storageobj identifier
            kwargs: more optional parameters
        """
        # Assign private attributes
        # The persistent-attribute spec comes from the *subclass* docstring.
        self._persistent_props = StorageObj._parse_comments(self.__doc__)
        self._persistent_attrs = self._persistent_props.keys()
        self._class_name = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
        super().__init__(name=name, storage_id=storage_id, *args, **kwargs)
        # Backing Cassandra table is named after the concrete subclass.
        self._table = self.__class__.__name__.lower()
        args = self.args(self._get_name(), self._tokens, self.storage_id, self._class_name, self._built_remotely)
        self._build_args = args
        if name or storage_id:  # therefore... are we doing an Instantiation or a Creation? (built_remotely may be used to instantiate a mockup)
            try:
                # Metadata already present: this is an instantiation of an
                # existing persistent object; nothing more to do.
                data = get_istorage_attrs(self.storage_id)[0]
                # Instantiation
            except Exception:
                # No metadata found: first creation, persist it now.
                self._persist_data(name)
                pass  # Creation
        log.debug("CREATED StorageObj(%s)", self._get_name())
def __eq__(self, other):
return self.__class__ == other.__class__ and self.getID() == other.getID()
    def _persist_attributes(self):
        """
        Persist in-memory attributes to the data store
        """
        for attribute in self._persistent_props.keys():
            try:
                # Read directly from the instance, bypassing the __getattr__
                # override (which would otherwise hit the database).
                val = super().__getattribute__(attribute)
                # Re-assigning through __setattr__ writes the value to storage.
                setattr(self, attribute, val)
            except AttributeError:
                # Attribute was never set in memory; nothing to persist.
                pass
def _build_is_attribute(self, attribute, persistence_name, storage_id):
# Build the IStorage obj
info = {"tokens": self._tokens, "storage_id": storage_id}
info.update(self._persistent_props[attribute])
info["built_remotely"] = self._built_remotely
info['name'] = persistence_name
return build_remotely(info)
def _create_tables(self):
"""
Setups the python structures used to communicate with the backend.
Creates the necessary tables on the backend to store the object data.
"""
log.info("CREATING KEYSPACE AND TABLE %s %s",self._ksp, self._table)
query_keyspace = "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = %s" % (self._ksp, config.replication)
config.executelocked(query_keyspace)
query_simple = 'CREATE TABLE IF NOT EXISTS ' + self._ksp + '.' + self._table + \
'( storage_id uuid PRIMARY KEY, '
for key, entry in self._persistent_props.items():
query_simple += str(key) + ' '
if entry['type'] != 'dict' and entry['type'] in valid_types:
if entry['type'] == 'list' or entry['type'] == 'tuple':
query_simple += entry['type'] + '<' + entry['columns'] + '>, '
else:
query_simple += entry['type'] + ', '
else:
query_simple += 'uuid, '
try:
config.executelocked(query_simple[:-2] + ' )')
except Exception as ir:
log.error("Unable to execute %s", query_simple)
raise ir
    def _flush_to_storage(self):
        """Flush this object and, recursively, every IStorage attribute."""
        super()._flush_to_storage()
        for attr_name in self._persistent_attrs:
            # NOTE(review): getattr on super() is presumably meant to avoid
            # this class's DB-rebuilding __getattr__ — confirm.
            attr = getattr(super(), attr_name, None)
            if isinstance(attr, IStorage):
                attr._flush_to_storage()
    def _persist_data(self, name):
        """Create backing tables (when owner) and register object metadata.

        Args:
            name (string): qualified name; used to refresh the build args.
        """
        self._table = self.__class__.__name__.lower()
        # Arguments used to build objects remotely
        self._build_args = self.args(self._get_name(),
                                     self._tokens,
                                     self.storage_id,
                                     self._class_name,
                                     self._built_remotely)
        # If never existed, must create the tables and register
        if not self._built_remotely:
            self._create_tables()
        # Iterate over the objects the user has requested to be persistent
        # retrieve them from memory and make them persistent
        self._persist_attributes()
        StorageObj._store_meta(self._build_args)
    def make_persistent(self, name):
        """
        Once a StorageObj has been created, it can be made persistent. This function retrieves the information about
        the Object class schema, and creates a Cassandra table with those parameters, where information will be
        saved from now on, until execution finishes or StorageObj is no longer persistent.
        It also inserts into the new table all information that was in memory assigned to the StorageObj prior to
        this call.
        Args:
            name (string): name with which the table in the DB will be created
        """
        # Update name (and storage_id) via the IStorage base implementation.
        super().make_persistent(name)
        self._persist_data(name)
def stop_persistent(self):
"""
The StorageObj stops being persistent, but keeps the information already stored in Cassandra
"""
log.debug("STOP PERSISTENT")
for obj_name in self._persistent_attrs:
try:
attr = object.__getattribute__(self, obj_name)
except AttributeError:
attr = None
if isinstance(attr, IStorage):
attr.stop_persistent()
super().stop_persistent()
self.storage_id = None
    def delete_persistent(self):
        """
        Deletes the Cassandra table where the persistent StorageObj stores data
        """
        log.debug("DELETE PERSISTENT: %s", self._table)
        for obj_name in self._persistent_attrs:
            # NOTE: plain getattr goes through __getattr__, so attributes may
            # be rebuilt from the DB right before being deleted.
            attr = getattr(self, obj_name, None)
            if isinstance(attr, IStorage):
                attr.delete_persistent()
        # TODO Drop table _ksp._table if it just contains a single element (non-perfomant :(
        query = "DELETE FROM {}.{} where storage_id={}".format(self._ksp, self._table, self.storage_id)
        config.session.execute(query)
        query = "DELETE FROM hecuba.istorage where storage_id={}".format(self.storage_id)
        config.session.execute(query)
        super().delete_persistent()
        self.storage_id = None
    def __getattr__(self, attribute):
        """
        Given an attribute, this function returns the value, obtaining it from either:
        a) memory
        b) the Database
        Args:
            attribute: name of the value that we want to obtain
        Returns:
            value: obtained value
        Raises:
            AttributeError: basic-typed attribute missing or NULL in the DB.
        """
        # Private names and non-persistent attributes use the default lookup.
        if attribute.startswith('_') or attribute not in self._persistent_attrs:
            return super().__getattribute__(attribute)
        is_istorage_attr = self._persistent_props[attribute]["type"] not in basic_types
        if not self.storage_id:
            # Volatile object: IStorage attributes are built lazily on first
            # access and cached in memory; basic ones must already be there.
            if is_istorage_attr:
                value = self._build_is_attribute(attribute, persistence_name=None, storage_id=None)
                super().__setattr__(attribute, value)
            return super().__getattribute__(attribute)
        '''
        StorageObj is persistent.
        If the attribute is not a built-in object, we might have it in memory.
        Since python works using references any modification from another reference will affect this attribute,
        which is the expected behaviour. Therefore, is safe to store in-memory the Hecuba objects.
        '''
        try:
            return super().__getattribute__(attribute)
        except AttributeError as ex:
            # Not present in memory, we will need to rebuild it
            pass
        query = "SELECT %s FROM %s.%s WHERE storage_id = %s;" % (attribute, self._ksp, self._table, self.storage_id)
        log.debug("GETATTR: %s", query)
        try:
            result = config.session.execute(query)
        except Exception as ex:
            log.warn("GETATTR ex %s", ex)
            raise ex
        try:
            value = result[0][0]
            # if exists but is set to None, the current behaviour is raising AttributeError
            if not is_istorage_attr and value is None:
                raise AttributeError('value not found')
        except IndexError as ex:
            # No row at all for this storage_id.
            if not is_istorage_attr:
                raise AttributeError('value not found')
            value = None
        except TypeError as ex:
            log.warn("ERROR ON QUERY RESULT {}".format(str(result)))
            raise ex
        if is_istorage_attr:
            # Value is uuid or None, because it was not found
            attr_name = None
            if value is None:
                # Value not found, persist it BY NAME using a random name so we can retrieve it later
                attr_name = attribute.lower()
                my_name = self._get_name()
                trailing_name = my_name[my_name.rfind('.') + 1:]
                number = uuid.uuid4()  # Random value
                attr_name = self._ksp + "." + ("O" + str(number).replace('-', '_') + trailing_name + attr_name)[:40]
                value = self._build_is_attribute(attribute, persistence_name=attr_name, storage_id=None)
                # Following lines emulate "self.__setattr__(attribute, value)" without the checks
                values = [self.storage_id, value.storage_id]
                query = "INSERT INTO %s.%s (storage_id,%s)" % (self._ksp, self._table, attribute)
                query += " VALUES (%s,%s)"
                log.debug("SETATTR: " + query)
                config.session.execute(query, values)
            else:
                # uuid found: rebuild the IStorage object from its storage_id.
                value = self._build_is_attribute(attribute, persistence_name=attr_name, storage_id=value)
            # Cache the Hecuba object in memory (see note above).
            super().__setattr__(attribute, value)
        return value
    def __setattr__(self, attribute, value):
        """
        Given a key and its value, this function saves it (depending on if it's persistent or not):
        a) In memory
        b) In the DB
        Args:
            attribute: name of the value that we want to set
            value: value that we want to save
        """
        # Private and non-persistent attributes bypass all storage logic.
        if attribute[0] == '_' or attribute not in self._persistent_attrs:
            super().__setattr__(attribute, value)
            return
        # Transform numpy.ndarrays and python dicts to StorageNumpy and StorageDicts
        if not isinstance(value, IStorage):
            if isinstance(value, np.ndarray):
                value = StorageNumpy(value)
            elif isinstance(value, dict):
                per_dict = self._persistent_props[attribute]
                info = {"name": '', "tokens": self._build_args.tokens, "storage_id": None,
                        "built_remotely": self._built_remotely}
                info.update(per_dict)
                new_value = build_remotely(info)
                new_value.update(value)
                value = new_value
        if self.storage_id:
            # Write attribute to the storage
            if isinstance(value, IStorage):
                if not value.storage_id:
                    # Value is volatile, persist it BY NAME using a random name so we can retrieve it later
                    attr_name = attribute.lower()
                    my_name = self._get_name()
                    trailing_name = my_name[my_name.rfind('.') + 1:]
                    number = uuid.uuid4()  # Random value
                    name = self._ksp + "." + ("O" + str(number).replace('-', '_') + trailing_name + attr_name)[:40]
                    value.make_persistent(name)  # Persist BY NAME
                # We store the storage_id when the object belongs to an Hecuba class
                values = [self.storage_id, value.storage_id]
                # We store the IStorage object in memory, to avoid rebuilding when it is not necessary
            else:
                values = [self.storage_id, value]
            query = "INSERT INTO %s.%s (storage_id,%s)" % (self._ksp, self._table, attribute)
            query += " VALUES (%s,%s)"
            log.debug("SETATTR: " + query)
            config.session.execute(query, values)
        # We store all the attributes in memory
        super().__setattr__(attribute, value)
def __delattr__(self, name):
"""
Method that deletes a given attribute from a StorageObj
Args:
item: the name of the attribute to be deleted
"""
super().__delattr__(name)
if self.storage_id and name in self._persistent_attrs:
query = "UPDATE %s.%s SET %s = null WHERE storage_id = %s" % (
self._ksp, self._table, name, self.storage_id)
config.session.execute(query)
    def sync(self):
        """
        Wait until all pending stores to Cassandra have been finished.
        """
        if not self.storage_id:
            # Volatile object: nothing pending in the backend.
            return
        # Persistent Object
        for attribute in self._persistent_props.keys():
            try:
                # Direct lookup so a missing attribute is NOT rebuilt from DB.
                val = super().__getattribute__(attribute)
                if isinstance(val, IStorage):
                    log.debug("StorageObj sync: %s.%s of type %s", self.storage_id, attribute, type(val))
                    val.sync()
            except AttributeError as ex:
                # Not present in memory
                pass
| |
#!/usr/bin/python
#
# Author: Jarkko Vatjus-Anttila <jvatjusanttila@gmail.com>
#
# For conditions of distribution and use, see copyright notice in license.txt
#
import sys
import random
import TXMLOutput
class WorldGenerator():
    """ class WorldGenerator():
        A baseclass for creating 3D worlds based on the realXtend project
        entity-component model. Output is accumulated into a TXMLOutput
        instance and written with toFile().
    """
def __init__(self):
self.cPatchSize = 16
self.cPatchSize = 16
self.TXML = TXMLOutput.TXMLOutput()
###########################################################################
# File I/O
#
    def toFile(self, filename, overwrite=False):
        """Write the accumulated TXML document to *filename*."""
        self.TXML.toFile(filename, overwrite)
###########################################################################
# Internal attribute creation methods
#
    def __pushAttributeDictionary(self, name, d, parameters):
        """Emit component attributes: caller overrides win over defaults.

        name is only used for diagnostics; d holds the component defaults and
        parameters the optional caller-supplied overrides.
        """
        for attr, value in d.items():
            try: self.TXML.createAttribute(attr, parameters[attr])
            except KeyError:
                # No override supplied for this attribute: use the default.
                self.TXML.createAttribute(attr, d[attr])
            except TypeError:
                # parameters was not a dict-like object; abort this component.
                print "Error: (%s) Paramlist not a dictionary type." % name
                return
        self.__doubleCheckAttributes(name, d, parameters)
    def __doubleCheckAttributes(self, name, d, p):
        """Warn about caller parameters the component does not support."""
        for attr, value in p.items():
            try: t = d[attr]
            except KeyError:
                print "Warning, (%s) requesting attribute %s which is not supported by component" % (name, attr)
            #if attr[0].lower() == attr[0]:
            #    print "Warning, (%s) attribute %s not capitalized. It should be: %s" % (name, attr, attr[0].upper()+attr[1:])
###########################################################################
# Attribute creation, component and entity start and stop signals
# - EC_RigidBody
# - EC_Name
# - EC_Placeable
# - EC_Mesh
# - EC_Light
# - EC_Script
# - EC_InputMapper
# - EC_OgreCompositor
# - EC_Material
# - EC_Terrain
# - EC_WaterPlane
# - EC_EnvironmentLight
# - EC_Sky
# - EC_SkyX
# - EC_HydraX
#
def createComponent_Rigidbody(self, sync, parameters={}):
d = { "Mass" :"0",
"Shape type" :"0",
"Size" :"1.0 1.0 1.0",
"Collision mesh ref" :"",
"Friction" :"0.5",
"Restitution" :"0",
"Linear damping" :"0",
"Angular damping" :"0",
"Linear factor" :"1.0 1.0 1.0",
"Angular factor" :"1.0 1.0 1.0",
"Kinematic" :"false",
"Phantom" :"false",
"Draw debug" :"false",
"Linear velocity" :"0.0 0.0 0.0",
"Angular velocity" :"0.0 0.0 0.0",
"Collision layer" :"-1",
"Collision mask" :"-1" }
self.TXML.startComponent("EC_RigidBody", str(sync))
self.__pushAttributeDictionary("EC_RidigBody", d, parameters)
self.TXML.endComponent()
    def createComponent_Name(self, sync, parameters={}):
        """Create an EC_Name component (entity name and description)."""
        d = { "name"        :"",
              "description" :"" }
        self.TXML.startComponent("EC_Name", str(sync))
        self.__pushAttributeDictionary("EC_Name", d, parameters)
        self.TXML.endComponent()
def createComponent_Placeable(self, sync, parameters={}):
d = { "Transform" :"0,0,0,0,0,0,1,1,1",
"Show bounding box" :"false",
"Visible" :"true",
"Selection layer" :"1",
"Parent entity ref" :"",
"Parent bone name" :"" }
self.TXML.startComponent("EC_Placeable", str(sync))
self.__pushAttributeDictionary("EC_Name", d, parameters)
self.TXML.endComponent()
    def createComponent_Mesh(self, sync, parameters={}):
        """Create an EC_Mesh component (mesh/skeleton/material refs)."""
        d = { "Transform"      :"0,0,0,0,0,0,1,1,1",
              "Mesh ref"       :"",
              "Skeleton ref"   :"",
              "Mesh materials" :"",
              "Draw distance"  :"0",
              "Cast shadows"   :"true" }
        self.TXML.startComponent("EC_Mesh", str(sync))
        self.__pushAttributeDictionary("EC_Mesh", d, parameters)
        self.TXML.endComponent()
    def createComponent_Light(self, sync, parameters={}):
        """Create an EC_Light component with default light attributes."""
        d = { "lighttype"          :"",
              "diffuse color"      :"1.0 1.0 1.0",
              "specular color"     :"0.0 0.0 0.0",
              "cast shadows"       :"false",
              "light range"        :"25.0",
              "brightness"         :"1.0",
              "constant atten"     :"0.0",
              "linear atten"       :"0.01",
              "quadratic atten"    :"0.01",
              "light inner angle"  :"30.0",
              # NOTE(review): "lilght" is likely a typo for "light", but this
              # is a runtime attribute name written to the TXML — confirm
              # against the EC_Light spec before renaming.
              "lilght outer angle" :"40.0" }
        self.TXML.startComponent("EC_Light", str(sync))
        self.__pushAttributeDictionary("EC_Light", d, parameters)
        self.TXML.endComponent()
    def createComponent_Script(self, sync, parameters={}):
        """Create an EC_Script component (script reference + run mode)."""
        d = { "Script ref"              :"",
              "Run on load"             :"false",
              "Run mode"                :"0",
              "Script application name" :"",
              "Script class name"       :"" }
        self.TXML.startComponent("EC_Script", str(sync))
        self.__pushAttributeDictionary("EC_Script", d, parameters)
        self.TXML.endComponent()
    def createComponent_Inputmapper(self, sync, parameters={}):
        """Create an EC_InputMapper component (input routing defaults)."""
        d = { "Input context name"            :"EC_InputMapper",
              "Input context priority"        :"90",
              "Take keyboard events over QT"  :"false",
              "Take mouse events over QT"     :"false",
              "Action execution type"         :"1",
              "Key modifiers enable"          :"true",
              "Enable actions"                :"true",
              "Trigger on keyrepeats"         :"true",
              # NOTE(review): "keybaord" is likely a typo, but it is a runtime
              # attribute name — confirm against EC_InputMapper before fixing.
              "Suppress used keybaord events" :"false",
              "Suppress used mouse events"    :"false" }
        self.TXML.startComponent("EC_InputMapper", str(sync))
        self.__pushAttributeDictionary("EC_InputMapper", d, parameters)
        self.TXML.endComponent()
    def createComponent_Ogrecompositor(self, sync, parameters={}):
        """Create an EC_OgreCompositor component (post-processing effect)."""
        d = { "Enabled"        :"false",
              "Compositor ref" :"HDR",
              "Priority"       :"-1",
              "Parameters"     :"" }
        self.TXML.startComponent("EC_OgreCompositor", str(sync))
        self.__pushAttributeDictionary("EC_OgreCompositor", d, parameters)
        self.TXML.endComponent()
    def createComponent_Material(self, sync, parameters={}):
        """Create an EC_Material component (input/output material refs)."""
        d = { "Parameters"      :"",
              "Output material" :"",
              "Input material"  :"" }
        self.TXML.startComponent("EC_Material", str(sync))
        self.__pushAttributeDictionary("EC_Material", d, parameters)
        self.TXML.endComponent()
    def createComponent_Terrain(self, sync, parameters={}):
        """Create an EC_Terrain component (heightmap grid defaults)."""
        d = { "Transform"   :"0,0,0,0,0,0,1,1,1",
              "Grid Width"  :"1",
              "Grid Height" :"1",
              "Material"    :"local://RexTerrainPCF.material",
              "Heightmap"   :"",
              "Tex. U scale":"0.13",
              "Tex. V scale":"0.13" }
        self.TXML.startComponent("EC_Terrain", str(sync))
        self.__pushAttributeDictionary("EC_Terrain", d, parameters)
        self.TXML.endComponent()
    def createComponent_Waterplane(self, sync, parameters={}):
        """Create an EC_WaterPlane component (ocean plane + fog defaults)."""
        d = { "x-size"                  :"5000",
              "y-size"                  :"5000",
              "depth"                   :"20",
              "Position"                :"0 0 0",
              "Rotation"                :"1 0 0 0",
              "U factor"                :"0.0002f",
              "V factor"                :"0.0002f",
              "Segments in x"           :"10",
              "Segments in y"           :"10",
              "Material"                :"Ocean",
              "Material ref"            :"",
              "Fog color"               :"0.2 0.4 0.35 1.0",
              "Fog start dist."         :"100.0f",
              "Fog end dist."           :"2000.0f",
              "Fog mode"                :"3",
              "Fog exponential density" :"0.001f" }
        self.TXML.startComponent("EC_WaterPlane", str(sync))
        self.__pushAttributeDictionary("EC_WaterPlane", d, parameters)
        self.TXML.endComponent()
    def createComponent_Environmentlight(self, sync, parameters={}):
        """Create an EC_EnvironmentLight component (sun/ambient defaults)."""
        d = { "Sunlight color"             :"0.63 0.63 0.63 1.0",
              "Ambient light color"        :"0.36 0.36 0.36 1.0",
              "Sunlight diffuse color"     :"0.93 0.93 0.93 1.0",
              "Sunlight direction vector"  :"-1.0 -1.0 -1.0",
              "Sunlight cast shadows"      :"true" }
        self.TXML.startComponent("EC_EnvironmentLight", str(sync))
        self.__pushAttributeDictionary("EC_EnvironmentLight", d, parameters)
        self.TXML.endComponent()
    def createComponent_Sky(self, sync, parameters={}):
        """Create an EC_Sky component (cube-map skybox defaults)."""
        d = { "Material"    :"RexSkyBox",
              "Texture"     :"rex_sky_front.dds;rex_sky_back.dds;rex_sky_left.dds;rex_sky_right.dds;rex_sky_top.dds;rex_sky_bot.dds",
              "Distance"    :"50",
              "Orientation" :"0.0 0.0 0.0 1.0",
              "Draw first"  :"true" }
        self.TXML.startComponent("EC_Sky", str(sync))
        self.__pushAttributeDictionary("EC_Sky", d, parameters)
        self.TXML.endComponent()
    def createComponent_SkyX(self, sync, parameters={}):
        """Create an EC_SkyX component (procedural sky/clouds defaults)."""
        d = { "Cloud type"                  :"1",
              "Time multiplier"             :"0.25",
              "Time [0-24]"                 :"16.978079",
              "Time sunrise [0-24]"         :"7.5",
              "Time sunset [0-24]"          :"20.5",
              "Cloud coverage [0-100]"      :"50",
              "Cloud average size [0-100]"  :"50",
              "Cloud height"                :"100",
              "Moon phase [0-100]"          :"70.7420731",
              "Sun inner radius"            :"9.75",
              "Sun outer radius"            :"10.25",
              "Wind direction"              :"0",
              "Wind speed"                  :"5" }
        self.TXML.startComponent("EC_SkyX", str(sync))
        self.__pushAttributeDictionary("EC_SkyX", d, parameters)
        self.TXML.endComponent()
    def createComponent_HydraX(self, sync, parameters={}):
        """Create an EC_Hydrax component (procedural water defaults)."""
        d = { "Config ref" :"HydraxDefault.hdx",
              "Visible"    :"true",
              "Position"   :"0.0 7.0 0.0" }
        self.TXML.startComponent("EC_Hydrax", str(sync))
        self.__pushAttributeDictionary("EC_Hydrax", d, parameters)
        self.TXML.endComponent()
###########################################################################
# Dynamic component creation
#
    def createComponent_Dynamiccomponent(self, name, variables):
        """Create an EC_DynamicComponent.

        variables is an iterable of (name, type, value) triples, matching
        TXMLOutput.createDynamicAttribute's argument order.
        """
        self.TXML.startComponent("EC_DynamicComponent", sync="1", name=name)
        for var in variables:
            self.TXML.createDynamicAttribute(var[0], var[1], var[2])
        self.TXML.endComponent()
###########################################################################
# Entity creation macros. These macros create certain, often needed entities.
# Parametrization is minimal, and if the defaults are not suitable for the
# application requirement then use the component creation methods instead
#
# - Terrain
# - Static mesh
# - Water plane
# - Simple Sky (Environmentlight + Sky)
#
    def createEntity_Terrain(self, sync, name, transform="0,0,0,0,0,0,1,1,1", width=1, height=1, material="", heightmap=""):
        """Macro: entity with name + terrain + rigid body + placeable."""
        self.TXML.startEntity()
        self.createComponent_Name(sync, { "name":str(name) } )
        self.createComponent_Terrain(sync, { "Grid Width"  :str(width),
                                             "Grid Height" :str(height),
                                             "Material"    :str(material),
                                             "Heightmap"   :str(heightmap) } )
        # Shape type 5: collision shape taken from the terrain heightfield.
        self.createComponent_Rigidbody(sync, { "Shape type" :"5" } )
        self.createComponent_Placeable(sync, { "Transform" :str(transform) } )
        self.TXML.endEntity()
    def createEntity_Staticmesh(self, sync, name, transform="0,0,0,0,0,0,1,1,1", mesh="", material="", skeleton=""):
        """Macro: static mesh entity with mesh-based collision shape."""
        self.TXML.startEntity()
        self.createComponent_Name(sync, { "name" :str(name) } )
        self.createComponent_Mesh(sync, { "Mesh ref"       :str(mesh),
                                          "Skeleton ref"   :str(skeleton),
                                          "Mesh materials" :str(material) } )
        self.createComponent_Placeable(sync, { "Transform" :str(transform) } )
        # Shape type 4: collision from the same mesh asset.
        self.createComponent_Rigidbody(sync, { "Shape type"        :"4",
                                               "Collision mesh ref":str(mesh) } )
        self.TXML.endEntity()
    def createEntity_Waterplane(self, sync, name, width, height, level):
        """Macro: water plane of width x height placed at height *level*."""
        self.TXML.startEntity()
        self.createComponent_Name(sync, { "name" :str(name) } )
        self.createComponent_Placeable(sync, { "Transform" :"0,%f,0,0,0,0,1,1,1" % float(level) } )
        self.createComponent_Waterplane(sync, { "x-size" :str(width),
                                                "y-size" :str(height) } )
        self.TXML.endEntity()
    def createEntity_Avatar(self, sync, name, script):
        """Macro: avatar application entity running *script* on load."""
        self.TXML.startEntity()
        self.createComponent_Name(sync, { "name" :str(name) } )
        self.createComponent_Script(sync, { "Script ref"              :str(script),
                                            "Run on load"             :"true",
                                            "Script application name" :"AvatarApp" } )
        self.TXML.endEntity()
    def createEntity_SimpleSky(self, sync, name, texture="rex_sky_front.dds;rex_sky_back.dds;rex_sky_left.dds;rex_sky_right.dds;rex_sky_top.dds;rex_sky_bot.dds"):
        """Macro: environment light + skybox entity."""
        self.TXML.startEntity()
        self.createComponent_Name(sync, { "name":str(name) } )
        self.createComponent_Environmentlight(sync)
        self.createComponent_Sky(sync, {"Texture":texture})
        self.TXML.endEntity()
###########################################################################
# Unit test case for demonstration of the WorldGenerator
#
if __name__ == "__main__":  # if run standalone
    import MeshGenerator
    import MeshIO
    import MeshContainer
    import TerrainGenerator
    import TextureGenerator

    # By a default test case we shall run an embedded world generator script to create a simple
    # world with terrain and a few objects in it. Feel free to rewrite the generator using
    # world generator primitives, by yourself.
    world = WorldGenerator()
    terrain = TerrainGenerator.TerrainGenerator()
    texture = TextureGenerator.TextureGenerator()
    width = 32
    height = 32
    assetdir = "./resources/"

    world.TXML.startScene()

    # Generate simple single-colour surface textures for the terrain.
    print "Generating grass texture..."
    texture.createSingleColorTexture(30,100,30,50)
    texture.toImage("./resources/generated_grass.png", "PNG", overwrite=True)
    print "Generating stone texture..."
    texture.createSingleColorTexture(90,83,73,50)
    texture.toImage("./resources/generated_stone.png", "PNG", overwrite=True)
    print "Generating sand texture..."
    texture.createSingleColorTexture(160,136,88,70)
    texture.toImage("./resources/generated_sand.png", "PNG", overwrite=True)

    # Random fractal terrain, rescaled and clamped, exported for the viewer.
    print "Generating a terrain..."
    terrain.fromDiamondsquare(width, 10, -5, -5, 10)
    terrain.rescale(-20, 50)
    terrain.saturate(-5)
    terrain.toWeightmap(assetdir + "terrainsurfacemap.png", overwrite=True)
    terrain.toFile(assetdir + "terrain.ntf", overwrite=True)
    # Terrain is centered by shifting half its world size (width*16/2 = width*8).
    world.createEntity_Terrain(1, "terrain", transform="%d,0,%d,0,0,0,1,1,1"%(-width*8, -height*8), width=width, height=height, material="terrainsample.material", heightmap="terrain.ntf")

    world.createEntity_Waterplane(1, "waterplane", width*world.cPatchSize, height*world.cPatchSize, -1)
    world.createEntity_Avatar(1, "AvatarApp", "avatarapplication.js;simpleavatar.js;exampleavataraddon.js")
    world.createEntity_SimpleSky(1, "SimpleSky")

    print ("Generating a group of meshes to the world...")
    mesh = MeshContainer.MeshContainer()
    meshgen = MeshGenerator.MeshGenerator(mesh)
    meshgen.createPlane(LOD=5)
    meshio = MeshIO.MeshIO(mesh)
    meshio.toFile(assetdir + "plane.mesh.xml", overwrite=True)

    # Scatter meshes on dry land: above the waterline but below the peaks.
    for i in range(20):
        x = random.randint(0, width*world.cPatchSize)
        y = random.randint(0, height*world.cPatchSize)
        z = terrain.getHeight(x,y)
        x = x - width*world.cPatchSize/2
        y = y - height*world.cPatchSize/2
        if (z > 2.0) and (z < terrain.getMaxitem()/2.0):
            world.createEntity_Staticmesh(1, "Tree"+str(world.TXML.getCurrentEntityID()),
                                          mesh="plane.mesh",
                                          material="",
                                          transform="%f,%f,%f,0,0,0,1,1,1" % (y, x, z))

    world.TXML.endScene()
    world.toFile("./testworld.txml", overwrite=True)
| |
#!/usr/bin/env python3
import argparse
import csv
import os
import pathlib
from collections import OrderedDict
from datetime import datetime, timedelta
from time import sleep
import yaml
import sqlalchemy as sqa
import serial
from serial.tools import list_ports
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Column name (CSV/DB) -> weather-station field code (e.g. 'Ta' = air
# temperature in the 0R0 composite message). Insertion order defines the CSV
# column order. Codes left commented out are intentionally not logged.
FIELDS = OrderedDict([
    ('time', 'time'),
    ('air_temperature', 'Ta'),
    ('rel_humidity', 'Ua'),
    ('air_pressure', 'Pa'),
    ('wind_speed_avg', 'Sm'),
    ('wind_speed_min', 'Sn'),
    ('wind_speed_max', 'Sx'),
    ('wind_dir_avg', 'Dm'),
    ('wind_dir_min', 'Dn'),
    ('wind_dir_max', 'Dx'),
    ('rain_accumulation', 'Rc'),
    ('rain_duration', 'Rd'),
    ('rain_intensity', 'Ri'),
    ('rain_peak_intensity', 'Rp'),
    # ('hail_accumulation', 'Hc'),
    # ('hail_duration', 'Hd'),
    # ('hail_intensity', 'Hi'),
    # ('hail_peak_intensity', 'Hp'),
    # ('heating_voltage', 'Vh'),
    # ('ref_voltage', 'Vr'),
    # ('supply_voltage', 'Vs'),
    ('heating_temperature', 'Th'),
    # ('internal_temperature', 'Tp'),
    # ('information', 'Id'),
])
# Inverse mapping: station field code -> column name, used when parsing.
reverse_FIELDS = {v: k for k, v in FIELDS.items()}
def append_csv_row(path, data, default='NaN'):
    """Append a row to a CSV file. If the file does not exist already, also write the header.

    Args:
        path (pathlib.Path): target CSV file.
        data (dict): row values keyed by FIELDS keys; missing keys get *default*.
        default (str): value written for fields absent from *data*.
    """
    new_file = not path.exists()
    # Fix: the file handle was opened but never closed, leaking one file
    # descriptor per appended row; a context manager guarantees the close.
    with path.open(mode='a', newline='') as f:
        writer = csv.DictWriter(f, FIELDS.keys(), restval=default)
        if new_file:
            logger.info('Created new file ' + str(path))
            writer.writeheader()
        writer.writerow(data)
def delete_log_files_if_needed(log_dir, max_files):
    """Keep at most *max_files* daily meteo CSV logs, removing the oldest."""
    log_files = sorted(pathlib.Path(log_dir).glob('meteo_????-??-??.csv'))
    excess = len(log_files) - max_files
    if excess > 0:
        logger.info('Too many log files. Deleting oldest.')
        # Sorted ISO-dated names mean the first entries are the oldest.
        for stale in log_files[:excess]:
            stale.unlink()
def convert_unit(key, value, unit, default=None):
    """Convert a raw station reading to hPa, degC, mm and mm/h (speed: m/s).

    Args:
        key (str): field code (e.g. 'Ta', 'Pa'); its first letter selects the
            quantity family (S=speed, P=pressure, T=temperature, R=rain).
        value: raw reading; may be a numeric string as produced by parse_line.
        unit (str): unit letter reported by the station; '#' marks invalid.
        default: returned unchanged for invalid ('#') readings.

    Returns:
        The converted numeric value, *value* unchanged when already in the
        target unit, or *default* for invalid readings.
    """
    def identity(v):
        return v
    dispatcher = dict()
    # Speed -> m/s. Fix: non-identity conversions now coerce with float(),
    # since parse_line passes the value as a string (was a TypeError).
    dispatcher['S'] = {
        'M': identity,                            # m/s
        'K': lambda v: 1000 / 3600 * float(v),    # km/h
        'S': lambda v: 0.44704 * float(v),        # mph
        'N': lambda v: 0.514444 * float(v),       # knots
    }
    # Pressure -> hPa
    dispatcher['P'] = {
        'H': identity,                            # hPa
        'P': lambda v: float(v) / 100,            # Pa
        'B': lambda v: float(v) * 1000,           # bar
        'M': lambda v: float(v) * 1.33322,        # mmHg
        'I': lambda v: float(v) * 25.4 * 1.33322, # inHg
    }
    # Temperature -> degC
    dispatcher['T'] = {
        'C': identity,                            # Celsius
        'F': lambda v: (float(v) - 32) * 5 / 9,   # Fahrenheit
    }
    # Rain -> mm, mm/h or s
    dispatcher['R'] = {
        'M': identity,                            # mm or mm/h
        's': identity,                            # seconds
        # Fix: 1 inch = 25.4 mm; the factor was mistyped as 52.4.
        'I': lambda v: 25.4 * float(v),           # in or in/h
    }
    if unit == '#':
        # '#' marks an invalid/unavailable reading.
        return default
    else:
        # Unknown families fall through to an identity conversion.
        conversion_fuc = dispatcher.get(key[0], {unit: identity})[unit]
        return conversion_fuc(value)
def parse_line(line):
    """Parse a data message from the meteo station.

    Returns (message_type, row) where row maps column names from FIELDS to
    unit-converted values; unknown field codes are dropped.
    """
    fields = line.split(',')
    msg_type = fields.pop(0)
    readings = dict()
    for field in fields:
        code, payload = field.split('=')
        # Payload is "<value><unit-letter>", e.g. "23.4C".
        readings[code] = convert_unit(code, payload[:-1], payload[-1], default='NaN')
    row = {reverse_FIELDS[code]: val
           for code, val in readings.items() if code in reverse_FIELDS}
    return msg_type, row
def parse_settings(line):
    """Parse a settings message ("<type>,k=v,k=v,...") into (type, dict)."""
    parts = line.split(',')
    msg_type = parts[0]
    settings = dict()
    for item in parts[1:]:
        key, value = item.split('=')
        settings[key] = value
    return msg_type, settings
class MeteoTerminal(serial.Serial):
    """Simple wrapper around pyserial object to send and receive commands from meteo station."""

    def __init__(self, name, *args, **kwargs):
        # Station default: 19200 baud with a short timeout so that probing
        # unknown ports fails fast. Caller kwargs override these defaults.
        default_kwargs = {'baudrate': 19200, 'timeout': 2}
        default_kwargs.update(kwargs)
        super().__init__(name, *args, **default_kwargs)
        self.clear()

    def ask(self, s):
        """Send command *s* and return the first response line."""
        self.send(s)
        return self.receive()

    def clear(self, loud=False):
        """Clear any previous incomplete input"""
        line = self.receive()
        while line:
            if loud:
                logger.warning('Unexpected response: ' + line)
            line = self.receive()
        # NOTE(review): '?' presumably elicits one response line, consumed by
        # readline() to leave the buffer clean — confirm against the protocol.
        self.send('?')
        self.readline()

    def send(self, s):
        # Commands are CRLF-terminated ASCII.
        self.write((s + '\r\n').encode('utf-8'))
        self.flush()

    def receive(self):
        """Return one decoded, stripped line, or '' on timeout."""
        bs = self.readline()
        if bs:
            return bs.decode('utf-8').strip()
        else:
            return ''

    def setup(self, settings):
        """Apply configuration lines, sending only those that differ.

        Each entry is a settings message; the current device state is queried
        first and only mismatching entries are written.
        """
        for line in settings:
            cmd, expected = parse_settings(line)
            cmd, current = parse_settings(self.ask(cmd))
            # Compare only the keys we are about to set.
            current = {k: v for k, v in current.items() if k in expected}
            if current != expected:
                answer = self.ask(line)
                logger.info('Setup "{}", answer "{}".'.format(line, answer))
                self.clear(loud=True)
            else:
                logger.info('Setup "{}" already ok.'.format(line))

    @staticmethod
    def find_station():
        """Probe serial ports; return the first that answers '0', else None."""
        ports = list_ports.comports()
        found = None
        for name, desc, hw in ports:
            try:
                logger.debug('Try ' + name)
                with MeteoTerminal(name) as ser:
                    answer = ser.ask('0')
                    # The station echoes its address ('0') back.
                    if answer == '0':
                        logger.debug('OK: '+name)
                        found = name
                        break
            except Exception:
                # Port busy or not a meteo station; keep probing.
                pass
        logger.info('Found meteo station: {}'.format(found))
        return found
def create_db_table(conn, table):
    """Create (if needed) and return the measurement table.

    'time' is the primary key; every other FIELDS column is a Float.

    Args:
        conn: an open SQLAlchemy connection.
        table (str): table name.
    Returns:
        sqlalchemy.Table: the (possibly freshly created) table object.
    """
    logger.info('Create table {} if not exists.'.format(table))
    if conn.dialect.name == 'mysql':
        # MySQL needs fsp=6 to keep sub-second timestamp precision.
        columns = [sqa.Column('time', sqa.dialects.mysql.DATETIME(fsp=6), primary_key=True), ]
    else:
        columns = [sqa.Column('time', sqa.types.DateTime(), primary_key=True), ]
    columns += [sqa.Column(name, sqa.types.Float()) for name in FIELDS if name != 'time']
    meta = sqa.MetaData()
    table = sqa.Table(table, meta, *columns)
    table.create(conn, checkfirst=True)
    return table
def meteo_logger(config):
    """Main acquisition loop: poll the meteo station every `interval` seconds.

    Each cycle reads one measurement ('0R0'), appends it to a per-day CSV
    under config['target'] (grouped by year) and, if enabled, inserts it into
    the configured database table. Daily precipitation counters are reset at
    the last poll of each UTC day. Runs until interrupted; exits the process
    on fatal setup errors.
    """
    # Resolve the serial port: auto-detect or explicit from config.
    if config['serial'] == 'auto':
        port = MeteoTerminal.find_station()
    else:
        port = config['serial']
    if port is None:
        logger.error('No meteo station found. Specify port in config file.')
        exit(1)
    # Optional database sink.
    db_engine = None
    db_table = None
    if config['database']['use_database']:
        try:
            db_engine = sqa.create_engine(config['database']['url'])
            with db_engine.connect() as conn:
                db_table = create_db_table(conn, config['database']['table'])
        except Exception as e:
            logger.error('While setting up database: ' + str(e))
            exit(1)
    output_dir = config['target']
    interval = config['interval']
    with MeteoTerminal(port, baudrate=config['baudrate']) as term:
        term.setup(config['setup'])
        logger.info('Will now take action every {} s.'.format(interval))
        while True:
            try:
                now = datetime.utcnow()
                # Poll and store measurement
                msg = term.ask('0R0')
                msg_type, data = parse_line(msg)
                data['time'] = now.isoformat() + 'Z'
                # Write csv: one file per day, grouped into year directories.
                day = now.date()
                path = pathlib.Path(output_dir) / str(now.year) / ('meteo_' + str(day) + '.csv')
                # exist_ok replaces the previous bare `except: pass` around mkdir.
                path.parent.mkdir(parents=True, exist_ok=True)
                append_csv_row(path, data)
                # Store to database
                if db_engine is not None:
                    with db_engine.connect() as conn:
                        conn.execute(db_table.insert(), **data)
                if (now + timedelta(seconds=interval)).day > now.day:
                    # Next measurement falls in the next day, so reset the
                    # daily precipitation counters now.
                    logger.info('Reset precipitation counters.')
                    term.ask('0XZRU')  # Precipitation counter reset
                    term.ask('0XZRI')  # Precipitation intensity reset
                # Housekeeping
                delete_log_files_if_needed(output_dir, config['max_files'])
                # Timing: sleep coarsely until ~2 s before the deadline, then
                # busy-wait the remainder for sub-second accuracy.
                if datetime.utcnow() - now >= timedelta(seconds=interval):
                    logger.warning('Loop took longer than interval. Working as fast as possible.')
                # BUG FIX: was min(interval - 2, 0), which made the sleep
                # threshold non-positive so the loop busy-waited the entire
                # interval; max() sleeps for all but the last ~2 seconds.
                while datetime.utcnow() - now < timedelta(seconds=max(interval - 2, 0)):
                    sleep(1)
                while datetime.utcnow() - now < timedelta(seconds=interval):
                    pass  # busy loop
            except KeyboardInterrupt:
                logger.info('Terminated by user.')
                exit(0)
            except Exception as e:
                # Best-effort loop: log and keep polling on transient errors.
                logger.warning('Exception in main loop: ' + str(e))
def main():
    """Entry point: load /etc/meteo.yml and start the logger loop."""
    with open('/etc/meteo.yml', 'r') as f:
        # safe_load avoids arbitrary Python object construction from the
        # config file; yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1.
        config = yaml.safe_load(f)
    meteo_logger(config)
if __name__ == '__main__':
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator for Dynamic RNNs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.framework.python.framework import deprecated
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.util import nest
# TODO(jtbates): Remove PredictionType when all non-experimental targets which
# depend on it point to rnn_common.PredictionType.
class PredictionType(object):
  """Enum-like constants selecting single-value vs. per-timestep output.

  Kept only for backward compatibility with non-experimental targets; new
  code should use `rnn_common.PredictionType` instead.
  """
  SINGLE_VALUE = 1
  MULTIPLE_VALUE = 2
def _get_state_name(i):
  """Return the feature-dict key ("STATE_PREFIX_i") for state component `i`."""
  return '%s_%s' % (rnn_common.RNNKeys.STATE_PREFIX, i)
def state_tuple_to_dict(state):
  """Returns a dict containing flattened `state`.

  Args:
    state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
      have the same rank and agree on all dimensions except the last.

  Returns:
    A dict containing the `Tensor`s that make up `state`. The keys of the dict
    are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
    in a depth-first traversal of `state`. `None` components stay `None`.
  """
  with ops.name_scope('state_tuple_to_dict'):
    state_dict = {}
    for idx, component in enumerate(nest.flatten(state)):
      key = _get_state_name(idx)
      if component is None:
        state_dict[key] = None
      else:
        # Identity gives the component a stable, predictable op name.
        state_dict[key] = array_ops.identity(component, name=key)
    return state_dict
def dict_to_state_tuple(input_dict, cell):
  """Reconstructs nested `state` from a dict containing state `Tensor`s.

  Args:
    input_dict: A dict of `Tensor`s.
    cell: An instance of `RNNCell`.

  Returns:
    If `input_dict` does not contain keys 'STATE_PREFIX_i' for `0 <= i < n`
    where `n` is the number of nested entries in `cell.state_size`, this
    function returns `None`. Otherwise, returns a `Tensor` if `cell.state_size`
    is an `int` or a nested tuple of `Tensor`s if `cell.state_size` is a nested
    tuple.

  Raises:
    ValueError: State is partially specified. The `input_dict` must contain
      values for all state components or none at all.
  """
  flat_state_sizes = nest.flatten(cell.state_size)
  state_tensors = []
  with ops.name_scope('dict_to_state_tuple'):
    for i, state_size in enumerate(flat_state_sizes):
      state_name = _get_state_name(i)
      state_tensor = input_dict.get(state_name)
      if state_tensor is not None:
        # Validate the component has shape [batch_size, state_size] before
        # admitting it into the packed state.
        rank_check = check_ops.assert_rank(
            state_tensor, 2, name='check_state_{}_rank'.format(i))
        shape_check = check_ops.assert_equal(
            array_ops.shape(state_tensor)[1],
            state_size,
            name='check_state_{}_shape'.format(i))
        with ops.control_dependencies([rank_check, shape_check]):
          state_tensor = array_ops.identity(state_tensor, name=state_name)
        state_tensors.append(state_tensor)
    if not state_tensors:
      return None
    elif len(state_tensors) == len(flat_state_sizes):
      # The zero state is used only as a structure template for packing.
      dummy_state = cell.zero_state(batch_size=1, dtype=dtypes.bool)
      return nest.pack_sequence_as(dummy_state, state_tensors)
    else:
      raise ValueError(
          # Fixed: the two concatenated fragments previously rendered as
          # "...specified.Expected..." with no separating space.
          'RNN state was partially specified. '
          'Expected zero or {} state Tensors; got {}'.
          format(len(flat_state_sizes), len(state_tensors)))
def _concatenate_context_input(sequence_input, context_input):
  """Replicates `context_input` across all timesteps of `sequence_input`.

  Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
  This value is appended to `sequence_input` on dimension 2 and the result is
  returned.

  Args:
    sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
      padded_length, d0]`.
    context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.

  Returns:
    A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
    d0 + d1]`.

  Raises:
    ValueError: If `sequence_input` does not have rank 3 or `context_input` does
      not have rank 2.
  """
  # Runtime graph assertions: both inputs must be float32 with the
  # documented ranks.
  seq_rank_check = check_ops.assert_rank(
      sequence_input,
      3,
      message='sequence_input must have rank 3',
      data=[array_ops.shape(sequence_input)])
  seq_type_check = check_ops.assert_type(
      sequence_input,
      dtypes.float32,
      message='sequence_input must have dtype float32; got {}.'.format(
          sequence_input.dtype))
  ctx_rank_check = check_ops.assert_rank(
      context_input,
      2,
      message='context_input must have rank 2',
      data=[array_ops.shape(context_input)])
  ctx_type_check = check_ops.assert_type(
      context_input,
      dtypes.float32,
      message='context_input must have dtype float32; got {}.'.format(
          context_input.dtype))
  with ops.control_dependencies(
      [seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
    padded_length = array_ops.shape(sequence_input)[1]
    # [batch, d1] -> [batch, 1, d1] -> [batch, padded_length, d1].
    tiled_context_input = array_ops.tile(
        array_ops.expand_dims(context_input, 1),
        array_ops.concat([[1], [padded_length], [1]], 0))
    # Append the tiled context along the feature dimension.
    return array_ops.concat([sequence_input, tiled_context_input], 2)
def build_sequence_input(features,
                         sequence_feature_columns,
                         context_feature_columns,
                         weight_collections=None,
                         scope=None):
  """Combine sequence and context features into input for an RNN.

  Args:
    features: A `dict` containing the input and (optionally) sequence length
      information and initial state.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features i.e. features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    weight_collections: List of graph collections to which weights are added.
    scope: Optional scope, passed through to parsing ops.

  Returns:
    A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, ?]`.
    This will be used as input to an RNN.
  """
  inputs = layers.sequence_input_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=sequence_feature_columns,
      weight_collections=weight_collections,
      scope=scope)
  if context_feature_columns is None:
    # No context features; the sequence input is used as-is.
    return inputs
  context = layers.input_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=context_feature_columns,
      weight_collections=weight_collections,
      scope=scope)
  # Broadcast the per-example context across every timestep.
  return _concatenate_context_input(inputs, context)
def construct_rnn(initial_state,
                  sequence_input,
                  cell,
                  num_label_columns,
                  dtype=dtypes.float32,
                  parallel_iterations=32,
                  swap_memory=True):
  """Build an RNN and apply a fully connected layer to get the desired output.

  Args:
    initial_state: The initial state to pass the RNN. If `None`, the
      default starting state for `self._cell` is used.
    sequence_input: A `Tensor` with shape `[batch_size, padded_length, d]`
      that will be passed as input to the RNN.
    cell: An initialized `RNNCell`.
    num_label_columns: The desired output dimension.
    dtype: dtype of `cell`.
    parallel_iterations: Number of iterations to run in parallel. Values >> 1
      use more memory but take less time, while smaller values use less memory
      but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.

  Returns:
    activations: The output of the RNN, projected to `num_label_columns`
      dimensions.
    final_state: A `Tensor` or nested tuple of `Tensor`s representing the final
      state output by the RNN.
  """
  with ops.name_scope('RNN'):
    rnn_outputs, final_state = rnn.dynamic_rnn(
        cell=cell,
        inputs=sequence_input,
        initial_state=initial_state,
        dtype=dtype,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory,
        time_major=False)
    # Project every timestep's output to `num_label_columns` with a shared
    # linear layer (no activation; the target column applies any link fn).
    activations = layers.fully_connected(
        inputs=rnn_outputs,
        num_outputs=num_label_columns,
        activation_fn=None,
        trainable=True)
    return activations, final_state
def _single_value_predictions(activations,
                              sequence_length,
                              target_column,
                              problem_type,
                              predict_probabilities):
  """Maps `activations` from the RNN to predictions for single value models.

  If `predict_probabilities` is `False`, this function returns a `dict`
  containing single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
  is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
  value of this entry is a `Tensor` of probabilities with shape
  `[batch_size, num_classes]`.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, calculate predictions.
    problem_type: Either `ProblemType.CLASSIFICATION` or
      `ProblemType.LINEAR_REGRESSION`.
    predict_probabilities: A Python boolean, indicating whether probabilities
      should be returned. Should only be set to `True` for
      classification/logistic regression problems.

  Returns:
    A `dict` mapping strings to `Tensors`.
  """
  with ops.name_scope('SingleValuePrediction'):
    # Only the activation at each sequence's last valid step matters for
    # single-value prediction.
    last_activations = rnn_common.select_last_activations(
        activations, sequence_length)
    predictions_name = (prediction_key.PredictionKey.CLASSES
                        if problem_type == constants.ProblemType.CLASSIFICATION
                        else prediction_key.PredictionKey.SCORES)
    if predict_probabilities:
      probabilities = target_column.logits_to_predictions(
          last_activations, proba=True)
      # Classes are derived from the probabilities via argmax.
      prediction_dict = {
          prediction_key.PredictionKey.PROBABILITIES: probabilities,
          predictions_name: math_ops.argmax(probabilities, 1)}
    else:
      predictions = target_column.logits_to_predictions(
          last_activations, proba=False)
      prediction_dict = {predictions_name: predictions}
    return prediction_dict
def _multi_value_loss(
    activations, labels, sequence_length, target_column, features):
  """Maps `activations` from the RNN to loss for multi value models.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    labels: A `Tensor` with length `[batch_size, padded_length]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, calculate predictions.
    features: A `dict` containing the input and (optionally) sequence length
      information and initial state.

  Returns:
    A scalar `Tensor` containing the loss.
  """
  with ops.name_scope('MultiValueLoss'):
    # Padding steps must not contribute to the loss; mask them out first.
    masked_activations, masked_labels = rnn_common.mask_activations_and_labels(
        activations, labels, sequence_length)
    return target_column.loss(masked_activations, masked_labels, features)
def _single_value_loss(
    activations, labels, sequence_length, target_column, features):
  """Maps `activations` from the RNN to loss for single value models.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    labels: A `Tensor` with length `[batch_size]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, calculate predictions.
    features: A `dict` containing the input and (optionally) sequence length
      information and initial state.

  Returns:
    A scalar `Tensor` containing the loss.
  """
  with ops.name_scope('SingleValueLoss'):
    # Only the last valid step of each sequence is compared to the label.
    last_activations = rnn_common.select_last_activations(
        activations, sequence_length)
    return target_column.loss(last_activations, labels, features)
def _get_output_alternatives(prediction_type,
                             problem_type,
                             prediction_dict):
  """Constructs output alternatives dict for `ModelFnOps`.

  Args:
    prediction_type: either `MULTIPLE_VALUE` or `SINGLE_VALUE`.
    problem_type: either `CLASSIFICATION` or `LINEAR_REGRESSION`.
    prediction_dict: a dictionary mapping strings to `Tensor`s containing
      predictions.

  Returns:
    `None` or a dictionary mapping a string to an output alternative.

  Raises:
    ValueError: `prediction_type` is not one of `SINGLE_VALUE` or
      `MULTIPLE_VALUE`.
  """
  if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
    return None
  if prediction_type != rnn_common.PredictionType.SINGLE_VALUE:
    raise ValueError('Unrecognized prediction_type: {}'.format(prediction_type))
  # Strip RNN state entries; only actual predictions are exported.
  filtered = {}
  for key, tensor in prediction_dict.items():
    if rnn_common.RNNKeys.STATE_PREFIX not in key:
      filtered[key] = tensor
  return {'dynamic_rnn_output': (problem_type, filtered)}
def _get_dynamic_rnn_model_fn(
    cell_type,
    num_units,
    target_column,
    problem_type,
    prediction_type,
    optimizer,
    sequence_feature_columns,
    context_feature_columns=None,
    predict_probabilities=False,
    learning_rate=None,
    gradient_clipping_norm=None,
    dropout_keep_probabilities=None,
    sequence_length_key=rnn_common.RNNKeys.SEQUENCE_LENGTH_KEY,
    dtype=dtypes.float32,
    parallel_iterations=None,
    swap_memory=True,
    name='DynamicRNNModel'):
  """Creates an RNN model function for an `Estimator`.

  The model function returns an instance of `ModelFnOps`. When
  `problem_type == ProblemType.CLASSIFICATION` and
  `predict_probabilities == True`, the returned `ModelFnOps` includes an output
  alternative containing the classes and their associated probabilities. When
  `predict_probabilities == False`, only the classes are included. When
  `problem_type == ProblemType.LINEAR_REGRESSION`, the output alternative
  contains only the predicted values.

  Args:
    cell_type: A string, a subclass of `RNNCell` or an instance of an `RNNCell`.
    num_units: A single `int` or a list of `int`s. The size of the `RNNCell`s.
    target_column: An initialized `TargetColumn`, used to calculate prediction
      and loss.
    problem_type: `ProblemType.CLASSIFICATION` or
      `ProblemType.LINEAR_REGRESSION`.
    prediction_type: `PredictionType.SINGLE_VALUE` or
      `PredictionType.MULTIPLE_VALUE`.
    optimizer: A subclass of `Optimizer`, an instance of an `Optimizer` or a
      string.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    predict_probabilities: A boolean indicating whether to predict probabilities
      for all classes. Must only be used with
      `ProblemType.CLASSIFICATION`.
    learning_rate: Learning rate used for optimization. This argument has no
      effect if `optimizer` is an instance of an `Optimizer`.
    gradient_clipping_norm: A float. Gradients will be clipped to this value.
    dropout_keep_probabilities: a list of dropout keep probabilities or `None`.
      If a list is given, it must have length `len(num_units) + 1`.
    sequence_length_key: The key that will be used to look up sequence length in
      the `features` dict.
    dtype: The dtype of the state and output of the given `cell`.
    parallel_iterations: Number of iterations to run in parallel. Values >> 1
      use more memory but take less time, while smaller values use less memory
      but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    name: A string that will be used to create a scope for the RNN.

  Returns:
    A model function to be passed to an `Estimator`.

  Raises:
    ValueError: `problem_type` is not one of
      `ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
    ValueError: `prediction_type` is not one of `PredictionType.SINGLE_VALUE`
      or `PredictionType.MULTIPLE_VALUE`.
    ValueError: `predict_probabilities` is `True` for `problem_type` other
      than `ProblemType.CLASSIFICATION`.
    ValueError: `len(dropout_keep_probabilities)` is not `len(num_units) + 1`.
  """
  # Argument validation happens eagerly, before the returned closure runs.
  if problem_type not in (constants.ProblemType.CLASSIFICATION,
                          constants.ProblemType.LINEAR_REGRESSION):
    raise ValueError(
        'problem_type must be ProblemType.LINEAR_REGRESSION or '
        'ProblemType.CLASSIFICATION; got {}'.
        format(problem_type))
  if prediction_type not in (rnn_common.PredictionType.SINGLE_VALUE,
                             rnn_common.PredictionType.MULTIPLE_VALUE):
    raise ValueError(
        'prediction_type must be PredictionType.MULTIPLE_VALUEs or '
        'PredictionType.SINGLE_VALUE; got {}'.
        format(prediction_type))
  if (problem_type != constants.ProblemType.CLASSIFICATION
      and predict_probabilities):
    raise ValueError(
        'predict_probabilities can only be set to True for problem_type'
        ' ProblemType.CLASSIFICATION; got {}.'.format(problem_type))
  def _dynamic_rnn_model_fn(features, labels, mode):
    """The model to be passed to an `Estimator`."""
    with ops.name_scope(name):
      sequence_length = features.get(sequence_length_key)
      sequence_input = build_sequence_input(features,
                                            sequence_feature_columns,
                                            context_feature_columns)
      # Dropout is active only during training.
      dropout = (dropout_keep_probabilities
                 if mode == model_fn.ModeKeys.TRAIN
                 else None)
      # This class promises to use the cell type selected by that function.
      cell = rnn_common.construct_rnn_cell(num_units, cell_type, dropout)
      initial_state = dict_to_state_tuple(features, cell)
      rnn_activations, final_state = construct_rnn(
          initial_state,
          sequence_input,
          cell,
          target_column.num_label_columns,
          dtype=dtype,
          parallel_iterations=parallel_iterations,
          swap_memory=swap_memory)

      loss = None  # Created below for modes TRAIN and EVAL.
      if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
        prediction_dict = rnn_common.multi_value_predictions(
            rnn_activations, target_column, problem_type, predict_probabilities)
        if mode != model_fn.ModeKeys.INFER:
          loss = _multi_value_loss(
              rnn_activations, labels, sequence_length, target_column, features)
      elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
        prediction_dict = _single_value_predictions(
            rnn_activations, sequence_length, target_column,
            problem_type, predict_probabilities)
        if mode != model_fn.ModeKeys.INFER:
          loss = _single_value_loss(
              rnn_activations, labels, sequence_length, target_column, features)
      # Expose the final RNN state so callers can chain predictions.
      state_dict = state_tuple_to_dict(final_state)
      prediction_dict.update(state_dict)
      eval_metric_ops = None
      if mode != model_fn.ModeKeys.INFER:
        eval_metric_ops = rnn_common.get_eval_metric_ops(
            problem_type, prediction_type, sequence_length, prediction_dict,
            labels)
      train_op = None
      if mode == model_fn.ModeKeys.TRAIN:
        train_op = optimizers.optimize_loss(
            loss=loss,
            global_step=None,  # Get it internally.
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=gradient_clipping_norm,
            summaries=optimizers.OPTIMIZER_SUMMARIES)
      output_alternatives = _get_output_alternatives(prediction_type,
                                                     problem_type,
                                                     prediction_dict)
      return model_fn.ModelFnOps(mode=mode,
                                 predictions=prediction_dict,
                                 loss=loss,
                                 train_op=train_op,
                                 eval_metric_ops=eval_metric_ops,
                                 output_alternatives=output_alternatives)
  return _dynamic_rnn_model_fn
def _get_dropout_and_num_units(cell_type,
                               num_units,
                               num_rnn_layers,
                               input_keep_probability,
                               output_keep_probability):
  """Helper function for deprecated factory functions.

  Replicates `num_units` across `num_rnn_layers` (unless `cell_type` is
  already an `RNNCell` instance, which defines its own sizes) and assembles
  the per-layer dropout keep-probability list.
  """
  if isinstance(cell_type, contrib_rnn.RNNCell):
    num_units = None
  else:
    num_units = [num_units] * num_rnn_layers
  if input_keep_probability or output_keep_probability:
    # Dropout applies to the first layer's input and the last layer's
    # output; intermediate layers keep everything.
    middle = [1.0] * (num_rnn_layers - 1)
    keep_probabilities = (
        [input_keep_probability] + middle + [output_keep_probability])
  else:
    keep_probabilities = None
  return keep_probabilities, num_units
class DynamicRnnEstimator(estimator.Estimator):
  """An `Estimator` that uses a dynamically-unrolled RNN as its model."""

  def __init__(self,
               problem_type,
               prediction_type,
               sequence_feature_columns,
               context_feature_columns=None,
               num_classes=None,
               num_units=None,
               cell_type='basic_rnn',
               optimizer='SGD',
               learning_rate=0.1,
               predict_probabilities=False,
               momentum=None,
               gradient_clipping_norm=5.0,
               dropout_keep_probabilities=None,
               model_dir=None,
               feature_engineering_fn=None,
               config=None):
    """Initializes a `DynamicRnnEstimator`.

    The input function passed to this `Estimator` optionally contains keys
    `RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
    `RNNKeys.SEQUENCE_LENGTH_KEY` must be vector of size `batch_size` where
    entry `n` corresponds to the length of the `n`th sequence in the batch. The
    sequence length feature is required for batches of varying sizes. It will be
    used to calculate loss and evaluation metrics. If
    `RNNKeys.SEQUENCE_LENGTH_KEY` is not included, all sequences are assumed to
    have length equal to the size of dimension 1 of the input to the RNN.

    In order to specify an initial state, the input function must include keys
    `STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
    elements in `cell.state_size`. The input function must contain values for
    all state components or none of them. If none are included, then the default
    (zero) state is used as an initial state. See the documentation for
    `dict_to_state_tuple` and `state_tuple_to_dict` for further details.
    The input function can call rnn_common.construct_rnn_cell() to obtain the
    same cell type that this class will select from arguments to __init__.

    The `predict()` method of the `Estimator` returns a dictionary with keys
    `STATE_PREFIX_i` for `0 <= i < n` where `n` is the number of nested elements
    in `cell.state_size`, along with `PredictionKey.CLASSES` for problem type
    `CLASSIFICATION` or `PredictionKey.SCORES` for problem type
    `LINEAR_REGRESSION`. The value keyed by
    `PredictionKey.CLASSES` or `PredictionKey.SCORES` has shape
    `[batch_size, padded_length]` in the multi-value case and shape
    `[batch_size]` in the single-value case. Here, `padded_length` is the
    largest value in the `RNNKeys.SEQUENCE_LENGTH` `Tensor` passed as input.
    Entry `[i, j]` is the prediction associated with sequence `i` and time step
    `j`. If the problem type is `CLASSIFICATION` and `predict_probabilities` is
    `True`, it will also include key `PredictionKey.PROBABILITIES`.

    Args:
      problem_type: whether the `Estimator` is intended for a regression or
        classification problem. Value must be one of
        `ProblemType.CLASSIFICATION` or `ProblemType.LINEAR_REGRESSION`.
      prediction_type: whether the `Estimator` should return a value for each
        step in the sequence, or just a single value for the final time step.
        Must be one of `ProblemType.SINGLE_VALUE` or
        `ProblemType.MULTIPLE_VALUE`.
      sequence_feature_columns: An iterable containing all the feature columns
        describing sequence features. All items in the iterable should be
        instances of classes derived from `FeatureColumn`.
      context_feature_columns: An iterable containing all the feature columns
        describing context features, i.e., features that apply across all time
        steps. All items in the set should be instances of classes derived from
        `FeatureColumn`.
      num_classes: the number of classes for a classification problem. Only
        used when `problem_type=ProblemType.CLASSIFICATION`.
      num_units: A list of integers indicating the number of units in the
        `RNNCell`s in each layer. Either `num_units` is specified or `cell_type`
        is an instance of `RNNCell`.
      cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
        'basic_rnn,' 'lstm' or 'gru'.
      optimizer: The type of optimizer to use. Either a subclass of
        `Optimizer`, an instance of an `Optimizer`, a callback that returns an
        optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
        'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
        more details.
      learning_rate: Learning rate. This argument has no effect if `optimizer`
        is an instance of an `Optimizer`.
      predict_probabilities: A boolean indicating whether to predict
        probabilities for all classes. Used only if `problem_type` is
        `ProblemType.CLASSIFICATION`
      momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
      gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
        then no clipping is performed.
      dropout_keep_probabilities: a list of dropout probabilities or `None`.
        If a list is given, it must have length `len(num_units) + 1`. If
        `None`, then no dropout is applied.
      model_dir: The directory in which to save and restore the model graph,
        parameters, etc.
      feature_engineering_fn: Takes features and labels which are the output of
        `input_fn` and returns features and labels which will be fed into
        `model_fn`. Please check `model_fn` for a definition of features and
        labels.
      config: A `RunConfig` instance.

    Raises:
      ValueError: Both or neither of the following are true: (a) `num_units` is
        specified and (b) `cell_type` is an instance of `RNNCell`.
      ValueError: `problem_type` is not one of
        `ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
      ValueError: `problem_type` is `ProblemType.CLASSIFICATION` but
        `num_classes` is not specified.
      ValueError: `prediction_type` is not one of
        `PredictionType.MULTIPLE_VALUE` or `PredictionType.SINGLE_VALUE`.
    """
    # Exactly one of num_units / cell-instance may determine the cell sizes.
    if (num_units is not None) == isinstance(cell_type, contrib_rnn.RNNCell):
      raise ValueError(
          'Either num_units is specified OR cell_type is an instance of '
          'RNNCell. Got num_units = {} and cell_type = {}.'.format(
              num_units, cell_type))
    if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
      name = 'MultiValueDynamicRNN'
    elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
      name = 'SingleValueDynamicRNN'
    else:
      raise ValueError(
          'prediction_type must be one of PredictionType.MULTIPLE_VALUE or '
          'PredictionType.SINGLE_VALUE; got {}'.format(prediction_type))
    # The target column maps logits to predictions/loss for the problem type.
    if problem_type == constants.ProblemType.LINEAR_REGRESSION:
      name += 'Regressor'
      target_column = layers.regression_target()
    elif problem_type == constants.ProblemType.CLASSIFICATION:
      if not num_classes:
        raise ValueError('For CLASSIFICATION problem_type, num_classes must be '
                         'specified.')
      target_column = layers.multi_class_target(n_classes=num_classes)
      name += 'Classifier'
    else:
      raise ValueError(
          'problem_type must be either ProblemType.LINEAR_REGRESSION '
          'or ProblemType.CLASSIFICATION; got {}'.format(
              problem_type))
    # 'Momentum' needs its momentum value bound before optimize_loss sees it.
    if optimizer == 'Momentum':
      optimizer = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
        cell_type=cell_type,
        num_units=num_units,
        target_column=target_column,
        problem_type=problem_type,
        prediction_type=prediction_type,
        optimizer=optimizer,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        predict_probabilities=predict_probabilities,
        learning_rate=learning_rate,
        gradient_clipping_norm=gradient_clipping_norm,
        dropout_keep_probabilities=dropout_keep_probabilities,
        name=name)

    super(DynamicRnnEstimator, self).__init__(
        model_fn=dynamic_rnn_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
@deprecated('2017-04-01',
            'multi_value_rnn_regressor is deprecated. '
            'Please construct a DynamicRnnEstimator directly.')
def multi_value_rnn_regressor(num_units,
                              sequence_feature_columns,
                              context_feature_columns=None,
                              cell_type='basic_rnn',
                              num_rnn_layers=1,
                              optimizer_type='SGD',
                              learning_rate=0.1,
                              momentum=None,
                              gradient_clipping_norm=5.0,
                              input_keep_probability=None,
                              output_keep_probability=None,
                              model_dir=None,
                              config=None,
                              feature_engineering_fn=None):
  """Creates a `DynamicRnnEstimator` for multi-value regression.

  Returns an `Estimator` that given input sequences, processes them in a dynamic
  recurrent network and outputs a sequence of continuous values.

  Args:
    num_units: The size of the RNN cells. This argument has no effect
      if `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn,' 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer`, a callback that returns an
      optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
      'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
      more details.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.

  Returns:
    An initialized `Estimator`.
  """
  # Translate the legacy per-cell dropout arguments into the list form that
  # DynamicRnnEstimator expects.
  dropout_keep_probabilities, num_units = _get_dropout_and_num_units(
      cell_type,
      num_units,
      num_rnn_layers,
      input_keep_probability,
      output_keep_probability)
  return DynamicRnnEstimator(
      problem_type=constants.ProblemType.LINEAR_REGRESSION,
      prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      num_units=num_units,
      cell_type=cell_type,
      optimizer=optimizer_type,
      learning_rate=learning_rate,
      momentum=momentum,
      gradient_clipping_norm=gradient_clipping_norm,
      dropout_keep_probabilities=dropout_keep_probabilities,
      model_dir=model_dir,
      feature_engineering_fn=feature_engineering_fn,
      config=config)
@deprecated('2017-04-01',
            'multi_value_rnn_classifier is deprecated. '
            'Please construct a DynamicRnnEstimator directly.')
def multi_value_rnn_classifier(num_classes,
                               num_units,
                               sequence_feature_columns,
                               context_feature_columns=None,
                               cell_type='basic_rnn',
                               num_rnn_layers=1,
                               optimizer_type='SGD',
                               learning_rate=0.1,
                               predict_probabilities=False,
                               momentum=None,
                               gradient_clipping_norm=5.0,
                               input_keep_probability=None,
                               output_keep_probability=None,
                               model_dir=None,
                               config=None,
                               feature_engineering_fn=None):
  """Creates a `DynamicRnnEstimator` for multi-value classification.

  Returns an `Estimator` that given input sequences, processes them in a
  dynamic recurrent network and outputs a sequence of classifications, along
  with (optionally) a probability distribution over classes.

  Args:
    num_classes: The number of classes for categorization.
    num_units: The size of the RNN cells. This argument has no effect
      if `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn', 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer`, a callback that returns an
      optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
      'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
      more details.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    predict_probabilities: A boolean indicating whether to predict
      probabilities for all classes.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.

  Returns:
    An initialized `Estimator`.
  """
  # Convert the legacy per-direction keep probabilities into the
  # `dropout_keep_probabilities` list form the estimator expects, and
  # normalize `num_units` for the requested layer count (see
  # `_get_dropout_and_num_units`).
  dropout_keep_probabilities, num_units = _get_dropout_and_num_units(
      cell_type,
      num_units,
      num_rnn_layers,
      input_keep_probability,
      output_keep_probability)
  return DynamicRnnEstimator(
      problem_type=constants.ProblemType.CLASSIFICATION,
      prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
      num_classes=num_classes,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      num_units=num_units,
      cell_type=cell_type,
      optimizer=optimizer_type,
      learning_rate=learning_rate,
      predict_probabilities=predict_probabilities,
      momentum=momentum,
      gradient_clipping_norm=gradient_clipping_norm,
      dropout_keep_probabilities=dropout_keep_probabilities,
      model_dir=model_dir,
      feature_engineering_fn=feature_engineering_fn,
      config=config)
@deprecated('2017-04-01',
            'single_value_rnn_regressor is deprecated. '
            'Please construct a DynamicRnnEstimator directly.')
def single_value_rnn_regressor(num_units,
                               sequence_feature_columns,
                               context_feature_columns=None,
                               cell_type='basic_rnn',
                               num_rnn_layers=1,
                               optimizer_type='SGD',
                               learning_rate=0.1,
                               momentum=None,
                               gradient_clipping_norm=5.0,
                               input_keep_probability=None,
                               output_keep_probability=None,
                               model_dir=None,
                               config=None,
                               feature_engineering_fn=None):
  """Creates a `DynamicRnnEstimator` for single-value regression.

  Returns an `Estimator` that given input sequences, processes them in a
  dynamic recurrent network and outputs a single continuous value.

  Args:
    num_units: The size of the RNN cells. This argument has no effect
      if `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn', 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer`, a callback that returns an
      optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
      'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
      more details.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.

  Returns:
    An initialized `Estimator`.
  """
  # Convert the legacy per-direction keep probabilities into the
  # `dropout_keep_probabilities` list form the estimator expects, and
  # normalize `num_units` for the requested layer count (see
  # `_get_dropout_and_num_units`).
  dropout_keep_probabilities, num_units = _get_dropout_and_num_units(
      cell_type,
      num_units,
      num_rnn_layers,
      input_keep_probability,
      output_keep_probability)
  return DynamicRnnEstimator(
      problem_type=constants.ProblemType.LINEAR_REGRESSION,
      prediction_type=rnn_common.PredictionType.SINGLE_VALUE,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      num_units=num_units,
      cell_type=cell_type,
      optimizer=optimizer_type,
      learning_rate=learning_rate,
      momentum=momentum,
      gradient_clipping_norm=gradient_clipping_norm,
      dropout_keep_probabilities=dropout_keep_probabilities,
      model_dir=model_dir,
      feature_engineering_fn=feature_engineering_fn,
      config=config)
@deprecated('2017-04-01',
            'single_value_rnn_classifier is deprecated. '
            'Please construct a DynamicRnnEstimator directly.')
def single_value_rnn_classifier(num_classes,
                                num_units,
                                sequence_feature_columns,
                                context_feature_columns=None,
                                cell_type='basic_rnn',
                                num_rnn_layers=1,
                                optimizer_type='SGD',
                                learning_rate=0.1,
                                predict_probabilities=False,
                                momentum=None,
                                gradient_clipping_norm=5.0,
                                input_keep_probability=None,
                                output_keep_probability=None,
                                model_dir=None,
                                config=None,
                                feature_engineering_fn=None):
  """Creates a `DynamicRnnEstimator` for single-value classification.

  Returns an `Estimator` that given input sequences, processes them in a
  dynamic recurrent network and outputs a single classification, along with
  (optionally) a probability distribution over classes.

  Args:
    num_classes: The number of classes for categorization.
    num_units: The size of the RNN cells. This argument has no effect
      if `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn', 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer`, a callback that returns an
      optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
      'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
      more details.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    predict_probabilities: A boolean indicating whether to predict
      probabilities for all classes.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.

  Returns:
    An initialized `Estimator`.
  """
  # Convert the legacy per-direction keep probabilities into the
  # `dropout_keep_probabilities` list form the estimator expects, and
  # normalize `num_units` for the requested layer count (see
  # `_get_dropout_and_num_units`).
  dropout_keep_probabilities, num_units = _get_dropout_and_num_units(
      cell_type,
      num_units,
      num_rnn_layers,
      input_keep_probability,
      output_keep_probability)
  return DynamicRnnEstimator(
      problem_type=constants.ProblemType.CLASSIFICATION,
      prediction_type=rnn_common.PredictionType.SINGLE_VALUE,
      num_classes=num_classes,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      num_units=num_units,
      cell_type=cell_type,
      optimizer=optimizer_type,
      learning_rate=learning_rate,
      predict_probabilities=predict_probabilities,
      momentum=momentum,
      gradient_clipping_norm=gradient_clipping_norm,
      dropout_keep_probabilities=dropout_keep_probabilities,
      model_dir=model_dir,
      feature_engineering_fn=feature_engineering_fn,
      config=config)
| |
"""
Copyright (c) 2017, Arm Limited and affiliates.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from icetea_lib.bench import Bench
from mbed_clitest.tools.tools import test_case
import icetea_lib.tools.asserts as asserts
import nfc_messages
from nfc_messages import NfcErrors
from nfc_cli_helper import CliHelper
from nfc_cli_helper import LARGE_BUFFLEN
import nfc
"""
Standalone (no NFC reader needed) tests, which cover API with no end-to-end checks.
"""
class CreamSconeSelfTests(Bench, CliHelper):
    """Bench harness for standalone NFC tests (no reader hardware needed)."""

    def __init__(self, **kwargs):
        # Default test-case metadata; any key may be overridden by `kwargs`.
        requirements = {
            "duts": {
                '*': {
                    "count": 1,
                    "type": "hardware",
                    "application": {
                        "name": "TEST_APPS-device-nfcapp"
                    }
                },
                "1": {"nick": "dev1"}
            }
        }
        testcase_args = dict(
            title="NFC tests with no reader",
            status="development",
            purpose="NFC target-only checks",
            component=["NFC"],
            type="smoke",
            requirements=requirements)
        testcase_args.update(kwargs)
        Bench.__init__(self, **testcase_args)

    def setup(self):
        # No per-test setup needed for target-only checks.
        pass

    def teardown(self):
        # Reboot the DUT so every test starts from a clean state.
        self.logger.info("Test teardown: Reboot target...")
        self.reset_dut()
"""
smoke - target app is running, and can exchange simple values
"""
@test_case(CreamSconeSelfTests)
def test_nfc_error_codes(self):
    """Smoke: NFC error codes round-trip through set/get on the target app."""
    error = NfcErrors.nfc_err_not_found
    # Set and read back the same error several times to check repeatability.
    for _ in range(3):
        self.nfc_command("dev1", "setlastnfcerror %d" % error.value, expected_nfc_error=error)
        self.nfc_command("dev1", "getlastnfcerror", expected_nfc_error=error)
    # Clear the error and confirm the default (success) path.
    self.nfc_command("dev1", "setlastnfcerror %d" % 0)
    self.nfc_command("dev1", "getlastnfcerror")
"""
smoke - target app reports if NFC eeprom driver present
"""
@test_case(CreamSconeSelfTests)
def test_nfc_eeprom(self):
    """Smoke: the target app reports whether an NFC EEPROM driver is present."""
    result = self.command("dev1", "iseeprom")  # result of the driver-init library call
    has_eeprom = result.parsed['iseeprom']
    self.logger.info("Target includes NFCEEPROM: %s" % has_eeprom)
"""
check - Assert discovery can be started/stopped
"""
@test_case(CreamSconeSelfTests)
def test_nfc_discovery(self):
    """Check: discovery start/stop works on controllers and is rejected on EEPROMs."""
    self.nfc_command("dev1", "initnfc")
    response = self.nfc_command("dev1", "iseeprom")  # result of the driver-init library call
    eeprom = response.parsed['iseeprom']
    self.logger.info("Target includes NFCEEPROM: %s" % eeprom)
    if eeprom:
        # EEPROM targets cannot control discovery, so expect "unsupported".
        self.nfc_command("dev1", "start", expected_retcode=-2, expected_nfc_error=NfcErrors.nfc_err_unsupported)
        self.nfc_command("dev1", "stop", expected_retcode=-2, expected_nfc_error=NfcErrors.nfc_err_unsupported)
    else:
        # Controller: start/stop twice to verify discovery is restartable.
        for _ in range(2):
            self.nfc_command("dev1", "start")
            self.nfc_command("dev1", "stop")
"""
check - Create a SmartPoster but does not read it back
"""
@test_case(CreamSconeSelfTests)
def test_nfc_setsmartposter(self):
    """Check: a SmartPoster record can be written (contents not read back)."""
    self.nfc_command("dev1", "initnfc")
    self.nfc_command("dev1", "setsmartposter https://www.mbed.com")
@test_case(CreamSconeSelfTests)
def test_nfc_erase(self):
    """Check: erasing the tag leaves no readable NDEF message."""
    self.nfc_command("dev1", "initnfc")
    eeprom = self.nfc_command("dev1", "iseeprom").parsed['iseeprom']
    if eeprom:
        self.logger.info("Target includes NFCEEPROM: %s" % eeprom)
    # EEPROM writes can be slow, hence the generous timeout.
    self.nfc_command("dev1", "erase", timeout=30)
    response = self.nfc_command("dev1", "readmessage")
    asserts.assertEqual(response.parsed['nfcmessage'] is None, True)
'''
check - Build a long message by copying a string to stress the driver with a nominal buffer. Verify contents of entire message
can be read back.
'''
@test_case(CreamSconeSelfTests)
def test_nfc_write_long(self):
    """Stress: write a large NDEF text record and verify the full readback.

    Builds the expected message host-side with the nfc_messages library and
    compares it byte-for-byte against what the target reports.
    """
    pattern = 'thequickbrownfoxjumpedoverthelazydog'  # repeated to fill the record
    text_length = LARGE_BUFFLEN  # large values take longer to transfer
    # Compute the reference record locally using the same library routines.
    expected = str(nfc_messages.make_textrecord(
        nfc_messages.repeat_string_to_length(pattern, text_length)))
    self.nfc_command("dev1", "initnfc")
    eeprom = self.nfc_command("dev1", "iseeprom").parsed['iseeprom']
    if eeprom:
        self.logger.info("Target includes NFCEEPROM: %s" % eeprom)
    self.nfc_command("dev1", "erase")
    self.nfc_command("dev1", "writelong %d %s" % (text_length, pattern))
    response = self.nfc_command("dev1", "readmessage")
    # The readback must match the reference, framing bytes included.
    self.assert_binary_equal(response.parsed['nfcmessage'], expected)
'''
check - Query supported protocols if we have a controller
'''
@test_case(CreamSconeSelfTests)
def test_nfc_get_controller_protocols(self):
    """Check: a controller target reports at least one supported protocol."""
    self.nfc_command("dev1", "initnfc")
    response = self.nfc_command("dev1", "iseeprom")
    eeprom = response.parsed['iseeprom']
    if eeprom:
        self.logger.info("Test ignore - target includes NFCEEPROM: %s" % eeprom)
    else:
        response = self.nfc_command("dev1", "getprotocols")
        self.logger.info("Protocols = %s" % response.parsed['protocols'])
        # FIX: Bench provides no unittest-style assert helpers; use the
        # icetea `asserts` module like the rest of this file does.
        asserts.assertNotEqual(len(response.parsed['protocols']), 0,
                               "Expected at least 1 protocol supported")
'''
check - Can set used protocols if we have a controller
Note: Currently only support Type 4 tags in PN512 driver
'''
@test_case(CreamSconeSelfTests)
def test_nfc_set_controller_protocols(self):
    """Check: each tag protocol (and all combined) can be selected on a controller.

    Note: Currently only Type 4 tags are supported by the PN512 driver, but
    the command interface accepts every protocol name.
    """
    self.nfc_command("dev1", "initnfc")
    response = self.nfc_command("dev1", "iseeprom")
    eeprom = response.parsed['iseeprom']
    if eeprom:
        # EEPROMs do not allow target (protocol) control.
        self.logger.info("Test ignore - target includes NFCEEPROM: %s" % eeprom)
    else:
        # Select each protocol individually, then all of them at once.
        for protocol in ("t1t", "t2t", "t3t", "isodep", "nfcdep", "t5t"):
            self.nfc_command("dev1", "setprotocols %s" % protocol)
        self.nfc_command("dev1", "setprotocols t1t t2t t3t isodep nfcdep t5t")
'''
check - SmartPoster URI forms are supported (in the test-app)
'''
@test_case(CreamSconeSelfTests)
def test_nfc_check_smartposter_uri_forms(self):
    """Check: the test app abbreviates SmartPoster URIs to NFC-Forum URI id codes."""
    # NFC-Forum URI identifier abbreviation codes (URI Record Type Definition).
    IDS = {
        'NA': 0x00,             # Not applicable
        'HTTP_WWW': 0x01,       # http://www.
        'HTTPS_WWW': 0x02,      # https://www.
        'HTTP': 0x03,           # http://
        'HTTPS': 0x04,          # https://
        'TEL': 0x05,            # tel:
        'MAILTO': 0x06,         # mailto:
        'FTP_ANONYMOUS': 0x07,  # ftp://anonymous:anonymous@
        'FTP_FTP': 0x08,        # ftp://ftp.
        'FTPS': 0x09,           # ftps://
        'SFTP': 0x0A,           # sftp://
        'SMB': 0x0B,            # smb://
        'NFS': 0x0C,            # nfs://
        'FTP': 0x0D,            # ftp://
        'DAV': 0x0E,            # dav://
        'NEWS': 0x0F,           # news:
        'TELNET': 0x10,         # telnet://
        'IMAP': 0x11,           # imap:
        'RSTP': 0x12,           # rstp://
        'URN': 0x13,            # urn:
        'POP': 0x14,            # pop:
        'SIP': 0x15,            # sip:
        'SIPS': 0x16,           # sips:
        'TFTP': 0x17,           # tftp:
        'BTSPP': 0x18,          # btspp://
        'BTL2CAP': 0x19,        # btl2cap://
        'BTGOEP': 0x1A,         # btgoep://
        'TCPOBEX': 0x1B,        # tcpobex://
        'IRDAOBEX': 0x1C,       # irdaobex://
        'FILE': 0x1D,           # file://
        'URN_EPC_ID': 0x1E,     # urn:epc:id:
        'URN_EPC_TAG': 0x1F,    # urn:epc:tag:
        'URN_EPC_PAT': 0x20,    # urn:epc:pat:
        'URN_EPC_RAW': 0x21,    # urn:epc:raw:
        'URN_EPC': 0x22,        # urn:epc:
        'URN_NFC': 0x23,        # urn:nfc:
    }
    self.nfc_command("dev1", "initnfc")
    result = self.nfc_command("dev1", "setsmartposter https://www.mbed.com")
    asserts.assertEqual(result.parsed['uri_id'], IDS['HTTPS_WWW'], "uri type expected HTTPS_WWW")
    result = self.nfc_command("dev1", "setsmartposter http://www.mbed.com")
    asserts.assertEqual(result.parsed['uri_id'], IDS['HTTP_WWW'])
    result = self.nfc_command("dev1", "setsmartposter https://www.topleveldomain")
    asserts.assertEqual(result.parsed['uri_id'], IDS['HTTPS_WWW'])
    result = self.nfc_command("dev1", "setsmartposter tel:555-5551234")
    asserts.assertEqual(result.parsed['uri_id'], IDS['TEL'])
    result = self.nfc_command("dev1", "setsmartposter ftp://www.mbed.com/files/")
    asserts.assertEqual(result.parsed['uri_id'], IDS['FTP'])
'''
smoke - driver buffer size can be retrieved
'''
@test_case(CreamSconeSelfTests)
def test_nfc_get_max_ndef(self):
    """Smoke: the driver reports its maximum NDEF buffer size."""
    self.nfc_command("dev1", "initnfc")
    # FIX: renamed from `max`, which shadowed the builtin.
    max_ndef = self.nfc_command("dev1", "getmaxndef").parsed['maxndef']
    self.logger.info("Target NDEF max buffer size %d" % max_ndef)
    self.logger.info("Teststress size %d" % LARGE_BUFFLEN)
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unsupervised Kernel Regression (UKR) for Python.
Implemented as a scikit-learn module.
Author: Christoph Hermes
Created on Januar 16, 2015 18:48:22
The MIT License (MIT)
Copyright (c) 2015 Christoph Hermes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.optimize import minimize
import sklearn
from sklearn import decomposition, manifold
from scipy.linalg import sqrtm
# own modules
from ukr_core import (ukr_bp, ukr_dY, ukr_E, ukr_project,
ukr_backproject_particles)
import rprop
# possible UKR kernels: tuple(kernel, kernel derivative)
# Each pair maps a matrix of (Minkowski) distances to kernel weights and to
# the derivative of those weights w.r.t. the distance argument.
try: # try using numexpr
    # numexpr compiles the expression strings and evaluates them in native
    # vector code; variables (`x`, `n`) are picked up from the calling frame.
    import numexpr as ne
    gaussian = (lambda x: ne.evaluate('exp(-.5 * x)'), lambda x: ne.evaluate('-.5 * exp(-.5 * x)'))
    # quartic kernel has no numexpr form here (needs np.where); kept in NumPy.
    quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
    student_n = (lambda x, n: ne.evaluate('(1. + x/n)**(-(n+1.)/2.)'), lambda x, n: ne.evaluate('-(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.)') )
except ImportError:
    # Pure-NumPy fallbacks with identical semantics.
    gaussian = (lambda x: np.exp(-.5 * x), lambda x: -.5 * np.exp(-.5 * x))
    quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
    student_n = (lambda x, n: (1. + x/n)**(-(n+1.)/2.), lambda x, n: -(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.) )
# Student-t kernels with fixed degrees of freedom, specialized from student_n.
student_1 = (lambda x: student_n[0](x, 1), lambda x: student_n[1](x, 1))
student_2 = (lambda x: student_n[0](x, 2), lambda x: student_n[1](x, 2))
student_3 = (lambda x: student_n[0](x, 3), lambda x: student_n[1](x, 3))
student_9 = (lambda x: student_n[0](x, 9), lambda x: student_n[1](x, 9))
# Factory for a Student-t kernel pair with arbitrary degrees of freedom k.
student_k = lambda k: (lambda x: student_n[0](x, k), lambda x: student_n[1](x, k))
class UKR(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """Unsupervised Kernel Regression (UKR)

    NOTE: Python 2 code (print statements, `basestring`, `xrange`).

    Parameters
    ----------
    n_components : int
        Manifold dimension, usually in {1,2,3}.
    kernel : str or tuple(k : func(x), k_der : func(x))
        UKR kernel `k` and its derivative `k_der`. A few examples are included
        in this module: gaussian, quartic and student_{1,2,3,9}.
    metric : {L1, L2} or float
        Distance metric.
        L1: cityblock/manhattan; L2: euclidean
        float : arbitrary Minkowsky
    n_iter : int
        Maximum number of iterations for training the UKR model.
    lko_cv : int
        Leave-k-out cross validation for training the UKR model.
    embeddings : list of initial manifold generators
        If None, the initial embedding is set to TSNE and then PCA (if TSNE is
        not available).
        Good choices are:
        * sklearn.decomposition.PCA(`n_components`)
        * sklearn.decomposition.KernelPCA(`n_components`, kernel='rbf')
        * sklearn.manifold.locally_linear.LocallyLinearEmbedding(n_neighbors, `n_components`, method='modified')
        * sklearn.manifold.MDS(n_components=`n_components`, n_jobs=-1),
        * sklearn.manifold.TSNE(n_components=`n_components`),
    enforceCycle : bool
        Are the high-dimensional points sampled from cyclic data, e.g. a
        rotating object or a walking person? In this case the UKR tries to
        maintain a close spatial distance of subsequent manifold points.
    verbose : bool
        Print additional information esp. during the training stage.

    Attributes
    ----------
    X : np.ndarray, shape=(N,D)
        High-dimensional point list for UKR training.
    Y : np.ndarray, shape=(N,n_components)
        Low-dimensional representation of `X`.
    """
    def __init__(self, n_components=2, kernel=gaussian, metric='L2', lko_cv=1, n_iter=1000, embeddings=None, enforceCycle=False, verbose=True):
        # Resolve a kernel given by name to its (k, k_der) function pair;
        # anything else is assumed to already be such a tuple.
        if isinstance(kernel, basestring):
            if kernel.lower() == 'gaussian':
                self.k, self.k_der = gaussian
            elif kernel.lower() == 'quartic':
                self.k, self.k_der = quartic
            elif kernel.lower() == 'student_1':
                self.k, self.k_der = student_1
            elif kernel.lower() == 'student_2':
                self.k, self.k_der = student_2
            elif kernel.lower() == 'student_3':
                self.k, self.k_der = student_3
            elif kernel.lower() == 'student_9':
                self.k, self.k_der = student_9
        else:
            self.k, self.k_der = kernel
        # Map a metric name onto its Minkowski exponent; numeric values pass
        # through unchanged.
        if isinstance(metric, basestring):
            assert metric in ['L1', 'L2'], "failed condition: metric in ['L1', 'L2']"
            if metric == 'L1': self.metric = 1.
            elif metric == 'L2': self.metric = 2.
        else:
            self.metric = metric
        self.n_components = n_components
        self.lko_cv = lko_cv
        self.n_iter = n_iter
        self.enforceCycle = enforceCycle
        self.verbose = verbose
        # Default initial embedding: t-SNE, falling back to PCA when the
        # installed sklearn has no TSNE.
        if embeddings is None:
            try:
                self.embeddings = [manifold.TSNE(n_components=self.n_components)]
            except AttributeError:
                print 'ukr.py::Warning: TSNE not found in the sklearn packages. Try PCA instead.'
                self.embeddings = [decomposition.PCA(n_components=self.n_components)]
        else:
            self.embeddings = embeddings
        # Filled in by fit(): training data X and its manifold points Y.
        # B is declared here but not populated by the visible methods.
        self.X = None
        self.Y = None
        self.B = None
        pass
    def fit(self, X, y=None):
        """Train the UKR model.

        Tries every candidate embedding as initialization, keeps the one with
        the lowest reconstruction error, then refines the manifold points with
        iRprop+ gradient descent.

        Parameters
        ----------
        X : np.ndarray, shape=(N,D)
            Sample set with `N` elements and `D` dimensions.

        Returns
        -------
        UKR model object.
        """
        X = np.atleast_2d(X)
        ###########################
        # find an initial embedding
        Y = None
        embed_ = None
        error = np.inf
        for embeddingI, embedding in enumerate(self.embeddings):
            if self.verbose:
                print 'Try embedding %2d/%2d: %s' % (embeddingI+1, len(self.embeddings), embedding.__class__.__name__)
            try:
                Y_init_ = embedding.fit_transform(X)
                Y_init_ = Y_init_ - Y_init_.mean(axis=0) # center around zero
            except:
                # skip embeddings that fail on this data set
                continue
            # normalize initial hypothesis to Y.T * Y = I
            Y_init_ = Y_init_.dot(np.linalg.pinv(sqrtm(Y_init_.T.dot(Y_init_))))
            # optimize a per-dimension scaling factor by minimizing the UKR
            # reconstruction error (Nelder-Mead, derivative-free)
            def residuals(p, X_, Y_):
                B, P = ukr_bp(Y_ * p, self.k, self.k_der, self.lko_cv, metric=self.metric)
                return ukr_E(X_, B)
            p0 = np.ones((1,self.n_components))
            sol = minimize(residuals, p0, method='Nelder-Mead', args=(X, Y_init_))
            # guard against degenerate scaling solutions
            if sol['x'].max() < 1000:
                Y_init_ = Y_init_ * sol['x']
            else:
                print 'UKR::warning: scaling initialization failed'
                Y_init_ = Y_init_ * 20
            # final projection error estimation
            B, P = ukr_bp(Y_init_, self.k, self.k_der, self.lko_cv, metric=self.metric)
            err_ = ukr_E(X, B)
            if self.verbose:
                print ' Error: %f' % err_
            # store the results if they're an improvement
            if err_ < error:
                error = err_
                Y = Y_init_
                embed_ = embedding
        # Summary:
        if self.verbose:
            print '=> using embedding', embed_.__class__.__name__
        ######################
        # Refine the UKR model
        iRpropPlus = rprop.iRpropPlus()
        for iter in xrange(self.n_iter):
            if self.verbose and iter % 10 == 0:
                print 'UKR iter %5d, Err=%9.6f' % (iter, iRpropPlus.E_prev)
            # derivative of X_model w.r.t. to the error gradient
            B, P = ukr_bp(Y, self.k, self.k_der, self.lko_cv, metric=self.metric)
            if self.enforceCycle and iter % 20 < 10 and iter < self.n_iter/2:
                # close spatial distance of subsequent manifold points every
                # ten iterations for the first half of the full training
                dY = -np.diff(np.vstack([Y, Y[0]]), axis=0)
            else:
                dY = ukr_dY(Y, X, B, P)
            # reconstruction error
            E_cur = ukr_E(X, B) / X.shape[1]
            Y = iRpropPlus.update(Y, dY, E_cur)
        # store training results
        self.X = X # original data
        self.Y = Y # manifold points
        return self
    def fit_transform(self, X, y=None):
        """Train the UKR model and return the low-dimensional samples.

        Parameters
        ----------
        X : np.ndarray, shape=(N,D)
            Sample set with `N` elements and `D` dimensions.

        Returns
        -------
        Y : np.ndarray, shape=(N, `n_components`)
            Low-dimensional representation of `X`.
        """
        X = np.atleast_2d(X)
        self.fit(X, y)
        return self.Y
    def transform(self, X, n_particle_iter=100):
        """Project each sample in `X` to the embedding.

        Uses a particle set for the optimization.

        Parameters
        ----------
        X : np.ndarray, shape=(N,D)
            Sample set with `N` elements and `D` dimensions.

        Returns
        -------
        Y : np.ndarray, shape=(N, `n_components`)
            Low-dimensional representation of `X`.
        """
        X = np.atleast_2d(X)
        # one particle per training sample; see ukr_backproject_particles
        Y = ukr_backproject_particles(self.Y, self.X, self.k, self.k_der, self.metric, X,
                n_particles=self.Y.shape[0], n_iter=n_particle_iter)
        return Y
    def predict(self, Y):
        """Project a set of manifold points into the orignal space.

        Parameters
        ----------
        Y : np.ndarray, shape=(N,`n_components`)
            Arbitrary points on the manifold.

        Returns
        -------
        X : np.ndarray, shape=(N,D)
            Corresponding samples in the high-dimensional space.
        """
        assert self.Y is not None, "untrained UKR model"
        Y = np.atleast_2d(Y)
        assert Y.shape[1] == self.n_components, \
            "failed condition: Y.shape[1] == self.n_components"
        B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, metric=self.metric)
        return ukr_project(self.X, B)
    def predict_proba(self, Y):
        """Kernel density estimate for each sample.

        Parameters
        ----------
        Y : np.ndarray, shape=(N,`n_components`)
            Arbitrary points on the manifold.

        Returns
        -------
        p : array-like, shape=(N,)
            Estimated density value for each sample.
        """
        assert self.Y is not None, "untrained UKR model"
        Y = np.atleast_2d(Y)
        assert Y.shape[1] == self.n_components, \
            "failed condition: Y.shape[1] == self.n_components"
        # bNorm=False: raw (unnormalized) kernel responses, averaged below
        B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, bNorm=False, metric=self.metric)
        return B.mean(axis=0)
    pass
| |
from bigquery_analytics import BigQueryAnalytics, BIGQUERY_AVAILABLE
import datetime
import re
import os
import sys
import time
import json
import traceback
import urllib2
# XXX hardcoded path
sys.path.append("/opt/xos")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
from django.conf import settings
from django import db
from django.db import connection
from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service
# Load thresholds used when classifying utilisation levels.
# NOTE(review): units are presumably bytes/sec of network load — confirm
# against the consumers of these constants.
BLUE_LOAD=5000000
RED_LOAD=15000000
# Module-level cache for query results, keyed by query string.
glo_cached_queries = {}
class XOSAnalytics(BigQueryAnalytics):
    """BigQuery-backed analytics for XOS: builds legacy-SQL queries over the
    monitoring table and formats results for the HTTP/Google-Charts frontends.
    """

    def __init__(self, tableName=None):
        # Default to the site-wide table configured in Django settings.
        if not tableName:
            tableName = settings.BIGQUERY_TABLE
        BigQueryAnalytics.__init__(self, tableName)

    def service_to_sliceNames(self, serviceName):
        """Return the names of all slices belonging to the named service."""
        service = Service.objects.get(name=serviceName)
        try:
            slices = service.slices.all()
        except:
            # BUG in data model -- Slice.service has related name 'service' and
            # it should be 'slices'
            slices = service.service.all()
        return [slice.name for slice in slices]

    def compose_query(self, filter={}, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName=None, latest=False, maxAge=60 * 60):
        """Build a BigQuery legacy-SQL query string.

        filter    -- dict with optional keys slice/site/node/event/service
        timeBucket -- bucket width in seconds for the Time column
        avg/sum/count/val -- field names to aggregate or select verbatim
        computed  -- fields of the form "a/b" (ratio) or "a*b" (product)
        groupBy/orderBy -- output column lists ("Time" is always selected)
        latest    -- restrict to the most recent row per (%hostname, event)
        maxAge    -- how far back to scan, in seconds

        NOTE: the mutable default arguments are only iterated/read, never
        mutated, so sharing them across calls is safe here.
        """
        if tableName is None:
            tableName = self.tableName

        maxAge = maxAge * 1000  # BigQuery table decorators take milliseconds
        tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, maxAge)

        fields = []
        fieldNames = []
        srcFieldNames = ["time"]

        # Bucket timestamps into timeBucket-second intervals.
        fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket), str(timeBucket)))

        for fieldName in avg:
            fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%", "")))
            fieldNames.append("avg_%s" % fieldName.replace("%", ""))
            srcFieldNames.append(fieldName)

        for fieldName in sum:
            fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%", "")))
            fieldNames.append("sum_%s" % fieldName.replace("%", ""))
            srcFieldNames.append(fieldName)

        for fieldName in count:
            fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%", "")))
            fieldNames.append("count_%s" % fieldName.replace("%", ""))
            srcFieldNames.append(fieldName)

        for fieldName in val:
            fields.append(fieldName)
            fieldNames.append(fieldName)
            srcFieldNames.append(fieldName)

        for fieldName in computed:
            # BUG FIX: the original accessed parts[1] before checking whether
            # the "/" split produced two parts (IndexError for "a*b"), and in
            # the "*" fallback split the *list* `computed` instead of the
            # field name (AttributeError). Decide the operator first.
            parts = fieldName.split("/")
            if len(parts) == 1:
                operator = "*"
                parts = fieldName.split("*")
                computedFieldName = "computed_" + parts[0].replace("%", "") + "_mult_" + parts[1].replace("%", "")
            else:
                operator = "/"
                computedFieldName = "computed_" + parts[0].replace("%", "") + "_div_" + parts[1].replace("%", "")
            fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
            fieldNames.append(computedFieldName)
            srcFieldNames.append(parts[0])
            srcFieldNames.append(parts[1])

        for fieldName in groupBy:
            if (fieldName not in ["Time"]):
                fields.append(fieldName)
                fieldNames.append(fieldName)
                srcFieldNames.append(fieldName)

        fields = ", ".join(fields)

        where = []
        if filter.get("slice", None):
            where.append("%%slice='%s'" % filter["slice"])
        if filter.get("site", None):
            where.append("%%site='%s'" % filter["site"])
        if filter.get("node", None):
            where.append("%%hostname='%s'" % filter["node"])
        if filter.get("event", None):
            where.append("event='%s'" % filter["event"])
        if filter.get("service", None):
            # A service filter expands to an OR over all of its slices.
            sliceNames = self.service_to_sliceNames(filter["service"])
            if sliceNames:
                where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) + ")")

        if where:
            where = " WHERE " + " AND ".join(where)
        else:
            where = ""

        if groupBy:
            # The inner (per-host) grouping always includes %hostname.
            groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
            groupBy = " GROUP BY " + ",".join(groupBy)
        else:
            groupBySub = " GROUP BY %hostname"
            groupBy = ""

        if orderBy:
            orderBy = " ORDER BY " + ",".join(orderBy)
        else:
            orderBy = ""

        if latest:
            # Self-join against the max(time) per (%hostname, event) so only
            # the most recent row for each pair survives.
            latestFields = ["table1.%s as %s" % (x, x) for x in srcFieldNames]
            latestFields = ", ".join(latestFields)
            tablePart = """(SELECT %s FROM %s AS table1
                            JOIN
                                (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
                            ON
                                table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)

        if computed:
            # Computed fields need a two-level query: aggregate per host in a
            # subquery, then combine the per-host aggregates.
            subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
            if where:
                subQuery = subQuery + where
            subQuery = subQuery + groupBySub

            sumFields = []
            for fieldName in fieldNames:
                if fieldName.startswith("avg"):
                    sumFields.append("AVG(%s) as avg_%s" % (fieldName, fieldName))
                    sumFields.append("MAX(%s) as max_%s" % (fieldName, fieldName))
                elif (fieldName.startswith("count")) or (fieldName.startswith("sum")) or (fieldName.startswith("computed")):
                    sumFields.append("SUM(%s) as sum_%s" % (fieldName, fieldName))
                else:
                    sumFields.append(fieldName)
            sumFields = ",".join(sumFields)

            query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy
        else:
            query = "SELECT %s FROM %s" % (fields, tablePart)
            if where:
                query = query + " " + where
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy

        return query

    def get_list_from_req(self, req, name, default=[]):
        """Read a comma-separated GET parameter as a list; '@' decodes to '%'."""
        value = req.GET.get(name, None)
        if not value:
            return default
        value = value.replace("@", "%")
        return value.split(",")

    def format_result(self, format, result, query, dataSourceUrl):
        """Render query results as a (content_type, body) pair.

        Supported formats: "json_dicts", "json_arrays", "html_table".
        Any other value falls through and returns None (behavior preserved).
        """
        if not BIGQUERY_AVAILABLE:
            # Typo fix in the user-visible message ("Unavaiable").
            msg = "BigQuery Statistics Unavailable"
        else:
            msg = None

        if (format == "json_dicts"):
            result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl, "msg": msg}
            return ("application/javascript", json.dumps(result))

        elif (format == "json_arrays"):
            # Rows become value lists ordered by sorted key name.
            new_result = [[row[key] for key in sorted(row.keys())] for row in result]
            new_result = {"query": query, "rows": new_result, "msg": msg}
            return ("application/javascript", json.dumps(new_result))

        elif (format == "html_table"):
            new_rows = []
            for row in result:
                new_row = ["<TD>%s</TD>" % str(row[key]) for key in sorted(row.keys())]
                new_rows.append("<TR>%s</TR>" % "".join(new_row))
            new_result = "<TABLE>%s</TABLE>" % "\n".join(new_rows)
            return ("text/html", new_result)

    def merge_datamodel_sites(self, rows, slice=None):
        """ For a query that included "site" in its groupby, merge in the
            opencloud site information.
        """
        if slice:
            try:
                slice = Slice.objects.get(name=slice)
            except:
                slice = None

        for row in rows:
            sitename = row["site"]
            try:
                model_site = Site.objects.get(name=sitename)
            except:
                # we didn't find it in the data model
                continue

            allocated_slivers = 0
            if model_site and slice:
                for sliver in slice.slivers.all():
                    if sliver.node.site == model_site:
                        allocated_slivers = allocated_slivers + 1

            row["lat"] = float(model_site.location.latitude)
            row["long"] = float(model_site.location.longitude)
            row["url"] = model_site.site_url
            row["numNodes"] = model_site.nodes.count()
            row["allocated_slivers"] = allocated_slivers

            # Map peak CPU onto a 0..1 "hotness" scale between BLUE_LOAD
            # and RED_LOAD.
            max_cpu = row.get("max_avg_cpu", row.get("max_cpu", 0))
            cpu = float(max_cpu) / 100.0
            row["hotness"] = max(0.0, ((cpu * RED_LOAD) - BLUE_LOAD) / (RED_LOAD - BLUE_LOAD))

    def compose_cached_query(self, querySpec='default'):
        """ Compose a query that returns the 'most recent' row for each (hostname, event)
            pair.

            Note that groupByFields cannot contain any values that are 'Null' or those
            rows will be excluded. For example, if groupByFields includes cp, then
            there will be no libvirt_event rows, since libvirt_event does not have
            cp.

            This means we can't really have 'one query to rule them'. Settle on
            having a couple of different queries, and have the caller specify
            which one he wants ('default' or 'hpc').
        """
        fieldNames = ["%hostname", "%bytes_sent", "%bytes_hit", "%healthy", "time", "event", "%site", "%elapsed", "%cpu"]

        if querySpec == "default":
            groupByFields = ["%hostname", "event"]
        elif (querySpec == "hpc"):
            fieldNames.append("%cp")
            groupByFields = ["%hostname", "event", "%cp"]
        else:
            raise ValueError("Unknown queryspec %s" % querySpec)

        fields = ["table1.%s AS %s" % (x, x) for x in fieldNames]
        fields = ", ".join(fields)

        tableDesc = "%s.%s" % (self.projectName, self.tableName)

        groupByOn = ["table1.time = latest.maxtime"]
        for field in groupByFields:
            groupByOn.append("table1.%s = latest.%s" % (field, field))
        groupByOn = " AND ".join(groupByOn)
        groupByFields = ", ".join(groupByFields)

        base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
            (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)

        return base_query

    def get_cached_query_results(self, q, wait=True):
        """Run query q, caching results for 60 seconds.

        If wait is False and the cache is stale, return None instead of
        blocking on BigQuery. NOTE: the module-global cache dict is not
        protected by a lock.
        """
        global glo_cached_queries

        if q in glo_cached_queries:
            if (time.time() - glo_cached_queries[q]["time"]) <= 60:
                print("using cached query")
                return glo_cached_queries[q]["rows"]

        if not wait:
            return None

        print("refreshing cached query")
        result = self.run_query(q)
        glo_cached_queries[q] = {"time": time.time(), "rows": result}

        return result

    def process_request(self, req):
        """Handle an HTTP analytics request; return a (content_type, body) pair."""
        print(req.GET)

        tqx = req.GET.get("tqx", None)

        slice = req.GET.get("slice", None)
        site = req.GET.get("site", None)
        node = req.GET.get("node", None)
        service = req.GET.get("service", None)
        event = req.GET.get("event", "libvirt_heartbeat")
        cp = req.GET.get("cp", None)

        format = req.GET.get("format", "json_dicts")

        timeBucket = int(req.GET.get("timeBucket", 60))
        avg = self.get_list_from_req(req, "avg")
        sum = self.get_list_from_req(req, "sum")
        count = self.get_list_from_req(req, "count")
        computed = self.get_list_from_req(req, "computed")
        groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
        orderBy = self.get_list_from_req(req, "orderBy", ["Time"])

        maxRows = req.GET.get("maxRows", None)
        mergeDataModelSites = req.GET.get("mergeDataModelSites", None)

        maxAge = int(req.GET.get("maxAge", 60 * 60))

        cached = req.GET.get("cached", None)
        cachedGroupBy = self.get_list_from_req(req, "cachedGroupBy", ["doesnotexist"])

        filter = {}
        if slice:
            filter["slice"] = slice
        if site:
            filter["site"] = site
        if node:
            filter["hostname"] = node
        if event:
            filter["event"] = event
        if cp:
            filter["cp"] = cp

        q = self.compose_query(filter, timeBucket, avg, sum, count, computed, [], groupBy, orderBy, maxAge=maxAge)

        print(q)

        # Echo the request back as a Google-Charts-compatible data source URL;
        # '%' must be URL-escaped so the query string survives a round trip.
        dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=", "origFormat=").replace("%", "%25") + "&format=charts"

        if (format == "dataSourceUrl"):
            result = {"dataSourceUrl": dataSourceUrl}
            return ("application/javascript", result)

        elif (format == "raw"):
            result = self.run_query_raw(q)
            result["dataSourceUrl"] = dataSourceUrl
            return ("application/javascript", json.dumps(result))

        elif (format == "nodata"):
            result = {"dataSourceUrl": dataSourceUrl, "query": q}
            # BUG FIX: this branch used to return a *set* literal
            # {"application/javascript", result} instead of the
            # (content_type, body) tuple every other branch returns.
            return ("application/javascript", json.dumps(result))

        elif (format == "charts"):
            bq_result = self.run_query_raw(q)

            # Convert the raw BigQuery response into a Google Charts table.
            # cloudscrutiny code is probably better!
            table = {}
            table["cols"] = self.schema_to_cols(bq_result["schema"])
            rows = []
            if "rows" in bq_result:
                for row in bq_result["rows"]:
                    rowcols = []
                    for (colnum, col) in enumerate(row["f"]):
                        if (colnum == 0):
                            # First column is the time bucket.
                            dt = datetime.datetime.fromtimestamp(float(col["v"]))
                            rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
                        else:
                            try:
                                rowcols.append({"v": float(col["v"])})
                            except:
                                rowcols.append({"v": col["v"]})
                    rows.append({"c": rowcols})
            table["rows"] = rows

            if tqx:
                reqId = tqx.strip("reqId:")
            else:
                reqId = "0"

            result = {"status": "okColumnChart", "reqId": reqId, "table": table, "version": "0.6"}

            result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"

            # Strip the quotes json.dumps put around the JS Date constructors.
            def unquote_it(x):
                return x.group()[1:-1].replace('\\"', '"')

            p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
            result = p.sub(unquote_it, result)

            return ("application/javascript", result)

        else:
            if cached:
                results = self.get_cached_query_results(self.compose_cached_query(cached))
                result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=cachedGroupBy)
            else:
                result = self.run_query(q)

            if maxRows:
                result = result[-int(maxRows):]

            if mergeDataModelSites:
                self.merge_datamodel_sites(result)

            return self.format_result(format, result, q, dataSourceUrl)
def DoXOSAnalytics(request):
    """Entry point for the view layer: run analytics for one HTTP request."""
    analytics = XOSAnalytics()
    return analytics.process_request(request)
def main():
    # Ad-hoc manual test driver: runs a cached query against the "demoevents"
    # table, post-processes it, dumps it, and exits.
    # run_query/postprocess_results/dump_table come from BigQueryAnalytics
    # (defined elsewhere) -- behavior assumed from their names; confirm there.
    bq = XOSAnalytics(tableName="demoevents")

    q = bq.compose_cached_query()
    results = bq.run_query(q)

    # Earlier experiments, kept for reference:
    #results = bq.postprocess_results(results,
    #                                 filter={"slice": "HyperCache"},
    #                                 groupBy=["site"],
    #                                 computed=["bytes_sent/elapsed"],
    #                                 sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
    #                                 maxDeltaTime=60)

    #results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)

    results = bq.postprocess_results(results, filter={"event": "libvirt_heartbeat"}, avg=["cpu"], count=["hostname"], groupBy=["doesnotexist"])

    bq.dump_table(results)

    sys.exit(0)

    # NOTE(review): everything below is UNREACHABLE (dead debug scaffolding
    # left after the sys.exit(0) above). The first call even passes a
    # `slice=` keyword that compose_query does not accept, so it would raise
    # TypeError if ever reached.
    q = bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(avg=["%cpu", "%bandwidth"], count=["%hostname"], slice="HyperCache")
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(timeBucket=60 * 60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

if __name__ == "__main__":
    main()
| |
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import boto3
from boto.exception import EC2ResponseError
import six
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
import logging
@mock_ec2_deprecated
def test_eip_allocate_classic():
    """Allocate and release an EC2-Classic Elastic IP, checking dry-run paths."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    # Dry-run allocation must raise DryRunOperation without allocating.
    with assert_raises(EC2ResponseError) as err:
        connection.allocate_address(dry_run=True)
    err.exception.error_code.should.equal('DryRunOperation')
    err.exception.status.should.equal(400)
    err.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set')

    address = connection.allocate_address()
    address.should.be.a(boto.ec2.address.Address)
    address.public_ip.should.be.a(six.text_type)
    address.instance_id.should.be.none
    address.domain.should.be.equal("standard")

    # Dry-run release must also raise DryRunOperation.
    with assert_raises(EC2ResponseError) as err:
        address.release(dry_run=True)
    err.exception.error_code.should.equal('DryRunOperation')
    err.exception.status.should.equal(400)
    err.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set')

    address.release()
    address.should_not.be.within(connection.get_all_addresses())
@mock_ec2_deprecated
def test_eip_allocate_vpc():
    """Allocate/release VPC EIP"""
    conn = boto.connect_ec2('the_key', 'the_secret')

    # Dry-run allocation must raise DryRunOperation without allocating.
    with assert_raises(EC2ResponseError) as ex:
        vpc = conn.allocate_address(domain="vpc", dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set')

    vpc = conn.allocate_address(domain="vpc")
    vpc.should.be.a(boto.ec2.address.Address)
    vpc.domain.should.be.equal("vpc")
    # BUG FIX: the format string had no placeholder, so the allocation id was
    # silently dropped from the log message.
    logging.debug("vpc alloc_id: {0}".format(vpc.allocation_id))
    vpc.release()
@mock_ec2_deprecated
def test_eip_allocate_invalid_domain():
    """Allocating an address with an unknown domain must be rejected."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.allocate_address(domain="bogus")
    err.exception.code.should.equal('InvalidParameterValue')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_eip_associate_classic():
    """Associate and disassociate an EIP with an EC2-Classic instance."""
    connection = boto.connect_ec2('the_key', 'the_secret')
    inst = connection.run_instances('ami-1234abcd').instances[0]

    address = connection.allocate_address()
    address.instance_id.should.be.none

    # Associating without an instance id is a MissingParameter error.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(public_ip=address.public_ip)
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    # Dry-run association raises DryRunOperation.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(instance_id=inst.id,
                                     public_ip=address.public_ip, dry_run=True)
    err.exception.error_code.should.equal('DryRunOperation')
    err.exception.status.should.equal(400)
    err.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set')

    connection.associate_address(instance_id=inst.id, public_ip=address.public_ip)
    # Address objects have no .update(); re-fetch to observe the association.
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.instance_id.should.be.equal(inst.id)

    # Dry-run disassociation raises DryRunOperation.
    with assert_raises(EC2ResponseError) as err:
        connection.disassociate_address(public_ip=address.public_ip, dry_run=True)
    err.exception.error_code.should.equal('DryRunOperation')
    err.exception.status.should.equal(400)
    err.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set')

    connection.disassociate_address(public_ip=address.public_ip)
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.instance_id.should.be.equal(u'')

    address.release()
    address.should_not.be.within(connection.get_all_addresses())
    inst.terminate()
@mock_ec2_deprecated
def test_eip_associate_vpc():
    """Associate and disassociate a VPC EIP with an instance."""
    connection = boto.connect_ec2('the_key', 'the_secret')
    inst = connection.run_instances('ami-1234abcd').instances[0]

    address = connection.allocate_address(domain='vpc')
    address.instance_id.should.be.none

    # An allocation id alone (no instance) is a MissingParameter error.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(allocation_id=address.allocation_id)
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    connection.associate_address(instance_id=inst.id,
                                 allocation_id=address.allocation_id)
    # Address objects have no .update(); re-fetch to observe the association.
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.instance_id.should.be.equal(inst.id)

    connection.disassociate_address(association_id=address.association_id)
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.instance_id.should.be.equal(u'')
    address.association_id.should.be.none

    # Releasing with dry_run raises DryRunOperation.
    with assert_raises(EC2ResponseError) as err:
        address.release(dry_run=True)
    err.exception.error_code.should.equal('DryRunOperation')
    err.exception.status.should.equal(400)
    err.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set')

    address.release()
    inst.terminate()
@mock_ec2
def test_eip_boto3_vpc_association():
    """Associate an EIP with a VPC instance in a fresh subnet using boto3."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    ec2_client = boto3.client('ec2', region_name='us-west-1')

    vpc = ec2_client.create_vpc(CidrBlock='10.0.0.0/24')
    subnet = ec2_client.create_subnet(
        VpcId=vpc['Vpc']['VpcId'], CidrBlock='10.0.0.0/24')
    inst = ec2.create_instances(
        InstanceType='t2.micro',
        ImageId='ami-test',
        MinCount=1,
        MaxCount=1,
        SubnetId=subnet['Subnet']['SubnetId'])[0]
    alloc_id = ec2_client.allocate_address(Domain='vpc')['AllocationId']

    eip = ec2.VpcAddress(alloc_id)
    eip.load()
    # A freshly allocated address is unattached.
    eip.association_id.should.be.none
    eip.instance_id.should.be.empty
    eip.network_interface_id.should.be.empty

    ec2_client.associate_address(
        InstanceId=inst.id,
        AllocationId=alloc_id,
        AllowReassociation=False)
    inst.load()
    eip.reload()
    eip.association_id.should_not.be.none
    inst.public_ip_address.should_not.be.none
    inst.public_dns_name.should_not.be.none
    eip.network_interface_id.should.equal(
        inst.network_interfaces_attribute[0].get('NetworkInterfaceId'))
    eip.public_ip.should.equal(inst.public_ip_address)
    eip.instance_id.should.equal(inst.id)

    ec2_client.disassociate_address(AssociationId=eip.association_id)
    inst.reload()
    eip.reload()
    inst.public_ip_address.should.be.none
    eip.network_interface_id.should.be.empty
    eip.association_id.should.be.none
    eip.instance_id.should.be.empty
@mock_ec2_deprecated
def test_eip_associate_network_interface():
    """Associate and disassociate a VPC EIP with a network interface."""
    connection = boto.connect_vpc('the_key', 'the_secret')
    vpc = connection.create_vpc("10.0.0.0/16")
    subnet = connection.create_subnet(vpc.id, "10.0.0.0/18")
    nic = connection.create_network_interface(subnet.id)

    address = connection.allocate_address(domain='vpc')
    address.network_interface_id.should.be.none

    # A network interface id alone is a MissingParameter error.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(network_interface_id=nic.id)
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    connection.associate_address(network_interface_id=nic.id,
                                 allocation_id=address.allocation_id)
    # Address objects have no .update(); re-fetch to observe the association.
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.network_interface_id.should.be.equal(nic.id)

    connection.disassociate_address(association_id=address.association_id)
    address = connection.get_all_addresses(addresses=[address.public_ip])[0]
    address.network_interface_id.should.be.equal(u'')
    address.association_id.should.be.none
    address.release()
@mock_ec2_deprecated
def test_eip_reassociate():
    """Moving an EIP to another instance requires allow_reassociation."""
    connection = boto.connect_ec2('the_key', 'the_secret')
    first, second = connection.run_instances('ami-1234abcd', min_count=2).instances

    address = connection.allocate_address()
    connection.associate_address(instance_id=first.id, public_ip=address.public_ip)
    # Re-associating with the same instance is idempotent.
    connection.associate_address(instance_id=first.id, public_ip=address.public_ip)

    # A different instance without allow_reassociation is rejected.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(
            instance_id=second.id, public_ip=address.public_ip, allow_reassociation=False)
    err.exception.code.should.equal('Resource.AlreadyAssociated')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    # With allow_reassociation=True the move succeeds.
    connection.associate_address.when.called_with(
        instance_id=second.id, public_ip=address.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError)

    address.release()
    first.terminate()
    second.terminate()
@mock_ec2_deprecated
def test_eip_reassociate_nic():
    """Moving an EIP to another NIC requires allow_reassociation."""
    connection = boto.connect_vpc('the_key', 'the_secret')
    vpc = connection.create_vpc("10.0.0.0/16")
    subnet = connection.create_subnet(vpc.id, "10.0.0.0/18")
    first_nic = connection.create_network_interface(subnet.id)
    second_nic = connection.create_network_interface(subnet.id)

    address = connection.allocate_address()
    connection.associate_address(network_interface_id=first_nic.id,
                                 public_ip=address.public_ip)
    # Re-associating with the same interface is idempotent.
    connection.associate_address(network_interface_id=first_nic.id,
                                 public_ip=address.public_ip)

    # A different interface without allow_reassociation is rejected.
    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(
            network_interface_id=second_nic.id, public_ip=address.public_ip)
    err.exception.code.should.equal('Resource.AlreadyAssociated')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    # With allow_reassociation=True the move succeeds.
    connection.associate_address.when.called_with(
        network_interface_id=second_nic.id, public_ip=address.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError)

    address.release()
@mock_ec2_deprecated
def test_eip_associate_invalid_args():
    """Associating with an instance but no address is a MissingParameter error."""
    connection = boto.connect_ec2('the_key', 'the_secret')
    inst = connection.run_instances('ami-1234abcd').instances[0]
    connection.allocate_address()

    with assert_raises(EC2ResponseError) as err:
        connection.associate_address(instance_id=inst.id)
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none

    inst.terminate()
@mock_ec2_deprecated
def test_eip_disassociate_bogus_association():
    """Disassociating an unknown association id must fail."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.disassociate_address(association_id="bogus")
    err.exception.code.should.equal('InvalidAssociationID.NotFound')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_eip_release_bogus_eip():
    """Releasing an unknown allocation id must fail."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.release_address(allocation_id="bogus")
    err.exception.code.should.equal('InvalidAllocationID.NotFound')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_eip_disassociate_arg_error():
    """Disassociating with no arguments is a MissingParameter error."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.disassociate_address()
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_eip_release_arg_error():
    """Releasing with no arguments is a MissingParameter error."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.release_address()
    err.exception.code.should.equal('MissingParameter')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_eip_describe():
    """Describe allocated Elastic IPs individually, in bulk, and after release."""
    connection = boto.connect_ec2('the_key', 'the_secret')
    classic_count = 2
    vpc_count = 2

    # Allocate a mix of classic and VPC addresses.
    addresses = [connection.allocate_address() for _ in range(classic_count)]
    addresses += [connection.allocate_address(domain='vpc') for _ in range(vpc_count)]
    len(addresses).should.be.equal(classic_count + vpc_count)

    # Each address is findable individually: VPC ones by allocation id,
    # classic ones by public ip.
    for addr in addresses:
        if addr.allocation_id:
            found = connection.get_all_addresses(
                allocation_ids=[addr.allocation_id])
        else:
            found = connection.get_all_addresses(
                addresses=[addr.public_ip])
        len(found).should.be.equal(1)
        found[0].public_ip.should.be.equal(addr.public_ip)

    # A multi-address lookup returns each requested address in order.
    found = connection.get_all_addresses(
        addresses=[addresses[0].public_ip, addresses[1].public_ip])
    len(found).should.be.equal(2)
    found[0].public_ip.should.be.equal(addresses[0].public_ip)
    found[1].public_ip.should.be.equal(addresses[1].public_ip)

    # After releasing everything, nothing is left to describe.
    for addr in addresses:
        addr.release()
    len(connection.get_all_addresses()).should.be.equal(0)
@mock_ec2_deprecated
def test_eip_describe_none():
    """Describing a bogus address must raise InvalidAddress.NotFound."""
    connection = boto.connect_ec2('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as err:
        connection.get_all_addresses(addresses=["256.256.256.256"])
    err.exception.code.should.equal('InvalidAddress.NotFound')
    err.exception.status.should.equal(400)
    err.exception.request_id.should_not.be.none
@mock_ec2
def test_eip_filters():
    """Filter VPC addresses by allocation id, public ip, and EC2 filters."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    ec2_client = boto3.client('ec2', region_name='us-west-1')
    vpc = ec2_client.create_vpc(CidrBlock='10.0.0.0/24')
    subnet = ec2_client.create_subnet(
        VpcId=vpc['Vpc']['VpcId'], CidrBlock='10.0.0.0/24')

    def launch_instance_with_eip():
        # Launch one instance in the subnet and attach a fresh VPC EIP to it.
        inst = ec2.create_instances(
            InstanceType='t2.micro',
            ImageId='ami-test',
            MinCount=1,
            MaxCount=1,
            SubnetId=subnet['Subnet']['SubnetId'])[0]
        alloc_id = ec2_client.allocate_address(Domain='vpc')['AllocationId']
        ec2_client.associate_address(
            InstanceId=inst.id,
            AllocationId=alloc_id,
            AllowReassociation=False)
        inst.load()
        addr = ec2.VpcAddress(alloc_id)
        addr.load()
        return inst, addr

    inst1, eip1 = launch_instance_with_eip()
    inst2, eip2 = launch_instance_with_eip()
    inst3, eip3 = launch_instance_with_eip()

    # Param search by AllocationId.
    matches = list(ec2.vpc_addresses.filter(AllocationIds=[eip2.allocation_id]))
    len(matches).should.be.equal(1)
    matches[0].public_ip.should.equal(eip2.public_ip)
    inst2.public_ip_address.should.equal(matches[0].public_ip)

    # Param search by PublicIp.
    matches = list(ec2.vpc_addresses.filter(PublicIps=[eip3.public_ip]))
    len(matches).should.be.equal(1)
    matches[0].public_ip.should.equal(eip3.public_ip)
    inst3.public_ip_address.should.equal(matches[0].public_ip)

    # Param search by Filter: valid values hit exactly the first two
    # addresses; dummy values hit nothing.
    def expect_filter_hits(filter_name, filter_values):
        hits = list(ec2.vpc_addresses.filter(
            Filters=[{'Name': filter_name,
                      'Values': filter_values}]))
        len(hits).should.equal(2)
        hit_ips = [addr.public_ip for addr in hits]
        set(hit_ips).should.equal(set([eip1.public_ip, eip2.public_ip]))
        hit_ips.should.contain(inst1.public_ip_address)

    def expect_filter_misses(filter_name):
        misses = list(ec2.vpc_addresses.filter(
            Filters=[{'Name': filter_name,
                      'Values': ['dummy1', 'dummy2']}]))
        len(misses).should.equal(0)

    def check_vpc_filter(filter_name, filter_values):
        expect_filter_hits(filter_name, filter_values)
        expect_filter_misses(filter_name)

    check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id])
    check_vpc_filter('association-id', [eip1.association_id, eip2.association_id])
    check_vpc_filter('instance-id', [inst1.id, inst2.id])
    check_vpc_filter(
        'network-interface-id',
        [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'),
         inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')])
    check_vpc_filter(
        'private-ip-address',
        [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'),
         inst2.network_interfaces_attribute[0].get('PrivateIpAddress')])
    check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address])

    # All three addresses live in a VPC.
    matches = list(ec2.vpc_addresses.filter(
        Filters=[{'Name': 'domain', 'Values': ['vpc']}]))
    len(matches).should.equal(3)
| |
'''
This module implements :class:`AnalogSignal`, an array of analog signals.
:class:`AnalogSignal` inherits from :class:`basesignal.BaseSignal` which
derives from :class:`BaseNeo`, and from :class:`quantities.Quantity` which
in turn inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
import logging
try:
import scipy.signal
except ImportError as err:
HAVE_SCIPY = False
else:
HAVE_SCIPY = True
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
from neo.core.dataobject import DataObject
from copy import copy, deepcopy
from neo.core.basesignal import BaseSignal
logger = logging.getLogger("Neo")
def _get_sampling_rate(sampling_rate, sampling_period):
'''
Gets the sampling_rate from either the sampling_period or the
sampling_rate, or makes sure they match if both are specified
'''
if sampling_period is None:
if sampling_rate is None:
raise ValueError("You must provide either the sampling rate or " + "sampling period")
elif sampling_rate is None:
sampling_rate = 1.0 / sampling_period
elif sampling_period != 1.0 / sampling_rate:
raise ValueError('The sampling_rate has to be 1/sampling_period')
if not hasattr(sampling_rate, 'units'):
raise TypeError("Sampling rate/sampling period must have units")
return sampling_rate
def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
                           sampling_rate=None, sampling_period=None, name=None, file_origin=None,
                           description=None, array_annotations=None, annotations=None,
                           channel_index=None, segment=None):
    '''
    Reconstruct an :class:`AnalogSignal` without the unit checking performed
    by :meth:`AnalogSignal.__new__`. Pickle needs this helper to rebuild
    instances; the channel_index and segment links are restored afterwards
    because the constructor does not accept them.
    '''
    signal_obj = cls(signal=signal, units=units, dtype=dtype, copy=copy,
                     t_start=t_start, sampling_rate=sampling_rate,
                     sampling_period=sampling_period, name=name,
                     file_origin=file_origin, description=description,
                     array_annotations=array_annotations, **annotations)
    signal_obj.channel_index = channel_index
    signal_obj.segment = segment
    return signal_obj
class AnalogSignal(BaseSignal):
    '''
    Array of one or more continuous analog signals.
    A representation of several continuous, analog signals that
    have the same duration, sampling rate and start time.
    Basically, it is a 2D array: dim 0 is time, dim 1 is
    channel index
    Inherits from :class:`quantities.Quantity`, which in turn inherits from
    :class:`numpy.ndarray`.
    *Usage*::
        >>> from neo.core import AnalogSignal
        >>> import quantities as pq
        >>>
        >>> sigarr = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='V',
        ...                       sampling_rate=1*pq.Hz)
        >>>
        >>> sigarr
        <AnalogSignal(array([[1, 2, 3],
            [4, 5, 6]]) * V, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)>
        >>> sigarr[:,1]
        <AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s],
            sampling rate: 1.0 Hz)>
        >>> sigarr[1, 1]
        array(5) * V
    *Required attributes/properties*:
        :signal: (quantity array 2D, numpy array 2D, or list (data, channel))
            The data itself.
        :units: (quantity units) Required if the signal is a list or NumPy
                array, not if it is a :class:`Quantity`
        :t_start: (quantity scalar) Time when signal begins
        :sampling_rate: *or* **sampling_period** (quantity scalar) Number of
                                               samples per unit time or
                                               interval between two samples.
                                               If both are specified, they are
                                               checked for consistency.
    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
    *Optional attributes/properties*:
        :dtype: (numpy dtype or str) Override the dtype of the signal array.
        :copy: (bool) True by default.
        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
                                   for all data points
    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.
    *Properties available on this object*:
        :sampling_rate: (quantity scalar) Number of samples per unit time.
            (1/:attr:`sampling_period`)
        :sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`sampling_rate`)
        :duration: (Quantity) Signal duration, read-only.
            (size * :attr:`sampling_period`)
        :t_stop: (quantity scalar) Time when signal ends, read-only.
            (:attr:`t_start` + :attr:`duration`)
        :times: (quantity 1D) The time points of each sample of the signal,
            read-only.
            (:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`)
        :channel_index:
            (deprecated) access to the channel_index attribute of the principal ChannelIndex
            associated with this signal.
    *Slicing*:
        :class:`AnalogSignal` objects can be sliced. When taking a single
        time point (along dimension 0, e.g. [0, :]) or a single element,
        a :class:`~quantities.Quantity` is returned.
        Otherwise an :class:`AnalogSignal` (actually a view) is
        returned, with the same metadata, except that :attr:`t_start`
        is changed if the start index along dimension 0 is greater than 0.
        Note that slicing an :class:`AnalogSignal` may give a different
        result to slicing the underlying NumPy array since signals
        are always two-dimensional.
    *Operations available on this object*:
        == != + * /
    '''
    # parent containers this object can be attached to, and the matching
    # instance attribute names
    _single_parent_objects = ('Segment', 'ChannelIndex')
    _single_parent_attrs = ('segment', 'channel_index')
    _quantity_attr = 'signal'
    # (name, type, ndim) triples used by BaseNeo validation
    _necessary_attrs = (('signal', pq.Quantity, 2),
                        ('sampling_rate', pq.Quantity, 0),
                        ('t_start', pq.Quantity, 0))
    _recommended_attrs = BaseNeo._recommended_attrs
    def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
                sampling_rate=None, sampling_period=None, name=None, file_origin=None,
                description=None, array_annotations=None, **annotations):
        '''
        Constructs new :class:`AnalogSignal` from data.
        This is called whenever a new class:`AnalogSignal` is created from
        the constructor, but not when slicing.
        __array_finalize__ is called on the new object.
        Metadata arguments (name, description, ...) are accepted here only
        so __new__ and __init__ share a signature; they are stored by
        :meth:`__init__`, not here.
        '''
        signal = cls._rescale(signal, units=units)
        obj = pq.Quantity(signal, units=units, dtype=dtype, copy=copy).view(cls)
        # a 1-D input is treated as a single channel: reshape to (n, 1)
        if obj.ndim == 1:
            obj.shape = (-1, 1)
        if t_start is None:
            raise ValueError('t_start cannot be None')
        obj._t_start = t_start
        # validates the rate/period combination and that it carries units
        obj._sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
        # parent links are filled in later, when attached to containers
        obj.segment = None
        obj.channel_index = None
        return obj
    def __init__(self, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
                 sampling_rate=None, sampling_period=None, name=None, file_origin=None,
                 description=None, array_annotations=None, **annotations):
        '''
        Initializes a newly constructed :class:`AnalogSignal` instance.
        All array construction happens in :meth:`__new__`; this method only
        stores descriptive metadata and user annotations.
        '''
        # This method is only called when constructing a new AnalogSignal,
        # not when slicing or viewing. We use the same call signature
        # as __new__ for documentation purposes. Anything not in the call
        # signature is stored in annotations.
        # Calls parent __init__, which grabs universally recommended
        # attributes and sets up self.annotations
        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
                            array_annotations=array_annotations, **annotations)
    def __reduce__(self):
        '''
        Map the __new__ function onto _new_AnalogSignalArray, so that pickle
        works
        '''
        # positional arguments mirror _new_AnalogSignalArray's signature
        # exactly; `copy` is pinned to True on unpickling
        return _new_AnalogSignalArray, (self.__class__, np.array(self), self.units, self.dtype,
                                        True, self.t_start, self.sampling_rate,
                                        self.sampling_period, self.name, self.file_origin,
                                        self.description, self.array_annotations,
                                        self.annotations, self.channel_index, self.segment)
    def _array_finalize_spec(self, obj):
        '''
        Set default values for attributes specific to :class:`AnalogSignal`.
        Common attributes are defined in
        :meth:`__array_finalize__` in :class:`basesignal.BaseSignal`),
        which is called every time a new signal is created
        and calls this method.
        '''
        # when `obj` lacks these attributes (e.g. a view created from a
        # plain Quantity), fall back to a t_start of 0 s and no rate
        self._t_start = getattr(obj, '_t_start', 0 * pq.s)
        self._sampling_rate = getattr(obj, '_sampling_rate', None)
        return obj
def __repr__(self):
'''
Returns a string representing the :class:`AnalogSignal`.
'''
return ('<%s(%s, [%s, %s], sampling rate: %s)>' % (self.__class__.__name__,
super().__repr__(),
self.t_start, self.t_stop,
self.sampling_rate))
def get_channel_index(self):
"""
"""
if self.channel_index:
return self.channel_index.index
else:
return None
    def __getitem__(self, i):
        '''
        Get the item or slice :attr:`i`.
        Returns a plain :class:`~quantities.Quantity` when the result no
        longer has a regular time axis (single time point, boolean mask);
        otherwise an :class:`AnalogSignal` view with `t_start` and
        `sampling_period` adjusted to the slice.
        '''
        if isinstance(i, (int, np.integer)):  # a single point in time across all channels
            obj = super().__getitem__(i)
            obj = pq.Quantity(obj.magnitude, units=obj.units)
        elif isinstance(i, tuple):
            obj = super().__getitem__(i)
            j, k = i  # j indexes time (dim 0), k indexes channels (dim 1)
            if isinstance(j, (int, np.integer)):  # extract a quantity array
                obj = pq.Quantity(obj.magnitude, units=obj.units)
            else:
                if isinstance(j, slice):
                    # slicing along time: shift t_start for the slice start
                    # and stretch the sampling period for the step
                    if j.start:
                        obj.t_start = (self.t_start + j.start * self.sampling_period)
                    if j.step:
                        obj.sampling_period *= j.step
                elif isinstance(j, np.ndarray):
                    raise NotImplementedError(
                        "Arrays not yet supported")  # in the general case, would need to return
                    # IrregularlySampledSignal(Array)
                else:
                    raise TypeError("%s not supported" % type(j))
                if isinstance(k, (int, np.integer)):
                    # keep the result two-dimensional (one channel column)
                    obj = obj.reshape(-1, 1)
                if self.channel_index:
                    obj.channel_index = self.channel_index.__getitem__(k)
                # keep only the annotations of the selected channel(s)
                obj.array_annotate(**deepcopy(self.array_annotations_at_index(k)))
        elif isinstance(i, slice):
            obj = super().__getitem__(i)
            if i.start:
                obj.t_start = self.t_start + i.start * self.sampling_period
            obj.array_annotations = deepcopy(self.array_annotations)
        elif isinstance(i, np.ndarray):
            # Indexing of an AnalogSignal is only consistent if the resulting number of
            # samples is the same for each trace. The time axis for these samples is not
            # guaranteed to be continuous, so returning a Quantity instead of an AnalogSignal here.
            new_time_dims = np.sum(i, axis=0)
            if len(new_time_dims) and all(new_time_dims == new_time_dims[0]):
                obj = np.asarray(self).T.__getitem__(i.T)
                obj = obj.T.reshape(self.shape[1], -1).T
                obj = pq.Quantity(obj, units=self.units)
            else:
                raise IndexError("indexing of an AnalogSignals needs to keep the same number of "
                                 "sample for each trace contained")
        else:
            raise IndexError("index should be an integer, tuple, slice or boolean numpy array")
        return obj
def __setitem__(self, i, value):
"""
Set an item or slice defined by :attr:`i` to `value`.
"""
# because AnalogSignals are always at least two-dimensional,
# we need to handle the case where `i` is an integer
if isinstance(i, int):
i = slice(i, i + 1)
elif isinstance(i, tuple):
j, k = i
if isinstance(k, int):
i = (j, slice(k, k + 1))
return super().__setitem__(i, value)
# sampling_rate attribute is handled as a property so type checking can
# be done
    @property
    def sampling_rate(self):
        '''
        Number of samples per unit time.
        (1/:attr:`sampling_period`)
        '''
        # the rate is the stored quantity; the period is derived from it
        return self._sampling_rate
    @sampling_rate.setter
    def sampling_rate(self, rate):
        '''
        Setter for :attr:`sampling_rate`.
        Rejects None and unitless values so downstream time arithmetic
        always has units to work with.
        '''
        if rate is None:
            raise ValueError('sampling_rate cannot be None')
        elif not hasattr(rate, 'units'):
            raise ValueError('sampling_rate must have units')
        self._sampling_rate = rate
# sampling_period attribute is handled as a property on underlying rate
    @property
    def sampling_period(self):
        '''
        Interval between two samples.
        (1/:attr:`sampling_rate`)
        '''
        # computed on the fly; only the rate is stored
        return 1. / self.sampling_rate
    @sampling_period.setter
    def sampling_period(self, period):
        '''
        Setter for :attr:`sampling_period`.
        Stored as its reciprocal via the :attr:`sampling_rate` setter.
        '''
        if period is None:
            raise ValueError('sampling_period cannot be None')
        elif not hasattr(period, 'units'):
            raise ValueError('sampling_period must have units')
        self.sampling_rate = 1. / period
# t_start attribute is handled as a property so type checking can be done
    @property
    def t_start(self):
        '''
        Time when signal begins.
        '''
        return self._t_start
    @t_start.setter
    def t_start(self, start):
        '''
        Setter for :attr:`t_start`.
        None is rejected; unlike the sampling properties, no units check is
        performed here.
        '''
        if start is None:
            raise ValueError('t_start cannot be None')
        self._t_start = start
    @property
    def duration(self):
        '''
        Signal duration
        (:attr:`size` * :attr:`sampling_period`)
        '''
        # dividing the sample count by the rate keeps the units simplified
        return self.shape[0] / self.sampling_rate
    @property
    def t_stop(self):
        '''
        Time when signal ends.
        (:attr:`t_start` + :attr:`duration`)
        '''
        return self.t_start + self.duration
    @property
    def times(self):
        '''
        The time points of each sample of the signal
        (:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`)
        '''
        return self.t_start + np.arange(self.shape[0]) / self.sampling_rate
def __eq__(self, other):
'''
Equality test (==)
'''
if (isinstance(other, AnalogSignal) and (
self.t_start != other.t_start or self.sampling_rate != other.sampling_rate)):
return False
return super().__eq__(other)
def _check_consistency(self, other):
'''
Check if the attributes of another :class:`AnalogSignal`
are compatible with this one.
'''
if isinstance(other, AnalogSignal):
for attr in "t_start", "sampling_rate":
if getattr(self, attr) != getattr(other, attr):
raise ValueError(
"Inconsistent values of %s" % attr) # how to handle name and annotations?
    def _repr_pretty_(self, pp, cycle):
        '''
        Handle pretty-printing the :class:`AnalogSignal`
        (hook used by IPython's display machinery).
        '''
        pp.text("{cls} with {channels} channels of length {length}; "
                "units {units}; datatype {dtype} ".format(cls=self.__class__.__name__,
                                                          channels=self.shape[1],
                                                          length=self.shape[0],
                                                          units=self.units.dimensionality.string,
                                                          dtype=self.dtype))
        if self._has_repr_pretty_attrs_():
            pp.breakable()
            self._repr_pretty_attrs_(pp, cycle)
        def _pp(line):
            # helper: emit one line in its own indented group
            pp.breakable()
            with pp.group(indent=1):
                pp.text(line)
        _pp("sampling rate: {}".format(self.sampling_rate))
        _pp("time: {} to {}".format(self.t_start, self.t_stop))
def time_index(self, t):
"""Return the array index (or indices) corresponding to the time (or times) `t`"""
i = (t - self.t_start) * self.sampling_rate
i = np.rint(i.simplified.magnitude).astype(np.int)
return i
    def time_slice(self, t_start, t_stop):
        '''
        Creates a new AnalogSignal corresponding to the time slice of the
        original AnalogSignal between times t_start, t_stop. Note, that for
        numerical stability reasons if t_start does not fall exactly on
        the time bins defined by the sampling_period it will be rounded to
        the nearest sampling bin. The time bin for t_stop will be chosen to
        make the duration of the resultant signal as close as possible to
        t_stop - t_start. This means that for a given duration, the size
        of the slice will always be the same.
        '''
        # checking start time and transforming to start index
        if t_start is None:
            i = 0
            t_start = 0 * pq.s
        else:
            i = self.time_index(t_start)
        # checking stop time and transforming to stop index
        if t_stop is None:
            j = len(self)
        else:
            # stop index is derived from the requested duration rather than
            # from t_stop directly, so equal durations give equal sizes
            delta = (t_stop - t_start) * self.sampling_rate
            j = i + int(np.rint(delta.simplified.magnitude))
        if (i < 0) or (j > len(self)):
            raise ValueError('t_start, t_stop have to be within the analog \
            signal duration')
        # Time slicing should create a deep copy of the object
        obj = deepcopy(self[i:j])
        # snap the new t_start onto the original signal's sampling grid
        obj.t_start = self.t_start + i * self.sampling_period
        return obj
def time_shift(self, t_shift):
"""
Shifts a :class:`AnalogSignal` to start at a new time.
Parameters:
-----------
t_shift: Quantity (time)
Amount of time by which to shift the :class:`AnalogSignal`.
Returns:
--------
new_sig: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object starting at t_shift later than the
original :class:`AnalogSignal` (the original :class:`AnalogSignal` is not modified).
"""
new_sig = deepcopy(self)
new_sig.t_start = new_sig.t_start + t_shift
return new_sig
    def splice(self, signal, copy=False):
        """
        Replace part of the current signal by a new piece of signal.
        The new piece of signal will overwrite part of the current signal
        starting at the time given by the new piece's `t_start` attribute.
        The signal to be spliced in must have the same physical dimensions,
        sampling rate, and number of channels as the current signal and
        fit within it.
        If `copy` is False (the default), modify the current signal in place.
        If `copy` is True, return a new signal and leave the current one untouched.
        In this case, the new signal will not be linked to any parent objects.
        """
        if signal.t_start < self.t_start:
            raise ValueError("Cannot splice earlier than the start of the signal")
        if signal.t_stop > self.t_stop:
            raise ValueError("Splice extends beyond signal")
        if signal.sampling_rate != self.sampling_rate:
            raise ValueError("Sampling rates do not match")
        # target sample range is derived from the splice's own t_start
        i = self.time_index(signal.t_start)
        j = i + signal.shape[0]
        if copy:
            new_signal = deepcopy(self)
            # detach the copy from any parent containers
            new_signal.segment = None
            new_signal.channel_index = None
            new_signal[i:j, :] = signal
            return new_signal
        else:
            self[i:j, :] = signal
            return self
    def downsample(self, downsampling_factor, **kwargs):
        """
        Downsample the data of a signal.
        This method reduces the number of samples of the AnalogSignal to a fraction of the
        original number of samples, defined by `downsampling_factor`.
        This method is a wrapper of scipy.signal.decimate and accepts the same set of keyword
        arguments, except for specifying the axis of resampling, which is fixed to the first axis
        here.
        Parameters:
        -----------
        downsampling_factor: integer
            Factor used for decimation of samples. Scipy recommends to call decimate multiple times
            for downsampling factors higher than 13 when using IIR downsampling (default).
        Returns:
        --------
        downsampled_signal: :class:`AnalogSignal`
            New instance of a :class:`AnalogSignal` object containing the resampled data points.
            The original :class:`AnalogSignal` is not modified.
        Note:
        -----
        For resampling the signal with a fixed number of samples, see `resample` method.
        """
        if not HAVE_SCIPY:
            raise ImportError('Decimating requires availability of scipy.signal')
        # Resampling is only permitted along the time axis (axis=0)
        if 'axis' in kwargs:
            kwargs.pop('axis')
        downsampled_data = scipy.signal.decimate(self.magnitude, downsampling_factor, axis=0,
                                                 **kwargs)
        downsampled_signal = self.duplicate_with_new_data(downsampled_data)
        # since the number of channels stays the same, we can also copy array annotations here
        downsampled_signal.array_annotations = self.array_annotations.copy()
        # decimation divides the effective sampling rate by the factor
        downsampled_signal.sampling_rate = self.sampling_rate / downsampling_factor
        return downsampled_signal
    def resample(self, sample_count, **kwargs):
        """
        Resample the data points of the signal.
        This method interpolates the signal and returns a new signal with a fixed number of
        samples defined by `sample_count`.
        This method is a wrapper of scipy.signal.resample and accepts the same set of keyword
        arguments, except for specifying the axis of resampling which is fixed to the first axis
        here, and the sample positions.
        Parameters:
        -----------
        sample_count: integer
            Number of desired samples. The resulting signal starts at the same sample as the
            original and is sampled regularly.
        Returns:
        --------
        resampled_signal: :class:`AnalogSignal`
            New instance of a :class:`AnalogSignal` object containing the resampled data points.
            The original :class:`AnalogSignal` is not modified.
        Note:
        -----
        For reducing the number of samples to a fraction of the original, see `downsample` method
        """
        if not HAVE_SCIPY:
            raise ImportError('Resampling requires availability of scipy.signal')
        # Resampling is only permitted along the time axis (axis=0)
        if 'axis' in kwargs:
            kwargs.pop('axis')
        if 't' in kwargs:
            kwargs.pop('t')
        # scipy returns (data, times) because `t` is supplied; the returned
        # times are discarded and the new rate is recomputed from the ratio
        resampled_data, resampled_times = scipy.signal.resample(self.magnitude, sample_count,
                                                                t=self.times, axis=0, **kwargs)
        resampled_signal = self.duplicate_with_new_data(resampled_data)
        resampled_signal.sampling_rate = (sample_count / self.shape[0]) * self.sampling_rate
        # since the number of channels stays the same, we can also copy array annotations here
        resampled_signal.array_annotations = self.array_annotations.copy()
        return resampled_signal
def rectify(self, **kwargs):
"""
Rectify the signal.
This method rectifies the signal by taking the absolute value.
This method is a wrapper of numpy.absolute() and accepts the same set of keyword
arguments.
Returns:
--------
resampled_signal: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object containing the rectified data points.
The original :class:`AnalogSignal` is not modified.
"""
# Use numpy to get the absolute value of the signal
rectified_data = np.absolute(self.magnitude, **kwargs)
rectified_signal = self.duplicate_with_new_data(rectified_data)
# the sampling rate stays constant
rectified_signal.sampling_rate = self.sampling_rate
# since the number of channels stays the same, we can also copy array annotations here
rectified_signal.array_annotations = self.array_annotations.copy()
return rectified_signal
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Schema transformer DB to store ids allocated by it
"""
from pycassa import NotFoundException
import cfgm_common as common
from cfgm_common.exceptions import VncError, NoIdError
from cfgm_common.zkclient import IndexAllocator
from cfgm_common.vnc_cassandra import VncCassandraClient
from sandesh_common.vns.constants import SCHEMA_KEYSPACE_NAME
import uuid
class SchemaTransformerDB(VncCassandraClient):
    """Store for ids allocated by the schema transformer.
    Persists allocations in Cassandra column families and coordinates the
    allocation itself through zookeeper IndexAllocators.
    """
    _KEYSPACE = SCHEMA_KEYSPACE_NAME
    # column families holding the persisted allocations
    _RT_CF = 'route_target_table'
    _SC_IP_CF = 'service_chain_ip_address_table'
    _SERVICE_CHAIN_CF = 'service_chain_table'
    _SERVICE_CHAIN_UUID_CF = 'service_chain_uuid_table'
    # NOTE(review): class-level default prefix; __init__ also sets a
    # cluster-id-aware instance attribute `_zk_path_pfx` -- see the note in
    # _upgrade_vlan_alloc_path about which one is used where.
    _zk_path_prefix = ''
    _BGP_RTGT_MAX_ID = 1 << 24  # route-target ids fit in 24 bits
    _BGP_RTGT_ALLOC_PATH = "/id/bgp/route-targets/"
    _VN_MAX_ID = 1 << 24  # virtual-network ids fit in 24 bits
    _VN_ID_ALLOC_PATH = "/id/virtual-networks/"
    _SECURITY_GROUP_MAX_ID = 1 << 32
    _SECURITY_GROUP_ID_ALLOC_PATH = "/id/security-groups/id/"
    # allocator size; returned vlan tags are offset by one since tag 0 is
    # not valid (see allocate_service_chain_vlan)
    _SERVICE_CHAIN_MAX_VLAN = 4093
    _SERVICE_CHAIN_VLAN_ALLOC_PATH = "/id/service-chain/vlan/"
    _BGPAAS_PORT_ALLOC_PATH = "/id/bgpaas/port/"
@classmethod
def get_db_info(cls):
db_info = [(cls._KEYSPACE, [cls._RT_CF, cls._SC_IP_CF,
cls._SERVICE_CHAIN_CF,
cls._SERVICE_CHAIN_UUID_CF])]
return db_info
# end get_db_info
    def __init__(self, manager, zkclient):
        # `manager` supplies parsed args and a config logger; `zkclient` is
        # the shared zookeeper session used by all IndexAllocators below
        self._manager = manager
        self._args = manager._args
        self._zkclient = zkclient
        # zookeeper paths are namespaced per cluster when an id is configured
        if self._args.cluster_id:
            self._zk_path_pfx = self._args.cluster_id + '/'
        else:
            self._zk_path_pfx = ''
        keyspaces = {
            self._KEYSPACE: {self._RT_CF: {},
                             self._SC_IP_CF: {},
                             self._SERVICE_CHAIN_CF: {},
                             self._SERVICE_CHAIN_UUID_CF: {}}}
        cass_server_list = self._args.cassandra_server_list
        cred = None
        if (self._args.cassandra_user is not None and
                self._args.cassandra_password is not None):
            cred={'username':self._args.cassandra_user,
                  'password':self._args.cassandra_password}
        super(SchemaTransformerDB, self).__init__(
            cass_server_list, self._args.cluster_id, keyspaces, None,
            manager.config_log, reset_config=self._args.reset_config,
            credential=cred)
        # cache column-family handles; note they are stored on the class,
        # not the instance -- presumably only one instance ever exists
        # (TODO confirm)
        SchemaTransformerDB._rt_cf = self._cf_dict[self._RT_CF]
        SchemaTransformerDB._sc_ip_cf = self._cf_dict[self._SC_IP_CF]
        SchemaTransformerDB._service_chain_cf = self._cf_dict[
            self._SERVICE_CHAIN_CF]
        SchemaTransformerDB._service_chain_uuid_cf = self._cf_dict[
            self._SERVICE_CHAIN_UUID_CF]
        # reset zookeeper config
        if self._args.reset_config:
            zkclient.delete_node(
                self._zk_path_pfx + self._BGP_RTGT_ALLOC_PATH, True)
            zkclient.delete_node(
                self._zk_path_pfx + self._BGPAAS_PORT_ALLOC_PATH, True)
            zkclient.delete_node(
                self._zk_path_pfx + self._SERVICE_CHAIN_VLAN_ALLOC_PATH, True)
        # TODO(ethuleau): We keep the virtual network and security group ID
        #                 allocation in schema and in the vnc API for one
        #                 release overlap to prevent any upgrade issue. So the
        #                 following code needs to be removed in release (3.2 + 1)
        self._vn_id_allocator = IndexAllocator(
            zkclient, self._zk_path_pfx+self._VN_ID_ALLOC_PATH, self._VN_MAX_ID)
        self._sg_id_allocator = IndexAllocator(
            zkclient, self._zk_path_pfx+self._SECURITY_GROUP_ID_ALLOC_PATH,
            self._SECURITY_GROUP_MAX_ID)
        # 0 is not a valid sg id any more. So, if it was previously allocated,
        # delete it and reserve it
        if self._sg_id_allocator.read(0) != '__reserved__':
            self._sg_id_allocator.delete(0)
            self._sg_id_allocator.reserve(0, '__reserved__')
        self._rt_allocator = IndexAllocator(
            zkclient, self._zk_path_pfx+self._BGP_RTGT_ALLOC_PATH,
            self._BGP_RTGT_MAX_ID, common.BGP_RTGT_MIN_ID)
        self._bgpaas_port_allocator = IndexAllocator(
            zkclient, self._zk_path_pfx + self._BGPAAS_PORT_ALLOC_PATH,
            self._args.bgpaas_port_end - self._args.bgpaas_port_start,
            self._args.bgpaas_port_start)
        # per-service-VM vlan allocators, created lazily in
        # allocate_service_chain_vlan()
        self._sc_vlan_allocator_dict = {}
        self._upgrade_vlan_alloc_path()
    # end __init__
    def _upgrade_vlan_alloc_path(self):
        # In earlier releases, allocation path for vlans did not end with '/'.
        # This caused the allocated numbers to be just appended to the vm id
        # instead of being created as a child of it. That caused the vlan ids
        # to be leaked when process restarted. With that being fixed, we need
        # to change any vlan ids that were allocated in prior releases to the
        # new format.
        # NOTE(review): this uses the class-level `_zk_path_prefix` (always
        # ''), while __init__ namespaces/reset-deletes nodes under the
        # instance's `_zk_path_pfx` (cluster-id aware). Looks inconsistent
        # when a cluster_id is configured -- confirm which is intended.
        vlan_alloc_path = (self._zk_path_prefix +
                           self._SERVICE_CHAIN_VLAN_ALLOC_PATH)
        for item in self._zkclient.get_children(vlan_alloc_path):
            try:
                # in the old format, item was vm id followed by 10 digit vlan
                # id allocated. Try to parse it to determine if it is still in
                # old format (vm_id/vlan_id are parsed only for validation)
                vm_id = uuid.UUID(item[:-10])
                vlan_id = int(item[-10:])
            except ValueError:
                continue
            # re-create the entry as <vm-id>/<vlan-id> and drop the flat key
            sc_id = self._zkclient.read_node(vlan_alloc_path+item)
            self._zkclient.delete_node(vlan_alloc_path+item)
            self._zkclient.create_node(
                vlan_alloc_path+item[:-10]+'/'+item[-10:], sc_id)
        # end for item
    def allocate_service_chain_vlan(self, service_vm, service_chain):
        """Return the vlan tag used by `service_chain` on `service_vm`,
        allocating and persisting a new one if none is validly recorded.
        """
        alloc_new = False
        if service_vm not in self._sc_vlan_allocator_dict:
            # lazily create a per-VM allocator rooted under the VM's zk node
            self._sc_vlan_allocator_dict[service_vm] = IndexAllocator(
                self._zkclient,
                (self._zk_path_prefix + self._SERVICE_CHAIN_VLAN_ALLOC_PATH +
                 service_vm + '/'),
                self._SERVICE_CHAIN_MAX_VLAN)
        vlan_ia = self._sc_vlan_allocator_dict[service_vm]
        try:
            # trust the vlan persisted in Cassandra only if zookeeper agrees
            # it belongs to this service chain
            vlan = int(self.get_one_col(self._SERVICE_CHAIN_CF,
                                        service_vm, service_chain))
            db_sc = vlan_ia.read(vlan)
            if (db_sc is None) or (db_sc != service_chain):
                alloc_new = True
        except (KeyError, VncError, NoIdError):
            # TODO(ethuleau): VncError is raised if more than one row was
            #                 fetched from db with get_one_col method.
            #                 Probably need to be cleaned
            alloc_new = True
        if alloc_new:
            vlan = vlan_ia.alloc(service_chain)
            self._service_chain_cf.insert(service_vm,
                                          {service_chain: str(vlan)})
        # Since vlan tag 0 is not valid, increment before returning
        return vlan + 1
    # end allocate_service_chain_vlan
    def free_service_chain_vlan(self, service_vm, service_chain):
        """Release the vlan recorded for `service_chain` on `service_vm`;
        silently does nothing if no allocation is found.
        """
        try:
            vlan_ia = self._sc_vlan_allocator_dict[service_vm]
            vlan = int(self.get_one_col(self._SERVICE_CHAIN_CF,
                                        service_vm, service_chain))
            self._service_chain_cf.remove(service_vm, [service_chain])
            vlan_ia.delete(vlan)
            # drop the per-VM allocator once its last vlan is freed
            if vlan_ia.empty():
                del self._sc_vlan_allocator_dict[service_vm]
        except (KeyError, VncError, NoIdError):
            # TODO(ethuleau): VncError is raised if more than one row was
            #                 fetched from db with get_one_col method.
            #                 Probably need to be cleaned
            pass
    # end free_service_chain_vlan
    def get_route_target(self, ri_fq_name):
        """Return the route-target number stored for `ri_fq_name`, or 0
        when no single value is recorded.
        """
        try:
            return int(self.get_one_col(self._RT_CF, ri_fq_name, 'rtgt_num'))
        except (VncError, NoIdError):
            # TODO(ethuleau): VncError is raised if more than one row was
            #                 fetched from db with get_one_col method.
            #                 Probably need to be cleaned
            return 0
def alloc_route_target(self, ri_fq_name, zk_only=False):
alloc_new = False
if zk_only:
alloc_new = True
else:
rtgt_num = self.get_route_target(ri_fq_name)
if rtgt_num < common.BGP_RTGT_MIN_ID:
alloc_new = True
else:
rtgt_ri_fq_name_str = self._rt_allocator.read(rtgt_num)
if (rtgt_ri_fq_name_str != ri_fq_name):
alloc_new = True
if (alloc_new):
rtgt_num = self._rt_allocator.alloc(ri_fq_name)
self._rt_cf.insert(ri_fq_name, {'rtgt_num': str(rtgt_num)})
return rtgt_num
# end alloc_route_target
    def free_route_target_by_number(self, rtgt):
        # Release a route-target id directly in zookeeper; any Cassandra
        # row pointing at it is left untouched.
        self._rt_allocator.delete(rtgt)
def free_route_target(self, ri_fq_name):
try:
rtgt = self.get_route_target(ri_fq_name)
self._rt_cf.remove(ri_fq_name)
except NotFoundException:
pass
self._rt_allocator.delete(rtgt)
# end free_route_target
def get_service_chain_ip(self, sc_name):
addresses = self.get(self._SC_IP_CF, sc_name)
if addresses:
return addresses.get('ip_address'), addresses.get('ipv6_address')
else:
return None, None
def add_service_chain_ip(self, sc_name, ip, ipv6):
val = {}
if ip:
val['ip_address'] = ip
if ipv6:
val['ipv6_address'] = ipv6
self._sc_ip_cf.insert(sc_name, val)
    def remove_service_chain_ip(self, sc_name):
        """Delete the address row for `sc_name`; a missing row is ignored."""
        try:
            self._sc_ip_cf.remove(sc_name)
        except NotFoundException:
            pass
    def list_service_chain_uuid(self):
        """Return an iterable of all (name, columns) service-chain uuid
        rows; empty list if the column family is absent."""
        try:
            return self._service_chain_uuid_cf.get_range()
        except NotFoundException:
            return []
    def add_service_chain_uuid(self, name, value):
        """Persist `value` under the service chain row `name`."""
        self._service_chain_uuid_cf.insert(name, {'value': value})
    def remove_service_chain_uuid(self, name):
        """Delete the service chain uuid row; a missing row is ignored."""
        try:
            self._service_chain_uuid_cf.remove(name)
        except NotFoundException:
            pass
    # TODO(ethuleau): We keep the virtual network and security group ID
    #                 allocation in schema and in the vnc API for one
    #                 release overlap to prevent any upgrade issue. So the
    #                 following code needs to be removed in release (3.2 + 1)
    def get_sg_from_id(self, sg_id):
        """Return the owner recorded for security-group id `sg_id`, if any."""
        return self._sg_id_allocator.read(sg_id)
    def alloc_sg_id(self, name):
        """Allocate and return a new security-group id owned by `name`."""
        return self._sg_id_allocator.alloc(name)
    def free_sg_id(self, sg_id):
        """Release the security-group id `sg_id`."""
        self._sg_id_allocator.delete(sg_id)
    def get_vn_from_id(self, vn_id):
        """Return the owner recorded for virtual-network id `vn_id`, if any."""
        return self._vn_id_allocator.read(vn_id)
    def alloc_vn_id(self, name):
        """Allocate and return a new virtual-network id owned by `name`."""
        return self._vn_id_allocator.alloc(name)
    def free_vn_id(self, vn_id):
        """Release the virtual-network id `vn_id`."""
        self._vn_id_allocator.delete(vn_id)
def get_bgpaas_port(self, port):
return self._bgpaas_allocator.read(port)
    def alloc_bgpaas_port(self, name):
        """Allocate and return a new bgpaas port owned by `name`."""
        return self._bgpaas_port_allocator.alloc(name)
    def free_bgpaas_port(self, port):
        """Release the bgpaas `port` back to the allocator."""
        self._bgpaas_port_allocator.delete(port)
| |
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
    ``:context: close-figs`` keeps the context but closes previous figures
    before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
    Base directory to which ``plot::`` file names are relative.
    (If None or empty, file names are relative to the
    directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
# reuse the docutils image directive's align-option validator
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'; the result is an integer (major, minor) tuple
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
                        for x in sphinx_version[:2]])
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        # Render a jinja2 template string with the given keyword context.
        return jinja2.Template(template).render(**kw)
except ImportError:
    import jinja
    def format_template(template, **kw):
        # Legacy Jinja 1.x fallback: from_string renders in one call.
        return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
    # Selecting the Agg backend warns (UserWarning) if pyplot was already
    # imported with another backend; escalate that warning to an error so
    # we can fall back to switch_backend instead.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error", UserWarning)
        matplotlib.use('Agg')
except UserWarning:
    import matplotlib.pyplot as plt
    plt.switch_backend("Agg")
else:
    import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
# Docutils "function-style" directive entry point for ``.. plot::``; all of
# the real work happens in run() below.
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    return run(arguments, content, options, state_machine, state, lineno)
# Expose the module docstring (the directive's usage notes) as the
# directive's own documentation.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
    """Validate the ``:format:`` option ('python' or 'doctest')."""
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    """Validate the ``:align:`` option against the allowed alignments."""
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    for name, explicit in six.iteritems(document.nametypes):
        if not explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    sectname = name
                    # Prefer the figure's caption text as the label title.
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break

                    # Move the ids/names from the wrapper node onto the
                    # figure itself so cross-references resolve there.
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    """Sphinx extension entry point: register the plot directive and config."""
    # Stash the app/config/confdir on the function object so module-level
    # helpers (run_code, run) can reach them without a global.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # Conversion/validation functions for the directive options.
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': _option_context,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }
    # (0, 2, False): zero required / up to two optional arguments, no
    # whitespace allowed in the final argument.
    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_html_show_source_link', True, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)
    app.connect(str('doctree-read'), mark_plot_labels)
    metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
    return metadata
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
    """Return True if *text* looks like doctest input, not plain Python."""
    try:
        # Anything that compiles as-is is treated as plain Python.
        compile(text, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        return False
    # Otherwise look for a doctest prompt at the start of any line.
    return re.search(r'^\s*>>>', text, re.M) is not None
def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.
    """
    if not contains_doctest(text):
        return text

    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    out = []
    for line in text.split("\n"):
        m = prompt.match(line)
        if m:
            # Keep only the statement after the doctest prompt.
            out.append(m.group(2) + "\n")
        elif line.strip():
            # Expected-output lines become comments, preserving one output
            # line per input line.
            out.append("# " + line.strip() + "\n")
        else:
            out.append("\n")
    return "".join(out)
def split_code_at_show(text):
    """
    Split code at plt.show()
    """
    is_doctest = contains_doctest(text)
    # The exact line that terminates a piece depends on doctest vs. code.
    marker = '>>> plt.show()' if is_doctest else 'plt.show()'

    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            # Close this piece, keeping the show() call inside it.
            parts.append("\n".join(current))
            current = []
    leftover = "\n".join(current)
    if leftover.strip():
        parts.append(leftover)
    return parts
def remove_coding(text):
    """
    Remove the coding comment, which six.exec_ doesn't like.
    """
    # Raw string: '\s' and '\*' are invalid escape sequences in a plain
    # string literal and trigger Deprecation/SyntaxWarnings on modern Python.
    sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
    return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{% for option in options -%}
{{ option }}
{% endfor %}
{{ caption }}
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# Shared execution namespace for all plot directives that use the
# :context: option, so later plots can build on earlier state.
plot_context = dict()
class ImageFile(object):
    """One output image (path without suffix) plus the formats written."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        # Filled in by callers as files are generated, e.g. ['png', 'pdf'].
        self.formats = []

    def filename(self, format):
        """Return the full path for this image with the given suffix."""
        return os.path.join(self.dirname,
                            "{0}.{1}".format(self.basename, format))

    def filenames(self):
        """Return the full paths for every recorded format."""
        return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.
    """
    if not os.path.exists(derived):
        return True
    if not os.path.exists(original):
        # No original to compare against: treat derived as current.
        return False
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
class PlotError(RuntimeError):
    """Raised when executing the plot source or saving a figure fails."""
    pass
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.

    The code is executed in namespace *ns* (a fresh dict if None), which
    is returned.  The working directory, sys.path, sys.argv and sys.stdout
    are all restored on exit, even on failure.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            # BUGFIX: trailing space added so the message doesn't read
            # "option inSphinx configuration file".
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Seed the namespace with either the configured pre-code or
                # the conventional numpy/pyplot imports.
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                              "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        # Always restore the process-global state we clobbered above.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
def clear_state(plot_rcparams, close=True):
    """Reset matplotlib rcParams to defaults, then apply *plot_rcparams*.

    If *close* is True, close all open figures first.
    """
    if close:
        plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
def get_plot_formats(config):
    """Return the list of (suffix, dpi) pairs from ``config.plot_formats``."""
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    plot_formats = config.plot_formats
    if isinstance(plot_formats, six.string_types):
        # Sphinx < 1.3 hands the option over as one comma-separated string;
        # split it to mimic the list that Sphinx 1.3+ always returns.
        plot_formats = plot_formats.split(',')
    formats = []
    for fmt in plot_formats:
        if isinstance(fmt, six.string_types):
            if ':' in fmt:
                suffix, dpi = fmt.split(':')
                formats.append((str(suffix), int(dpi)))
            else:
                # Bare suffix: fall back to a sensible default DPI.
                formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt) == 2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    return formats
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config, context_reset=False,
                   close_figs=False):
    """
    Run a pyplot script and save the images in *output_dir*.

    Save the images under *output_dir* with file names derived from
    *output_base*.  Returns a list of (code_piece, [ImageFile, ...])
    tuples.  Figures are only re-rendered when existing output files are
    out of date with respect to *code_path*; raises PlotError on failure.
    """
    formats = get_plot_formats(config)

    # -- Try to determine if all images already exist

    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        # Probe numbered output files until one is missing/stale.
        for j in xrange(1000):
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                # j > 0 means at least one figure was found for this piece;
                # treat the set as complete and stop probing higher indices.
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # We didn't find the files, so build them
    results = []
    if context:
        # Run all pieces in the shared :context: namespace.
        ns = plot_context
    else:
        ns = {}

    if context_reset:
        clear_state(config.plot_rcparams)
        plot_context.clear()

    close_figs = not context or close_figs

    for i, code_piece in enumerate(code_pieces):

        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams, close_figs)
        elif close_figs:
            plt.close('all')

        run_code(code_piece, code_path, ns, function_name)

        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # Name files so single-figure/single-piece output has no suffix.
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception as err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        results.append((code_piece, images))

    if not context or config.plot_apply_rcparams:
        clear_state(config.plot_rcparams, close=not context)

    return results
def run(arguments, content, options, state_machine, state, lineno):
    """
    Build the reST output for one ``plot`` directive invocation.

    Renders the figures (via render_figures), inserts the generated
    restructured text into *state_machine*, copies the images and source
    script into the builder's output directory, and returns a list of
    docutils system messages (empty on success).
    """
    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    formats = get_plot_formats(config)
    default_fmt = formats[0][0]

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        # Plot source comes from an external file given as the argument.
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # Plot source is inline in the directive body.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        # Number inline plots per document so output names are unique.
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # created on demand; may already exist

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    try:
        build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        # on Windows, relpath raises ValueError when path and start are on
        # different mounts/drives
        build_dir_link = build_dir
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        # Report the failure as a docutils warning and emit the exception
        # template instead of the figures.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join(' ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += [' %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            default_fmt=default_fmt,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and len(images),
            caption=caption)

        total_lines.extend(result.split("\n"))
        # extend() over the one-character string appends a single '\n'
        # element, i.e. one blank separator line between pieces.
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel deserialization."""
import os
import re
import types
from google.protobuf import message
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.protobuf import saved_metadata_pb2
from tensorflow.python.keras.protobuf import versions_pb2
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.keras.saving.saved_model import utils
from tensorflow.python.keras.saving.saved_model.serialized_attributes import CommonEndpoints
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils.generic_utils import LazyLoader
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import load as tf_load
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import compat
from tensorflow.python.util import nest
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
# Modules that would create import cycles with keras/saving; resolved
# lazily on first attribute access.
models_lib = LazyLoader("models_lib", globals(),
                        "tensorflow.python.keras.models")
base_layer = LazyLoader(
    "base_layer", globals(),
    "tensorflow.python.keras.engine.base_layer")
layers_module = LazyLoader(
    "layers_module", globals(),
    "tensorflow.python.keras.layers")
input_layer = LazyLoader(
    "input_layer", globals(),
    "tensorflow.python.keras.engine.input_layer")
functional_lib = LazyLoader(
    "functional_lib", globals(),
    "tensorflow.python.keras.engine.functional")
training_lib = LazyLoader(
    "training_lib", globals(),
    "tensorflow.python.keras.engine.training")
training_lib_v1 = LazyLoader(
    "training_lib_v1", globals(),
    "tensorflow.python.keras.engine.training_v1")
metrics = LazyLoader("metrics", globals(),
                     "tensorflow.python.keras.metrics")
recurrent = LazyLoader(
    "recurrent", globals(),
    "tensorflow.python.keras.layers.recurrent")
# pylint:enable=g-inconsistent-quotes

# Attributes attached to SavedModel Keras objects purely for serialization;
# they are deleted from revived layers once loading completes (see
# KerasObjectLoader.del_tracking).
PUBLIC_ATTRIBUTES = CommonEndpoints.all_functions.union(
    CommonEndpoints.all_checkpointable_objects)
PUBLIC_ATTRIBUTES.add(constants.KERAS_ATTR)
def load(path, compile=True, options=None):  # pylint: disable=redefined-builtin
  """Loads Keras objects from a SavedModel.

  Any Keras layer or model saved to the SavedModel will be loaded back
  as Keras objects. Other objects are loaded as regular trackable objects (same
  as `tf.saved_model.load`).

  Currently, Keras saving/loading only retains the Keras object's weights,
  losses, and call function.

  The loaded model can be re-compiled, but the original optimizer, compiled loss
  functions, and metrics are not retained. This is temporary, and `model.save`
  will soon be able to serialize compiled models.

  Args:
    path: Path to SavedModel.
    compile: If true, compile the model after loading it.
    options: Optional `tf.saved_model.LoadOptions` object that specifies
      options for loading from SavedModel.

  Returns:
    Object loaded from SavedModel.
  """
  # TODO(kathywu): Add saving/loading of optimizer, compiled losses and metrics.
  # TODO(kathywu): Add code to load from objects that contain all endpoints

  # Look for metadata file or parse the SavedModel
  metadata = saved_metadata_pb2.SavedMetadata()
  meta_graph_def = loader_impl.parse_saved_model(path).meta_graphs[0]
  object_graph_def = meta_graph_def.object_graph_def
  path_to_metadata_pb = os.path.join(path, constants.SAVED_METADATA_PATH)
  if gfile.Exists(path_to_metadata_pb):
    try:
      with gfile.GFile(path_to_metadata_pb, 'rb') as f:
        file_content = f.read()
      metadata.ParseFromString(file_content)
    except message.DecodeError as e:
      raise IOError('Cannot parse keras metadata {}: {}.'
                    .format(path_to_metadata_pb, str(e)))
  else:
    # No separate keras_metadata.pb file: fall back to the legacy metadata
    # embedded in the object graph proto.
    logging.warning('SavedModel saved prior to TF 2.5 detected when loading '
                    'Keras model. Please ensure that you are saving the model '
                    'with model.save() or tf.keras.models.save_model(), *NOT* '
                    'tf.saved_model.save(). To confirm, there should be a file '
                    'named "keras_metadata.pb" in the SavedModel directory.')
    _read_legacy_metadata(object_graph_def, metadata)

  if not metadata.nodes:
    # When there are no Keras objects, return the results from the core loader
    return tf_load.load(path, options=options)

  # Recreate layers and metrics using the info stored in the metadata.
  keras_loader = KerasObjectLoader(metadata, object_graph_def)
  keras_loader.load_layers(compile=compile)

  # Generate a dictionary of all loaded nodes.
  nodes_to_load = {'root': None}
  for node_id, loaded_node in keras_loader.loaded_nodes.items():
    nodes_to_load[keras_loader.get_path(node_id)] = loaded_node
  loaded = tf_load.load_partial(path, nodes_to_load, options=options)

  # Finalize the loaded layers and remove the extra tracked dependencies.
  keras_loader.finalize_objects()
  keras_loader.del_tracking()

  model = loaded['root']

  # pylint: disable=protected-access
  if isinstance(model, training_lib.Model) and compile:
    # TODO(kathywu): Use compiled objects from SavedModel, instead of
    # creating new objects from the training config.
    training_config = model._serialized_attributes['metadata'].get(
        'training_config', None)
    if training_config is not None:
      model.compile(**saving_utils.compile_args_from_training_config(
          training_config), from_serialized=True)
      saving_utils.try_build_compiled_arguments(model)
      if isinstance(model.optimizer, optimizer_v2.OptimizerV2):
        if (model.optimizer.get_slot_names()):
          logging.warning('Your optimizer uses slots. '
                          'Slots cannot be restored from saved_model, '
                          'as a result, your model is starting with '
                          'a new initialized optimizer.')
    else:
      logging.warning('No training configuration found in save file, so the '
                      'model was *not* compiled. Compile it manually.')
  # pylint: enable=protected-access

  # Force variables and resources to initialize.
  if not context.executing_eagerly():
    sess = backend.get_session()  # Variables are initialized by this call.
    sess.run(ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS))

  return model
def _read_legacy_metadata(object_graph_def, metadata):
  """Builds a KerasMetadata proto from the SavedModel ObjectGraphDef.

  Copies the Keras metadata embedded in *object_graph_def* into *metadata*
  (a SavedMetadata proto), adding one node entry per Keras object.
  """
  # Older SavedModels store the metadata directly in the proto instead of the
  # separate pb file.
  node_paths = _generate_object_paths(object_graph_def)
  for node_id, proto in enumerate(object_graph_def.nodes):
    if (proto.WhichOneof('kind') == 'user_object' and
        proto.user_object.identifier in constants.KERAS_OBJECT_IDENTIFIERS):
      if not proto.user_object.metadata:
        raise ValueError('Unable to create a Keras model from this SavedModel. '
                         'This SavedModel was created with '
                         '`tf.saved_model.save`, and lacks the Keras metadata.'
                         'Please save your Keras model by calling `model.save`'
                         'or `tf.keras.models.save_model`.')
      metadata.nodes.add(
          node_id=node_id,
          node_path=node_paths[node_id],
          version=versions_pb2.VersionDef(
              producer=1, min_consumer=1, bad_consumers=[]),
          identifier=proto.user_object.identifier,
          metadata=proto.user_object.metadata)
def _generate_object_paths(object_graph_def):
"""Traverses through an ObjectGraphDef and builds a map of all node paths."""
paths = {0: 'root'}
nodes_to_visit = [0]
while nodes_to_visit:
current_node = nodes_to_visit.pop()
current_path = paths[current_node]
for reference in object_graph_def.nodes[current_node].children:
if reference.node_id in paths:
continue
paths[reference.node_id] = '{}.{}'.format(current_path,
reference.local_name)
nodes_to_visit.append(reference.node_id)
return paths
def _is_graph_network(layer):
  """Determines whether the layer is a graph network.

  Returns True only for Functional instances that report
  `_is_graph_network` (or are Sequential); revived subclassed networks and
  plain layers yield False.
  """
  # pylint: disable=protected-access
  if isinstance(layer, RevivedNetwork):
    return False
  elif isinstance(layer, functional_lib.Functional):
    return (layer._is_graph_network or
            isinstance(layer, models_lib.Sequential))
  return False
class KerasObjectLoader(object):
"""Loader that recreates Keras objects (e.g. layers, models).
Layers and models are revived from either the config or SavedModel following
these rules:
1. If object is a graph network (i.e. Sequential or Functional) then it will
be initialized using the structure from the config only after the children
layers have been created. Graph networks must be initialized with inputs
and outputs, so all child layers must be created beforehand.
2. If object's config exists and the class can be found, then revive from
config.
3. Object may have already been created if its parent was revived from config.
In this case, do nothing.
4. If nothing of the above applies, compose the various artifacts from the
SavedModel to create a subclassed layer or model. At this time, custom
metrics are not supported.
"""
  def __init__(self, metadata, object_graph_def):
    """Builds lookup tables for the Keras nodes described in *metadata*.

    Args:
      metadata: `SavedMetadata` proto listing the Keras objects.
      object_graph_def: `ObjectGraphDef` proto for the whole SavedModel.
    """
    # node_id -> metadata proto for that node.
    self._metadata = {x.node_id: x for x in metadata.nodes}
    self._proto = object_graph_def

    # node_id -> dotted path (e.g. 'root.layer-0') within the SavedModel.
    self._node_paths = {node_data.node_id: node_data.node_path
                        for node_data in metadata.nodes}
    self.loaded_nodes = {}  # Maps node id -> (loaded object, setter)

    # Store all node ids that have already been traversed when tracking nodes
    # that were recreated from the config.
    self._traversed_nodes_from_config = set()

    # Maps model id -> (blank model obj, list of child layer or their node ids)
    # This tracks all layers in functional and sequential models. These models
    # are only reconstructed after all of their child layers have been created.
    self.model_layer_dependencies = {}
    self._models_to_reconstruct = []
  def del_tracking(self):
    """Removes tracked references that are only used when loading the model."""
    # Now that the node object has been fully loaded, and the checkpoint has
    # been restored, the object no longer needs to track objects added from
    # SerializedAttributes. (Note that saving a training checkpoint still
    # functions correctly, because layers and variables are tracked separately
    # by the Layer object.)
    # TODO(kathywu): Instead of outright deleting these nodes (which would
    # make restoring from a different checkpoint tricky), mark them as extra
    # dependencies that are OK to overwrite.
    for node in self.loaded_nodes.values():
      node = node[0]  # (obj, setter) tuple; only the object is needed here.
      if not isinstance(node, base_layer.Layer):
        # Loaded nodes can contain other trackable objects created when
        # loading layers from the config, such as variables.
        continue
      for name in PUBLIC_ATTRIBUTES:
        node._delete_tracking(name)  # pylint: disable=protected-access

      if isinstance(node, functional_lib.Functional):
        # Delete the temporary layer dependencies, which were used to restore
        # the checkpointed values. When the model is live, the user can delete
        # or add layers to the model at any time, so these layer dependencies
        # may be obsolete.
        dependencies = list(node._self_unconditional_dependency_names)  # pylint: disable=protected-access
        for name in dependencies:
          # NOTE(review): '[\d+]' matches a single digit or a literal '+';
          # '\d+' (one or more digits) was probably intended — confirm.
          if re.match(r'^layer(_with_weights)?-[\d+]', name) is not None:
            node._delete_tracking(name)  # pylint: disable=protected-access
  def _add_children_recreated_from_config(self, obj, proto, node_id):
    """Recursively records objects recreated from config.

    Walks *proto*'s children (plus any layer metrics), registers each child
    in `self.loaded_nodes` with an appropriate setter, and recurses so the
    whole config-revived subtree is marked as loaded.
    """
    # pylint: disable=protected-access
    if node_id in self._traversed_nodes_from_config:
      return

    parent_path = self._node_paths[node_id]
    self._traversed_nodes_from_config.add(node_id)
    obj._maybe_initialize_trackable()
    if isinstance(obj, base_layer.Layer) and not obj.built:
      metadata = json_utils.decode(self._metadata[node_id].metadata)
      self._try_build_layer(obj, node_id, metadata.get('build_input_shape'))

    # Create list of all possible children
    children = []
    # Look for direct children
    for reference in proto.children:
      obj_child = obj._lookup_dependency(reference.local_name)
      children.append((obj_child, reference.node_id, reference.local_name))

    # Add metrics that may have been added to the layer._metrics list.
    # This is stored in the SavedModel as layer.keras_api.layer_metrics in
    # SavedModels created after Tf 2.2.
    metric_list_node_id = self._search_for_child_node(
        node_id, [constants.KERAS_ATTR, 'layer_metrics'])
    if metric_list_node_id is not None and hasattr(obj, '_metrics'):
      obj_metrics = {m.name: m for m in obj._metrics}
      for reference in self._proto.nodes[metric_list_node_id].children:
        metric = obj_metrics.get(reference.local_name)
        if metric is not None:
          metric_path = '{}.layer_metrics.{}'.format(constants.KERAS_ATTR,
                                                     reference.local_name)
          children.append((metric, reference.node_id, metric_path))

    for (obj_child, child_id, child_name) in children:
      child_proto = self._proto.nodes[child_id]

      if not isinstance(obj_child, trackable.Trackable):
        continue
      # Pick the setter used later when attributes are restored onto the
      # child: revived-type setter, Keras revive setter, or plain setattr.
      if (child_proto.user_object.identifier in
          revived_types.registered_identifiers()):
        setter = revived_types.get_setter(child_proto.user_object)
      elif obj_child._object_identifier in constants.KERAS_OBJECT_IDENTIFIERS:
        setter = _revive_setter
      else:
        setter = setattr
        # pylint: enable=protected-access

      if child_id in self.loaded_nodes:
        if self.loaded_nodes[child_id][0] is not obj_child:
          # This means that the same trackable object is referenced by two
          # different objects that were recreated from the config.
          logging.warning(
              'Looks like there is an object (perhaps variable or '
              'layer) that is shared between different layers/models. '
              'This may cause issues when restoring the variable '
              'values. Object: {}'.format(obj_child))
        continue

      # Overwrite variable names with the ones saved in the SavedModel.
      if (child_proto.WhichOneof('kind') == 'variable' and
          child_proto.variable.name):
        obj_child._handle_name = child_proto.variable.name + ':0'  # pylint: disable=protected-access

      if isinstance(obj_child, data_structures.TrackableDataStructure):
        # Data structures are revived in place; use a no-op setter.
        setter = lambda *args: None

      child_path = '{}.{}'.format(parent_path, child_name)
      self._node_paths[child_id] = child_path
      self._add_children_recreated_from_config(
          obj_child, child_proto, child_id)
      self.loaded_nodes[child_id] = obj_child, setter
  def load_layers(self, compile=True):  # pylint: disable=redefined-builtin
    """Load all layer nodes from the metadata.

    Args:
      compile: Whether the model will be compiled after loading. When False,
        failures to deserialize custom metrics are logged as warnings instead
        of raised, since the metric objects are only required at compile time.
    """
    # Load metrics after models and layers, since it's likely that models
    # and layers will create the metric when initialized (this avoids wasting
    # time by creating objects multiple times).
    metric_list = []
    for node_metadata in self._metadata.values():
      if node_metadata.identifier == constants.METRIC_IDENTIFIER:
        metric_list.append(node_metadata)
        continue

      self.loaded_nodes[node_metadata.node_id] = self._load_layer(
          node_metadata.node_id, node_metadata.identifier,
          node_metadata.metadata)

    # Second pass: metrics only.
    for node_metadata in metric_list:
      try:
        self.loaded_nodes[node_metadata.node_id] = self._load_layer(
            node_metadata.node_id, node_metadata.identifier,
            node_metadata.metadata)
      except ValueError:
        # Metrics are only needed when the model is compiled later. We ignore
        # errors when trying to load custom metrics when `compile=False` until
        # custom metrics are serialized properly (b/135550038).
        if compile:
          raise
        logging.warning('Unable to restore custom metric. Please ensure that '
                        'the layer implements `get_config` and `from_config` '
                        'when saving. In addition, please use the '
                        '`custom_objects` arg when calling `load_model()`.')
  def _load_layer(self, node_id, identifier, metadata):
    """Load a single layer from a SavedUserObject proto.

    Args:
      node_id: Node id of the layer in the object graph proto.
      identifier: Keras object identifier string (e.g. a layer/model/metric
        identifier from `constants`).
      metadata: JSON-encoded metadata string saved with the object.

    Returns:
      Tuple of (revived object, setter function used to restore attributes).
    """
    metadata = json_utils.decode(metadata)

    # If node was already created
    if node_id in self.loaded_nodes:
      node, setter = self.loaded_nodes[node_id]

      # Revive setter requires the object to have a `_serialized_attributes`
      # property. Add it here.
      _maybe_add_serialized_attributes(node, metadata)

      config = metadata.get('config')
      if _is_graph_network(node) and generic_utils.validate_config(config):
        # Record the model's layer dependencies so network reconstruction can
        # be deferred until every layer is available.
        child_nodes = self._get_child_layer_node_ids(node_id)
        self.model_layer_dependencies[node_id] = (node, child_nodes)
        if not child_nodes:
          self._models_to_reconstruct.append(node_id)
      return node, setter

    # Detect whether this object can be revived from the config. If not, then
    # revive from the SavedModel instead.
    obj, setter = self._revive_from_config(identifier, metadata, node_id)
    if obj is None:
      obj, setter = revive_custom_object(identifier, metadata)

    # Add an attribute that stores the extra functions/objects saved in the
    # SavedModel. Most of these functions/objects are ignored, but some are
    # used later in the loading process (e.g. the list of regularization
    # losses, or the training config of compiled models).
    _maybe_add_serialized_attributes(obj, metadata)
    return obj, setter
  def _revive_from_config(self, identifier, metadata, node_id):
    """Revives a layer/model from config, or returns None.

    Returns:
      `(obj, setter)` when the object could be recreated from its config,
      otherwise `(None, None)` so the caller can fall back to reviving from
      the SavedModel functions instead.
    """
    if identifier == constants.METRIC_IDENTIFIER:
      obj = self._revive_metric_from_config(metadata)
    else:
      # Graph networks (Functional/Sequential) take precedence; otherwise try
      # deserializing a layer or custom model from the config.
      obj = (
          self._revive_graph_network(identifier, metadata, node_id) or
          self._revive_layer_or_model_from_config(metadata, node_id))

    if obj is None:
      return None, None

    # Use a guarded setter so attributes already created from the config are
    # not overwritten during checkpoint restoration.
    setter = self._config_node_setter(_revive_setter)
    self._add_children_recreated_from_config(
        obj, self._proto.nodes[node_id], node_id)
    return obj, setter
  def _revive_graph_network(self, identifier, metadata, node_id):
    """Revives a graph network from config.

    Returns None when the metadata does not describe a Functional/Sequential
    model, or when a custom class is registered under the saved class name
    (the registered class takes precedence over graph-network revival).
    """
    # Determine whether the metadata contains information for reviving a
    # functional or Sequential model.
    config = metadata.get('config')
    if not generic_utils.validate_config(config):
      return None

    class_name = compat.as_str(metadata['class_name'])
    if generic_utils.get_registered_object(class_name) is not None:
      return None
    model_is_functional_or_sequential = (
        metadata.get('is_graph_network', False) or
        class_name == 'Sequential' or
        class_name == 'Functional')
    if not model_is_functional_or_sequential:
      return None

    # Revive functional and sequential models as blank model objects for now (
    # must be initialized to enable setattr tracking and attribute caching).
    # Reconstruction of the network is deferred until all of the model's layers
    # have been revived.
    if class_name == 'Sequential':
      model = models_lib.Sequential(name=config['name'])
    # The model is a custom Sequential model.
    elif identifier == constants.SEQUENTIAL_IDENTIFIER:
      # Uses the custom class name, since the config does not have one.
      model = models_lib.Sequential(name=class_name)
    else:
      model = models_lib.Functional(
          inputs=[], outputs=[], name=config['name'])

    # Record this model and its layers. This will later be used to reconstruct
    # the model.
    layers = self._get_child_layer_node_ids(node_id)
    self.model_layer_dependencies[node_id] = (model, layers)
    if not layers:
      # Nothing to wait on -- this model can be reconstructed immediately.
      self._models_to_reconstruct.append(node_id)
    return model
  def _revive_layer_or_model_from_config(self, metadata, node_id):
    """Revives a layer/custom model from config; returns None if infeasible."""
    # Check that the following requirements are met for reviving from config:
    #    1. Object can be deserialized from config.
    #    2. If the object needs to be built, then the build input shape can be
    #       found.
    class_name = metadata.get('class_name')
    config = metadata.get('config')
    shared_object_id = metadata.get('shared_object_id')
    must_restore_from_config = metadata.get('must_restore_from_config')
    if not generic_utils.validate_config(config):
      return None
    try:
      obj = layers_module.deserialize(
          generic_utils.serialize_keras_class_and_config(
              class_name, config, shared_object_id=shared_object_id))
    except ValueError:
      # Deserialization failed (e.g. unknown custom class). Only fatal when
      # the object was saved with `must_restore_from_config`.
      if must_restore_from_config:
        raise RuntimeError(
            'Unable to restore a layer of class {cls}. Layers of '
            'class {cls} require that the class be provided to '
            'the model loading code, either by registering the '
            'class using @keras.utils.register_keras_serializable '
            'on the class def and including that file in your '
            'program, or by passing the class in a '
            'keras.utils.CustomObjectScope that wraps this load '
            'call.'.format(cls=class_name))
      else:
        return None

    # Use the dtype, name, and trainable status. Often times these are not
    # specified in custom configs, so retrieve their values from the metadata.
    # pylint: disable=protected-access
    obj._name = metadata['name']
    if metadata.get('trainable') is not None:
      obj.trainable = metadata['trainable']
    if metadata.get('dtype') is not None:
      obj._set_dtype_policy(metadata['dtype'])
    if metadata.get('stateful') is not None:
      obj.stateful = metadata['stateful']
    # Restore model save spec for subclassed models. (layers do not store a
    # SaveSpec)
    if isinstance(obj, training_lib.Model):
      save_spec = metadata.get('save_spec')
      if save_spec is not None:
        obj._set_save_spec(save_spec)
    # pylint: enable=protected-access

    build_input_shape = metadata.get('build_input_shape')
    built = self._try_build_layer(obj, node_id, build_input_shape)

    if not built:
      # If the layer cannot be built, revive a custom layer instead.
      return None
    return obj
def _revive_metric_from_config(self, metadata):
"""Revives a metric object using the config saved in the metadata."""
class_name = compat.as_str(metadata['class_name'])
config = metadata.get('config')
if not generic_utils.validate_config(config):
return None
try:
obj = metrics.deserialize(
generic_utils.serialize_keras_class_and_config(class_name, config))
except ValueError:
return None
build_input_shape = metadata.get('build_input_shape')
if build_input_shape is not None and hasattr(obj, '_build'):
obj._build(build_input_shape) # pylint: disable=protected-access
return obj
  def _try_build_layer(self, obj, node_id, build_input_shape):
    """Attempts to build the layer.

    Returns:
      True if the layer is already built, uses the default (no-op) build, or
      was successfully built from a known/inferred input shape; False when no
      input shape could be determined.
    """
    if obj.built or hasattr(obj.build, '_is_default'):
      # Nothing to do: already built, or build() is the default no-op.
      obj.built = True
      return True

    if build_input_shape is None:
      # Fall back to the input shape recorded in the saved call function.
      build_input_shape = self._infer_inputs(node_id, convert_to_shapes=True)

    if build_input_shape is not None:
      obj.build(build_input_shape)
      # NOTE(review): the base-class build is also invoked explicitly --
      # presumably to run base Layer build bookkeeping even when the subclass
      # build does not call super(); confirm before changing.
      base_layer.Layer.build(obj, build_input_shape)
      return True

    return False
def _load_edges(self):
"""Add edges for all nodes that are not waiting on initialization."""
for node_id, proto in enumerate(self._proto.nodes):
if node_id not in self.model_layer_dependencies:
self._add_object_graph_edges(proto, node_id)
def get_path(self, node_id):
return self._node_paths[node_id]
  def finalize_objects(self):
    """Finish setting up Keras objects.

    This function is executed after all objects and functions have been created.
    Call functions and losses are attached to each layer, and once all layers
    have been fully set up, graph networks are initialized.

    Subclassed models that are revived from the SavedModel are treated like
    layers, and have their call/loss functions attached here.
    """
    # Finish setting up layers and subclassed models. This step attaches call
    # functions and losses to each object, and sets model inputs/outputs.
    layers_revived_from_config = []
    layers_revived_from_saved_model = []
    for node_id, (node, _) in self.loaded_nodes.items():
      if (not isinstance(node, base_layer.Layer) or
          # Don't finalize models until all layers have finished loading.
          node_id in self.model_layer_dependencies):
        continue

      # Mark this layer as available to any model waiting on it.
      self._unblock_model_reconstruction(node_id, node)

      # InputLayers and Metrics need no call/loss finalization.
      if isinstance(node, input_layer.InputLayer):
        continue
      elif isinstance(node, metrics.Metric):
        continue

      if isinstance(node, (RevivedLayer, RevivedInputLayer)):
        layers_revived_from_saved_model.append(node)
      else:
        layers_revived_from_config.append(node)

    _finalize_saved_model_layers(layers_revived_from_saved_model)
    _finalize_config_layers(layers_revived_from_config)

    # Initialize graph networks, now that layer dependencies have been resolved.
    self._reconstruct_all_models()
def _unblock_model_reconstruction(self, layer_id, layer):
"""Removes layer from blocking model reconstruction."""
for model_id, v in self.model_layer_dependencies.items():
_, layers = v
if layer_id not in layers:
continue
layers[layers.index(layer_id)] = layer
if all(isinstance(x, base_layer.Layer) for x in layers):
self._models_to_reconstruct.append(model_id)
  def _reconstruct_all_models(self):
    """Reconstructs the network structure of all models.

    Raises:
      ValueError: If any model recorded in `model_layer_dependencies` was
        never queued for reconstruction (i.e. some of its layers never became
        available).
    """
    all_initialized_models = set()
    # The queue may grow while processing: reconstructing a model can unblock
    # models that depend on it.
    while self._models_to_reconstruct:
      model_id = self._models_to_reconstruct.pop(0)
      all_initialized_models.add(model_id)
      model, layers = self.model_layer_dependencies[model_id]
      self._reconstruct_model(model_id, model, layers)
      _finalize_config_layers([model])

    if all_initialized_models != set(self.model_layer_dependencies.keys()):
      # This should not happen.
      uninitialized_model_ids = (
          set(self.model_layer_dependencies.keys()) - all_initialized_models)
      uninitialized_model_names = [
          self.model_layer_dependencies[model_id][0].name
          for model_id in uninitialized_model_ids]
      raise ValueError('Error when loading from SavedModel -- the following '
                       'models could not be initialized: {}'
                       .format(uninitialized_model_names))
  def _reconstruct_model(self, model_id, model, layers):
    """Reconstructs the network structure.

    Args:
      model_id: Node id of the model in the object graph proto.
      model: Blank model object created during revival.
      layers: List of already-revived layers belonging to the model.
    """
    config = json_utils.decode(self._metadata[model_id].metadata)['config']

    # Set up model inputs
    if model.inputs:
      # Inputs may already be created if the model is instantiated in another
      # object's __init__.
      pass
    elif isinstance(model, models_lib.Sequential):
      if not layers or not isinstance(layers[0], input_layer.InputLayer):
        # Recreate the leading InputLayer, which is not saved as a child node;
        # prefer the explicit InputLayer config, else a batch_input_shape.
        if config['layers'][0]['class_name'] == 'InputLayer':
          layers.insert(0, input_layer.InputLayer.from_config(
              config['layers'][0]['config']))
        elif 'batch_input_shape' in config['layers'][0]['config']:
          batch_input_shape = config['layers'][0]['config']['batch_input_shape']
          layers.insert(0, input_layer.InputLayer(
              input_shape=batch_input_shape[1:],
              batch_size=batch_input_shape[0],
              dtype=layers[0].dtype,
              name=layers[0].name + '_input'))
      model.__init__(layers, name=config['name'])
      if not model.inputs:
        # No input layer could be created; infer inputs from the first layer's
        # saved call function instead.
        first_layer = self._get_child_layer_node_ids(model_id)[0]
        input_specs = self._infer_inputs(first_layer)
        input_shapes = self._infer_inputs(first_layer, convert_to_shapes=True)
        model._set_inputs(input_specs)  # pylint: disable=protected-access
        if not model.built and not isinstance(input_specs, dict):
          model.build(input_shapes)
    else:  # Reconstruct functional model
      (inputs, outputs,
       created_layers) = functional_lib.reconstruct_from_config(
           config, created_layers={layer.name: layer for layer in layers})
      model.__init__(inputs, outputs, name=config['name'])
      functional_lib.connect_ancillary_layers(model, created_layers)

    # Set model dtype.
    _set_network_attributes_from_metadata(model)

    # Unblock models that are dependent on this model.
    self._unblock_model_reconstruction(model_id, model)
def _get_child_layer_node_ids(self, node_id):
"""Returns the node ids of each layer in a Sequential/Functional model."""
# Sequential and Functional track layers with names following the format
# "layer-N". Use this to generate the list of layers.
num_layers = 0
child_layers = {}
pattern = re.compile('layer-(\\d+)')
for child in self._proto.nodes[node_id].children:
m = pattern.match(child.local_name)
if m is None:
continue
layer_n = int(m.group(1))
num_layers = max(layer_n + 1, num_layers)
child_layers[layer_n] = child.node_id
ordered = []
for n in range(num_layers):
child = child_layers.get(n)
if child is None:
break
ordered.append(child)
return ordered
def _search_for_child_node(self, parent_id, path_to_child):
"""Returns node id of child node.
A helper method for traversing the object graph proto.
As an example, say that the object graph proto in the SavedModel contains an
object with the following child and grandchild attributes:
`parent.child_a.child_b`
This method can be used to retrieve the node id of `child_b` using the
parent's node id by calling:
`_search_for_child_node(parent_id, ['child_a', 'child_b'])`.
Args:
parent_id: node id of parent node
path_to_child: list of children names.
Returns:
node_id of child, or None if child isn't found.
"""
if not path_to_child:
return parent_id
for child in self._proto.nodes[parent_id].children:
if child.local_name == path_to_child[0]:
return self._search_for_child_node(child.node_id, path_to_child[1:])
return None
  def _infer_inputs(self, layer_node_id, convert_to_shapes=False):
    """Infers input shape of layer from SavedModel functions.

    Args:
      layer_node_id: Node id of the layer in the object graph proto.
      convert_to_shapes: If True, return the shapes of the input specs instead
        of the specs themselves.

    Returns:
      Structure of input TensorSpecs (or shapes), or None when the layer has
      no saved concrete call function to infer from.
    """
    call_fn_id = self._search_for_child_node(
        layer_node_id, ['call_and_return_all_conditional_losses'])
    if call_fn_id is None:
      return None

    concrete_functions = (
        self._proto.nodes[call_fn_id].function.concrete_functions)
    if not concrete_functions:
      return None
    call_fn_name = concrete_functions[0]
    call_fn_proto = self._proto.concrete_functions[call_fn_name]
    structured_input_signature = nested_structure_coder.decode_proto(
        call_fn_proto.canonicalized_input_signature)
    # Inputs are the first positional argument of the call function.
    inputs = structured_input_signature[0][0]
    if convert_to_shapes:
      return nest.map_structure(lambda spec: spec.shape, inputs)
    else:
      return inputs
def _config_node_setter(self, setter):
"""Creates edges for nodes that are recreated from config."""
def setattr_wrapper(obj, name, value):
# Avoid overwriting attributes of objects recreated from the config.
if obj._lookup_dependency(name) is None: # pylint: disable=protected-access
setter(obj, name, value)
return setattr_wrapper
def _finalize_saved_model_layers(layers):
  """Runs the final steps of loading Keras Layers from SavedModel.

  Attaches call functions (step 1), model inputs/outputs (step 2),
  unconditional/activation losses (step 3), and metrics (step 4) to each
  layer revived from the SavedModel (rather than from the config).
  """
  # pylint: disable=protected-access
  # 1. Set up call functions for all layers initialized from the SavedModel (
  # and not the config)
  for layer in layers:
    layer.built = True
    layer_call = getattr(_get_keras_attr(layer),
                         'call_and_return_conditional_losses', None)
    if layer_call and layer_call.concrete_functions:
      layer.call = utils.use_wrapped_call(
          layer, layer_call, return_method=True)
      expects_training_arg = layer._serialized_attributes['metadata'][
          'expects_training_arg']
      if 'training' in layer_call.function_spec.arg_names:
        # This could change the value of `expects_training_arg` if this layer
        # doesn't expect a training arg, but has a child layer that does.
        expects_training_arg = True
      layer._init_call_fn_args(expects_training_arg)
    else:
      # No serialized call function: calling this layer raises a ValueError
      # explaining how to fix the model.
      layer.call = types.MethodType(
          _unable_to_call_layer_due_to_serialization_issue, layer)

  for layer in layers:
    # 2. Set model inputs and outputs.
    if isinstance(layer, RevivedNetwork):
      _set_network_attributes_from_metadata(layer)

      if hasattr(_get_keras_attr(layer), 'call_and_return_conditional_losses'):
        call_fn = _get_keras_attr(layer).call_and_return_conditional_losses
        if not call_fn.concrete_functions:
          continue
        if call_fn.input_signature is None:
          # No explicit signature saved; infer from the traced functions.
          inputs = infer_inputs_from_restored_call_function(call_fn)
        else:
          inputs = call_fn.input_signature[0]
        layer._set_inputs(inputs)  # pylint: disable=protected-access

    # 3. Add losses that aren't generated by the layer.call function.
    _restore_layer_unconditional_losses(layer)
    _restore_layer_activation_loss(layer)

    # 4. Restore metrics list
    _restore_layer_metrics(layer)
  # pylint: enable=protected-access
def _unable_to_call_layer_due_to_serialization_issue(
layer, *unused_args, **unused_kwargs):
"""Replaces the `layer.call` if the layer was not fully serialized.
Keras Model/Layer serialization is relatively relaxed because SavedModels
are not always loaded back as keras models. Thus, when there is an issue
tracing a non-signature function, a warning is logged instead of raising an
error. This results in a SavedModel where the model's call function is saved,
but the internal layer call functions are not.
When deserialized with `tf.keras.models.load_model`, the internal layers
which do not have serialized call functions should raise an error when called.
Args:
layer: Layer without the serialized call function.
Raises:
ValueError
"""
raise ValueError(
'Cannot call custom layer {} of type {}, because the call function was '
'not serialized to the SavedModel.'
'Please try one of the following methods to fix this issue:'
'\n\n(1) Implement `get_config` and `from_config` in the layer/model '
'class, and pass the object to the `custom_objects` argument when '
'loading the model. For more details, see: '
'https://www.tensorflow.org/guide/keras/save_and_serialize'
'\n\n(2) Ensure that the subclassed model or layer overwrites `call` '
'and not `__call__`. The input shape and dtype will be automatically '
'recorded when the object is called, and used when saving. To manually '
'specify the input shape/dtype, decorate the call function with '
'`@tf.function(input_signature=...)`.'.format(layer.name, type(layer)))
def _finalize_config_layers(layers):
  """Runs the final steps of loading Keras Layers from config.

  Restores unconditional losses (graph networks only), activation losses,
  metrics, and RNN states for each layer that was revived from its config.
  """
  for layer in layers:
    # It is assumed that layers define their unconditional losses after being
    # recreated from the config and built. The exceptions to this
    # are Functional and Sequential models, which only store conditional losses
    # (losses dependent on the inputs) in the config. Unconditional losses like
    # weight regularization must be revived from the SavedModel.
    if _is_graph_network(layer):
      _restore_layer_unconditional_losses(layer)

    # Some layers, like Dense, record their activation loss function in the
    # config. However, not all layers do this, so the activation loss may be
    # missing when restored from the config/hdf5.
    # TODO(kathywu): Investigate ways to improve the config to ensure consistent
    # loading behavior between HDF5 and SavedModel.
    _restore_layer_activation_loss(layer)

    # Restore metrics list.
    _restore_layer_metrics(layer)

    # Restore RNN layer states.
    if (isinstance(layer, recurrent.RNN) and
        layer.stateful and
        hasattr(_get_keras_attr(layer), 'states')):
      layer.states = getattr(_get_keras_attr(layer), 'states', None)
      # Track restored state variables in the backend so they are handled
      # like any other Keras-managed variables.
      for variable in nest.flatten(layer.states):
        backend.track_variable(variable)

    # Perform any layer defined finalization of the layer state.
    layer.finalize_state()
def _finalize_metric(metric):
  """Binds the restored `update_state`/`result` functions onto `metric`."""
  restored_api = metric.keras_api
  wrapped_update = metrics_utils.update_state_wrapper(restored_api.update_state)
  metric.update_state = types.MethodType(wrapped_update, metric)
  metric.result = restored_api.result
def _restore_layer_unconditional_losses(layer):
  """Re-adds the layer's saved unconditional (e.g. weight-reg) losses."""
  keras_attr = _get_keras_attr(layer)
  if hasattr(keras_attr, 'layer_regularization_losses'):
    saved_losses = getattr(keras_attr, 'layer_regularization_losses', [])
  else:
    # Some earlier SavedModels may not have layer_regularization_losses
    # serialized separately. Fall back to using the regularization_losses
    # list if it does not exist.
    saved_losses = layer._serialized_attributes.get(
        'regularization_losses', [])  # pylint: disable=protected-access
  for saved_loss in saved_losses:
    layer.add_loss(saved_loss)
def _restore_layer_activation_loss(layer):
  """Restore activation loss from SavedModel."""
  # Use wrapped activity regularizer function if the layer's activity
  # regularizer wasn't created during initialization.
  activity_regularizer = getattr(_get_keras_attr(layer),
                                 'activity_regularizer_fn', None)
  if activity_regularizer and not layer.activity_regularizer:
    try:
      layer.activity_regularizer = activity_regularizer
    except AttributeError:
      # This may happen if a layer wrapper is saved with an activity
      # regularizer. The wrapper object's activity regularizer is unsettable.
      pass
def revive_custom_object(identifier, metadata):
  """Revives object from SavedModel.

  Args:
    identifier: Keras object identifier string saved with the object.
    metadata: Decoded metadata dict for the object.

  Returns:
    Tuple of (revived object, setter function).

  Raises:
    ValueError: If the identifier does not map to a known revivable type.
  """
  if ops.executing_eagerly_outside_functions():
    model_class = training_lib.Model
  else:
    model_class = training_lib_v1.Model

  # Maps identifier -> (revived mixin class, Keras base class).
  revived_classes = {
      constants.INPUT_LAYER_IDENTIFIER: (
          RevivedInputLayer, input_layer.InputLayer),
      constants.LAYER_IDENTIFIER: (RevivedLayer, base_layer.Layer),
      constants.MODEL_IDENTIFIER: (RevivedNetwork, model_class),
      constants.NETWORK_IDENTIFIER: (RevivedNetwork, functional_lib.Functional),
      constants.SEQUENTIAL_IDENTIFIER: (RevivedNetwork, models_lib.Sequential),
  }
  parent_classes = revived_classes.get(identifier, None)

  if parent_classes is None:
    raise ValueError('Unable to restore custom object of type {} currently. '
                     'Please make sure that the layer implements `get_config` '
                     'and `from_config` when saving. In addition, please use '
                     'the `custom_objects` arg when calling `load_model()`.'
                     .format(identifier))

  # Build a dynamic subclass named after the saved class so reprs and error
  # messages show the original class name.
  revived_cls = type(
      compat.as_str(metadata['class_name']), parent_classes, {})
  return revived_cls._init_from_metadata(metadata)  # pylint: disable=protected-access
def _restore_layer_metrics(layer):
  """Appends SavedModel-restored metrics that the layer has not recreated."""
  saved_metrics = getattr(_get_keras_attr(layer), 'layer_metrics', {})
  existing_names = {m.name for m in layer._metrics}  # pylint: disable=protected-access
  for metric_name, restored_metric in saved_metrics.items():
    if metric_name in existing_names:
      # Metrics may be added during initialization/building of custom layers;
      # keep the layer-created instance in that case.
      continue
    layer._metrics.append(restored_metric)  # pylint: disable=protected-access
# TODO(kathywu): Centrally define keys and functions for both serialization and
# deserialization.
class RevivedLayer(object):
  """Keras layer loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived layer from metadata stored in the SavedModel proto.

    Returns:
      Tuple of (revived layer instance, `_revive_setter`).
    """
    init_args = dict(
        name=metadata['name'],
        trainable=metadata['trainable'])
    if metadata.get('dtype') is not None:
      init_args['dtype'] = metadata['dtype']
    if metadata.get('batch_input_shape') is not None:
      init_args['batch_input_shape'] = metadata['batch_input_shape']

    revived_obj = cls(**init_args)

    # Attributes restored below must not be auto-tracked as dependencies.
    with utils.no_automatic_dependency_tracking_scope(revived_obj):
      # pylint:disable=protected-access
      revived_obj._expects_training_arg = metadata['expects_training_arg']
      config = metadata.get('config')
      if generic_utils.validate_config(config):
        revived_obj._config = config
      if metadata.get('input_spec') is not None:
        revived_obj.input_spec = recursively_deserialize_keras_object(
            metadata['input_spec'],
            module_objects={'InputSpec': input_spec.InputSpec})
      if metadata.get('activity_regularizer') is not None:
        revived_obj.activity_regularizer = regularizers.deserialize(
            metadata['activity_regularizer'])
      if metadata.get('_is_feature_layer') is not None:
        revived_obj._is_feature_layer = metadata['_is_feature_layer']
      if metadata.get('stateful') is not None:
        revived_obj.stateful = metadata['stateful']
      # pylint:enable=protected-access

    return revived_obj, _revive_setter

  @property
  def keras_api(self):
    """Returns the saved "keras_api" attributes, or None if absent."""
    return self._serialized_attributes.get(constants.KERAS_ATTR, None)

  def get_config(self):
    """Returns the config saved with this layer.

    Raises:
      NotImplementedError: If no config was saved in the SavedModel.
    """
    if hasattr(self, '_config'):
      return self._config
    else:
      raise NotImplementedError
def _revive_setter(layer, name, value):
  """Setter function that saves some attributes to separate dictionary."""
  # Many attributes in the SavedModel conflict with properties defined in
  # Layer and Model. Save these attributes to a separate dictionary.
  if name in PUBLIC_ATTRIBUTES:
    # pylint: disable=protected-access
    if isinstance(value, trackable.Trackable):
      layer._track_trackable(value, name=name)
    layer._serialized_attributes[name] = value
    # pylint: enable=protected-access
  # BUGFIX: the pattern previously used the character class `[\d+]` (a single
  # digit OR a literal '+'); `\d+` expresses the intended "one or more digits".
  elif (isinstance(layer, functional_lib.Functional) and
        re.match(r'^layer(_with_weights)?-\d+', name) is not None):
    # Edges named "layer-n" or "layer_with_weights-n", which are tracked in
    # network._track_layers, should not be added as an attribute. They should
    # be temporarily added as a dependency so that checkpointed values can be
    # restored. These dependencies are manually deleted in
    # KerasObjectLoader.del_tracking.

    # Set `overwrite=True` in the case that `layer` already tracks a different
    # layer-n. This may cause variable values to not be loaded properly in the
    # original layer-n, but we already warn the users about this
    # (ctrl-f "shared between different layers/models").
    layer._track_trackable(value, name, overwrite=True)  # pylint: disable=protected-access
  elif getattr(layer, name, None) is not None:
    # Don't overwrite already defined attributes.
    pass
  else:
    setattr(layer, name, value)
class RevivedInputLayer(object):
  """InputLayer loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Revives the saved InputLayer from the Metadata.

    Returns:
      Tuple of (revived InputLayer, plain `setattr` as the setter -- no
      attribute filtering is needed for input layers).
    """
    init_args = dict(
        name=metadata['name'],
        dtype=metadata['dtype'],
        sparse=metadata['sparse'],
        ragged=metadata['ragged'],
        batch_input_shape=metadata['batch_input_shape'])
    revived_obj = cls(**init_args)
    with utils.no_automatic_dependency_tracking_scope(revived_obj):
      # Keep the raw config so get_config() round-trips.
      revived_obj._config = metadata['config']  # pylint:disable=protected-access

    return revived_obj, setattr

  def get_config(self):
    return self._config
def recursively_deserialize_keras_object(config, module_objects=None):
  """Deserialize Keras object from a nested structure.

  Dicts containing a 'class_name' key are handed to the standard Keras
  deserializer; other dicts/lists/tuples are walked recursively (tuples come
  back as lists). Any other value raises ValueError.
  """
  if isinstance(config, dict):
    if 'class_name' in config:
      # A serialized Keras object: delegate to the standard deserializer.
      return generic_utils.deserialize_keras_object(
          config, module_objects=module_objects)
    return {
        key: recursively_deserialize_keras_object(value, module_objects)
        for key, value in config.items()
    }
  if isinstance(config, (tuple, list)):
    return [recursively_deserialize_keras_object(item, module_objects)
            for item in config]
  raise ValueError('Unable to decode config: {}'.format(config))
def get_common_shape(x, y):
  """Find a `TensorShape` that is compatible with both `x` and `y`.

  Args:
    x: `TensorShape` or None.
    y: `TensorShape` or None.

  Returns:
    A `TensorShape` with every dimension that `x` and `y` agree on, None
    where they differ or are unknown; or None when both inputs are None.

  Raises:
    RuntimeError: If exactly one of `x`/`y` is None.
    TypeError: If either input is neither None nor a `TensorShape`.
  """
  # BUGFIX: the parentheses are required. Without them,
  # `x is None != y is None` parses as the chained comparison
  # `(x is None) and (None != y) and (y is None)`, which is never True when
  # exactly one side is None -- so the mismatch was silently missed.
  if (x is None) != (y is None):
    raise RuntimeError(
        'Cannot find a common shape when LHS shape is None but RHS shape '
        'is not (or vice versa): %s vs. %s' % (x, y))
  if x is None:
    return None  # The associated input was not a Tensor, no shape generated.
  if not isinstance(x, tensor_shape.TensorShape):
    raise TypeError('Expected x to be a TensorShape but saw %s' % (x,))
  if not isinstance(y, tensor_shape.TensorShape):
    raise TypeError('Expected y to be a TensorShape but saw %s' % (y,))
  if x.rank != y.rank or x.rank is None:
    # Ranks disagree or are unknown: only a fully-unknown shape is compatible.
    return tensor_shape.TensorShape(None)
  dims = []
  for dim_x, dim_y in zip(x.dims, y.dims):
    if (dim_x != dim_y
        or tensor_shape.dimension_value(dim_x) is None
        or tensor_shape.dimension_value(dim_y) is None):
      dims.append(None)
    else:
      dims.append(tensor_shape.dimension_value(dim_x))
  return tensor_shape.TensorShape(dims)
def infer_inputs_from_restored_call_function(fn):
  """Returns TensorSpec of inputs from a restored call function.

  Args:
    fn: Restored layer call function. It is assumed that `fn` has at least
      one concrete function and that the inputs are in the first argument.

  Returns:
    TensorSpec of call function inputs.
  """
  def common_spec(x, y):
    # Merge two specs into one whose shape is compatible with both traces.
    common_shape = get_common_shape(x.shape, y.shape)
    if isinstance(x, sparse_tensor.SparseTensorSpec):
      return sparse_tensor.SparseTensorSpec(common_shape, x.dtype)
    elif isinstance(x, ragged_tensor.RaggedTensorSpec):
      return ragged_tensor.RaggedTensorSpec(common_shape, x.dtype)
    return tensor_spec.TensorSpec(common_shape, x.dtype, x.name)

  # Start from the first trace's input structure and fold in the rest.
  spec = fn.concrete_functions[0].structured_input_signature[0][0]
  for concrete in fn.concrete_functions[1:]:
    spec2 = concrete.structured_input_signature[0][0]
    spec = nest.map_structure(common_spec, spec, spec2)
  return spec
class RevivedNetwork(RevivedLayer):
  """Keras network of layers loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived network from metadata stored in the SavedModel proto.

    Unlike `RevivedLayer._init_from_metadata`, only the name is passed to the
    constructor; dtype/trainable are applied later by
    `_set_network_attributes_from_metadata`.
    """
    revived_obj = cls(name=metadata['name'])

    # Store attributes revived from SerializedAttributes in a un-tracked
    # dictionary. The attributes are the ones listed in CommonEndpoints or
    # "keras_api" for keras-specific attributes.
    with utils.no_automatic_dependency_tracking_scope(revived_obj):
      # pylint:disable=protected-access
      revived_obj._expects_training_arg = metadata['expects_training_arg']
      config = metadata.get('config')
      if generic_utils.validate_config(config):
        revived_obj._config = config

      if metadata.get('activity_regularizer') is not None:
        revived_obj.activity_regularizer = regularizers.deserialize(
            metadata['activity_regularizer'])
      # pylint:enable=protected-access

    return revived_obj, _revive_setter  # pylint:disable=protected-access
def _set_network_attributes_from_metadata(revived_obj):
  """Applies dtype and trainable settings recorded in the saved metadata."""
  with utils.no_automatic_dependency_tracking_scope(revived_obj):
    # pylint:disable=protected-access
    metadata = revived_obj._serialized_attributes['metadata']
    saved_dtype = metadata.get('dtype')
    if saved_dtype is not None:
      revived_obj._set_dtype_policy(saved_dtype)
    revived_obj._trainable = metadata['trainable']
    # pylint:enable=protected-access
def _maybe_add_serialized_attributes(layer, metadata):
  """Ensures `layer._serialized_attributes` exists, seeded with `metadata`."""
  # Attributes revived from SerializedAttributes live in an un-tracked dict
  # (keys are CommonEndpoints names, or "keras_api" for keras-specific ones).
  if hasattr(layer, '_serialized_attributes'):
    return
  with utils.no_automatic_dependency_tracking_scope(layer):
    layer._serialized_attributes = {'metadata': metadata}  # pylint: disable=protected-access
def _get_keras_attr(layer):
  """Returns the layer's saved "keras_api" attributes, or None if absent."""
  serialized = getattr(layer, '_serialized_attributes', {})
  return serialized.get(constants.KERAS_ATTR, None)
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convenience file system related operations."""
import os
import shutil
import sys
import tempfile
import platform
import time
def AtomicWriteFile(data, filename):
  """Write a file atomically.

  NOTE: Not atomic on Windows!
  Args:
    data: String to write to the file.
    filename: Filename to write.
  """
  filename = os.path.abspath(filename)
  handle, temp_file = tempfile.mkstemp(
      prefix='atomic_write', suffix='.tmp',
      dir=os.path.dirname(filename))
  try:
    fh = os.fdopen(handle, 'wb')
    try:
      fh.write(data)
    finally:
      # Close even if the write fails, so the fd is never leaked.
      fh.close()
    # Window's can't move into place atomically, delete first.
    if sys.platform in ['win32', 'cygwin']:
      try:
        os.remove(filename)
      except OSError:
        pass
    Retry(os.rename, temp_file, filename)
  except:
    # Don't leave the temp file behind when writing or renaming fails.
    try:
      os.remove(temp_file)
    except OSError:
      pass
    raise
def WriteFile(data, filename):
  """Write a file in one step.

  Args:
    data: String to write to the file.
    filename: Filename to write.
  """
  # Use a context manager so the handle is closed even if write() raises.
  with open(filename, 'wb') as fh:
    fh.write(data)
def ReadFile(filename):
  """Read a file in one step.

  Args:
    filename: Filename to read.
  Returns:
    String containing complete file.
  """
  # Use a context manager so the handle is closed even if read() raises.
  with open(filename, 'rb') as fh:
    return fh.read()
class ExecutableNotFound(Exception):
  """Raised by Which() when no matching executable exists on the search path."""
  pass
def Which(command, paths=None, require_executable=True):
  """Find the absolute path of a command in the current PATH.

  Args:
    command: Command name to look for.
    paths: Optional list of directories to search instead of $PATH.
    require_executable: If True, only accept files with execute permission.
  Returns:
    Absolute path of the command (first one found).
  Raises:
    ExecutableNotFound: If the command is not found on the search path.
    (NOTE: the old docstring claimed a bare-command fallback; the function
    has always raised instead.)
  """
  if paths is None:
    paths = os.environ.get('PATH', '').split(os.pathsep)
  exe_suffixes = ['']
  if sys.platform == 'win32':
    exe_suffixes += ['.exe']
  for p in paths:
    np = os.path.abspath(os.path.join(p, command))
    for suffix in exe_suffixes:
      full_path = np + suffix
      if (os.path.isfile(full_path) and
          (not require_executable or os.access(full_path, os.X_OK))):
        return full_path
  raise ExecutableNotFound('Unable to find: ' + command)
def MakeDirectoryIfAbsent(path):
    """Create a directory (and missing parents) if it doesn't already exist.

    Args:
        path: Directory to create.
    """
    # EAFP instead of isdir()+makedirs(): the old check-then-create had a
    # race where a concurrent creator made makedirs raise EEXIST.
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
def MakeParentDirectoryIfAbsent(path):
    """Ensure that the parent directory of ``path`` exists.

    Args:
        path: Path of child whose parent directory should be created.
    """
    parent = os.path.dirname(os.path.abspath(path))
    MakeDirectoryIfAbsent(parent)
def RemoveDirectoryIfPresent(path):
    """Remove a directory tree if it exists.

    Args:
        path: Directory to remove.
    """
    def _make_writable_and_retry(func, subpath, exc_info):
        # On Windows, removing read-only files fails with Error 5; clear
        # the read-only bit and retry the failing operation once.
        import stat
        if not os.access(subpath, os.W_OK):
            os.chmod(subpath, stat.S_IWUSR)
        func(subpath)

    if os.path.exists(path):
        Retry(shutil.rmtree, path, onerror=_make_writable_and_retry)
def CopyTree(src, dst):
    """Recursively copy the items in the src directory to the dst directory.

    Unlike shutil.copytree, the destination (and any subdirectory) may
    already exist: existing directories are left alone, and existing files
    are deleted and re-copied with shutil.copy2. Symlinks are not treated
    specially.

    Args:
        src: Source. Must be an existing directory.
        dst: Destination directory. If it exists, must be a directory.
            Otherwise it will be created, along with parent directories.
    """
    if not os.path.isdir(dst):
        os.makedirs(dst)
    for root, dirnames, filenames in os.walk(src):
        dest_root = os.path.join(dst, os.path.relpath(root, src))
        for name in dirnames:
            target = os.path.join(dest_root, name)
            if not os.path.isdir(target):
                os.mkdir(target)
        for name in filenames:
            target = os.path.join(dest_root, name)
            if os.path.isfile(target):
                # Delete first so copy2 never writes through a stale file.
                Retry(os.remove, target)
            shutil.copy2(os.path.join(root, name), target)
def MoveAndMergeDirTree(src_dir, dest_dir):
    """Moves everything from a source directory to a destination directory.

    This is different from shutil's move implementation in that it only
    operates on directories, and if the destination directory exists, it
    will move the contents into the directory and merge any existing
    directories.

    Args:
        src_dir: Source directory which files should be moved from.
        dest_dir: Destination directory where files should be moved and
            merged to.

    Raises:
        OSError: If src_dir is not a directory, or if an item exists in
            both trees with mismatching types (file vs. directory).
    """
    if not os.path.isdir(src_dir):
        raise OSError('MoveAndMergeDirTree can only operate on directories.')
    if not os.path.exists(dest_dir):
        # Simply move the directory over if destination doesn't exist.
        MakeParentDirectoryIfAbsent(dest_dir)
        Retry(os.rename, src_dir, dest_dir)
    else:
        # Merge each item if destination directory exists.
        for dir_item in os.listdir(src_dir):
            source_item = os.path.join(src_dir, dir_item)
            destination_item = os.path.join(dest_dir, dir_item)
            if os.path.exists(destination_item):
                if os.path.isdir(destination_item) and os.path.isdir(source_item):
                    # Merge the sub-directories together if they are both
                    # directories (recursive).
                    MoveAndMergeDirTree(source_item, destination_item)
                elif os.path.isfile(destination_item) and os.path.isfile(source_item):
                    # Overwrite the file if they are both files.
                    Retry(os.unlink, destination_item)
                    Retry(os.rename, source_item, destination_item)
                else:
                    raise OSError('Cannot move directory tree, mismatching types.'
                                  ' Source - %s. Destination - %s' %
                                  (source_item, destination_item))
            else:
                Retry(os.rename, source_item, destination_item)
        # Remove the directory once all the contents have been moved
        Retry(os.rmdir, src_dir)
def Retry(op, *args, **kwargs):
    """Invoke op(*args, **kwargs), retrying with backoff on Windows.

    Windows seems to be prone to having commands that delete files or
    directories fail. We currently do not have a complete understanding
    why, and as a workaround we simply retry the command a few times.
    It appears that file locks are hanging around longer than they should;
    this may be a secondary effect of processes hanging around longer than
    they should (e.g. when we kill a browser, sel_ldr does not exit
    immediately). Virus checkers can also accidentally prevent files from
    being deleted, but that shouldn't be a problem on the bots.

    Args:
        op: Callable to invoke.
        *args: Positional arguments forwarded to op.
        **kwargs: Keyword arguments forwarded to op.
    """
    # BUG FIX: the stdlib `platform` module has no IsWindows() — the old
    # call raised AttributeError. Use sys.platform, consistent with the
    # check in AtomicWriteFile above.
    if sys.platform in ('win32', 'cygwin'):
        count = 0
        while True:
            try:
                op(*args, **kwargs)
                break
            except Exception:
                sys.stdout.write('FAILED: %s %s %s\n' % (
                    op.__name__, repr(args), repr(kwargs)))
                count += 1
                if count < 5:
                    sys.stdout.write('RETRYING\n')
                    # Exponential backoff: 2, 4, 8, 16 seconds.
                    time.sleep(pow(2, count))
                else:
                    # Don't mask the exception.
                    raise
    else:
        op(*args, **kwargs)
def MoveDirCleanly(src, dst):
    """Move directory src to dst, deleting anything already at dst first.

    Args:
        src: Source directory.
        dst: Destination directory (removed if present before the move).
    """
    RemoveDirectoryIfPresent(dst)
    MoveDir(src, dst)
def MoveDir(src, dst):
    """Move a directory, retrying on transient (Windows) failures.

    Args:
        src: Source directory.
        dst: Destination path.
    """
    Retry(shutil.move, src, dst)
def RemoveFile(path):
    """Delete a file if it exists, retrying on transient failures.

    Args:
        path: File to remove.
    """
    if os.path.exists(path):
        Retry(os.unlink, path)
| |
from ConfigParser import ConfigParser
from collections import defaultdict
import copy
import json
import logging
import os.path
import re
import sys
# This must be called early - before the rest of the blueprint library loads.
logging.basicConfig(format='# [blueprint] %(message)s',
                    level=logging.INFO)
import git
import rules
import util
import walk
# Built-in configuration defaults; overridable by the config files read
# below. NOTE: this module is Python 2 (ConfigParser, dict.iteritems).
DEFAULTS = {'io': {'max_content_length': 67108864,
                   'server': 'https://devstructure.com'},
            's3': {'region': 'US',
                   'use_https': True},
            'statsd': {'port': 8125}}
cfg = ConfigParser()
# Seed the parser with the defaults before layering on the system-wide and
# per-user configuration files (later files win).
for section, options in DEFAULTS.iteritems():
    cfg.add_section(section)
    for option, value in options.iteritems():
        cfg.set(section, option, str(value))
cfg.read(['/etc/blueprint.cfg',
          os.path.expanduser('~/.blueprint.cfg')])
class NameError(ValueError):
    """Raised for invalid blueprint names.

    NOTE(review): intentionally or not, this shadows the builtin
    ``NameError`` within this module.
    """
    pass
class NotFoundError(KeyError):
    """Raised when a named blueprint does not exist in the repository."""
    pass
class Blueprint(dict):
    """A blueprint: files, packages, services and source tarballs.

    A ``Blueprint`` is a ``dict`` subclass (so it serializes directly to
    JSON) with typed accessors layered on top; the structure makes heavy
    use of ``defaultdict`` and ``set``. Blueprints are persisted as
    commits on branches of a local Git repository (see ``commit``,
    ``checkout``, ``iter`` and ``destroy``).

    NOTE: Python 2 code throughout (``iteritems``/``itervalues``).
    """

    # Banner prepended to generated configuration output files.
    DISCLAIMER = """#
# Automatically generated by blueprint(7). Edit at your own risk.
#
"""

    @classmethod
    def checkout(cls, name, commit=None):
        """Instantiate the named blueprint from the local Git repository.

        Loads ``blueprint.json`` from ``commit`` (or the tip of the branch
        named after the blueprint) and raises ``NotFoundError`` when no
        such branch exists.
        """
        git.init()
        if commit is None:
            commit = git.rev_parse('refs/heads/{0}'.format(name))
            if commit is None:
                raise NotFoundError(name)
        tree = git.tree(commit)
        blob = git.blob(tree, 'blueprint.json')
        content = git.content(blob)
        return cls(name, commit, **json.loads(content))

    @classmethod
    def create(cls, name):
        """Survey this system and build a new blueprint using default rules."""
        b = cls(name)
        r = rules.defaults()
        # Imported lazily; presumably to avoid a circular import at module
        # load time -- TODO confirm.
        import backend
        for funcname in backend.__all__:
            getattr(backend, funcname)(b, r)
        import services
        services.services(b)
        return b

    @classmethod
    def destroy(cls, name):
        """
        Destroy the named blueprint.
        """
        if not os.path.isdir(git.repo()):
            raise NotFoundError(name)
        try:
            git.git('branch', '-D', name)
        # NOTE(review): bare except converts any failure (even interrupts)
        # into NotFoundError.
        except:
            raise NotFoundError(name)

    @classmethod
    def iter(cls):
        """
        Yield the name of each blueprint.
        """
        if not os.path.isdir(git.repo()):
            return
        status, stdout = git.git('branch')
        for line in stdout.splitlines():
            # NOTE(review): git prints the current branch as '* name';
            # plain strip() does not remove the '* ' prefix -- confirm
            # this is intended.
            yield line.strip()

    @classmethod
    def load(cls, f, name=None):
        """
        Instantiate and return a Blueprint object from a file-like object
        from which valid blueprint JSON may be read.
        """
        return cls(name, **json.load(f))

    @classmethod
    def loads(cls, s, name=None):
        """
        Instantiate and return a Blueprint object from a string containing
        valid blueprint JSON.
        """
        return cls(name, **json.loads(s))

    @classmethod
    def rules(cls, r, name=None):
        """Build a blueprint of this system using the given rules ``r``."""
        b = cls(name)
        import backend
        for funcname in backend.__all__:
            getattr(backend, funcname)(b, r)
        import services
        services.services(b)
        return b

    def __init__(self, name=None, commit=None, *args, **kwargs):
        """
        Construct a blueprint. Extra arguments are used to create a `dict`
        which is then sent through the `blueprint`(5) algorithm to be ingested
        into this `Blueprint` object with the proper types. (The structure
        makes heavy use of `defaultdict` and `set`).
        """
        self.name = name
        self._commit = commit

        # The closures below are walk() callbacks: each one re-adds the
        # visited item to *this* blueprint via the typed add_* helpers.
        def file(pathname, f):
            self.add_file(pathname, **f)

        def package(manager, package, version):
            self.add_package(manager, package, version)

        def service(manager, service):
            self.add_service(manager, service)

        def service_file(manager, service, pathname):
            self.add_service_file(manager, service, pathname)

        def service_package(manager, service, package_manager, package):
            self.add_service_package(manager,
                                     service,
                                     package_manager,
                                     package)

        def service_source(manager, service, dirname):
            self.add_service_source(manager, service, dirname)

        def source(dirname, filename, gen_content, url):
            # A source is referenced either by URL or by a generated
            # tarball filename; silently skipped when neither is present.
            if url is not None:
                self.add_source(dirname, url)
            elif gen_content is not None:
                self.add_source(dirname, filename)

        walk.walk(dict(*args, **kwargs),
                  file=file,
                  package=package,
                  service=service,
                  service_file=service_file,
                  service_package=service_package,
                  service_source=service_source,
                  source=source)

    def __sub__(self, other):
        """
        Subtracting one blueprint from another allows blueprints to remain
        free of superfluous packages from the base installation. It takes
        three passes through the package tree. The first two remove
        superfluous packages and the final one accounts for some special
        dependencies by adding them back to the tree.
        """
        b = copy.deepcopy(self)

        # Compare file contents and metadata. Keep files that differ.
        for pathname, file in self.files.iteritems():
            if other.files.get(pathname, {}) == file:
                del b.files[pathname]

        # The first pass removes all duplicate packages that are not
        # themselves managers. Allowing multiple versions of the same
        # packages complicates things slightly. For each package, each
        # version that appears in the other blueprint is removed from
        # this blueprint. After that is finished, this blueprint is
        # normalized. If no versions remain, the package is removed.
        def package(manager, package, version):
            # Packages that are themselves managers (keys of b.packages)
            # are never removed in this pass.
            if package in b.packages:
                return
            try:
                b_packages = b.packages[manager]
            except KeyError:
                return
            if manager in b_packages:
                return
            if package not in b_packages:
                return
            try:
                b_versions = b_packages[package]
            except KeyError:
                return
            b_versions.discard(version)
            if 0 == len(b_versions):
                del b_packages[package]
            else:
                b_packages[package] = b_versions
        other.walk(package=package)

        # The second pass removes managers that manage no packages, a
        # potential side-effect of the first pass. This step must be
        # applied repeatedly until the blueprint reaches a steady state.
        def package(manager, package, version):
            if package not in b.packages:
                return
            if 0 == len(b.packages[package]):
                del b.packages[package]
                del b.packages[self.managers[package]][package]
        while 1:
            # Fixed point reached when a full walk removes nothing.
            l = len(b.packages)
            other.walk(package=package)
            if len(b.packages) == l:
                break

        # The third pass adds back special dependencies like `ruby*-dev`.
        # It isn't apparent from the rules above that a manager like RubyGems
        # needs more than just itself to function. In some sense, this might
        # be considered a missing dependency in the Debian archive but in
        # reality it's only _likely_ that you need `ruby*-dev` to use
        # `rubygems*`.
        def after_packages(manager):
            if manager not in b.packages:
                return
            # Map manager-name patterns to the package name templates that
            # should be re-added; {0} is filled with the captured version.
            deps = {r'^python(\d+(?:\.\d+)?)$': ['python{0}',
                                                 'python{0}-dev',
                                                 'python',
                                                 'python-devel'],
                    r'^ruby(\d+\.\d+(?:\.\d+)?)$': ['ruby{0}-dev'],
                    r'^rubygems(\d+\.\d+(?:\.\d+)?)$': ['ruby{0}',
                                                        'ruby{0}-dev',
                                                        'ruby',
                                                        'ruby-devel']}
            for pattern, packages in deps.iteritems():
                match = re.search(pattern, manager)
                if match is None:
                    continue
                for package in packages:
                    package = package.format(match.group(1))
                    for managername in ('apt', 'yum'):
                        mine = self.packages.get(managername, {}).get(package,
                                                                      None)
                        if mine is not None:
                            b.packages[managername][package] = mine
        other.walk(after_packages=after_packages)

        # Compare service metadata. Keep services that differ.
        for manager, services in self.services.iteritems():
            for service, deps in services.iteritems():
                if other.services.get(manager, {}).get(service, {}) == deps:
                    del b.services[manager][service]
                    if 0 == len(b.services[manager]):
                        del b.services[manager]

        # Compare source tarball filenames, which indicate their content.
        # Keep source tarballs that differ.
        for dirname, filename in self.sources.iteritems():
            if other.sources.get(dirname, '') == filename:
                del b.sources[dirname]

        return b

    def get_name(self):
        return self._name

    def set_name(self, name):
        """
        Validate and set the blueprint name.
        """
        # Reject the empty name, the bare '-', and anything containing a
        # slash or whitespace (unsafe as a Git branch name).
        if name is not None and re.search(r'^$|^-$|[/ \t\r\n]', name):
            raise NameError('invalid blueprint name')
        self._name = name

    name = property(get_name, set_name)

    def get_arch(self):
        if 'arch' not in self:
            self['arch'] = None
        return self['arch']

    def set_arch(self, arch):
        self['arch'] = arch

    arch = property(get_arch, set_arch)

    @property
    def files(self):
        # Lazily initialized; see normalize() which removes empty keys.
        if 'files' not in self:
            self['files'] = defaultdict(dict)
        return self['files']

    @property
    def managers(self):
        """
        Build a hierarchy of managers for easy access when declaring
        dependencies.
        """
        # Memoized on first access; 'apt' and 'yum' are the roots.
        if hasattr(self, '_managers'):
            return self._managers
        self._managers = {'apt': None, 'yum': None}

        def package(manager, package, version):
            # A package that is itself a manager records who manages it.
            if package in self.packages and manager != package:
                self._managers[package] = manager
        self.walk(package=package)
        return self._managers

    @property
    def packages(self):
        # manager -> package -> set of versions.
        if 'packages' not in self:
            self['packages'] = defaultdict(lambda: defaultdict(set))
        return self['packages']

    @property
    def services(self):
        # manager -> service -> metadata dict.
        if 'services' not in self:
            self['services'] = defaultdict(lambda: defaultdict(dict))
        return self['services']

    @property
    def sources(self):
        # dirname -> tarball filename or URL.
        if 'sources' not in self:
            self['sources'] = defaultdict(dict)
        return self['sources']

    def add_file(self, pathname, **kwargs):
        """
        Create a file resource.
        """
        self.files[pathname] = kwargs

    def add_package(self, manager, package, version):
        """
        Create a package resource.
        """
        self.packages[manager][package].add(version)

    def add_service(self, manager, service):
        """
        Create a service resource which depends on given files and packages.
        """
        # AWS cfn-init respects the enable and ensure parameters like Puppet
        # does. Blueprint provides these parameters for interoperability.
        self.services[manager].setdefault(service, {'enable': True,
                                                    'ensureRunning': True})

    def add_service_file(self, manager, service, *args):
        """
        Add file dependencies to a service resource.
        """
        if 0 == len(args):
            return
        s = self.services[manager][service].setdefault('files', set())
        for dirname in args:
            s.add(dirname)

    def add_service_package(self, manager, service, package_manager, *args):
        """
        Add package dependencies to a service resource.
        """
        if 0 == len(args):
            return
        d = self.services[manager][service].setdefault('packages',
                                                       defaultdict(set))
        for package in args:
            d[package_manager].add(package)

    def add_service_source(self, manager, service, *args):
        """
        Add source tarball dependencies to a service resource.
        """
        if 0 == len(args):
            return
        s = self.services[manager][service].setdefault('sources', set())
        for dirname in args:
            s.add(dirname)

    def add_source(self, dirname, filename):
        """
        Create a source tarball resource.
        """
        self.sources[dirname] = filename

    def commit(self, message=''):
        """
        Create a new revision of this blueprint in the local Git repository.
        Include the blueprint JSON and any source archives referenced by
        the JSON.
        """
        git.init()
        refname = 'refs/heads/{0}'.format(self.name)
        parent = git.rev_parse(refname)

        # Start with an empty index every time. Specifically, clear out
        # source tarballs from the parent commit.
        if parent is not None:
            for mode, type, sha, pathname in git.ls_tree(git.tree(parent)):
                git.git('update-index', '--force-remove', pathname)

        # Add `blueprint.json` to the index.
        f = open('blueprint.json', 'w')
        f.write(self.dumps())
        f.close()
        git.git('update-index', '--add', os.path.abspath('blueprint.json'))

        # Add source tarballs to the index.
        for filename in self.sources.itervalues():
            git.git('update-index', '--add', os.path.abspath(filename))

        # Add `/etc/blueprintignore` and `~/.blueprintignore` to the index.
        # Since adding extra syntax to this file, it no longer makes sense
        # to store it as `.gitignore`.
        f = open('blueprintignore', 'w')
        for pathname in ('/etc/blueprintignore',
                         os.path.expanduser('~/.blueprintignore')):
            try:
                f.write(open(pathname).read())
            except IOError:
                # Missing ignore files are fine; concatenate what exists.
                pass
        f.close()
        git.git('update-index', '--add', os.path.abspath('blueprintignore'))

        # Write the index to Git's object store.
        tree = git.write_tree()

        # Write the commit and update the tip of the branch.
        self._commit = git.commit_tree(tree, message, parent)
        git.git('update-ref', refname, self._commit)

    def normalize(self):
        """
        Remove superfluous empty keys to reduce variance in serialized JSON.
        """
        if 'arch' in self and self['arch'] is None:
            del self['arch']
        for key in ['files', 'packages', 'sources']:
            if key in self and 0 == len(self[key]):
                del self[key]

    def dumps(self):
        """
        Return a JSON serialization of this blueprint. Make a best effort
        to prevent variance from run-to-run.
        """
        self.normalize()
        return util.json_dumps(self)

    def puppet(self, relaxed=False):
        """
        Generate Puppet code.
        """
        import frontend.puppet
        return frontend.puppet.puppet(self, relaxed)

    def chef(self, relaxed=False):
        """
        Generate Chef code.
        """
        import frontend.chef
        return frontend.chef.chef(self, relaxed)

    def bcfg2(self, relaxed=False):
        """
        Generate bcfg2 code.
        """
        import frontend.bcfg2
        return frontend.bcfg2.bcfg2(self, relaxed)

    def sh(self,
           relaxed=False,
           server='https://devstructure.com',
           secret=None):
        """
        Generate shell code.
        """
        import frontend.sh
        return frontend.sh.sh(self, relaxed, server, secret)

    def blueprint_rules(self, relaxed=False):
        """
        Generate Blueprint rules.
        """
        import frontend.rules
        return frontend.rules.rules(self, relaxed)

    def cfn(self, relaxed=False):
        """
        Generate an AWS CloudFormation template.
        """
        import frontend.cfn
        return frontend.cfn.cfn(self, relaxed)

    def cfengine3(self, relaxed=False):
        """
        Generate a CFEngine 3 template.
        """
        import frontend.cfengine3
        return frontend.cfengine3.cfengine3(self, relaxed)

    def blueprintignore(self):
        """
        Return an open file pointer to the blueprint's blueprintignore file,
        which is suitable for passing back to `blueprint.rules.Rules.parse`.
        Prior to v3.0.9 this file was stored as .blueprintignore in the
        repository. Prior to v3.0.4 this file was stored as .gitignore in
        the repository.
        """
        tree = git.tree(self._commit)
        # Fall back through the historical filenames, newest first.
        blob = git.blob(tree, 'blueprintignore')
        if blob is None:
            blob = git.blob(tree, '.blueprintignore')
        if blob is None:
            blob = git.blob(tree, '.gitignore')
        if blob is None:
            return []
        return git.cat_file(blob)

    def walk(self, **kwargs):
        """Walk this blueprint's structure, dispatching to the callbacks
        accepted by ``walk.walk``."""
        walk.walk(self, **kwargs)
| |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from saharaclient.osc import utils
# Cluster-template fields shown by the Show/Create/Update/Import commands.
CT_FIELDS = ['id', 'name', 'plugin_name', 'plugin_version', 'description',
             'node_groups', 'anti_affinity', 'use_autoconfig', 'is_default',
             'is_protected', 'is_public', 'domain_name']
def _format_node_groups_list(node_groups):
return ', '.join(
['%s:%s' % (ng['name'], ng['count']) for ng in node_groups])
def _format_ct_output(app, data):
    """Normalize cluster-template fields in place for display.

    Args:
        app: The cliff application (unused here; kept for signature parity
            with callers).
        data: Cluster template dict; mutated in place.
    """
    # The v1 API reports the version as 'hadoop_version', while newer API
    # versions already use 'plugin_version' (see the is_api_v2 branch in
    # ListClusterTemplates). Only rename when the old key is present so a
    # v2 payload does not raise KeyError.
    if 'hadoop_version' in data:
        data['plugin_version'] = data.pop('hadoop_version')
    data['node_groups'] = _format_node_groups_list(data['node_groups'])
    data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
def _configure_node_groups(app, node_groups, client):
    """Resolve '<name-or-id>:<count>' specs into node-group dicts.

    Args:
        app: The cliff application (unused here).
        node_groups: Iterable of '<name-or-id>:<count>' strings.
        client: Sahara client used to look up node group templates.

    Returns:
        Tuple of (plugin_name, plugin_version, node_groups) where
        node_groups is a list of dicts suitable for the templates API.

    Raises:
        exceptions.CommandError: If the referenced node group templates do
            not all share a single (plugin, version) pair.
    """
    node_groups_list = dict(
        map(lambda x: x.split(':', 1), node_groups))
    node_groups = []
    plugins_versions = set()
    for name, count in node_groups_list.items():
        ng = utils.get_resource(client.node_group_templates, name)
        node_groups.append({'name': ng.name,
                            'count': int(count),
                            'node_group_template_id': ng.id})
        plugins_versions.add((ng.plugin_name, ng.hadoop_version))
    # All node groups must belong to exactly one plugin/version pair.
    if len(plugins_versions) != 1:
        raise exceptions.CommandError('Node groups with the same plugins '
                                      'and versions must be specified')
    plugin, plugin_version = plugins_versions.pop()
    return plugin, plugin_version, node_groups
class CreateClusterTemplate(command.ShowOne):
    """Creates cluster template"""

    log = logging.getLogger(__name__ + ".CreateClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template create`."""
        parser = super(CreateClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar="<name>",
            help="Name of the cluster template [REQUIRED if JSON is not "
                 "provided]",
        )
        parser.add_argument(
            '--node-groups',
            metavar="<node-group:instances_count>",
            nargs="+",
            help="List of the node groups(names or IDs) and numbers of "
                 "instances for each one of them [REQUIRED if JSON is not "
                 "provided]"
        )
        parser.add_argument(
            '--anti-affinity',
            metavar="<anti-affinity>",
            nargs="+",
            help="List of processes that should be added to an anti-affinity "
                 "group"
        )
        parser.add_argument(
            '--description',
            metavar="<description>",
            help='Description of the cluster template'
        )
        parser.add_argument(
            '--autoconfig',
            action='store_true',
            default=False,
            help='If enabled, instances of the cluster will be '
                 'automatically configured',
        )
        parser.add_argument(
            '--public',
            action='store_true',
            default=False,
            help='Make the cluster template public (Visible from other '
                 'projects)',
        )
        parser.add_argument(
            '--protected',
            action='store_true',
            default=False,
            help='Make the cluster template protected',
        )
        parser.add_argument(
            '--json',
            metavar='<filename>',
            help='JSON representation of the cluster template. Other '
                 'arguments will not be taken into account if this one is '
                 'provided'
        )
        parser.add_argument(
            '--shares',
            metavar='<filename>',
            help='JSON representation of the manila shares'
        )
        parser.add_argument(
            '--configs',
            metavar='<filename>',
            help='JSON representation of the cluster template configs'
        )
        parser.add_argument(
            '--domain-name',
            metavar='<domain-name>',
            help='Domain name for instances of this cluster template. This '
                 'option is available if \'use_designate\' config is True'
        )
        return parser

    def _take_action(self, client, parsed_args):
        """Create the template, either verbatim from --json or from flags.

        Returns the created cluster template as a dict.

        Raises:
            exceptions.CommandError: On invalid JSON or when neither
                --json nor both --name and --node-groups were supplied.
        """
        if parsed_args.json:
            # --json wins: send the file contents directly to the API.
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            # Accept the legacy key name and translate for the API.
            if 'neutron_management_network' in template:
                template['net_id'] = template.pop('neutron_management_network')
            data = client.cluster_templates.create(**template).to_dict()
        else:
            if not parsed_args.name or not parsed_args.node_groups:
                raise exceptions.CommandError(
                    'At least --name , --node-groups arguments should be '
                    'specified or json template should be provided with '
                    '--json argument')
            configs = None
            if parsed_args.configs:
                blob = osc_utils.read_blob_file_contents(parsed_args.configs)
                try:
                    configs = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'configs from file %s: %s' % (parsed_args.configs, e))
            shares = None
            if parsed_args.shares:
                blob = osc_utils.read_blob_file_contents(parsed_args.shares)
                try:
                    shares = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'shares from file %s: %s' % (parsed_args.shares, e))
            # Resolve node-group specs and derive the plugin/version.
            plugin, plugin_version, node_groups = (
                utils._cluster_templates_configure_ng(self.app,
                                                      parsed_args.node_groups,
                                                      client))
            data = utils.create_cluster_template(self.app, client, plugin,
                                                 plugin_version,
                                                 parsed_args, configs, shares,
                                                 node_groups)
        return data

    def take_action(self, parsed_args):
        """Entry point: create, format and display the template."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        data = self._take_action(client, parsed_args)
        _format_ct_output(self.app, data)
        data = utils.prepare_data(data, CT_FIELDS)
        return self.dict2columns(data)
class ListClusterTemplates(command.Lister):
    """Lists cluster templates"""

    log = logging.getLogger(__name__ + ".ListClusterTemplates")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template list`."""
        parser = super(ListClusterTemplates, self).get_parser(prog_name)
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='List additional fields in output',
        )
        parser.add_argument(
            '--plugin',
            metavar="<plugin>",
            help="List cluster templates for specific plugin"
        )
        parser.add_argument(
            '--plugin-version',
            metavar="<plugin_version>",
            help="List cluster templates with specific version of the "
                 "plugin"
        )
        parser.add_argument(
            '--name',
            metavar="<name-substring>",
            help="List cluster templates with specific substring in the "
                 "name"
        )
        return parser

    def take_action(self, parsed_args):
        """List templates, filtered by plugin, version and name substring.

        Returns a (column_headers, rows) pair as expected by Lister.
        """
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        search_opts = {}
        if parsed_args.plugin:
            search_opts['plugin_name'] = parsed_args.plugin
        if parsed_args.plugin_version:
            # API v2 renamed 'hadoop_version' to 'plugin_version'.
            if utils.is_api_v2(self.app):
                search_opts['plugin_version'] = parsed_args.plugin_version
            else:
                search_opts['hadoop_version'] = parsed_args.plugin_version
        data = client.cluster_templates.list(search_opts=search_opts)
        if parsed_args.name:
            data = utils.get_by_name_substring(data, parsed_args.name)
        # Both the long and short form share the same leading columns and
        # the same header renaming, so compute the headers once.
        columns = ('name', 'id', 'plugin_name', 'hadoop_version')
        if parsed_args.long:
            columns += ('node_groups', 'description')
        column_headers = utils.prepare_column_headers(
            columns, {'hadoop_version': 'plugin_version'})
        return (
            column_headers,
            (osc_utils.get_item_properties(
                s,
                columns,
                formatters={
                    'node_groups': _format_node_groups_list
                }
            ) for s in data)
        )
class ShowClusterTemplate(command.ShowOne):
    """Display cluster template details"""

    log = logging.getLogger(__name__ + ".ShowClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template show`."""
        parser = super(ShowClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            "cluster_template",
            metavar="<cluster-template>",
            help="Name or id of the cluster template to display",
        )
        return parser

    def take_action(self, parsed_args):
        """Fetch the template, format it and return display columns."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        template = utils.get_resource(
            client.cluster_templates, parsed_args.cluster_template)
        data = template.to_dict()
        _format_ct_output(self.app, data)
        return self.dict2columns(utils.prepare_data(data, CT_FIELDS))
class DeleteClusterTemplate(command.Command):
    """Deletes cluster template"""

    log = logging.getLogger(__name__ + ".DeleteClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template delete`."""
        parser = super(DeleteClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            "cluster_template",
            metavar="<cluster-template>",
            nargs="+",
            help="Name(s) or id(s) of the cluster template(s) to delete",
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each named template, reporting success after each one."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        for template_ref in parsed_args.cluster_template:
            template_id = utils.get_resource_id(
                client.cluster_templates, template_ref)
            client.cluster_templates.delete(template_id)
            sys.stdout.write(
                'Cluster template "{ct}" has been removed '
                'successfully.\n'.format(ct=template_ref))
class UpdateClusterTemplate(command.ShowOne):
    """Updates cluster template"""

    log = logging.getLogger(__name__ + ".UpdateClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template update`.

        Tri-state flags (autoconfig/public/protected) default to None so
        that fields the user did not mention are left unchanged.
        """
        parser = super(UpdateClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            'cluster_template',
            metavar="<cluster-template>",
            help="Name or ID of the cluster template [REQUIRED]",
        )
        parser.add_argument(
            '--name',
            metavar="<name>",
            help="New name of the cluster template",
        )
        parser.add_argument(
            '--node-groups',
            metavar="<node-group:instances_count>",
            nargs="+",
            help="List of the node groups(names or IDs) and numbers of"
                 "instances for each one of them"
        )
        parser.add_argument(
            '--anti-affinity',
            metavar="<anti-affinity>",
            nargs="+",
            help="List of processes that should be added to an anti-affinity "
                 "group"
        )
        parser.add_argument(
            '--description',
            metavar="<description>",
            help='Description of the cluster template'
        )
        autoconfig = parser.add_mutually_exclusive_group()
        autoconfig.add_argument(
            '--autoconfig-enable',
            action='store_true',
            help='Instances of the cluster will be '
                 'automatically configured',
            dest='use_autoconfig'
        )
        autoconfig.add_argument(
            '--autoconfig-disable',
            action='store_false',
            help='Instances of the cluster will not be '
                 'automatically configured',
            dest='use_autoconfig'
        )
        public = parser.add_mutually_exclusive_group()
        public.add_argument(
            '--public',
            action='store_true',
            help='Make the cluster template public '
                 '(Visible from other projects)',
            dest='is_public'
        )
        public.add_argument(
            '--private',
            action='store_false',
            help='Make the cluster template private '
                 '(Visible only from this tenant)',
            dest='is_public'
        )
        protected = parser.add_mutually_exclusive_group()
        protected.add_argument(
            '--protected',
            action='store_true',
            help='Make the cluster template protected',
            dest='is_protected'
        )
        protected.add_argument(
            '--unprotected',
            action='store_false',
            help='Make the cluster template unprotected',
            dest='is_protected'
        )
        parser.add_argument(
            '--json',
            metavar='<filename>',
            help='JSON representation of the cluster template. Other '
                 'arguments will not be taken into account if this one is '
                 'provided'
        )
        parser.add_argument(
            '--shares',
            metavar='<filename>',
            help='JSON representation of the manila shares'
        )
        parser.add_argument(
            '--configs',
            metavar='<filename>',
            help='JSON representation of the cluster template configs'
        )
        parser.add_argument(
            '--domain-name',
            metavar='<domain-name>',
            default=None,
            help='Domain name for instances of this cluster template. This '
                 'option is available if \'use_designate\' config is True'
        )
        # None means "not specified" for the tri-state flags above.
        parser.set_defaults(is_public=None, is_protected=None,
                            use_autoconfig=None)
        return parser

    def _take_action(self, client, parsed_args, ct_id):
        """Update the template, either verbatim from --json or from flags.

        Returns the updated cluster template as a dict.

        Raises:
            exceptions.CommandError: On unreadable or invalid JSON input.
        """
        if parsed_args.json:
            # --json wins: send the file contents directly to the API.
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = json.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            data = client.cluster_templates.update(
                ct_id, **template).to_dict()
        else:
            plugin, plugin_version, node_groups = None, None, None
            if parsed_args.node_groups:
                plugin, plugin_version, node_groups = (
                    utils._cluster_templates_configure_ng(
                        self.app, parsed_args.node_groups, client))
            configs = None
            if parsed_args.configs:
                blob = osc_utils.read_blob_file_contents(parsed_args.configs)
                try:
                    configs = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'configs from file %s: %s' % (parsed_args.configs, e))
            shares = None
            if parsed_args.shares:
                blob = osc_utils.read_blob_file_contents(parsed_args.shares)
                try:
                    shares = json.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'shares from file %s: %s' % (parsed_args.shares, e))
            data = utils.update_cluster_template(self.app, client, plugin,
                                                 plugin_version, parsed_args,
                                                 configs, shares, node_groups,
                                                 ct_id)
        return data

    def take_action(self, parsed_args):
        """Entry point: resolve the id, update, format and display."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        ct_id = utils.get_resource_id(
            client.cluster_templates, parsed_args.cluster_template)
        data = self._take_action(client, parsed_args, ct_id)
        _format_ct_output(self.app, data)
        data = utils.prepare_data(data, CT_FIELDS)
        return self.dict2columns(data)
class ImportClusterTemplate(command.ShowOne):
    """Imports cluster template"""

    log = logging.getLogger(__name__ + ".ImportClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for `cluster template import`."""
        parser = super(ImportClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            'json',
            metavar="<json>",
            help="JSON containing cluster template",
        )
        parser.add_argument(
            '--name',
            metavar="<name>",
            help="Name of the cluster template",
        )
        parser.add_argument(
            '--default-image-id',
            metavar="<default_image_id>",
            help="Default image ID to be used",
        )
        parser.add_argument(
            '--node-groups',
            metavar="<node-group:instances_count>",
            nargs="+",
            required=True,
            help="List of the node groups(names or IDs) and numbers of "
                 "instances for each one of them"
        )
        return parser

    def _take_action(self, client, parsed_args):
        """Create the template described by the exported JSON file.

        Returns the created cluster template as a dict.

        Raises:
            exceptions.CommandError: On invalid JSON, or when the plugin /
                plugin version of the given node groups does not match the
                template's.
        """
        # Defensive double-check; argparse already enforces required=True.
        if (not parsed_args.node_groups):
            raise exceptions.CommandError('--node_groups should be specified')
        blob = osc_utils.read_blob_file_contents(parsed_args.json)
        try:
            template = json.loads(blob)
        except ValueError as e:
            raise exceptions.CommandError(
                'An error occurred when reading '
                'template from file %s: %s' % (parsed_args.json, e))
        if parsed_args.default_image_id:
            template['cluster_template']['default_image_id'] = (
                parsed_args.default_image_id)
        else:
            template['cluster_template']['default_image_id'] = None
        if parsed_args.name:
            template['cluster_template']['name'] = parsed_args.name
        # Accept the legacy key name and translate for the API.
        if 'neutron_management_network' in template['cluster_template']:
            template['cluster_template']['net_id'] = (
                template['cluster_template'].pop('neutron_management_network'))
        # BUG FIX: use the same helper the create/update commands use; the
        # previously-referenced name
        # `_cluster_templates_configure_ng_configure_node_groups` is not
        # what the sibling commands call and appears to be a typo.
        plugin, plugin_version, node_groups = (
            utils._cluster_templates_configure_ng(
                self.app, parsed_args.node_groups, client))
        if (('plugin_version' in template['cluster_template'] and
             template['cluster_template']['plugin_version'] !=
             plugin_version) or
                ('plugin' in template['cluster_template'] and
                 template['cluster_template']['plugin'] != plugin)):
            # Message typo fixed: 'of' -> 'or'.
            raise exceptions.CommandError(
                'Plugin or plugin version do not match between template '
                'and given node group templates')
        template['cluster_template']['node_groups'] = node_groups
        data = client.cluster_templates.create(
            **template['cluster_template']).to_dict()
        return data

    def take_action(self, parsed_args):
        """Entry point: import, format and display the template."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        data = self._take_action(client, parsed_args)
        _format_ct_output(self.app, data)
        data = utils.prepare_data(data, CT_FIELDS)
        return self.dict2columns(data)
class ExportClusterTemplate(command.Command):
    """Export cluster template to JSON"""

    log = logging.getLogger(__name__ + ".ExportClusterTemplate")

    def get_parser(self, prog_name):
        """Build the argument parser for the export command."""
        parser = super(ExportClusterTemplate, self).get_parser(prog_name)
        parser.add_argument(
            "cluster_template",
            metavar="<cluster-template>",
            help="Name or id of the cluster template to export",
        )
        parser.add_argument(
            "--file",
            metavar="<filename>",
            # Fixed help text: sentence break was missing.
            help="Name of the file cluster template should be exported to. "
                 "If not provided, print to stdout"
        )
        return parser

    def take_action(self, parsed_args):
        """Export the template as JSON to the given file or stdout."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.data_processing
        # Renamed from ngt_id: this command handles cluster templates,
        # not node group templates.
        ct_id = utils.get_resource_id(
            client.cluster_templates, parsed_args.cluster_template)
        response = client.cluster_templates.export(ct_id)
        result = json.dumps(response._info, indent=4) + "\n"
        if parsed_args.file:
            # 'out' rather than 'file' to avoid shadowing the builtin.
            with open(parsed_args.file, "w+") as out:
                out.write(result)
        else:
            sys.stdout.write(result)
| |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import socket
from urllib.parse import urlparse, parse_qs
import sublime
from ..logger import Log
from ..unix_socket import UnixSocketPath
from ..helpers import project_name, debug_enabled
from ..helpers import get_settings, active_view, get_interpreter
from ..vagrant import VagrantIPAddressGlobal, VagrantMachineGlobalInfo
class Interpreter(object):
    """Parses a configured Python Interpreter string.

    The raw string is either a plain local path/command (scheme
    ``local``) or a URL such as ``tcp://user:pass@host:port`` or
    ``vagrant://machine:port?network=forwarded`` pointing at a remote
    anaconda JsonServer.
    """

    def __init__(self, interpreter_string):
        self.__data = {}
        # Initialize before parsing: parsing may read the project_name
        # property (previously this worked only because __getattr__
        # silently returned None for the not-yet-set attribute).
        self.__project_name = ''
        self.__raw_interpreter = interpreter_string
        self.__parse_raw_interpreter()

    def __getattr__(self, attr_name):
        """Expose parsed data as attributes; unknown keys yield None."""
        return self.__data.get(attr_name, None)

    @property
    def raw_interpreter(self):
        """The unparsed interpreter string as configured."""
        return self.__raw_interpreter

    @property
    def for_local(self):
        """Returns True if this interpreter is configured for local."""
        return self.scheme == 'local'

    @property
    def for_remote(self):
        """Return True if this interpreter is configured for remote."""
        return self.scheme == 'tcp'

    @property
    def for_vagrant(self):
        """Return True if this interpreter is configured for vagrant."""
        return self.scheme == 'vagrant'

    @property
    def project_name(self):
        """Lazily resolve and cache the current project name."""
        if not self.__project_name:
            self.__project_name = project_name()
        return self.__project_name

    def renew_interpreter(self):
        """Renew the whole interpreter (no-op unless local)."""
        if not self.for_local:
            return
        self.__prepare_local_interpreter()

    def __prepare_local_interpreter(self):
        """Prepare data for the local interpreter if scheme is local."""
        view = active_view()
        self.__extract_port(view)
        self.__extract_paths(view)
        self.__extract_python_interpreter(view)
        self.__extract_script()
        args = [self.python, '-B', self.script_file, '-p', self.project_name]
        if self.port is not None:
            args.append(str(self.port))
        if len(self.paths) > 0:
            # Only forward extra paths that actually exist on disk.
            paths = [p for p in self.paths if os.path.exists(p)]
            args.extend(['-e', ','.join(paths)])
        # Our PID is passed last -- presumably so the server can exit
        # when Sublime dies (TODO: confirm against jsonserver.py).
        args.extend([str(os.getpid())])
        kwargs = {}
        folders = sublime.active_window().folders()
        if len(folders) > 0 and os.path.exists(folders[0]):
            kwargs['cwd'] = folders[0]
        self.__data['arguments'] = (args, kwargs)

    def __extract_port(self, view):
        """Extract the endpoint to connect to.

        On Linux a Unix domain socket path is used and no TCP port is
        needed; elsewhere we talk TCP to localhost using either the
        configured debug port or a fresh ephemeral port.
        """
        if sublime.platform() == 'linux':
            self.__data['host'] = self.__get_unix_domain_socket()
            return
        self.__data['host'] = 'localhost'
        # BUGFIX: an unconditional ``return`` used to sit here, leaving
        # all of the port selection below unreachable, so non-Linux
        # platforms never got a port at all.
        if debug_enabled(view):
            self.__data['port'] = get_settings(
                view, 'jsonserver_debug_port', 9999)
            return
        # Ask the OS for a free ephemeral port.
        s = socket.socket()
        s.bind(('', 0))
        self.__data['port'] = s.getsockname()[1]
        s.close()

    def __extract_paths(self, view):
        """Extract a list of paths to be added to jedi."""
        extra = get_settings(view, 'extra_paths', [])
        paths = [os.path.expanduser(os.path.expandvars(p)) for p in extra]
        try:
            paths.extend(sublime.active_window().folders())
        except AttributeError:
            Log.warning(
                'Your `extra_paths` configuration is a string but we are '
                'expecting a list of strings.'
            )
            # BUGFIX: recover by splitting the comma separated string as
            # the warning implies; the old code called .split on a list
            # (raising) and then called a nonexistent folder() method.
            paths = [
                os.path.expanduser(os.path.expandvars(p))
                for p in extra.split(',')
            ]
            paths.extend(sublime.active_window().folders())
        self.__data['paths'] = paths

    def __extract_python_interpreter(self, view):
        """Extract the configured python interpreter."""
        try:
            python = os.path.expanduser(
                os.path.expandvars(get_interpreter(view))
            )
            if '$VIRTUAL_ENV' in python:
                Log.warning(
                    'WARNING: your anaconda configured python interpreter '
                    'is {} but there is no $VIRTUAL_ENV key in your '
                    'environment, falling back to `python`'.format(python)
                )
                # BUGFIX: actually fall back, as the warning promises.
                python = 'python'
        except Exception:
            # Any failure while resolving the setting falls back to the
            # interpreter on PATH (was a bare except).
            python = 'python'
        self.__data['python'] = python

    def __extract_script(self):
        """Extract the jsonserver.py script location."""
        self.__data['script_file'] = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
            'anaconda_server', 'jsonserver.py'
        )

    def __get_unix_domain_socket(self):
        """Compound the Unix domain socket path (Linux only)."""
        if sublime.platform() != 'linux':
            return 'localhost'
        return UnixSocketPath(self.project_name).socket

    def __parse_raw_interpreter(self):
        """Parses the raw interpreter string for later simple use."""
        urldata = urlparse(self.__raw_interpreter)
        self.__data['scheme'] = urldata.scheme if urldata.scheme else 'local'
        if len(self.__data['scheme']) == 1:
            # A one-letter scheme is really a Windows drive letter.
            self.__data['scheme'] = 'local'
        if self.for_local:
            # we are set up for local, return now and do our thing
            return self.__prepare_local_interpreter()
        if urldata.query and 'manual=' in urldata.query:
            self.__data['scheme'] = 'tcp'
        netloc = urldata.netloc
        if '@' in urldata.netloc:
            # rsplit/maxsplit so passwords may themselves contain '@'
            # or ':' characters.
            left, netloc = netloc.rsplit('@', 1)
            self.__data['username'], self.__data['password'] = (
                left.split(':', 1))
        if self.for_remote:
            self.__data['host'], self.__data['port'] = netloc.split(':')
        if self.for_vagrant:
            self.__data['machine'], self.__data['port'] = netloc.split(':')
        if urldata.query:
            options = parse_qs(urldata.query)
            for key, value in options.items():
                # 'extra' and 'pathmap' may repeat, keep them as lists
                self.__data[key] = (
                    value if key in ['extra', 'pathmap'] else value[0]
                )
        if self.for_vagrant:
            self.__data['network'] = self.__data.get('network', 'forwarded')
            self.__data['interpreter'] = (
                self.__data.get('interpreter', 'python')
            )
            # BUGFIX: resolve the host lazily per network type; the old
            # dict literal eagerly queried vagrant for the machine IP
            # even when the network was 'forwarded'.
            network = self.network
            if network == 'forwarded':
                self.__data['host'] = 'localhost'
            elif network == 'private':
                self.__data['host'] = self.address
            elif network == 'public':
                self.__data['host'] = VagrantIPAddressGlobal(
                    VagrantMachineGlobalInfo(self.machine).machine_id,
                    self.dev
                ).ip_address
            else:
                # preserve the old dict-lookup failure mode
                raise KeyError(network)
        pathmap = {}
        for map_data in self.__data.get('pathmap', []):
            split_data = map_data.split(',')
            if len(split_data) != 2:
                Log.warning('pathmap corruption? -> {}'.format(map_data))
                continue
            local_path = os.path.expanduser(os.path.expandvars(split_data[0]))
            remote_path = os.path.expanduser(os.path.expandvars(split_data[1]))
            pathmap[local_path] = remote_path
        self.__data['pathmap'] = pathmap

    def __repr__(self):
        """String representation."""
        try:
            return ' '.join(self.arguments[0])
        except TypeError:
            # No local arguments were computed; dump the parsed data.
            # BUGFIX: the old loop discarded every concatenation
            # (``rep + k ...`` without ``+=``) and returned ''.
            return ''.join(
                '{}: {}\n'.format(key, value)
                for key, value in self.__data.items()
            )
| |
'''
Created on Jun 14, 2012
@author: bcaine
'''
import sys
sys.path = ['./FunctionalTest/RAS/lib'] + ['./dataFiles'] + ['./lib/vista'] + sys.path
from SCActions import SCActions
import TestHelper
def sc_test001(test_suite_details):
'''Basic appointment managment options
Make an Appointment, Check in, Check Out'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA, scheduling='Scheduling')
time = SC.schtime()
SC.signon()
SC.makeapp(patient='333224444', datetime=time)
time = SC.schtime(plushour=1)
now = datetime.datetime.now()
hour = now.hour + 1
SC.signon()
SC.checkin(vlist=['Three', str(hour), 'CHECKED-IN:'])
SC.signon()
SC.checkout(vlist1=['Three', str(hour), 'Checked In'], vlist2=['305.91', 'OTHER DRUG', 'RESULTING'], icd='305.91')
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def sc_test002(test_suite_details):
'''Basic appointment managment options
Make an Appointment (Scheduled and Unscheduled),
record a No-Show, Cancel an appointment and change patients'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA, scheduling='Scheduling')
time = SC.schtime()
SC.signon()
SC.makeapp(patient='655447777', datetime=time)
time = SC.schtime(plushour=1)
SC.signon()
SC.unschvisit(patient='345678233', patientname='Twelve')
SC.signon()
SC.noshow(appnum='3')
SC.signon()
SC.canapp(mult='1')
SC.signon()
SC.chgpatient(patient1='345678233', patient2='345238901', patientname1='Twelve', patientname2='Ten')
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def sc_test003(test_suite_details):
'''This tests clinic features such as change clinic, change daterange,
expand the entry, add and edit, and Patient demographics'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA, scheduling='Scheduling')
SC.signon()
SC.chgclinic()
SC.signon()
SC.chgdaterange()
SC.signon()
SC.teaminfo()
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def sc_test004(test_suite_details):
'''This tests clinic features such as expand the entry, add and edit, and Patient demographics'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA, scheduling='Scheduling')
time = SC.schtime(plushour=1)
SC.signon()
SC.makeapp(patient='345238901', datetime=time)
SC.signon()
SC.patdem(name='Ten', mult='2')
SC.signon()
SC.expandentry(vlist1=['TEN', 'SCHEDULED', '30'], vlist2=['Event', 'Date', 'User', 'TESTMASTER'],
vlist3=['NEXT AVAILABLE', 'NO', '0'], vlist4=['1933', 'MALE', 'UNANSWERED'],
vlist5=['Combat Veteran:', 'No check out information'], mult='2')
SC.signon()
SC.addedit(name='345623902', icd='305.91')
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def sc_test005(test_suite_details):
'''This test checks a patient into a clinic, then discharges him, then deletes his checkout'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA)
SC.signon()
SC.enroll(patient='543236666')
SC = SCActions(VistA, scheduling='Scheduling')
time = SC.schtime(plushour=1)
SC.signon()
SC.makeapp(patient='543236666', datetime=time)
SC.signon()
SC.discharge(patient='543236666', appnum='3')
SC.signon()
SC.checkout(vlist1=['One', 'No Action'], vlist2=['305.91', 'RESULTING'], icd='305.91', mult='3')
SC = SCActions(VistA, user='fakedoc1', code='1Doc!@#$')
SC.signon()
SC.deletecheckout(appnum='3')
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def sc_test006(test_suite_details):
'''This test will exercise the wait list functionality'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA = test_driver.connect_VistA(test_suite_details)
SC = SCActions(VistA, user='fakedoc1', code='1Doc!@#$')
SC.signon()
SC.waitlistentry(patient='323554545')
SC.waitlistdisposition(patient='323554545')
SC.signoff()
test_driver.post_test_run(test_suite_details)
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def startmon(test_suite_details):
'''Starts Coverage Monitor'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
VistA1 = test_driver.connect_VistA(test_suite_details)
VistA1.startCoverage(routines=['SC*', 'SD*'])
test_driver.post_test_run(test_suite_details)
'''
Close Vista
'''
VistA1.write('^\r^\r^\r')
VistA1.write('h\r')
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
def stopmon (test_suite_details):
''' STOP MONITOR'''
testname = sys._getframe().f_code.co_name
test_driver = TestHelper.TestDriver(testname)
test_driver.pre_test_run(test_suite_details)
try:
# Connect to VistA
VistA1 = test_driver.connect_VistA(test_suite_details)
VistA1.stopCoverage(path=(test_suite_details.result_dir + '/' + 'Scheduling_coverage.txt'))
test_driver.post_test_run(test_suite_details)
'''
Close Vista
'''
VistA1.write('^\r^\r^\r')
VistA1.write('h\r')
except TestHelper.TestError, e:
test_driver.exception_handling(test_suite_details, e)
else:
test_driver.try_else_handling(test_suite_details)
finally:
test_driver.finally_handling(test_suite_details)
test_driver.end_method_handling(test_suite_details)
# NOTE(review): the triple-quoted block below is commented-out legacy
# code kept as an inline string (a no-op expression). It appears to be
# superseded by test_driver.connect_VistA() used throughout this module
# -- confirm before deleting.
'''
def connect_VistA(testname, result_dir):
    # Connect to VistA
    from OSEHRAHelper import ConnectToMUMPS, PROMPT
    VistA = ConnectToMUMPS(logfile=result_dir + '/' + testname + '.txt', instance='', namespace='')
    if VistA.type == 'cache':
        try:
            VistA.ZN('VISTA')
        except IndexError, no_namechange:
            pass
    VistA.wait(PROMPT)
    return VistA
'''
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import warnings
from openstack_dashboard import exceptions
# Compact warning format: "Category: message" without file/line noise.
warnings.formatwarning = lambda message, category, *args, **kwargs: \
    '%s: %s' % (category.__name__, message)
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
BIN_DIR = os.path.abspath(os.path.join(ROOT_PATH, '..', 'bin'))
if ROOT_PATH not in sys.path:
    sys.path.append(ROOT_PATH)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Ensure that we always have a SECRET_KEY set, even when no local_settings.py
# file is present. See local_settings.py.example for full documentation on the
# horizon.utils.secret_key module and its use.
from horizon.utils import secret_key
LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'local')
SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH,
                                                   '.secret_key_store'))
# Branding and authentication URLs.
SITE_BRANDING = 'OpenStack Dashboard'
LOGIN_URL = '/auth/login/'
LOGOUT_URL = '/auth/logout/'
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause circular redirect loop
LOGIN_REDIRECT_URL = '/'
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
ROOT_URLCONF = 'openstack_dashboard.urls'
# Horizon framework configuration: enabled dashboards, landing page,
# AJAX limits, alert fade behaviour and exception categorization.
HORIZON_CONFIG = {
    'dashboards': ('project', 'admin', 'settings', 'router',),
    'default_dashboard': 'project',
    'user_home': 'openstack_dashboard.views.get_user_home',
    'ajax_queue_limit': 10,
    'auto_fade_alerts': {
        'delay': 3000,
        'fade_duration': 1500,
        'types': ['alert-success', 'alert-info']
    },
    'help_url': "http://docs.openstack.org",
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
}
# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True
# Middleware order matters: session/auth middleware must run before
# Horizon's own middleware.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'horizon.middleware.HorizonMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'horizon.context_processors.horizon',
    'openstack_dashboard.context_processors.openstack',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'horizon.loaders.TemplateLoader'
)
TEMPLATE_DIRS = (
    os.path.join(ROOT_PATH, 'templates'),
)
STATICFILES_FINDERS = (
    'compressor.finders.CompressorFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# django-compressor: compile LESS via lesscpy and make CSS URLs absolute.
COMPRESS_PRECOMPILERS = (
    ('text/less', ('lesscpy {infile}')),
)
COMPRESS_CSS_FILTERS = (
    'compressor.filters.css_default.CssAbsoluteFilter',
)
COMPRESS_ENABLED = True
COMPRESS_OUTPUT_DIR = 'dashboard'
COMPRESS_CSS_HASHING_METHOD = 'hash'
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
INSTALLED_APPS = (
    'openstack_dashboard',
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'compressor',
    'horizon',
    'openstack_dashboard.dashboards.project',
    'openstack_dashboard.dashboards.admin',
    'openstack_dashboard.dashboards.settings',
    'openstack_auth',
    'openstack_dashboard.dashboards.router',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# Cookie-backed sessions. SESSION_COOKIE_SECURE is False here;
# presumably TLS deployments enable it via local_settings -- confirm.
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800
# Identity stand-in (the gettext_noop idiom): language names below stay
# untranslated at import time.
gettext_noop = lambda s: s
LANGUAGES = (
    ('bg', gettext_noop('Bulgarian (Bulgaria)')),
    ('cs', gettext_noop('Czech')),
    ('en', gettext_noop('English')),
    ('es', gettext_noop('Spanish')),
    ('fr', gettext_noop('French')),
    ('it', gettext_noop('Italiano')),
    ('ja', gettext_noop('Japanese')),
    ('ko', gettext_noop('Korean (Korea)')),
    ('nl', gettext_noop('Dutch (Netherlands)')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Portuguese (Brazil)')),
    ('zh-cn', gettext_noop('Simplified Chinese')),
    ('zh-tw', gettext_noop('Traditional Chinese')),
)
LANGUAGE_CODE = 'en'
USE_I18N = True
USE_L10N = True
USE_TZ = True
OPENSTACK_KEYSTONE_DEFAULT_ROLE = 'Member'
DEFAULT_EXCEPTION_REPORTER_FILTER = 'horizon.exceptions.HorizonReporterFilter'
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
POLICY_FILES = {
    'identity': 'keystone_policy.json',
    'compute': 'nova_policy.json'
}
# Deployment-specific overrides; a missing file is not an error.
try:
    from local.local_settings import *  # noqa
except ImportError:
    logging.warning("No local_settings file found.")
from openstack_dashboard import policy
POLICY_CHECK_FUNCTION = policy.check
# Add HORIZON_CONFIG to the context information for offline compression
COMPRESS_OFFLINE_CONTEXT = {
    'STATIC_URL': STATIC_URL,
    'HORIZON_CONFIG': HORIZON_CONFIG
}
if DEBUG:
    logging.basicConfig(level=logging.DEBUG)
| |
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
import signal
import sys
from inspect import getdoc
from operator import attrgetter
import dockerpty
from docker.errors import APIError
from requests.exceptions import ReadTimeout
from .. import __version__
from .. import legacy
from ..config import parse_environment
from ..const import DEFAULT_TIMEOUT
from ..const import HTTP_TIMEOUT
from ..progress_stream import StreamOutputError
from ..project import ConfigurationError
from ..project import NoSuchService
from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import NeedsBuildError
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import get_version_info
from .utils import yesno
# Module-level logger and the shared stderr handler;
# TopLevelCommand.perform_command adjusts the handler's level and
# format depending on --verbose.
log = logging.getLogger(__name__)
console_handler = logging.StreamHandler(sys.stderr)
# Shown whenever the deprecated --allow-insecure-ssl flag is passed.
INSECURE_SSL_WARNING = """
Warning: --allow-insecure-ssl is deprecated and has no effect.
It will be removed in a future version of Compose.
"""
def main():
    """CLI entry point: dispatch the command, mapping every known
    failure to a logged message and exit code 1."""
    setup_logging()
    try:
        command = TopLevelCommand()
        command.sys_dispatch()
    except KeyboardInterrupt:
        log.error("\nAborting.")
        sys.exit(1)
    except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
        log.error(e.msg)
        sys.exit(1)
    except NoSuchCommand as e:
        log.error("No such command: %s", e.command)
        log.error("")
        # Show the "commands:" section of the top-level docstring.
        log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
        sys.exit(1)
    except APIError as e:
        log.error(e.explanation)
        sys.exit(1)
    except BuildError as e:
        log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
        sys.exit(1)
    except StreamOutputError as e:
        log.error(e)
        sys.exit(1)
    except NeedsBuildError as e:
        log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
        sys.exit(1)
    except ReadTimeout:
        log.error(
            "An HTTP request took too long to complete. Retry with --verbose to obtain debug information.\n"
            "If you encounter this issue regularly because of slow network conditions, consider setting "
            "COMPOSE_HTTP_TIMEOUT to a higher value (current value: %s)." % HTTP_TIMEOUT
        )
        # BUGFIX: exit non-zero like every other failure path above
        # (the bound exception variable was unused, so it is dropped).
        sys.exit(1)
def setup_logging():
    """Attach the shared console handler to the root logger.

    The handler's own level/format is tuned later by perform_command;
    the root logger simply forwards everything (DEBUG and up).
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(console_handler)
    # Disable requests logging
    logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
    """Return the stripped text of each docstring section matching *name*.

    A section is a line containing *name* (case-insensitive) together
    with any immediately following indented lines.
    """
    section_re = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
                            re.IGNORECASE | re.MULTILINE)
    matches = section_re.findall(source)
    return [match.strip() for match in matches]
class TopLevelCommand(Command):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs View output from containers
pause Pause services
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
unpause Unpause services
up Create and start containers
migrate-to-labels Recreate containers to add labels
version Show the Docker-Compose version information
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = get_version_info('compose')
return options
def perform_command(self, options, *args, **kwargs):
if options.get('--verbose'):
console_handler.setFormatter(logging.Formatter('%(name)s.%(funcName)s: %(message)s'))
console_handler.setLevel(logging.DEBUG)
else:
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
return super(TopLevelCommand, self).perform_command(options, *args, **kwargs)
    def build(self, project, options):
        """
        Build or rebuild services.
        Services are built once and then tagged as `project_service`,
        e.g. `composetest_db`. If you change a service's `Dockerfile` or the
        contents of its build directory, you can run `docker-compose build` to rebuild it.
        Usage: build [options] [SERVICE...]
        Options:
            --no-cache  Do not use cache when building the image.
        """
        # NOTE: docopt parses the docstring above to define this
        # command's CLI; do not edit it casually.
        no_cache = bool(options.get('--no-cache', False))
        project.build(service_names=options['SERVICE'], no_cache=no_cache)
    def help(self, project, options):
        """
        Get help on a command.
        Usage: help COMMAND
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        handler = self.get_handler(options['COMMAND'])
        # Print the handler's docstring (its own CLI help) and exit.
        raise SystemExit(getdoc(handler))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
    def logs(self, project, options):
        """
        View output from containers.
        Usage: logs [options] [SERVICE...]
        Options:
            --no-color  Produce monochrome output.
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        # stopped=True so logs of exited containers are shown as well.
        containers = project.containers(service_names=options['SERVICE'], stopped=True)
        monochrome = options['--no-color']
        print("Attaching to", list_containers(containers))
        LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
    def pause(self, project, options):
        """
        Pause services.
        Usage: pause [SERVICE...]
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        project.pause(service_names=options['SERVICE'])
    def port(self, project, options):
        """
        Print the public port for a port binding.
        Usage: port [options] SERVICE PRIVATE_PORT
        Options:
            --protocol=proto  tcp or udp [default: tcp]
            --index=index index of the container if there are multiple
                          instances of a service [default: 1]
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        # docopt supplies '1' as the default, so int() always has input.
        index = int(options.get('--index'))
        service = project.get_service(options['SERVICE'])
        try:
            container = service.get_container(number=index)
        except ValueError as e:
            # e.g. no container with that index exists
            raise UserError(str(e))
        # Prints '' when the private port is not bound on the host.
        print(container.get_local_port(
            options['PRIVATE_PORT'],
            protocol=options.get('--protocol') or 'tcp') or '')
    def ps(self, project, options):
        """
        List containers.
        Usage: ps [options] [SERVICE...]
        Options:
            -q    Only display IDs
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        # Include stopped containers and one-off runs, sorted by name.
        containers = sorted(
            project.containers(service_names=options['SERVICE'], stopped=True) +
            project.containers(service_names=options['SERVICE'], one_off=True),
            key=attrgetter('name'))
        if options['-q']:
            for container in containers:
                print(container.id)
        else:
            headers = [
                'Name',
                'Command',
                'State',
                'Ports',
            ]
            rows = []
            for container in containers:
                command = container.human_readable_command
                # Truncate long commands to keep the table readable.
                if len(command) > 30:
                    command = '%s ...' % command[:26]
                rows.append([
                    container.name,
                    command,
                    container.human_readable_state,
                    container.human_readable_ports,
                ])
            print(Formatter().table(headers, rows))
    def pull(self, project, options):
        """
        Pulls images for services.
        Usage: pull [options] [SERVICE...]
        Options:
            --allow-insecure-ssl  Deprecated - no effect.
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        # Deprecated flag: warn but carry on.
        if options['--allow-insecure-ssl']:
            log.warn(INSECURE_SSL_WARNING)
        project.pull(
            service_names=options['SERVICE'],
        )
    def rm(self, project, options):
        """
        Remove stopped service containers.
        Usage: rm [options] [SERVICE...]
        Options:
            -f, --force   Don't ask to confirm removal
            -v            Remove volumes associated with containers
        """
        # NOTE: docopt parses the docstring above; keep it intact.
        all_containers = project.containers(service_names=options['SERVICE'], stopped=True)
        # Only containers that are not running are eligible for removal.
        stopped_containers = [c for c in all_containers if not c.is_running]
        if len(stopped_containers) > 0:
            print("Going to remove", list_containers(stopped_containers))
            # --force skips the interactive confirmation.
            if options.get('--force') \
                    or yesno("Are you sure? [yN] ", default=False):
                project.remove_stopped(
                    service_names=options['SERVICE'],
                    v=options.get('-v', False)
                )
        else:
            print("No stopped containers")
def run(self, project, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
--allow-insecure-ssl Deprecated - no effect.
-d Detached mode: Run container in the background, print
new container name.
--name NAME Assign a name to the container
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
-p, --publish=[] Publish a container's port(s) to the host
--service-ports Run command with the service's ports enabled and mapped
to the host.
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
"""
service = project.get_service(options['SERVICE'])
if options['--allow-insecure-ssl']:
log.warn(INSECURE_SSL_WARNING)
if not options['--no-deps']:
deps = service.get_linked_service_names()
if len(deps) > 0:
project.up(
service_names=deps,
start_deps=True,
strategy=ConvergenceStrategy.never,
)
tty = True
if options['-d'] or options['-T'] or not sys.stdin.isatty():
tty = False
if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS']
else:
command = service.options.get('command')
container_options = {
'command': command,
'tty': tty,
'stdin_open': not options['-d'],
'detach': options['-d'],
}
if options['-e']:
container_options['environment'] = parse_environment(options['-e'])
if options['--entrypoint']:
container_options['entrypoint'] = options.get('--entrypoint')
if options['--rm']:
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
if options['--publish']:
container_options['ports'] = options.get('--publish')
if options['--publish'] and options['--service-ports']:
raise UserError(
'Service port mapping and manual port mapping '
'can not be used togather'
)
if options['--name']:
container_options['name'] = options['--name']
try:
container = service.create_container(
quiet=True,
one_off=True,
**container_options
)
except APIError as e:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False,
)
raise e
if options['-d']:
service.start_container(container)
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
if options['--rm']:
project.client.remove_container(container.id)
sys.exit(exit_code)
    def scale(self, project, options):
        """
        Set number of containers to run for a service.
        Numbers are specified in the form `service=num` as arguments.
        For example:
            $ docker-compose scale web=2 worker=3
        Usage: scale [options] [SERVICE=NUM...]
        Options:
            -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                       (default: 10)
        """
        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
        # Each positional argument must be of the form "service=num".
        for s in options['SERVICE=NUM']:
            if '=' not in s:
                raise UserError('Arguments to scale should be in the form service=num')
            # split on the first '=' only, so names containing '=' still parse.
            service_name, num = s.split('=', 1)
            try:
                num = int(num)
            except ValueError:
                raise UserError('Number of containers for service "%s" is not a '
                                'number' % service_name)
            project.get_service(service_name).scale(num, timeout=timeout)
    def start(self, project, options):
        """
        Start existing containers.
        Usage: start [SERVICE...]
        """
        # Delegates entirely to the project; no options beyond service names.
        project.start(service_names=options['SERVICE'])
    def stop(self, project, options):
        """
        Stop running containers without removing them.
        They can be started again with `docker-compose start`.
        Usage: stop [options] [SERVICE...]
        Options:
            -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                       (default: 10)
        """
        # Fall back to DEFAULT_TIMEOUT when --timeout is absent or empty.
        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
        project.stop(service_names=options['SERVICE'], timeout=timeout)
    def restart(self, project, options):
        """
        Restart running containers.
        Usage: restart [options] [SERVICE...]
        Options:
            -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                       (default: 10)
        """
        # Fall back to DEFAULT_TIMEOUT when --timeout is absent or empty.
        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
        project.restart(service_names=options['SERVICE'], timeout=timeout)
    def unpause(self, project, options):
        """
        Unpause services.
        Usage: unpause [SERVICE...]
        """
        # Delegates entirely to the project; no options beyond service names.
        project.unpause(service_names=options['SERVICE'])
    def up(self, project, options):
        """
        Builds, (re)creates, starts, and attaches to containers for a service.
        Unless they are already running, this command also starts any linked services.
        The `docker-compose up` command aggregates the output of each container. When
        the command exits, all containers are stopped. Running `docker-compose up -d`
        starts the containers in the background and leaves them running.
        If there are existing containers for a service, and the service's configuration
        or image was changed after the container's creation, `docker-compose up` picks
        up the changes by stopping and recreating the containers (preserving mounted
        volumes). To prevent Compose from picking up changes, use the `--no-recreate`
        flag.
        If you want to force Compose to stop and recreate all containers, use the
        `--force-recreate` flag.
        Usage: up [options] [SERVICE...]
        Options:
            --allow-insecure-ssl   Deprecated - no effect.
            -d                     Detached mode: Run containers in the background,
                                   print new container names.
            --no-color             Produce monochrome output.
            --no-deps              Don't start linked services.
            --force-recreate       Recreate containers even if their configuration and
                                   image haven't changed. Incompatible with --no-recreate.
            --no-recreate          If containers already exist, don't recreate them.
                                   Incompatible with --force-recreate.
            --no-build             Don't build an image, even if it's missing
            -t, --timeout TIMEOUT  Use this timeout in seconds for container shutdown
                                   when attached or when containers are already
                                   running. (default: 10)
        """
        if options['--allow-insecure-ssl']:
            log.warn(INSECURE_SSL_WARNING)
        monochrome = options['--no-color']
        start_deps = not options['--no-deps']
        service_names = options['SERVICE']
        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
        # The convergence strategy decides whether existing containers are
        # recreated; --force-recreate and --no-recreate are validated there.
        to_attach = project.up(
            service_names=service_names,
            start_deps=start_deps,
            strategy=convergence_strategy_from_opts(options),
            do_build=not options['--no-build'],
            timeout=timeout
        )
        if not options['-d']:
            # Attached mode: stream logs until interrupted, then stop containers.
            log_printer = build_log_printer(to_attach, service_names, monochrome)
            attach_to_logs(project, log_printer, service_names, timeout)
    def migrate_to_labels(self, project, _options):
        """
        Recreate containers to add labels
        If you're coming from Compose 1.2 or earlier, you'll need to remove or
        migrate your existing containers after upgrading Compose. This is
        because, as of version 1.3, Compose uses Docker labels to keep track
        of containers, and so they need to be recreated with labels added.
        If Compose detects containers that were created without labels, it
        will refuse to run so that you don't end up with two sets of them. If
        you want to keep using your existing containers (for example, because
        they have data volumes you want to preserve) you can migrate them with
        the following command:
            docker-compose migrate-to-labels
        Alternatively, if you're not worried about keeping them, you can
        remove them - Compose will just create new ones.
            docker rm -f myapp_web_1 myapp_db_1 ...
        Usage: migrate-to-labels
        """
        # No options; the legacy module performs the whole migration.
        legacy.migrate_project_to_labels(project)
    def version(self, project, options):
        """
        Show version informations
        Usage: version [--short]
        Options:
            --short     Shows only Compose's version number.
        """
        # --short prints just the version string; otherwise full build info.
        if options['--short']:
            print(__version__)
        else:
            print(get_version_info('full'))
def convergence_strategy_from_opts(options):
    """Translate --force-recreate/--no-recreate flags into a ConvergenceStrategy.

    Raises UserError when both flags are supplied, since they contradict.
    """
    force = options['--force-recreate']
    never = options['--no-recreate']
    if force and never:
        raise UserError("--force-recreate and --no-recreate cannot be combined.")
    if force:
        return ConvergenceStrategy.always
    return ConvergenceStrategy.never if never else ConvergenceStrategy.changed
def build_log_printer(containers, service_names, monochrome):
    """Create a LogPrinter for the containers of the requested services.

    An empty ``service_names`` means attach to every container.
    """
    if service_names:
        selected = [c for c in containers if c.service in service_names]
    else:
        selected = containers
    return LogPrinter(
        selected,
        attach_params={"logs": True},
        monochrome=monochrome)
def attach_to_logs(project, log_printer, service_names, timeout):
    # Stream container logs; on exit (typically the user's first Ctrl+C,
    # which interrupts run()), fall into the finally block below.
    print("Attaching to", list_containers(log_printer.containers))
    try:
        log_printer.run()
    finally:
        # Install a SIGINT handler *after* streaming ends so that a second
        # Ctrl+C force-kills the services instead of waiting for the
        # graceful stop below to complete.
        def handler(signal, frame):
            project.kill(service_names=service_names)
            sys.exit(0)
        signal.signal(signal.SIGINT, handler)
        print("Gracefully stopping... (press Ctrl+C again to force)")
        project.stop(service_names=service_names, timeout=timeout)
def list_containers(containers):
    """Return the container names joined as a comma-separated string."""
    names = [container.name for container in containers]
    return ", ".join(names)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import traceback
from uuid import uuid4
from django import VERSION
from six import string_types
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.shortcuts import resolve_url
from django.test import TestCase
from django.utils import six
# start configuration error messages
# Messages raised as ImproperlyConfigured by prepare_configuration().
IMPROPERLY_BUILT_CONFIGURATION_MSG = \
    'Every test method config should contain three or four elements (url, ' \
    'status, method, data=None).'
EMPTY_TEST_CONFIGURATION_MSG = \
    'django-skd-smoke TestCase has empty TESTS_CONFIGURATION.'
INCORRECT_TEST_CONFIGURATION_MSG = \
    'django-skd-smoke TestCase should define TESTS_CONFIGURATION list or ' \
    'tuple.'
UNSUPPORTED_CONFIGURATION_KEY_MSG = \
    'django-skd-smoke configuration does not support those keys: %s.'
# Positional config items, in order: (url, status, method). Each entry pairs
# a display name with the type accepted for that position.
REQUIRED_PARAMS = (
    {'display_name': 'url', 'expected_type': string_types},
    {'display_name': 'status', 'expected_type': int},
    {'display_name': 'method', 'expected_type': string_types},
)
def check_type(types):
    """Build a predicate reporting whether an object is an instance of *types*."""
    return lambda obj: isinstance(obj, types)
def list_or_callable(l):
    """True when *l* is a list/tuple or any callable."""
    return callable(l) or isinstance(l, (list, tuple))
def string_or_callable(v):
    """True when *v* is a string or any callable."""
    return callable(v) or isinstance(v, string_types)
def dict_or_callable(d):
    """True when *d* is a dict or any callable."""
    return callable(d) or isinstance(d, dict)
# name and function
# Maps each optional (dict) config key to a human-readable expected type name
# and the predicate used to validate its value.
NOT_REQUIRED_PARAM_TYPE_CHECK = {
    'comment': {'type': 'string', 'func': check_type(string_types)},
    'initialize': {'type': 'callable', 'func': callable},
    'url_args': {'type': 'list or callable', 'func': list_or_callable},
    'url_kwargs': {'type': 'dict or callable', 'func': dict_or_callable},
    'request_data': {'type': 'dict or callable', 'func': dict_or_callable},
    'user_credentials': {'type': 'dict or callable', 'func': dict_or_callable},
    'redirect_to': {'type': 'string or callable', 'func': string_or_callable},
}
# Keys for which a class-level "default_<key>" attribute may supply a value.
DEFAULTABLE_PARAMS = (
    'comment', 'initialize', 'url_args', 'url_kwargs', 'request_data', 'user_credentials', 'redirect_to',
)
INCORRECT_REQUIRED_PARAM_TYPE_MSG = \
    'django-skd-smoke: Configuration parameter "%s" with index=%s should be ' \
    '%s but is %s with next value: %s.'
INCORRECT_NOT_REQUIRED_PARAM_TYPE_MSG = \
    'django-skd-smoke: Configuration parameter "%s" should be ' \
    '%s but is %s with next value: %s.'
LINK_TO_DOCUMENTATION = \
    'For more information please refer to project documentation on ' \
    'https://github.com/steelkiwi/django-skd-smoke#configuration.'
UNKNOWN_HTTP_METHOD_MSG = \
    'Your django-skd-smoke configuration defines unknown http method: "%s".'
# HTTP methods accepted in test configurations (compared lowercase).
# BUG FIX: 'detete' -> 'delete'; the typo made valid DELETE configurations be
# rejected with UNKNOWN_HTTP_METHOD_MSG.
HTTP_METHODS = {'get', 'post', 'head', 'options', 'put', 'patch', 'delete',
                'trace'}
INCORRECT_USER_CREDENTIALS = \
    'Authentication process failed. Supplied user credentials are incorrect: '\
    '%r. Ensure that related user was created successfully.'
# end configuration error messages
def append_doc_link(error_message):
    """Append the documentation link to *error_message* on its own line."""
    return '\n'.join((error_message, LINK_TO_DOCUMENTATION))
def prepare_configuration(tests_configuration, defaults=None):
    """
    Prepares initial tests configuration. Raises exception if there is any
    problem with it.
    :param tests_configuration: initial tests configuration as tuple or list \
        with predefined structure
    :param defaults: pass in a dict of defined default functions attached to \
        the class. key should be full function name and value should be \
        attribute value (either a callable or a plain value)
    :return: adjusted configuration which should be used further
    :raises: ``django.core.exceptions.ImproperlyConfigured`` if there is any \
        problem with supplied ``tests_configuration``
    """
    defaults = defaults or {}
    confs = []
    if isinstance(tests_configuration, (tuple, list)):
        number_of_required_params = len(REQUIRED_PARAMS)
        total_number_of_params = number_of_required_params + 1
        for test_config in tests_configuration:
            if len(test_config) == number_of_required_params:
                # Normalize 3-tuples to 4-tuples by appending an empty
                # options dict; nothing to validate in that dict then.
                test_config += ({},)
                check_dict = False
            elif len(test_config) == total_number_of_params:
                # Reject option keys we do not recognize.
                diff = set(test_config[-1].keys()) - \
                    set(NOT_REQUIRED_PARAM_TYPE_CHECK.keys())
                if diff:
                    raise ImproperlyConfigured(
                        append_doc_link(UNSUPPORTED_CONFIGURATION_KEY_MSG %
                                        ', '.join(diff))
                    )
                check_dict = True
            else:
                raise ImproperlyConfigured(
                    append_doc_link(IMPROPERLY_BUILT_CONFIGURATION_MSG)
                )
            type_errors = []
            # required params check
            for idx, required_param in enumerate(test_config[:3]):
                required_type = REQUIRED_PARAMS[idx]['expected_type']
                if not isinstance(required_param, required_type):
                    type_errors.append(
                        INCORRECT_REQUIRED_PARAM_TYPE_MSG %
                        (REQUIRED_PARAMS[idx]['display_name'], idx,
                         required_type, type(required_param), required_param)
                    )
            # The http method must be a known one (case-insensitive); only
            # check when it is a string, otherwise the loop above reported it.
            http_method = test_config[2]
            if isinstance(http_method, REQUIRED_PARAMS[2]['expected_type']) \
                    and http_method.lower() not in HTTP_METHODS:
                type_errors.append(UNKNOWN_HTTP_METHOD_MSG % http_method)
            # not required params check
            if check_dict:
                for key, value in test_config[-1].items():
                    type_info = NOT_REQUIRED_PARAM_TYPE_CHECK[key]
                    function = type_info['func']
                    # If they passed in a None then it can be a signal to not use
                    # the default value/callable that may have been defined in the
                    # test class. Permit this None only if the config item is 'defaultable'
                    # and a default has been defined on the test class
                    if key in DEFAULTABLE_PARAMS and value is None:
                        default_attr_name = "default_" + key
                        if default_attr_name in defaults:
                            value = defaults[default_attr_name]
                    if not function(value):
                        type_errors.append(
                            INCORRECT_NOT_REQUIRED_PARAM_TYPE_MSG %
                            (key, type_info['type'], type(value), value)
                        )
            if type_errors:
                type_errors.append(LINK_TO_DOCUMENTATION)
                raise ImproperlyConfigured('\n'.join(type_errors))
            confs.append(test_config)
        if not confs:
            raise ImproperlyConfigured(
                append_doc_link(EMPTY_TEST_CONFIGURATION_MSG))
    else:
        raise ImproperlyConfigured(
            append_doc_link(INCORRECT_TEST_CONFIGURATION_MSG))
    return confs
def generate_fail_test_method(exception_stacktrace):
    """
    Generates test method which fails and informs user about occurred
    exception.
    :param exception_stacktrace: stacktrace of occurred exception
    :return: method which takes ``TestCase`` and calls its ``fail`` method \
        with provided ``exception_stacktrace``
    """
    def always_fail(self):
        self.fail(exception_stacktrace)
    return always_fail
def generate_test_method(urlname, status, method='GET', initialize=None,
                         url_args=None, url_kwargs=None, request_data=None,
                         user_credentials=None, redirect_to=None):
    """
    Generates test method which takes or calls ``url_args`` and ``url_kwargs``,
    resolves supplied ``urlname``, calls proper ``self.client`` method (get,
    post, etc.) with ``request_data`` if any and compares response status with
    parameter ``status`` using ``assertEqual``.
    :param urlname: plain url or urlname or namespace:urlname
    :param status: http status code
    :param method: http method (get, post, etc.)
    :param initialize: callable object which is called in the very beginning \
        of test method
    :param url_args: list or callable object which returns args list to \
        resolve url using ``django.shortcuts.resolve_url``
    :param url_kwargs: dict or callable object which returns kwargs dict to \
        resolve url using ``django.shortcuts.resolve_url``
    :param request_data: dict or callable object which returns dict to pass it\
        into http method request
    :param user_credentials: dict or callable object which returns dict to \
        login user using ``TestCase.client.login``
    :param redirect_to: plain url or callable which is checked if only expected status \
        code is one of the [301, 302, 303, 307]
    :return: new test method
    """
    def new_test_method(self):
        # Custom setup hook runs before anything else.
        if initialize:
            initialize(self)
        # Each configurable piece may be a plain value or a callable taking
        # the TestCase instance; resolve callables lazily here.
        if callable(url_args):
            prepared_url_args = url_args(self)
        else:
            prepared_url_args = url_args or []
        if callable(redirect_to):
            redirect_url = redirect_to(self)
        else:
            redirect_url = redirect_to
        if callable(url_kwargs):
            prepared_url_kwargs = url_kwargs(self)
        else:
            prepared_url_kwargs = url_kwargs or {}
        resolved_url = resolve_url(
            urlname, *prepared_url_args, **prepared_url_kwargs)
        if user_credentials:
            if callable(user_credentials):
                credentials = user_credentials(self)
            else:
                credentials = user_credentials
            logged_in = self.client.login(**credentials)
            self.assertTrue(
                logged_in, INCORRECT_USER_CREDENTIALS % credentials)
        # Dispatch to the matching test-client method (get, post, ...).
        function = getattr(self.client, method.lower())
        if callable(request_data):
            prepared_data = request_data(self)
        else:
            prepared_data = request_data or {}
        response = function(resolved_url, data=prepared_data)
        self.assertEqual(response.status_code, status)
        if status in (301, 302, 303, 307) and redirect_url:
            # fetch_redirect_response only exists from Django 1.7 on.
            extra_args = {"fetch_redirect_response": False} if VERSION >= (1, 7) else {}
            self.assertRedirects(response, redirect_url, **extra_args)
    return new_test_method
def prepare_test_name(urlname, method, status):
    """
    Prepares name for smoke test method with supplied parameters.
    :param urlname: initial urlname
    :param method: http method (get, post, etc.)
    :param status: http status code
    :return: test name
    """
    # Turn the url into an identifier-friendly chunk, then append a uuid so
    # several configs for the same url/method/status do not collide.
    url_part = urlname.replace(':', '_').strip('/').replace('/', '_')
    return 'test_smoke_%(url)s_%(method)s_%(status)s_%(uuid)s' % {
        'url': url_part,
        'method': method.lower(),
        'status': status,
        'uuid': uuid4().hex,
    }
def prepare_test_method_doc(method, urlname, status, status_text, data,
                            comment=None):
    """
    Prepares doc string to describe called http query.
    :param method: http method (get, post, etc.)
    :param urlname: initial urlname
    :param status: http status code
    :param status_text: humanized http status
    :param data: request data as dict or callable
    :param comment: comment is added to the end of the result
    :return: doc string
    """
    # A callable payload is shown by name; otherwise show the dict itself
    # (falling back to an empty dict for None).
    shown_data = data.__name__ if callable(data) else (data or {})
    doc = '%(method)s %(urlname)s %(status)s "%(status_text)s" %(data)r' % {
        'method': method.upper(),
        'urlname': urlname,
        'status': status,
        'status_text': status_text,
        'data': shown_data
    }
    # append comment to the end if any
    return '%s %s' % (doc, comment) if comment else doc
class GenerateTestMethodsMeta(type):
    """
    Metaclass which creates new test methods according to tests configuration.
    It adds them to subclass instances only. So the first user class of this
    metaclass will not got anything. It's done in order to have
    ``SmokeTestCase`` defined inside our project and give possibility to
    derive it anywhere without duplicating test methods creation
    (in ``SmokeTestCase`` and library user test case).
    """
    def __new__(mcs, name, bases, attrs):
        cls = super(GenerateTestMethodsMeta, mcs).__new__(
            mcs, name, bases, attrs)
        # If this metaclass is instantiating something found in one of these modules
        # then we skip generation of test cases, etc. Without this, we will erroneously
        # detect error case of TESTS_CONFIGURATION being None
        # Also, skip certain classes based on class name
        skip_modules = ['skd_smoke', ]
        skip_names = ['NewBase', 'SmokeTestCase', ]
        if attrs.get('__module__') in skip_modules or name in skip_names:
            return cls
        # Build a dict of all the 'defaults' they defined to go with their test class
        defined_defaults = {}
        for defaultable in DEFAULTABLE_PARAMS:
            attr_name = "default_" + defaultable
            if hasattr(cls, attr_name):
                defined_defaults[attr_name] = getattr(cls, attr_name)
        # noinspection PyBroadException
        try:
            config = prepare_configuration(cls.TESTS_CONFIGURATION, defined_defaults)
        except Exception:
            # Bad configuration: instead of crashing collection, attach one
            # failing test method that reports the full traceback.
            fail_method = generate_fail_test_method(traceback.format_exc())
            fail_method_name = cls.FAIL_METHOD_NAME
            fail_method.__name__ = str(fail_method_name)
            setattr(cls, fail_method_name, fail_method)
        else:
            for urlname, status, method, data in config:
                # For each config item, if it doesnt exist in config tuple's "data" then
                # see if user has defined a default on the test class
                find_conf = lambda name: data.get(name, getattr(cls, "default_" + name, None))
                comment = find_conf('comment')
                initialize = find_conf('initialize')
                url_args = find_conf('url_args')
                url_kwargs = find_conf('url_kwargs')
                request_data = find_conf('request_data')
                get_user_credentials = find_conf('user_credentials')
                redirect_to = find_conf('redirect_to')
                status_text = STATUS_CODE_TEXT.get(status, 'UNKNOWN')
                # Unique method name + generated body + human-readable docstring.
                test_method_name = prepare_test_name(urlname, method, status)
                test_method = generate_test_method(
                    urlname, status, method, initialize, url_args, url_kwargs,
                    request_data, get_user_credentials, redirect_to
                )
                test_method.__name__ = str(test_method_name)
                test_method.__doc__ = prepare_test_method_doc(
                    method, urlname, status, status_text, request_data,
                    comment
                )
                setattr(cls, test_method_name, test_method)
        return cls
class SmokeTestCase(six.with_metaclass(GenerateTestMethodsMeta, TestCase)):
    """
    TestCase which should be derived by any library user. It's required
    to define ``TESTS_CONFIGURATION`` inside subclass. It should be defined as
    tuple or list of tuples with next structure:
        (url, status, method,
            {'comment': None, 'initialize': None,
             'url_kwargs': None, 'request_data': None,
             'user_credentials': None, 'redirect_to': None})
    For more information please refer to project documentation:
    https://github.com/steelkiwi/django-skd-smoke#configuration
    """
    # Subclasses must override with a tuple/list of configs; leaving it None
    # triggers the generated failure method below.
    TESTS_CONFIGURATION = None
    # Name used by the metaclass for the method reporting a bad configuration.
    FAIL_METHOD_NAME = 'test_fail_cause_bad_configuration'
| |
# -*- coding: utf-8 -*-
"""Test Markdown lexers."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import re
from pprint import pprint
from ..base_lexer import BaseRenderer
from ..markdown import BlockLexer, InlineLexer, MarkdownWriter
from ...utils.utils import _show_outputs
# -----------------------------------------------------------------------------
# Test renderers
# -----------------------------------------------------------------------------
class BlockRenderer(BaseRenderer):
    """Renderer that records block-level events as a flat token list."""
    def __init__(self):
        super(BlockRenderer, self).__init__()
        self.output = []
    def paragraph(self, text):
        self.output.append('<p>')
        # Go through self.text() so subclasses may override text handling.
        self.text(text)
        self.output.append('</p>')
    def list_start(self, ordered=None):
        self._ordered = ordered
        self.output.append('<ol>' if ordered else '<ul>')
    def list_end(self):
        self.output.append('</ol>' if self._ordered else '</ul>')
        self._ordered = None
    def list_item_start(self):
        self.output.append('<li>')
    def list_item_end(self):
        self.output.append('</li>')
    def newline(self):
        self.output.append('\n')
    def text(self, text):
        self.output.append(text)
    def block_code(self, text, lang=None):
        self.output.extend(('<code>', text, '</code>'))
    def block_quote_start(self):
        self.output.append('<quote>')
    def block_quote_end(self):
        self.output.append('</quote>')
class InlineRenderer(BaseRenderer):
    """Renderer that records inline-level events as a flat token list.

    When *output* is given, tokens are appended to that list in place
    (FullBlockRenderer uses this to share one output list).
    """
    def __init__(self, output=None):
        super(InlineRenderer, self).__init__()
        if output is None:
            output = []
        self.output = output
    def text(self, text):
        self.output.append(text)
    def emphasis(self, text):
        self.output.append('<i>')
        self.text(text)
        self.output.append('</i>')
    def double_emphasis(self, text):
        self.output.append('<b>')
        self.text(text)
        self.output.append('</b>')
    def codespan(self, text):
        self.output.append('<codespan>')
        self.text(text)
        self.output.append('</codespan>')
    def linebreak(self):
        self.output.append('<br>')
    def link(self, link, title, text):
        self.output.append('<a>')
        self.text(text)
        # BUG FIX: was `self.output('</a>')`, which calls the list object
        # itself and raises TypeError whenever a link is rendered.
        self.output.append('</a>')
class FullBlockRenderer(BlockRenderer):
    """Block renderer whose text is further tokenized with the inline lexer,
    sharing this renderer's output list."""
    def text(self, text):
        inline_renderer = InlineRenderer(self.output)
        inline_lexer = InlineLexer(renderer=inline_renderer)
        inline_lexer.read(text)
# -----------------------------------------------------------------------------
# Tests Markdown block lexer
# -----------------------------------------------------------------------------
# Shared fixture: paragraph, unordered list, fenced code, ordered list, quote.
_TEST_TEXT = ("First *paragraph*.\n**Second** line.\n\n"
              "* Item 1.\n* Item 2.\n\n```\ncode\n```\n\n"
              "1. First.\n2. Second.\n\n"
              "> End.\n")
def test_block_lexer():
    """Block lexer emits block tokens; inline markup stays untouched."""
    renderer = BlockRenderer()
    text = _TEST_TEXT
    lexer = BlockLexer(renderer=renderer)
    lexer.read(text)
    expected = ['<p>', 'First *paragraph*.\n**Second** line.', '</p>',
                '<ul>',
                '<li>', 'Item 1.', '</li>',
                '<li>', 'Item 2.', '</li>',
                '</ul>',
                '<code>', 'code', '</code>',
                '<ol>',
                '<li>', 'First.', '</li>',
                '<li>', 'Second.', '</li>',
                '</ol>',
                '<quote>', '<p>', 'End.', '</p>', '</quote>'
                ]
    assert renderer.output == expected
def test_block_lexer_list():
    """Nested list items produce nested <ul> tokens inside the parent <li>."""
    renderer = BlockRenderer()
    text = "* 1\n* 2\n  * 2.1\n* 3"
    lexer = BlockLexer(renderer=renderer)
    lexer.read(text)
    expected = ['<ul>',
                '<li>', '1', '</li>',
                '<li>', '2',
                '<ul>',
                '<li>', '2.1', '</li>',
                '</ul>',
                '</li>',
                '<li>', '3', '</li>',
                '</ul>',
                ]
    assert renderer.output == expected
# -----------------------------------------------------------------------------
# Tests Markdown inline lexer
# -----------------------------------------------------------------------------
def test_inline_lexer():
    """Inline lexer tokenizes emphasis, strong and line breaks."""
    renderer = InlineRenderer()
    text = ("First *paragraph*.\n**Second** line.")
    lexer = InlineLexer(renderer=renderer)
    lexer.read(text)
    expected = ['First ',
                '<i>', 'paragraph', '</i>',
                '.',
                '<br>',
                '<b>', 'Second', '</b>',
                ' line.'
                ]
    assert renderer.output == expected
def test_brackets():
    """Square brackets without a link target pass through as plain text."""
    renderer = InlineRenderer()
    text = ("Some [1] reference.")
    lexer = InlineLexer(renderer=renderer)
    lexer.read(text)
    expected = ['Some ',
                '[1] reference.',
                ]
    assert renderer.output == expected
# -----------------------------------------------------------------------------
# Tests full Markdown lexer
# -----------------------------------------------------------------------------
def test_full_lexer():
    """Block + inline pass: inline markup inside paragraphs is tokenized."""
    renderer = FullBlockRenderer()
    lexer = BlockLexer(renderer=renderer)
    text = _TEST_TEXT
    lexer.read(text)
    expected = ['<p>',
                'First ', '<i>', 'paragraph', '</i>', '.',
                '<br>',
                '<b>', 'Second', '</b>', ' line.',
                '</p>',
                '<ul>',
                '<li>', 'Item 1.', '</li>',
                '<li>', 'Item 2.', '</li>',
                '</ul>',
                '<code>', 'code', '</code>',
                '<ol>',
                '<li>', 'First.', '</li>',
                '<li>', 'Second.', '</li>',
                '</ol>',
                '<quote>', '<p>', 'End.', '</p>', '</quote>'
                ]
    assert renderer.output == expected
# -----------------------------------------------------------------------------
# Test Markdown writer
# -----------------------------------------------------------------------------
def test_markdown_writer_newline():
    """ensure_newline(n) normalizes trailing newlines regardless of input."""
    w = MarkdownWriter()
    w.text('Hello.')
    w.ensure_newline(1)
    w.text('Hello.\n')
    w.ensure_newline(1)
    w.text('Hello.\n\n')
    w.ensure_newline(1)
    w.text('Hello.\n\n\n')
    w.ensure_newline(2)
    w.text('End')
    expected = ('Hello.\n' * 4) + '\nEnd\n'
    assert w.contents == expected
def test_markdown_writer():
    """End-to-end check of MarkdownWriter over every supported construct."""
    # Expected document, joined from lines for readability.
    expected = '\n'.join(("# First chapter",
                          "",
                          "**Hello** *world*!",
                          "How are you? Some `code`.",
                          "",
                          "> Good, and you?",
                          "> End of citation.",
                          "",
                          "* Item **1**.",
                          "* Item 2.",
                          "",
                          "1. 1",
                          "  * 1.1",
                          "    * 1.1.1",
                          "2. 2",
                          "",
                          "```",
                          "print(\"Hello world!\")",
                          "```",
                          "",
                          ("Go to [google](http://www.google.com). "
                           "And here is an image for you:"),
                          "",
                          "\n"))
    w.heading('First chapter', 1)
    w.newline()
    w.bold('Hello')
    w.text(' ')
    w.italic('world')
    w.text('!')
    w.linebreak()
    w.text('How are you? Some ')
    w.inline_code('code')
    w.text('.')
    w.newline()
    w.quote_start()
    w.text('Good, and you?')
    w.linebreak()
    w.text('End of citation.')
    w.quote_end()
    w.newline()
    w.list_item('Item ')
    w.bold('1')
    w.text('.')
    w.linebreak()
    w.list_item('Item 2.')
    w.newline()
    w.numbered_list_item('1')
    w.linebreak()
    w.list_item('1.1', level=1)
    w.linebreak()
    w.list_item('1.1.1', level=2)
    w.linebreak()
    w.numbered_list_item('2')
    w.newline()
    w.code_start()
    w.text('print("Hello world!")')
    w.code_end()
    w.newline()
    w.text('Go to ')
    w.link('google', 'http://www.google.com')
    w.text('. And here is an image for you:')
    w.newline()
    w.image('Some image', 'my_image.png')
    _show_outputs(w.contents, expected)
    assert w.contents == expected
| |
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from pytest import raises
from multiconf import mc_config, ConfigItem, RepeatableConfigItem, ConfigException
from multiconf.decorators import nested_repeatables, named_as
from multiconf.envs import EnvFactory
from .utils.utils import replace_ids, local_func
from .utils.tstclasses import ItemWithAA
# Single-environment factory: only 'prod'.
ef1_prod = EnvFactory()
prod1 = ef1_prod.Env('prod')
# Two-environment factory: 'pp' and 'prod', grouped under 'g_prod_like'.
ef2_pp_prod = EnvFactory()
pp2 = ef2_pp_prod.Env('pp')
prod2 = ef2_pp_prod.Env('prod')
ef2_pp_prod.EnvGroup('g_prod_like', prod2, pp2)
@named_as('recursive_items')
@nested_repeatables('recursive_items')
class NestedRepeatable(RepeatableConfigItem):
    # Repeatable item that can contain more of itself under 'recursive_items'.
    def __init__(self, mc_key, aa=None):
        super().__init__(mc_key=mc_key)
        self.id = mc_key
        self.aa = aa
class KwargsItem(ConfigItem):
    # Convenience item: every keyword argument becomes an attribute.
    def __init__(self, **kwargs):
        super().__init__()
        for key, val in kwargs.items():
            setattr(self, key, val)
def test_find_contained_in_named_as():
    """find_contained_in(named_as=...) walks up to the closest matching parent."""
    @named_as('x')
    @nested_repeatables('recursive_items')
    class X(ItemWithAA):
        pass
    @named_as('y')
    class Y(ItemWithAA):
        pass
    @nested_repeatables('recursive_items')
    class root3(ItemWithAA):
        pass
    @mc_config(ef2_pp_prod, load_now=True)
    def config(_):
        # Build a nesting: root3 > x > recursive_items['bb'] > x >
        # recursive_items['dd'] > y, with distinct aa per level.
        with root3() as cr:
            cr.aa = 0
            NestedRepeatable(mc_key=0)
            with X() as ci:
                ci.setattr('aa', prod=0, pp=2)
                NestedRepeatable(mc_key='aa')
                with NestedRepeatable(mc_key='bb') as ci:
                    NestedRepeatable(mc_key='cc')
                    with X() as ci:
                        ci.setattr('aa', prod=1, pp=2)
                        with NestedRepeatable(mc_key='dd') as ci:
                            ci.setattr('aa', prod=2, pp=2)
                            with Y() as ci:
                                ci.setattr('aa', prod=3, pp=2)
    cr = config(prod2).root3
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].y.find_contained_in(named_as='x').aa == 1
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].y.find_contained_in(named_as='root3').aa == 0
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].y.find_contained_in(named_as='recursive_items').aa == 2
    assert cr.x.recursive_items['bb'].x.find_contained_in(named_as='x').aa == 0
def test_find_attribute_attribute_name():
    """find_attribute(name) searches self first, then each enclosing item."""
    @named_as('x')
    @nested_repeatables('recursive_items')
    class X(ItemWithAA):
        pass
    @nested_repeatables('recursive_items')
    class root4(ItemWithAA):
        pass
    @mc_config(ef2_pp_prod, load_now=True)
    def config(_):
        with root4(aa=-1) as cr:
            cr.setattr('q', default='q0', mc_set_unknown=True)
            NestedRepeatable(mc_key=0)
            with X() as ci:
                ci.setattr('yy', default=999, mc_set_unknown=True)
                ci.setattr('aa', prod=0, pp=20)
                NestedRepeatable(mc_key='aa', aa=9)
                with NestedRepeatable(mc_key='bb') as ci:
                    NestedRepeatable(mc_key='cc', aa=7)
                    with X() as ci:
                        ci.aa = 117
                        ci.setattr('bb', prod='b1', pp='b21', mc_set_unknown=True)
                        with NestedRepeatable(mc_key='dd') as ci:
                            ci.setattr('aa', prod=2, pp=22)
                            with X() as ci:
                                ci.setattr('aa', prod=3, pp=23)
    cr = config(prod2).root4
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].x.find_attribute('aa') == 3
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].x.find_attribute('bb') == 'b1'
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].x.find_attribute('q') == 'q0'
    assert cr.x.recursive_items['bb'].x.find_attribute(name='aa') == 117
    assert cr.x.recursive_items['bb'].x.contained_in.find_attribute(name='aa') is None
    assert cr.x.recursive_items['bb'].x.find_attribute(name='yy') == 999
    # Find Item, not attribute, on parent
    assert cr.x.recursive_items['bb'].x.recursive_items['dd'].x.find_attribute('recursive_items')['dd'].aa == 2
def test_find_contained_in_or_none():
    """find_contained_in_or_none returns None instead of raising when absent."""
    i1_exp = [None]
    @mc_config(ef2_pp_prod, load_now=True)
    def config(_):
        with KwargsItem(aa=1) as i1:
            # Capture the outer item so the assertion below can compare it.
            i1_exp[0] = i1
            KwargsItem(aa=2)
    cr = config(prod2)
    assert cr.KwargsItem.KwargsItem.find_contained_in_or_none('notthere') is None
    assert cr.KwargsItem.KwargsItem.find_contained_in_or_none('KwargsItem') == i1_exp[0]
def test_find_attribute_or_none():
    """find_attribute_or_none returns None (instead of raising) when the name
    is not found anywhere in the parent hierarchy."""
    expected_sibling = [None]

    @mc_config(ef2_pp_prod, load_now=True)
    def config(_):
        with KwargsItem(aa=1, my_attr=0) as outer:
            outer.my_attr = 7
            KwargsItem(aa=2)
            expected_sibling[0] = ConfigItem()

    cr = config(prod2)
    inner = cr.KwargsItem.KwargsItem
    assert inner.find_attribute_or_none('notthere') is None
    assert inner.find_attribute_or_none('my_attr') == 7
    # Finds an item (not a plain attribute) defined on the parent.
    assert inner.find_attribute_or_none('ConfigItem') == expected_sibling[0]
# Expected error text compared verbatim against the library's message;
# 'hieracy' (sic) matches the library's actual spelling - do not correct it.
_find_contained_in_named_as_not_found_expected = """Searching from: <class 'test.find_test.%(local_func)sY'>: Could not find a parent container named as: 'notthere' in hieracy with names: ['someitems', 'x', 'someitems', 'x', 'root', 'McConfigRoot']"""
def test_find_contained_in_named_as_not_found():
    """find_contained_in raises ConfigException, listing every parent name,
    when no parent matches the requested 'named_as'."""
    @named_as('someitems')
    @nested_repeatables('someitems')
    class NestedRepeatable(RepeatableConfigItem):
        def __init__(self, mc_key):
            super().__init__(mc_key=mc_key)
            self.id = mc_key
            self.a = None
    @named_as('x')
    @nested_repeatables('someitems')
    class X(ItemWithAA):
        pass
    @named_as('y')
    class Y(ItemWithAA):
        pass
    @nested_repeatables('someitems')
    class root(ItemWithAA):
        pass
    @mc_config(ef1_prod, load_now=True)
    def config(_):
        with root(aa=0):
            NestedRepeatable(mc_key=0)
            with X() as ci:
                ci.setattr('aa', prod=0)
                NestedRepeatable(mc_key='a')
                with NestedRepeatable(mc_key='b') as ci:
                    NestedRepeatable(mc_key='c')
                    with X() as ci:
                        ci.setattr('aa', prod=1)
                        with NestedRepeatable(mc_key='d') as ci:
                            ci.setattr('a', prod=2)
                            with Y() as ci:
                                ci.setattr('aa', prod=3)
    cr = config(prod1).root
    with raises(ConfigException) as exinfo:
        # No parent anywhere in the chain is named 'notthere'.
        cr.x.someitems['b'].x.someitems['d'].y.find_contained_in(named_as='notthere').a
    assert replace_ids(str(exinfo.value)) == _find_contained_in_named_as_not_found_expected % dict(local_func=local_func())
# Expected error text compared verbatim against the library's message;
# 'hieracy' (sic) matches the library's actual spelling - do not correct it.
_find_attribute_with_attribute_name_not_found = """Searching from: <class 'test.find_test.%(local_func)sX'>: Could not find an attribute named: 'e' in hieracy with names: ['x', 'someitems', 'x', 'someitems', 'x', 'root', 'McConfigRoot']"""
def test_find_attribute_with_attribute_name_not_found():
    """find_attribute raises ConfigException, listing every parent name, when
    the attribute exists nowhere in the parent hierarchy."""
    @named_as('someitems')
    @nested_repeatables('someitems')
    class NestedRepeatable(RepeatableConfigItem):
        def __init__(self, mc_key):
            super().__init__(mc_key=mc_key)
            self.id = mc_key
            self.a = None
    @named_as('x')
    @nested_repeatables('someitems')
    class X(ItemWithAA):
        pass
    @nested_repeatables('someitems')
    class root(ItemWithAA):
        def __init__(self, aa):
            super().__init__(aa=aa)
            self.q = None
    @mc_config(ef1_prod, load_now=True)
    def config(_):
        with root(aa=0) as cr:
            cr.q = 17
            NestedRepeatable(mc_key=1)
            with X() as ci:
                ci.setattr('aa', prod=0)
                with NestedRepeatable(mc_key='a') as nr:
                    nr.a = 9
                with NestedRepeatable(mc_key='b') as ci:
                    with NestedRepeatable(mc_key='c') as nr:
                        nr.a = 7
                    with X(aa=1) as ci:
                        ci.setattr('b', prod=1, mc_set_unknown=True)
                        with NestedRepeatable(mc_key='d') as ci:
                            ci.setattr('a', prod=2)
                            with X() as ci:
                                ci.setattr('aa', prod=3)
    cr = config(prod1).root
    with raises(ConfigException) as exinfo:
        # 'e' is not defined on any item from the start point up to the root.
        assert cr.x.someitems['b'].x.someitems['d'].x.find_attribute('e') == 3
    assert replace_ids(str(exinfo.value)) == _find_attribute_with_attribute_name_not_found % dict(local_func=local_func())
| |
import functools
import json
from collections import namedtuple
from contextlib import contextmanager
import dcos_launch
import dcos_launch.cli
import dcos_launch.config
import dcos_launch.onprem
import dcos_launch.platforms
import dcos_test_utils
import dcos_test_utils.ssh_client
import pytest
from dcos_launch.util import get_temp_config_path, stub
from dcos_test_utils.helpers import Host
class MockTunnelled:
    """Stand-in for an SSH tunnel: accepts the tunnel API but does nothing."""

    def __init__(self, base_cmd: list, target: str):
        pass

    def command(self, cmd, **kwargs):
        # Every "remote" command succeeds with empty output.
        return b''

    def copy_file(self, src, dst):
        pass


@contextmanager
def mocked_context(*args, **kwargs):
    """ To be directly patched into an ssh.tunnel invocation to prevent
    any real SSH attempt
    """
    yield MockTunnelled([], 'foo')
@pytest.fixture
def mocked_test_runner(monkeypatch):
    """Make the integration-test runner a no-op that reports success (0)."""
    monkeypatch.setattr(dcos_launch.util, 'try_to_output_unbuffered', stub(0))
@pytest.fixture
def mock_ssh_client(monkeypatch):
    """Neutralize all SSH interaction so no real connections are attempted."""
    # monkeypatch.setattr(dcos_test_utils.ssh_client, 'Tunnelled', MockTunnelled)
    monkeypatch.setattr(dcos_test_utils.ssh_client, 'open_tunnel', mocked_context)
    monkeypatch.setattr(dcos_test_utils.ssh_client.SshClient, 'command', stub(b''))
    monkeypatch.setattr(dcos_test_utils.ssh_client.SshClient, 'get_home_dir', stub(b''))
    # need to nullify platforms.onprem
    monkeypatch.setattr(dcos_launch.platforms.onprem, 'prepare_bootstrap', stub('foo'))
    monkeypatch.setattr(dcos_launch.platforms.onprem, 'install_dcos', stub(None))
@pytest.fixture
def ssh_key_path(tmpdir):
    """Write a mock SSH private key into tmpdir and return its path as str."""
    key_file = tmpdir.join('ssh_key')
    key_file.write(dcos_launch.util.MOCK_SSH_KEY_DATA)
    return str(key_file)
class MockStack:
    """Minimal stand-in for a created CloudFormation stack."""
    def __init__(self):
        # stack_id is the only attribute read by the code under test.
        self.stack_id = dcos_launch.util.MOCK_STACK_ID
class MockGceWrapper:
    """Replacement __init__ for GcpWrapper.

    Wires up a deployment-manager mock whose insert/delete/get calls all
    return a request object that reports a finished ('DONE') operation.
    """
    def __init__(self, credentials_dict):
        DeploymentManagerMock = namedtuple('DeploymentManagerMock', 'deployments')
        DeploymentFunctionsMock = namedtuple('DeploymentFunctionsMock', 'insert delete get')
        ApiRequestMock = namedtuple('ApiRequestMock', 'execute')
        self.project_id = ''
        # Executing any API request immediately reports a completed operation.
        api_request_mock = ApiRequestMock(lambda: {'operation': {'status': 'DONE'}})
        self.deployment_manager = DeploymentManagerMock(lambda: DeploymentFunctionsMock(stub(api_request_mock),
                                                                                        stub(api_request_mock),
                                                                                        stub(api_request_mock)))
# Canned host/instance data returned by the mocked cloud APIs.
mock_pub_priv_host = Host('127.0.0.1', '12.34.56')  # host with a public IP
mock_priv_host = Host('127.0.0.1', None)  # private-only host
MOCK_GCE_DEPLOYMENT_INFO = {'operation': {'status': 'DONE'}}
MOCK_GCE_INSTANCE_INFO = {'name': 'mock_instance',
                          'networkInterfaces': [{'networkIP': 'mock_net_ip',
                                                 'accessConfigs': [{'natIP': 'mock_nat_ip'}]}],
                          'metadata': {'fingerprint': 'mock_fingerprint'}}
@pytest.fixture
def mocked_aws_cf(monkeypatch, mocked_test_runner):
    """Stub the whole AWS CloudFormation stack lifecycle
    (create/wait/describe/delete/key-pair).

    Does not include SSH key mocking
    """
    # mock credentials
    monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'AEF234DFLDWQMNEZ2')
    monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'ASDPFOKAWEFN123')
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosCfStack, '__init__', stub(None))
    monkeypatch.setattr(
        dcos_launch.platforms.aws, 'fetch_stack',
        lambda stack_name, bw: dcos_launch.platforms.aws.DcosCfStack(stack_name, bw))
    # mock create
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'create_stack', stub(MockStack()))
    # mock wait
    monkeypatch.setattr(dcos_launch.platforms.aws.CfStack, 'wait_for_complete', stub('DELETE_COMPLETE'))
    monkeypatch.setattr(dcos_launch.platforms.aws.CfStack, 'get_status', stub('CREATE_COMPLETE'))
    # mock describe
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosCfStack, 'get_master_ips',
                        stub([mock_pub_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosCfStack, 'get_private_agent_ips',
                        stub([mock_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosCfStack, 'get_public_agent_ips',
                        stub([mock_pub_priv_host]))
    # mock delete
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosCfStack, 'delete', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'delete_key_pair', stub(None))
    # mock config
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'create_key_pair',
                        stub(dcos_launch.util.MOCK_SSH_KEY_DATA))
@pytest.fixture
def mocked_aws_zen_cf(monkeypatch, mocked_aws_cf):
    """Extend the plain CF mocks with Zen-template stack and VPC/network stubs."""
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosZenCfStack, '__init__', stub(None))
    monkeypatch.setattr(
        dcos_launch.platforms.aws, 'fetch_stack',
        lambda stack_name, bw: dcos_launch.platforms.aws.DcosZenCfStack(stack_name, bw))
    # mock create
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'create_vpc_tagged', stub(dcos_launch.util.MOCK_VPC_ID))
    monkeypatch.setattr(
        dcos_launch.platforms.aws.BotoWrapper, 'create_internet_gateway_tagged',
        stub(dcos_launch.util.MOCK_GATEWAY_ID))
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'create_subnet_tagged',
                        stub(dcos_launch.util.MOCK_SUBNET_ID))
    # mock delete
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'delete_subnet', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'delete_vpc', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.aws.BotoWrapper, 'delete_internet_gateway', stub(None))
    # mock describe
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosZenCfStack, 'get_master_ips',
                        stub([mock_pub_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosZenCfStack, 'get_private_agent_ips',
                        stub([mock_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosZenCfStack, 'get_public_agent_ips',
                        stub([mock_pub_priv_host]))
    # mock delete
    monkeypatch.setattr(dcos_launch.platforms.aws.DcosZenCfStack, 'delete', stub(None))
@pytest.fixture
def mocked_azure(monkeypatch, mocked_test_runner):
    """Fake Azure credentials and stub the full ARM deployment lifecycle."""
    monkeypatch.setenv('AZURE_CLIENT_ID', 'AEF234DFLDWQMNEZ2')
    monkeypatch.setenv('AZURE_CLIENT_SECRET', 'ASDPFOKAWEFN123')
    monkeypatch.setenv('AZURE_TENANT_ID', 'ASDPFOKAWEFN123')
    monkeypatch.setenv('AZURE_SUBSCRIPTION_ID', 'ASDPFOKAWEFN123')
    monkeypatch.setattr(dcos_launch.platforms.arm.ServicePrincipalCredentials, '__init__', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.ResourceManagementClient, '__init__', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.NetworkManagementClient, '__init__', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.AzureWrapper, 'deploy_template_to_new_resource_group', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'wait_for_deployment', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'delete', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'get_master_ips',
                        stub([mock_pub_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'get_private_agent_ips',
                        stub([mock_priv_host]))
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'get_public_agent_ips',
                        stub([mock_pub_priv_host]))
    # The fqdn attributes are replaced with plain strings (not callables).
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'public_agent_lb_fqdn', 'abc-foo-bar')
    monkeypatch.setattr(dcos_launch.platforms.arm.DcosAzureResourceGroup, 'public_master_lb_fqdn', 'dead-beef')
@pytest.fixture
def mocked_gcp(monkeypatch, mock_ssh_client):
    """Fake GCP credentials and stub the GCE wrapper and on-prem launcher."""
    monkeypatch.setenv('GCE_CREDENTIALS', '{"project_id":"foobar"}')
    monkeypatch.setenv('GCE_ZONE', 'us-west1-a')
    monkeypatch.setattr(dcos_launch.platforms.gcp.GcpWrapper, '__init__', MockGceWrapper.__init__)
    monkeypatch.setattr(dcos_launch.platforms.gcp.GcpWrapper, 'get_instance_info',
                        lambda _, __: MOCK_GCE_INSTANCE_INFO)
    monkeypatch.setattr(dcos_launch.platforms.gcp.GcpWrapper, 'list_group_instances',
                        lambda _, __: [{'instance': 'mock'}])
    monkeypatch.setattr(dcos_launch.gcp.OnPremLauncher, 'key_helper', lambda self: self.config.update(
        {'ssh_private_key': dcos_launch.util.MOCK_SSH_KEY_DATA, 'ssh_public_key': dcos_launch.util.MOCK_SSH_KEY_DATA}))
    # One mock host per requested node, regardless of role.
    monkeypatch.setattr(dcos_launch.gcp.OnPremLauncher, 'get_cluster_hosts',
                        lambda self: [mock_pub_priv_host] * (self.config['num_masters'] +
                                                             self.config['num_public_agents'] +
                                                             self.config['num_private_agents']))
    monkeypatch.setattr(dcos_launch.gcp.OnPremLauncher, 'get_bootstrap_host', lambda self: mock_pub_priv_host)
@pytest.fixture
def mocked_aws_cfstack_bare_cluster(monkeypatch, mock_ssh_client, mocked_aws_cf):
    """Stubs for launching onto a bare AWS cluster (no DC/OS CF template)."""
    monkeypatch.setattr(dcos_launch.platforms.aws.BareClusterCfStack, '__init__', stub(None))
    monkeypatch.setattr(dcos_launch.platforms.aws.BareClusterCfStack, 'delete', stub(None))
    # Use the (real) abstract implementation for get_onprem_cluster.
    monkeypatch.setattr(dcos_launch.aws.OnPremLauncher, 'get_onprem_cluster',
                        dcos_launch.onprem.AbstractOnpremLauncher.get_onprem_cluster)
    monkeypatch.setattr(
        dcos_launch.platforms.aws.BareClusterCfStack, 'get_cluster_host_ips', stub([mock_pub_priv_host] * 4))
    monkeypatch.setattr(dcos_launch.platforms.aws.BareClusterCfStack, 'get_bootstrap_ip', stub(mock_pub_priv_host))
    monkeypatch.setattr(
        dcos_launch.platforms.aws, 'fetch_stack', lambda stack_name,
        bw: dcos_launch.platforms.aws.BareClusterCfStack(stack_name, bw))
@pytest.fixture
def mocked_terraform(monkeypatch):
    """Only fake credentials are needed; terraform itself is not invoked here."""
    monkeypatch.setenv('GCE_CREDENTIALS', '{"project_id":"foobar"}')
# --- Config-path fixtures ---------------------------------------------------
# Each fixture renders one canned YAML config into tmpdir and returns its
# path; the platform mocks it depends on are applied as a side effect of
# pytest fixture resolution.
@pytest.fixture
def aws_cf_config_path(tmpdir, ssh_key_path, mocked_aws_cf):
    return get_temp_config_path(tmpdir, 'aws-cf.yaml', update={'ssh_private_key_filename': ssh_key_path})
@pytest.fixture
def aws_cf_with_helper_config_path(tmpdir, mocked_aws_cf):
    return get_temp_config_path(tmpdir, 'aws-cf-with-helper.yaml')
@pytest.fixture
def aws_zen_cf_config_path(tmpdir, ssh_key_path, mocked_aws_zen_cf):
    return get_temp_config_path(tmpdir, 'aws-zen-cf.yaml')
@pytest.fixture
def aws_cf_no_pytest_config_path(tmpdir, mocked_aws_cf):
    return get_temp_config_path(tmpdir, 'aws-cf-no-pytest.yaml')
@pytest.fixture
def azure_config_path(tmpdir, mocked_azure, ssh_key_path):
    return get_temp_config_path(tmpdir, 'azure.yaml', update={'ssh_private_key_filename': ssh_key_path})
@pytest.fixture
def azure_with_helper_config_path(tmpdir, mocked_azure):
    return get_temp_config_path(tmpdir, 'azure-with-helper.yaml')
@pytest.fixture
def aws_onprem_config_path(tmpdir, ssh_key_path, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem.yaml', update={
        'ssh_private_key_filename': ssh_key_path})
@pytest.fixture
def aws_onprem_with_helper_config_path(tmpdir, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem-with-helper.yaml')
@pytest.fixture
def aws_onprem_with_extra_volumes_config_path(tmpdir, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem-with-extra-volumes.yaml')
@pytest.fixture
def aws_onprem_with_extra_iam_config_path(tmpdir, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem-with-extra-iam.yaml')
@pytest.fixture
def aws_onprem_enterprise_config_path(tmpdir, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem-enterprise-with-helper.yaml')
@pytest.fixture
def mock_genconf_dir(tmpdir):
    """ For testing genconf_dir and providing onprem configuration via a local
    genconf dir. Similarly, the DC/OS config can be provided by a 'dcos_config' field
    in the dcos-launch config.yaml or it can be provided in a (native) genconf/config.yaml
    """
    genconf_dir = tmpdir.join('genconf')
    genconf_dir.ensure(dir=True)
    # Written verbatim as the native genconf/config.yaml consumed by DC/OS.
    genconf_dir.join('config.yaml').write("""
---
cluster_name: My Awesome DC/OS
resolvers:
- 8.8.4.4
- 8.8.8.8
dns_search: mesos
master_discovery: static
exhibitor_storage_backend: static
""")
    return str(genconf_dir)
# More config-path fixtures: genconf-dir based and GCP on-prem variants.
@pytest.fixture
def aws_onprem_with_genconf_config_path(tmpdir, mock_genconf_dir, mocked_aws_cfstack_bare_cluster):
    return get_temp_config_path(tmpdir, 'aws-onprem-with-genconf.yaml', update={
        'genconf_dir': mock_genconf_dir})
@pytest.fixture
def gcp_onprem_config_path(tmpdir, ssh_key_path, mocked_gcp):
    return get_temp_config_path(tmpdir, 'gcp-onprem.yaml', update={
        'ssh_private_key_filename': ssh_key_path})
@pytest.fixture
def gcp_onprem_with_helper_config_path(tmpdir, mocked_gcp):
    return get_temp_config_path(tmpdir, 'gcp-onprem-with-helper.yaml')
@pytest.fixture
def gcp_onprem_with_fd_helper_config_path(tmpdir, mocked_gcp):
    return get_temp_config_path(tmpdir, 'gcp-onprem-with-fd-helper.yaml')
def check_cli(cmd):
    """Run *cmd* through the dcos-launch CLI and require a zero exit code."""
    exit_code = dcos_launch.cli.main(cmd)
    assert exit_code == 0, 'Command failed! {}'.format(' '.join(cmd))
def check_success(capsys, tmpdir, config_path):
    """
    Runs through the required functions of a launcher and then
    runs through the default usage of the script for a
    given config path and info path, ensuring each step passes
    if all steps finished successfully, this parses and returns the generated
    info JSON and stdout description JSON for more specific checks
    """
    # Test launcher directly first
    config = dcos_launch.config.get_validated_config_from_path(config_path)
    launcher = dcos_launch.get_launcher(config)
    info = launcher.create()
    # Grab the launcher again with the output from create
    launcher = dcos_launch.get_launcher(info)
    launcher.wait()
    launcher.describe()
    launcher.test([], {})
    launcher.delete()
    info_path = str(tmpdir.join('my_specific_info.json'))  # test non-default name
    # Now check launcher via CLI
    check_cli(['create', '--config-path={}'.format(config_path), '--info-path={}'.format(info_path)])
    # use the info written to disk to ensure JSON parsable
    with open(info_path) as f:
        info = json.load(f)
    check_cli(['wait', '--info-path={}'.format(info_path)])
    # clear stdout capture
    capsys.readouterr()
    check_cli(['describe', '--info-path={}'.format(info_path)])
    # capture stdout from describe and ensure JSON parse-able
    description = json.loads(capsys.readouterr()[0])
    # general assertions about description
    assert 'masters' in description
    assert 'private_agents' in description
    assert 'public_agents' in description
    check_cli(['pytest', '--info-path={}'.format(info_path)])
    check_cli(['delete', '--info-path={}'.format(info_path)])
    return info, description
@pytest.fixture
def check_cli_success(capsys, tmpdir):
    """Fixture form of check_success with capsys/tmpdir pre-bound; tests
    call the returned partial with just a config path."""
    return functools.partial(check_success, capsys, tmpdir)
| |
from datetime import datetime, timedelta
import json
from uuid import uuid4
import six
from cqlengine import Model, ValidationError
from cqlengine import columns
from cqlengine.management import sync_table, drop_table
from cqlengine.tests.base import BaseCassEngTestCase
class TestSetModel(Model):
    """Model with one Set column per element type under test."""
    partition = columns.UUID(primary_key=True, default=uuid4)
    int_set = columns.Set(columns.Integer, required=False)
    text_set = columns.Set(columns.Text, required=False)
class JsonTestColumn(columns.Column):
    """Column that stores arbitrary values JSON-serialized as text."""

    db_type = 'text'

    def to_python(self, value):
        # None passes through; non-string values are assumed already decoded.
        if value is None:
            return
        if not isinstance(value, six.string_types):
            return value
        return json.loads(value)

    def to_database(self, value):
        return None if value is None else json.dumps(value)
class TestSetColumn(BaseCassEngTestCase):
    """Round-trip, validation and partial-update tests for columns.Set."""

    @classmethod
    def setUpClass(cls):
        super(TestSetColumn, cls).setUpClass()
        drop_table(TestSetModel)
        sync_table(TestSetModel)

    @classmethod
    def tearDownClass(cls):
        super(TestSetColumn, cls).tearDownClass()
        drop_table(TestSetModel)

    def test_add_none_fails(self):
        """None is not a valid set element."""
        with self.assertRaises(ValidationError):
            # Only the raise matters; the previous unused 'm =' binding is gone.
            TestSetModel.create(int_set=set([None]))

    def test_empty_set_initial(self):
        """
        tests that sets are set() by default, should never be none
        :return:
        """
        m = TestSetModel.create()
        m.int_set.add(5)
        m.save()

    def test_deleting_last_item_should_succeed(self):
        m = TestSetModel.create()
        m.int_set.add(5)
        m.save()
        m.int_set.remove(5)
        m.save()
        m = TestSetModel.get(partition=m.partition)
        self.assertNotIn(5, m.int_set)

    def test_blind_deleting_last_item_should_succeed(self):
        m = TestSetModel.create()
        m.int_set.add(5)
        m.save()
        TestSetModel.objects(partition=m.partition).update(int_set=set())
        m = TestSetModel.get(partition=m.partition)
        self.assertNotIn(5, m.int_set)

    def test_empty_set_retrieval(self):
        m = TestSetModel.create()
        m2 = TestSetModel.get(partition=m.partition)
        m2.int_set.add(3)

    def test_io_success(self):
        """ Tests that a basic usage works as expected """
        m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'})
        m2 = TestSetModel.get(partition=m1.partition)
        assert isinstance(m2.int_set, set)
        assert isinstance(m2.text_set, set)
        assert 1 in m2.int_set
        assert 2 in m2.int_set
        assert 'kai' in m2.text_set
        assert 'andreas' in m2.text_set

    def test_type_validation(self):
        """
        Tests that attempting to use the wrong types will raise an exception
        """
        with self.assertRaises(ValidationError):
            TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})

    def test_element_count_validation(self):
        """
        Tests that big collections are detected and raise an exception.
        """
        # 65535 elements is the boundary; one more must be rejected.
        TestSetModel.create(text_set={str(uuid4()) for i in range(65535)})
        with self.assertRaises(ValidationError):
            TestSetModel.create(text_set={str(uuid4()) for i in range(65536)})

    def test_partial_updates(self):
        """ Tests that partial updates work as expected """
        m1 = TestSetModel.create(int_set={1, 2, 3, 4})
        m1.int_set.add(5)
        m1.int_set.remove(1)
        assert m1.int_set == {2, 3, 4, 5}
        m1.save()
        m2 = TestSetModel.get(partition=m1.partition)
        assert m2.int_set == {2, 3, 4, 5}

    def test_instantiation_with_column_class(self):
        """
        Tests that columns instantiated with a column class work properly
        and that the class is instantiated in the constructor
        """
        column = columns.Set(columns.Text)
        assert isinstance(column.value_col, columns.Text)

    def test_instantiation_with_column_instance(self):
        """
        Tests that columns instantiated with a column instance work properly
        """
        column = columns.Set(columns.Text(min_length=100))
        assert isinstance(column.value_col, columns.Text)

    def test_to_python(self):
        """ Tests that to_python of value column is called """
        column = columns.Set(JsonTestColumn)
        val = {1, 2, 3}
        db_val = column.to_database(val)
        assert db_val.value == {json.dumps(v) for v in val}
        py_val = column.to_python(db_val.value)
        assert py_val == val

    def test_default_empty_container_saving(self):
        """ tests that the default empty container is not saved if it hasn't been updated """
        pkey = uuid4()
        # create a row with set data
        TestSetModel.create(partition=pkey, int_set={3, 4})
        # create another with no set data
        TestSetModel.create(partition=pkey)
        m = TestSetModel.get(partition=pkey)
        self.assertEqual(m.int_set, {3, 4})
class TestListModel(Model):
    """Model with one List column per element type under test."""
    partition = columns.UUID(primary_key=True, default=uuid4)
    int_list = columns.List(columns.Integer, required=False)
    text_list = columns.List(columns.Text, required=False)
class TestListColumn(BaseCassEngTestCase):
    """Round-trip, validation and partial-update tests for columns.List."""

    @classmethod
    def setUpClass(cls):
        super(TestListColumn, cls).setUpClass()
        drop_table(TestListModel)
        sync_table(TestListModel)

    @classmethod
    def tearDownClass(cls):
        super(TestListColumn, cls).tearDownClass()
        drop_table(TestListModel)

    def test_initial(self):
        """A freshly created model's list defaults to an appendable list."""
        tmp = TestListModel.create()
        tmp.int_list.append(1)

    def test_initial_retrieval(self):
        """A retrieved model's empty list is also appendable.

        Fixed: this method was previously also named 'test_initial', which
        redefined (shadowed) the method above so it never ran.
        """
        tmp = TestListModel.create()
        tmp2 = TestListModel.get(partition=tmp.partition)
        tmp2.int_list.append(1)

    def test_io_success(self):
        """ Tests that a basic usage works as expected """
        m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas'])
        m2 = TestListModel.get(partition=m1.partition)
        assert isinstance(m2.int_list, list)
        assert isinstance(m2.text_list, list)
        assert len(m2.int_list) == 2
        assert len(m2.text_list) == 2
        assert m2.int_list[0] == 1
        assert m2.int_list[1] == 2
        assert m2.text_list[0] == 'kai'
        assert m2.text_list[1] == 'andreas'

    def test_type_validation(self):
        """
        Tests that attempting to use the wrong types will raise an exception
        """
        with self.assertRaises(ValidationError):
            TestListModel.create(int_list=['string', True], text_list=[1, 3.0])

    def test_element_count_validation(self):
        """
        Tests that big collections are detected and raise an exception.
        """
        # 65535 elements is the boundary; one more must be rejected.
        TestListModel.create(text_list=[str(uuid4()) for i in range(65535)])
        with self.assertRaises(ValidationError):
            TestListModel.create(text_list=[str(uuid4()) for i in range(65536)])

    def test_partial_updates(self):
        """ Tests that partial updates work as expected """
        final = list(range(10))
        initial = final[3:7]
        m1 = TestListModel.create(int_list=initial)
        m1.int_list = final
        m1.save()
        m2 = TestListModel.get(partition=m1.partition)
        assert list(m2.int_list) == final

    def test_instantiation_with_column_class(self):
        """
        Tests that columns instantiated with a column class work properly
        and that the class is instantiated in the constructor
        """
        column = columns.List(columns.Text)
        assert isinstance(column.value_col, columns.Text)

    def test_instantiation_with_column_instance(self):
        """
        Tests that columns instantiated with a column instance work properly
        """
        column = columns.List(columns.Text(min_length=100))
        assert isinstance(column.value_col, columns.Text)

    def test_to_python(self):
        """ Tests that to_python of value column is called """
        column = columns.List(JsonTestColumn)
        val = [1, 2, 3]
        db_val = column.to_database(val)
        assert db_val.value == [json.dumps(v) for v in val]
        py_val = column.to_python(db_val.value)
        assert py_val == val

    def test_default_empty_container_saving(self):
        """ tests that the default empty container is not saved if it hasn't been updated """
        pkey = uuid4()
        # create a row with list data
        TestListModel.create(partition=pkey, int_list=[1, 2, 3, 4])
        # create another with no list data
        TestListModel.create(partition=pkey)
        m = TestListModel.get(partition=pkey)
        self.assertEqual(m.int_list, [1, 2, 3, 4])

    def test_remove_entry_works(self):
        pkey = uuid4()
        tmp = TestListModel.create(partition=pkey, int_list=[1, 2])
        tmp.int_list.pop()
        tmp.update()
        tmp = TestListModel.get(partition=pkey)
        self.assertEqual(tmp.int_list, [1])

    def test_update_from_non_empty_to_empty(self):
        pkey = uuid4()
        tmp = TestListModel.create(partition=pkey, int_list=[1, 2])
        tmp.int_list = []
        tmp.update()
        tmp = TestListModel.get(partition=pkey)
        self.assertEqual(tmp.int_list, [])

    def test_insert_none(self):
        pkey = uuid4()
        with self.assertRaises(ValidationError):
            TestListModel.create(partition=pkey, int_list=[None])

    def test_blind_list_updates_from_none(self):
        """ Tests that updates from None work as expected """
        m = TestListModel.create(int_list=None)
        expected = [1, 2]
        m.int_list = expected
        m.save()
        m2 = TestListModel.get(partition=m.partition)
        assert m2.int_list == expected
        TestListModel.objects(partition=m.partition).update(int_list=[])
        m3 = TestListModel.get(partition=m.partition)
        assert m3.int_list == []
class TestMapModel(Model):
    """Model with one Map column per key/value type pairing under test."""
    partition = columns.UUID(primary_key=True, default=uuid4)
    int_map = columns.Map(columns.Integer, columns.UUID, required=False)
    text_map = columns.Map(columns.Text, columns.DateTime, required=False)
class TestMapColumn(BaseCassEngTestCase):
    """Round-trip, validation and partial-update tests for columns.Map."""

    @classmethod
    def setUpClass(cls):
        super(TestMapColumn, cls).setUpClass()
        drop_table(TestMapModel)
        sync_table(TestMapModel)

    @classmethod
    def tearDownClass(cls):
        super(TestMapColumn, cls).tearDownClass()
        drop_table(TestMapModel)

    def test_empty_default(self):
        tmp = TestMapModel.create()
        tmp.int_map['blah'] = 1

    def test_add_none_as_map_key(self):
        with self.assertRaises(ValidationError):
            TestMapModel.create(int_map={None: 1})

    def test_add_none_as_map_value(self):
        with self.assertRaises(ValidationError):
            # Fixed: previously passed {None: 1} (a None *key*, duplicating the
            # test above); a None *value* is what this test must reject.
            TestMapModel.create(int_map={1: None})

    def test_empty_retrieve(self):
        tmp = TestMapModel.create()
        tmp2 = TestMapModel.get(partition=tmp.partition)
        tmp2.int_map['blah'] = 1

    def test_remove_last_entry_works(self):
        tmp = TestMapModel.create()
        tmp.text_map["blah"] = datetime.now()
        tmp.save()
        del tmp.text_map["blah"]
        tmp.save()
        tmp = TestMapModel.get(partition=tmp.partition)
        # Fixed: previously asserted on int_map, which was never touched, so
        # the check passed vacuously; the entry was added to text_map.
        self.assertNotIn("blah", tmp.text_map)

    def test_io_success(self):
        """ Tests that a basic usage works as expected """
        k1 = uuid4()
        k2 = uuid4()
        now = datetime.now()
        then = now + timedelta(days=1)
        m1 = TestMapModel.create(int_map={1: k1, 2: k2}, text_map={'now': now, 'then': then})
        m2 = TestMapModel.get(partition=m1.partition)
        assert isinstance(m2.int_map, dict)
        assert isinstance(m2.text_map, dict)
        assert 1 in m2.int_map
        assert 2 in m2.int_map
        assert m2.int_map[1] == k1
        assert m2.int_map[2] == k2
        assert 'now' in m2.text_map
        assert 'then' in m2.text_map
        # Timestamps may lose sub-millisecond precision on the round trip.
        assert (now - m2.text_map['now']).total_seconds() < 0.001
        assert (then - m2.text_map['then']).total_seconds() < 0.001

    def test_type_validation(self):
        """
        Tests that attempting to use the wrong types will raise an exception
        """
        with self.assertRaises(ValidationError):
            TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5})

    def test_element_count_validation(self):
        """
        Tests that big collections are detected and raise an exception.
        """
        # 65535 entries is the boundary; one more must be rejected.
        TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)})
        with self.assertRaises(ValidationError):
            TestMapModel.create(text_map={str(uuid4()): i for i in range(65536)})

    def test_partial_updates(self):
        """ Tests that partial updates work as expected """
        now = datetime.now()
        # derez it a bit
        now = datetime(*now.timetuple()[:-3])
        early = now - timedelta(minutes=30)
        earlier = early - timedelta(minutes=30)
        later = now + timedelta(minutes=30)
        initial = {'now': now, 'early': earlier}
        final = {'later': later, 'early': early}
        m1 = TestMapModel.create(text_map=initial)
        m1.text_map = final
        m1.save()
        m2 = TestMapModel.get(partition=m1.partition)
        assert m2.text_map == final

    def test_updates_from_none(self):
        """ Tests that updates from None work as expected """
        m = TestMapModel.create(int_map=None)
        expected = {1: uuid4()}
        m.int_map = expected
        m.save()
        m2 = TestMapModel.get(partition=m.partition)
        assert m2.int_map == expected
        m2.int_map = None
        m2.save()
        m3 = TestMapModel.get(partition=m.partition)
        assert m3.int_map != expected

    def test_blind_updates_from_none(self):
        """ Tests that updates from None work as expected """
        m = TestMapModel.create(int_map=None)
        expected = {1: uuid4()}
        m.int_map = expected
        m.save()
        m2 = TestMapModel.get(partition=m.partition)
        assert m2.int_map == expected
        TestMapModel.objects(partition=m.partition).update(int_map={})
        m3 = TestMapModel.get(partition=m.partition)
        assert m3.int_map != expected

    def test_updates_to_none(self):
        """ Tests that setting the field to None works as expected """
        m = TestMapModel.create(int_map={1: uuid4()})
        m.int_map = None
        m.save()
        m2 = TestMapModel.get(partition=m.partition)
        assert m2.int_map == {}

    def test_instantiation_with_column_class(self):
        """
        Tests that columns instantiated with a column class work properly
        and that the class is instantiated in the constructor
        """
        column = columns.Map(columns.Text, columns.Integer)
        assert isinstance(column.key_col, columns.Text)
        assert isinstance(column.value_col, columns.Integer)

    def test_instantiation_with_column_instance(self):
        """
        Tests that columns instantiated with a column instance work properly
        """
        column = columns.Map(columns.Text(min_length=100), columns.Integer())
        assert isinstance(column.key_col, columns.Text)
        assert isinstance(column.value_col, columns.Integer)

    def test_to_python(self):
        """ Tests that to_python of value column is called """
        column = columns.Map(JsonTestColumn, JsonTestColumn)
        val = {1: 2, 3: 4, 5: 6}
        db_val = column.to_database(val)
        assert db_val.value == {json.dumps(k): json.dumps(v) for k, v in val.items()}
        py_val = column.to_python(db_val.value)
        assert py_val == val

    def test_default_empty_container_saving(self):
        """ tests that the default empty container is not saved if it hasn't been updated """
        pkey = uuid4()
        tmap = {1: uuid4(), 2: uuid4()}
        # create a row with set data
        TestMapModel.create(partition=pkey, int_map=tmap)
        # create another with no set data
        TestMapModel.create(partition=pkey)
        m = TestMapModel.get(partition=pkey)
        self.assertEqual(m.int_map, tmap)

    # def test_partial_update_creation(self):
    #     """
    #     Tests that proper update statements are created for a partial list update
    #     :return:
    #     """
    #     final = range(10)
    #     initial = final[3:7]
    #
    #     ctx = {}
    #     col = columns.List(columns.Integer, db_field="TEST")
    #     statements = col.get_update_statement(final, initial, ctx)
    #
    #     assert len([v for v in ctx.values() if [0,1,2] == v.value]) == 1
    #     assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1
    #     assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1
    #     assert len([s for s in statements if '+ "TEST"' in s]) == 1
class TestCamelMapModel(Model):
    """Model with a camelCase-named Map column (regression fixture)."""
    partition = columns.UUID(primary_key=True, default=uuid4)
    camelMap = columns.Map(columns.Text, columns.Integer, required=False)
class TestCamelMapColumn(BaseCassEngTestCase):
    """Regression test: camelCase column names must survive sync and insert."""

    @classmethod
    def setUpClass(cls):
        super(TestCamelMapColumn, cls).setUpClass()
        drop_table(TestCamelMapModel)
        sync_table(TestCamelMapModel)

    @classmethod
    def tearDownClass(cls):
        super(TestCamelMapColumn, cls).tearDownClass()
        drop_table(TestCamelMapModel)

    def test_camelcase_column(self):
        TestCamelMapModel.create(camelMap={'blah': 1})
| |
"""
Aggregates agent type data by month for used entities.
Usage:
attribute_aggregator (-h|--help)
attribute_aggregator <input> <output_aggregations>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to misalignment/edit
breakdown file to process.
<output_aggregations> Where aggregation
output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
from collections import defaultdict
import mysqltsv
import sys
import re
logger = logging.getLogger(__name__)
# Extracts the wikibase edit-kind token from an auto-generated edit comment,
# e.g. "/* wbsetlabel-add:1|en */ ...". NOTE(review): not referenced anywhere
# in this module's visible code - confirm it is used by importers before
# removing.
EDIT_KIND_RE = re.compile(r'/\* (wb(set|create|edit|remove)([a-z]+)((-[a-z]+)*))', re.I)
def main(argv=None):
    """Entry point: parse CLI arguments, open the files and call run().

    :param argv: Optional argument vector handed to docopt (defaults to
        ``sys.argv[1:]``). BUGFIX: the parameter was previously accepted
        but never forwarded, so programmatic callers could not inject
        arguments; it is now passed through.
    """
    args = docopt.docopt(__doc__, argv=argv)
    logging.basicConfig(
        level=logging.INFO if not args['--debug'] else logging.DEBUG,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )
    # 24 typed columns, no header row; mis-encoded bytes are replaced rather
    # than allowed to raise so a single bad row cannot abort the run.
    input_file = mysqltsv.Reader(open(args['<input>'],
        'rt', encoding='utf-8', errors='replace'), headers=False,
        types=[str, int, str, str, str, int, int, int, str, str,
        str, str, str, str, str, str, int, int, str, str, str, str, str,
        str])
    # Header order must stay in sync with the row layout produced by run().
    output_aggregations_file = mysqltsv.Writer(
        open(args['<output_aggregations>'], "w"),
        headers=[
            'year',
            'month',
            'bot_edit',
            'quickstatements',
            'petscan',
            'autolist2',
            'autoedit',
            'labellister',
            'itemcreator',
            'dragrefjs',
            'lcjs',
            'wikidatagame',
            'wikidataprimary',
            'mixnmatch',
            'distributedgame',
            'nameguzzler',
            'mergejs',
            'reasonator',
            'duplicity',
            'tabernacle',
            'Widar',
            'reCh',
            'HHVM',
            'PAWS',
            'Kaspar',
            'itemFinder',
            'rgCh',
            'not_flagged_elsewhere_quickstatments_bot_account',
            'other_semi_automated_edit_since_change_tag',
            'anon_edit',
            'human_edit',
            'tool_bot_like_edit',
            'human_bot_like_edit',
            'anon_bot_like_edit'])
    verbose = args['--verbose']
    run(input_file, output_aggregations_file, verbose)
def run(input_file, output_aggregations_file,
        verbose):
    """Aggregate agent-type edit counts per (year, month) and write them out.

    :param input_file: Iterable of 24-column rows (schema documented in
        main()). Only columns 12 (agent_type), 16 (matched year),
        17 (matched month), 18 (anon recall level) and 22 (quality class)
        are consumed here.
    :param output_aggregations_file: Writer exposing ``write(row)``;
        receives one 34-column row per (year, month) pair.
    :param verbose: When truthy, report progress to stderr every
        10,000 rows.
    """
    # Recall levels that flag an edit as "bot-like"; the original code
    # repeated this three-way comparison in every branch below.
    bot_like_levels = ('anon_ten_recall_bot_edit',
                       'anon_twenty_recall_bot_edit',
                       'anon_thirty_recall_bot_edit')
    # agg[year][month][counter_name] -> count
    agg = \
        defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for i, line in enumerate(input_file):
        if verbose and i % 10000 == 0 and i != 0:
            sys.stderr.write("Processing revision: {0}\n".format(i))
            sys.stderr.flush()
        agent_type = line[12]
        m_match_year = line[16]
        m_match_month = line[17]
        agent_anon_recall_level = line[18]
        quality_class = line[22]
        # '\N' is the MySQL NULL marker: no quality class means the entity
        # is unused, so the row is filtered out.
        if quality_class == '\\N':
            continue
        # Count the edit under its agent type.
        agg[m_match_year][m_match_month][agent_type] += 1
        # Additionally bucket bot-like edits by who made them. A '\N'
        # recall level never appears in bot_like_levels, so the original
        # explicit NULL check is subsumed by the membership test.
        if agent_anon_recall_level in bot_like_levels:
            if agent_type == 'human_edit':
                agg[m_match_year][m_match_month]['human_bot_like_edit'] += 1
            elif agent_type == 'anon_edit':
                agg[m_match_year][m_match_month]['anon_bot_like_edit'] += 1
            elif agent_type != 'bot_edit':
                agg[m_match_year][m_match_month]['tool_bot_like_edit'] += 1
    # Output column order must match the headers written by main().
    counter_columns = [
        'bot_edit', 'quickstatements', 'petscan', 'autolist2', 'autoedit',
        'labellister', 'itemcreator', 'dragrefjs', 'lcjs', 'wikidatagame',
        'wikidataprimary', 'mixnmatch', 'distributedgame', 'nameguzzler',
        'mergejs', 'reasonator', 'duplicity', 'tabernacle', 'Widar', 'reCh',
        'HHVM', 'PAWS', 'Kaspar', 'itemFinder', 'rgCh',
        'not_flagged_elsewhere_quickstatments_bot_account',
        'other_semi_automated_edit_since_change_tag', 'anon_edit',
        'human_edit', 'tool_bot_like_edit', 'human_bot_like_edit',
        'anon_bot_like_edit']
    for year in agg:
        for month in agg[year]:
            counts = agg[year][month]
            output_aggregations_file.write(
                [year, month] + [counts[name] for name in counter_columns])
main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._bindings_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
# Signature of the optional ``cls`` response-transform callback accepted by
# every operation in this module: (pipeline response, deserialized body,
# response headers) -> anything.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BindingsOperations:
    """BindingsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2021_06_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so callers can reach the operation group's model classes
    # (e.g. BindingResource) without importing the models module directly.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline-capable service client plus the (de)serializers, injected
        # by the generated service client (see class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        **kwargs: Any
    ) -> "_models.BindingResource":
        """Get a Binding and its properties.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param binding_name: The name of the Binding resource.
        :type binding_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BindingResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResource"]
        # Default ARM error mapping; callers may extend or override it via
        # the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            binding_name=binding_name,
            template_url=self.get.metadata['url'],
        )
        # Adapt the protocol request for this pipeline (vendored helper) and
        # expand the URL template against the client's base URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # stream=False: buffer the full response body before deserializing.
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BindingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        binding_resource: "_models.BindingResource",
        **kwargs: Any
    ) -> "_models.BindingResource":
        # First leg of the create-or-update LRO: send the initial request and
        # return the immediate (possibly not yet final) BindingResource body.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(binding_resource, 'BindingResource')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            binding_name=binding_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200, 201 and 202 are all deserialized identically.
        if response.status_code == 200:
            deserialized = self._deserialize('BindingResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('BindingResource', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('BindingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        binding_resource: "_models.BindingResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.BindingResource"]:
        """Create a new Binding or update an exiting Binding.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param binding_name: The name of the Binding resource.
        :type binding_name: str
        :param binding_resource: Parameters for the create or update operation.
        :type binding_resource: ~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either BindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token we kick off the LRO ourselves; the
        # cls passthrough (lambda x,y,z: x) hands the raw PipelineResponse
        # to the poller instead of a deserialized model.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                binding_name=binding_name,
                binding_resource=binding_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        # Deserializes the terminal response once polling finishes.
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('BindingResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> ARM polling; False -> no polling; otherwise a
        # caller-supplied AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        **kwargs: Any
    ) -> None:
        # First leg of the delete LRO; no body is expected back.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            binding_name=binding_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Operation to delete a Binding.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param binding_name: The name of the Binding resource.
        :type binding_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Same continuation-token / cls-passthrough pattern as
        # begin_create_or_update above, but the LRO has no result body.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                binding_name=binding_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        binding_resource: "_models.BindingResource",
        **kwargs: Any
    ) -> "_models.BindingResource":
        # First leg of the update (PATCH-style) LRO; mirrors
        # _create_or_update_initial but accepts only 200/202.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(binding_resource, 'BindingResource')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            binding_name=binding_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('BindingResource', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('BindingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        binding_name: str,
        binding_resource: "_models.BindingResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.BindingResource"]:
        """Operation to update an exiting Binding.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param binding_name: The name of the Binding resource.
        :type binding_name: str
        :param binding_resource: Parameters for the update operation.
        :type binding_resource: ~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either BindingResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Same LRO orchestration pattern as begin_create_or_update.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                binding_name=binding_name,
                binding_resource=binding_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('BindingResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.BindingResourceCollection"]:
        """Handles requests to list all resources in an App.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BindingResourceCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2021_06_01_preview.models.BindingResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BindingResourceCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # reuse the builder with the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    app_name=app_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    app_name=app_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Follow-up pages are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Split one page into (link to next page, items on this page).
            deserialized = self._deserialize("BindingResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings'}  # type: ignore
| |
#!/usr/bin/env python
import os
import time
import watchdog.events
import watchdog.observers
import watchdog.utils
import sys
import subprocess
import string
import tempfile
import argparse
import re
import atexit
import posixpath
import errno
class PypushHandler(watchdog.events.FileSystemEventHandler):
"""Push all changes in the current directory to a remote server."""
	def __init__(self, flags):
		"""Detect the local VCS, open a master ssh connection and (optionally)
		run the initial sync.

		``flags`` is the argparse namespace produced by the CLI; its fields
		are copied onto the handler for later use by the event callbacks.
		"""
		self.vcs = None
		# vcs stores the version control system used to check whether a file
		# should be ignored or not - 'git', 'hg' or None
		try:
			# If this or any parent directory isn't a git/hg repo, the commands
			# below return non-zero status
			if not subprocess.Popen(['git', 'rev-parse'], stderr=subprocess.PIPE).communicate()[1]:
				self.vcs = 'git'
		except OSError as e:
			if e.errno == errno.ENOENT: # git doesn't exist on this system
				pass
			else:
				raise
		try:
			if not self.vcs and not subprocess.Popen(['hg', 'root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[1]:
				self.vcs = 'hg'
		except OSError as e:
			if e.errno == errno.ENOENT: # hg doesn't exist on this system
				pass
			else:
				raise
		if flags.include_all:
			self.vcs = None
		elif self.vcs is None:
			print "Couldn't detect a git/hg repo, no files will be ignored"
		if flags.skip_init and flags.exit_after:
			print 'Error: cannot use flags -s and -e together'
			sys.exit(1)
		# Copy CLI options onto the handler for the event callbacks.
		self.user = flags.user
		self.path = flags.dest
		self.quiet = flags.quiet
		self.verbose = flags.verbose
		self.show_ignored = flags.show_ignored
		self.exit_after = flags.exit_after
		self.ssh_options = flags.ssh_options
		self.port = str(flags.port) # Store as string to allow passing it as a flag to ssh/rsync
		self.keep_extra = flags.keep_extra
		self.cwd = os.getcwd() + '/'
		if self.path[-1] != '/': # Ensure path ends in a slash, i.e. it is a directory
			self.path += '/'
		self.check_ignore = False
		if self.vcs == 'git':
			# check_ignore stores whether we can use the 'git check-ignore' command -
			# it was only introduced in a fairly recent version of git
			args = ['git', 'check-ignore', '.']
			if not subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[1]:
				# No error, so we can use 'git check-ignore'
				self.check_ignore = True
		args = ['ssh', '-t', '-t', # Force tty allocation - this prevents certain error messages
			'-M', '-S', '~/.ssh/socket-%r@%h:%p', # Create a master TCP connection that we can use later every time a file changes
			'-fN', # Go to the background when the connection is established - so after this command returns, we can be sure that the master connection has been created
			'-p', self.port,
			self.user] + self.get_ssh_options()
		if subprocess.call(args):
			print 'Error with ssh, aborting'
			sys.exit(1)
		atexit.register(subprocess.call, ['ssh', '-O', 'exit', '-S', '~/.ssh/socket-%r@%h:%p', '-p', self.port, self.user] +
			self.get_ssh_options(), stderr=subprocess.PIPE) # Close the master connection before exiting
		if flags.skip_init:
			print 'Waiting for file changes\n'
		else:
			self.sync()
def escape(self, string, escape_tilde=False):
"""Escape all special characters in string, except the tilde (~) by default."""
special_chars = r'[\|&;<>\(\)\$`\\"\' \*\?\[#]' # List of special characters from http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html
if escape_tilde:
special_chars += '~'
return re.sub('(' + special_chars + ')', r'\\\1', string)
def get_ssh_options(self):
args = []
for opt in self.ssh_options:
args.append('-o')
args.append(opt)
return args
def get_rsh(self):
"""Return a command specifying the remote shell to use for rsync (for the -e flag)."""
command = 'ssh -S ~/.ssh/socket-%r@%h:%p -p ' + self.port # Connect to the master connection from earlier
for opt in self.ssh_options:
command += ' -o ' + self.escape(opt, True)
return command
	def sync(self):
		"""Perform a one-way sync to the remote directory.
		Exclude any files ignored by git.

		Builds a temporary rsync exclude file from the VCS's list of ignored
		files, runs a full rsync of the working directory, then either exits
		(when --exit-after was given) or keeps waiting for changes.
		"""
		if self.vcs == 'git':
			args = ['git', 'ls-files', '-i', '-o', '--directory', '--exclude-standard'] # Show all untracked, ignored files in the current directory
		elif self.vcs == 'hg':
			args = ['hg', 'status', '-i', '-n']
		print 'Performing initial one-way sync'
		if self.vcs:
			# Write one exclude pattern per ignored file, plus the VCS
			# metadata directory itself, into a temp file for rsync.
			output = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
			tf = tempfile.NamedTemporaryFile(delete=False)
			if self.vcs == 'git':
				tf.write('/.git/\n')
			else:
				assert self.vcs == 'hg'
				tf.write('/.hg/\n')
			for line in string.split(output, '\n'):
				if line != '':
					tf.write('/' + line + '\n')
			tf.close()
		args = ['rsync', '-az', # Usual flags - archive, compress
			'-e', self.get_rsh(),
			'./', # Sync current directory
			self.user + ':' + self.escape(self.path)]
		if self.vcs:
			args.append('--exclude-from=' + tf.name)
			if not self.keep_extra:
				args.append('--delete-excluded')
		elif not self.keep_extra:
			args.append('--delete')
		if self.verbose:
			args.append('-v')
		if subprocess.call(args):
			print 'Error with rsync, aborting'
			sys.exit(1)
		if self.vcs:
			# Exclude file was created with delete=False; clean it up now.
			os.remove(tf.name)
		if self.exit_after:
			print 'Done'
			sys.exit(0)
		else:
			print 'Startup complete, waiting for file changes\n'
def print_quiet(self, message, newline=True):
"""Only print the given message if not in quiet mode.
Optionally print without a newline.
"""
if not self.quiet:
if newline:
print message
else:
sys.stdout.write(message)
sys.stdout.flush()
def should_ignore(self, filename):
"""Return whether changes to filename should be ignored."""
if not self.vcs:
return False
elif self.vcs == 'git' and filename.startswith('.git/'):
return True
elif self.vcs == 'hg' and filename.startswith('.hg/'):
return True
if self.vcs == 'git':
if self.check_ignore:
args = ['git', 'check-ignore', filename]
else:
args = ['git', 'ls-files', '-i', '-o', '--exclude-standard', filename]
else:
assert self.vcs == 'hg'
args = ['hg', 'status', '-i', '-n', filename]
if subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]: # If git outputs something, then that file is ignored
return True
return False
def relative_path(self, filename):
"""Convert filename to a path relative to the current directory."""
return filename.replace(self.cwd, '', 1)
def dispatch(self, event):
"""Dispatch events to the appropriate methods."""
if not event.is_directory: # Git doesn't care about directories, so neither do we
path = self.relative_path(event.src_path)
if event.event_type == 'moved':
dest = self.relative_path(event.dest_path)
self.on_moved(path, dest)
elif event.event_type == 'deleted':
self.on_deleted(path)
else: # Created or modified
if not self.should_ignore(path):
self.on_modified(path, path + ' ' + event.event_type)
elif self.show_ignored:
self.print_quiet(path + ' ' + event.event_type + ' (ignored)')
def create_parent_dir(self, path):
"""Check if the parent directory of the given path exists on the remote.
If not, create it and all intermediate directories.
"""
parent_dir = posixpath.dirname(path)
args = ['ssh', '-S', '~/.ssh/socket-%r@%h:%p',
'-p', self.port, self.user] + self.get_ssh_options() + ['mkdir -p ' + self.escape(parent_dir)]
subprocess.call(args)
def on_modified(self, path, output=''):
"""Call rsync on the given relative path."""
if output:
self.print_quiet(output, False)
self.create_parent_dir(self.path + path)
args = ['rsync', '-az',
'-e', self.get_rsh(), path, self.user + ':' + self.escape(self.path + path)]
if self.verbose:
args.append('-v')
subprocess.call(args)
if output:
self.print_quiet('...pushed')
    def on_moved(self, src, dest):
        """Handle a file moved from src to dest (both relative paths).

        If the destination is ignored by the VCS, treat the move as a plain
        deletion of the source instead.
        """
        if self.should_ignore(dest):
            self.on_deleted(src)
        else:
            self.print_quiet(src + ' moved to ' + dest, False)
            # Try to move src to dest on the remote with ssh and mv. Then call
            # rsync on it, in case either src was changed on the remote, or it
            # didn't exist.
            self.create_parent_dir(self.path + dest)
            # NOTE(review): self.port goes straight into the argv list; if it
            # is an int (argparse --port uses type=int) subprocess would reject
            # it — presumably converted upstream; confirm.
            args = (['ssh', '-S', '~/.ssh/socket-%r@%h:%p', '-p', self.port, self.user] +
                    self.get_ssh_options() + ['mv -f ' + self.escape(self.path + src) + ' ' + self.escape(self.path + dest)])
            # stderr is swallowed: mv failing because src is absent is expected.
            subprocess.call(args, stderr=subprocess.PIPE)
            self.on_modified(dest)
            self.print_quiet('...pushed')
    def on_deleted(self, path):
        """Handles deleting a file.
        If self.check_ignore is True, only deletes the file on the remote if the
        deleted file would not have been ignored. Also prints output
        appropriately if self.show_ignored is True.
        """
        if self.check_ignore:
            if self.should_ignore(path):
                # Ignore deletion
                if self.show_ignored:
                    self.print_quiet(path + ' deleted (ignored)')
                return
        # If we can't use 'git check-ignore', we can't do 'git ls-files' on a
        # deleted file, so just try to delete it - if it doesn't exist on the
        # remote, nothing will happen
        self.print_quiet(path + ' deleted', False)
        # NOTE(review): self.port goes straight into the argv list; if it is an
        # int (argparse --port uses type=int) subprocess would reject it —
        # presumably converted upstream; confirm.
        args = (['ssh', '-S', '~/.ssh/socket-%r@%h:%p', '-p', self.port, self.user] +
                self.get_ssh_options() + ['rm -f ' + self.escape(self.path + path)])
        subprocess.call(args)
        self.print_quiet('...pushed')
def main():
    """Parse command-line arguments, then watch the current directory and
    push changes until interrupted with Ctrl-C."""
    parser = argparse.ArgumentParser(
        description="""Continuously push changes in the current directory to a remote server.
        If this is a Git/Mercurial directory, files that are ignored by Git/Mercurial will not be pushed.""",
        epilog="""WARNING: pypush only performs a one-way sync. If you make
        changes directly on the remote machine, they may be overwritten at
        any time by changes made locally.""")
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet mode - do not show output whenever a file changes')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose mode - run rsync in verbose mode')
    parser.add_argument('-s', '--skip-init', action='store_true',
                        help='skip the initial one-way sync performed on startup')
    parser.add_argument('-i', '--show-ignored', action='store_true',
                        help='print output even when ignored files are created or modified (this flag is overridden by quiet mode)')
    parser.add_argument('-e', '--exit-after', action='store_true',
                        help='exit after the initial sync, i.e. do not monitor the directory for changes')
    parser.add_argument('-a', '--include-all', action='store_true',
                        help='do not ignore any files')
    parser.add_argument('-p', '--port', type=int, default=22, help='the SSH port to use')
    parser.add_argument('-k', '--keep-extra', action='store_true',
                        help='keep files on the remote that do not exist locally')
    parser.add_argument('-o', '--ssh-options', default=[], action='append',
                        help='options to pass on to SSH with the -o flag. This argument may be specified multiple times.')
    parser.add_argument('--version', action='version', version='%(prog)s 1.3')
    parser.add_argument('user', metavar='user@hostname', help='the remote machine (and optional user name) to login to')
    # The user argument is passed on to rsync and ssh, so actually the 'user@'
    # part is optional, but using metavar='[user@]hostname' causes an error
    # because of a bug in argparse - see http://bugs.python.org/issue11874
    parser.add_argument('dest', help='the path to the remote directory to push changes to')
    args = parser.parse_args()
    # The handler performs the initial sync in its constructor; the observer
    # then feeds filesystem events to PypushHandler.dispatch.
    observer = watchdog.observers.Observer()
    observer.schedule(PypushHandler(args), path='.', recursive=True)
    observer.start()
    try:
        # The observer runs on its own thread; just idle until Ctrl-C.
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
# Script entry point.
if __name__ == '__main__':
    main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import sys, os
from . import common
from proton import *
from .common import pump, Skipped
from proton._compat import str2bin
def _sslCertpath(file):
    """Return the absolute path of the named certificate/keyfile inside the
    test ssl_db directory (which lives next to this module)."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "ssl_db/%s" % file)
def _testSaslMech(self, mech, clientUser='user@proton', authUser='user@proton', encrypted=False, authenticated=True):
    """Drive a full SASL handshake between the fixture's client (t1/s1/c1)
    and server (t2/s2/c2) and assert the expected outcome.

    mech          -- space-separated mechanism list offered by the client.
    clientUser    -- user the client transport should report.
    authUser      -- user the server should see after authentication.
    encrypted     -- expected transport encryption state (skipped when falsy).
    authenticated -- whether the handshake is expected to succeed.
    """
    self.s1.allowed_mechs(mech)
    self.c1.open()
    self.c2.open()
    pump(self.t1, self.t2, 1024)
    if encrypted:
        assert self.t2.encrypted == encrypted
        assert self.t1.encrypted == encrypted
    assert self.t2.authenticated == authenticated
    assert self.t1.authenticated == authenticated
    if authenticated:
        # Server
        assert self.t2.user == authUser
        assert self.s2.user == authUser
        assert self.s2.mech == mech.strip()
        assert self.s2.outcome == SASL.OK
        # BUG FIX: the failure message previously formatted c1's state even
        # though the assertion checks c2 — report the endpoint being tested.
        assert self.c2.state & Endpoint.LOCAL_ACTIVE and self.c2.state & Endpoint.REMOTE_ACTIVE,\
            "local_active=%s, remote_active=%s" % (self.c2.state & Endpoint.LOCAL_ACTIVE, self.c2.state & Endpoint.REMOTE_ACTIVE)
        # Client
        assert self.t1.user == clientUser
        assert self.s1.user == clientUser
        assert self.s1.mech == mech.strip()
        assert self.s1.outcome == SASL.OK
        assert self.c1.state & Endpoint.LOCAL_ACTIVE and self.c1.state & Endpoint.REMOTE_ACTIVE,\
            "local_active=%s, remote_active=%s" % (self.c1.state & Endpoint.LOCAL_ACTIVE, self.c1.state & Endpoint.REMOTE_ACTIVE)
    else:
        # Server: no authenticated user, handshake failed.
        assert self.t2.user == None
        assert self.s2.user == None
        assert self.s2.outcome != SASL.OK
        # Client still knows which user it attempted to authenticate as.
        assert self.t1.user == clientUser
        assert self.s1.user == clientUser
        assert self.s1.outcome != SASL.OK
class Test(common.Test):
    """Shared base class for the SASL test fixtures in this module."""
    pass
class SaslTest(Test):
    """SASL handshake tests using an in-memory client transport (t1/s1)
    pumped against a server transport (t2/s2)."""
    def setup(self):
        # Client-side transport and SASL layer.
        self.t1 = Transport()
        self.s1 = SASL(self.t1)
        # Server-side transport and SASL layer.
        self.t2 = Transport(Transport.SERVER)
        self.s2 = SASL(self.t2)
    def pump(self):
        # Shuttle pending bytes between the two transports in 1K chunks.
        pump(self.t1, self.t2, 1024)
    # Note that due to server protocol autodetect, there can be no "pipelining"
    # of protocol frames from the server end only from the client end.
    #
    # This is because the server cannot know which protocol layers are active
    # and therefore which headers need to be sent,
    # until it sees the respective protocol headers from the client.
    def testPipelinedClient(self):
        """Client pipelines its SASL frames; server must still authenticate."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support client pipelining")
        # Client
        self.s1.allowed_mechs('ANONYMOUS')
        # Server
        self.s2.allowed_mechs('ANONYMOUS')
        assert self.s1.outcome is None
        assert self.s2.outcome is None
        # Push client bytes into server
        out1 = self.t1.peek(1024)
        self.t1.pop(len(out1))
        self.t2.push(out1)
        out2 = self.t2.peek(1024)
        self.t2.pop(len(out2))
        # Client can't know the outcome until it has read the server's reply.
        assert self.s1.outcome is None
        self.t1.push(out2)
        assert self.s1.outcome == SASL.OK
        assert self.s2.outcome == SASL.OK
    def testPipelinedClientFail(self):
        """Pipelined client offering a mech the server rejects must fail."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support client pipelining")
        # Client
        self.s1.allowed_mechs('ANONYMOUS')
        # Server only accepts authenticated mechanisms, so ANONYMOUS fails.
        self.s2.allowed_mechs('PLAIN DIGEST-MD5 SCRAM-SHA-1')
        assert self.s1.outcome is None
        assert self.s2.outcome is None
        # Push client bytes into server
        out1 = self.t1.peek(1024)
        self.t1.pop(len(out1))
        self.t2.push(out1)
        out2 = self.t2.peek(1024)
        self.t2.pop(len(out2))
        assert self.s1.outcome is None
        self.t1.push(out2)
        assert self.s1.outcome == SASL.AUTH
        assert self.s2.outcome == SASL.AUTH
    def testSaslAndAmqpInSingleChunk(self):
        """Server must handle SASL and AMQP data arriving in one buffer."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support client pipelining")
        self.s1.allowed_mechs('ANONYMOUS')
        self.s2.allowed_mechs('ANONYMOUS')
        # send the server's OK to the client
        # This is still needed for the Java impl
        out2 = self.t2.peek(1024)
        self.t2.pop(len(out2))
        self.t1.push(out2)
        # do some work to generate AMQP data
        c1 = Connection()
        c2 = Connection()
        self.t1.bind(c1)
        c1._transport = self.t1
        self.t2.bind(c2)
        c2._transport = self.t2
        c1.open()
        # get all t1's output in one buffer then pass it all to t2
        out1_sasl_and_amqp = str2bin("")
        t1_still_producing = True
        while t1_still_producing:
            out1 = self.t1.peek(1024)
            self.t1.pop(len(out1))
            out1_sasl_and_amqp += out1
            t1_still_producing = out1
        t2_still_consuming = True
        while t2_still_consuming:
            # Respect t2's advertised capacity when feeding it data.
            num = min(self.t2.capacity(), len(out1_sasl_and_amqp))
            self.t2.push(out1_sasl_and_amqp[:num])
            out1_sasl_and_amqp = out1_sasl_and_amqp[num:]
            t2_still_consuming = num > 0 and len(out1_sasl_and_amqp) > 0
        assert len(out1_sasl_and_amqp) == 0, (len(out1_sasl_and_amqp), out1_sasl_and_amqp)
        # check that t2 processed both the SASL data and the AMQP data
        assert self.s2.outcome == SASL.OK
        assert c2.state & Endpoint.REMOTE_ACTIVE
    def testPipelined2(self):
        """Server processes pipelined client frames and produces a reply."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support client pipelining")
        out1 = self.t1.peek(1024)
        self.t1.pop(len(out1))
        self.t2.push(out1)
        self.s2.allowed_mechs('ANONYMOUS')
        c2 = Connection()
        c2.open()
        self.t2.bind(c2)
        out2 = self.t2.peek(1024)
        self.t2.pop(len(out2))
        self.t1.push(out2)
        out1 = self.t1.peek(1024)
        assert len(out1) > 0
    def testFracturedSASL(self):
        """ PROTON-235
        Regression test: SASL input delivered in arbitrarily fragmented
        chunks (headers and frames split mid-byte-sequence) must still parse.
        """
        assert self.s1.outcome is None
        # self.t1.trace(Transport.TRACE_FRM)
        out = self.t1.peek(1024)
        self.t1.pop(len(out))
        # AMQP SASL protocol header, then a sasl-mechanisms frame split in
        # three pieces, then a sasl-outcome (OK) frame.
        self.t1.push(str2bin("AMQP\x03\x01\x00\x00"))
        out = self.t1.peek(1024)
        self.t1.pop(len(out))
        self.t1.push(str2bin("\x00\x00\x00"))
        out = self.t1.peek(1024)
        self.t1.pop(len(out))
        self.t1.push(str2bin("6\x02\x01\x00\x00\x00S@\xc04\x01\xe01\x04\xa3\x05PLAIN\x0aDIGEST-MD5\x09ANONYMOUS\x08CRAM-MD5"))
        out = self.t1.peek(1024)
        self.t1.pop(len(out))
        self.t1.push(str2bin("\x00\x00\x00\x10\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00"))
        out = self.t1.peek(1024)
        self.t1.pop(len(out))
        # Drain any remaining output before checking the outcome.
        while out:
            out = self.t1.peek(1024)
            self.t1.pop(len(out))
        assert self.s1.outcome == SASL.OK, self.s1.outcome
    def test_singleton(self):
        """Verify that only a single instance of SASL can exist per Transport"""
        transport = Transport()
        attr = object()
        sasl1 = SASL(transport)
        sasl1.my_attribute = attr
        sasl2 = transport.sasl()
        sasl3 = SASL(transport)
        # All three handles must refer to the same underlying SASL object.
        assert sasl1 == sasl2
        assert sasl1 == sasl3
        assert sasl1.my_attribute == attr
        assert sasl2.my_attribute == attr
        assert sasl3.my_attribute == attr
        # Same check with the accessor called before the constructor.
        transport = Transport()
        sasl1 = transport.sasl()
        sasl1.my_attribute = attr
        sasl2 = SASL(transport)
        assert sasl1 == sasl2
        assert sasl1.my_attribute == attr
        assert sasl2.my_attribute == attr
    def testSaslSkipped(self):
        """Verify that the server (with SASL) correctly handles a client without SASL"""
        # Fresh client transport with no SASL layer.
        self.t1 = Transport()
        self.t2.require_auth(False)
        self.pump()
        assert self.s2.outcome == None
        assert self.t2.condition == None
        assert self.t2.authenticated == False
        assert self.s1.outcome == None
        assert self.t1.condition == None
        assert self.t1.authenticated == False
    def testSaslSkippedFail(self):
        """Verify that the server rejects a SASL-less client when auth is required."""
        # Fresh client transport with no SASL layer; server insists on auth.
        self.t1 = Transport()
        self.t2.require_auth(True)
        self.pump()
        assert self.s2.outcome == None
        assert self.t2.condition != None
        assert self.s1.outcome == None
        assert self.t1.condition != None
    def testMechNotFound(self):
        """A mechanism unknown to the server must leave both ends unauthenticated."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support checking authentication state")
        self.c1 = Connection()
        self.c1.open()
        self.t1.bind(self.c1)
        self.s1.allowed_mechs('IMPOSSIBLE')
        self.pump()
        assert self.t2.authenticated == False
        assert self.t1.authenticated == False
        assert self.s1.outcome != SASL.OK
        assert self.s2.outcome != SASL.OK
class CyrusSASLTest(Test):
    """Tests for mechanisms provided by the Cyrus SASL library."""
    def setup(self):
        # Client transport/SASL plus credentials on the client connection.
        self.t1 = Transport()
        self.s1 = SASL(self.t1)
        self.t2 = Transport(Transport.SERVER)
        self.s2 = SASL(self.t2)
        self.c1 = Connection()
        self.c1.user = 'user@proton'
        self.c1.password = 'password'
        self.c1.hostname = 'localhost'
        self.c2 = Connection()
    def testMechANON(self):
        """ANONYMOUS: server reports the canonical 'anonymous' user."""
        self.t1.bind(self.c1)
        self.t2.bind(self.c2)
        _testSaslMech(self, 'ANONYMOUS', authUser='anonymous')
    def testMechCRAMMD5(self):
        """CRAM-MD5 challenge/response authentication."""
        common.ensureCanTestExtendedSASL()
        self.t1.bind(self.c1)
        self.t2.bind(self.c2)
        _testSaslMech(self, 'CRAM-MD5')
    def testMechDIGESTMD5(self):
        """DIGEST-MD5 challenge/response authentication."""
        common.ensureCanTestExtendedSASL()
        self.t1.bind(self.c1)
        self.t2.bind(self.c2)
        _testSaslMech(self, 'DIGEST-MD5')
    # SCRAM not supported before Cyrus SASL 2.1.26
    # so not universal and hence need a test for support
    # to keep it in tests.
    # def testMechSCRAMSHA1(self):
    #   common.ensureCanTestExtendedSASL()
    #
    #   self.t1.bind(self.c1)
    #   self.t2.bind(self.c2)
    #   _testSaslMech(self, 'SCRAM-SHA-1')
def _sslConnection(domain, transport, connection):
    """Bind connection to transport and layer SSL on top using the given
    domain; returns the connection."""
    transport.bind(connection)
    # The SSL object registers itself with the transport as a side effect;
    # no reference to it needs to be kept.
    SSL(transport, domain, None)
    return connection
class SSLSASLTest(Test):
    """SASL handshakes layered over SSL (PLAIN and EXTERNAL mechanisms)."""
    def setup(self):
        if not common.isSSLPresent():
            raise Skipped("No SSL libraries found.")
        self.server_domain = SSLDomain(SSLDomain.MODE_SERVER)
        self.client_domain = SSLDomain(SSLDomain.MODE_CLIENT)
        self.t1 = Transport()
        self.s1 = SASL(self.t1)
        self.t2 = Transport(Transport.SERVER)
        self.s2 = SASL(self.t2)
        self.c1 = Connection()
        self.c2 = Connection()
    def testSSLPlainSimple(self):
        """PLAIN over SSL with correct credentials succeeds."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support SSL with SASL")
        if not SASL.extended():
            raise Skipped("Simple SASL server does not support PLAIN")
        common.ensureCanTestExtendedSASL()
        clientUser = 'user@proton'
        mech = 'PLAIN'
        self.c1.user = clientUser
        self.c1.password = 'password'
        self.c1.hostname = 'localhost'
        ssl1 = _sslConnection(self.client_domain, self.t1, self.c1)
        ssl2 = _sslConnection(self.server_domain, self.t2, self.c2)
        _testSaslMech(self, mech, encrypted=True)
    def testSSLPlainSimpleFail(self):
        """PLAIN over SSL with a wrong user name fails authentication."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support SSL with SASL")
        if not SASL.extended():
            raise Skipped("Simple SASL server does not support PLAIN")
        common.ensureCanTestExtendedSASL()
        # Deliberately misspelled user so authentication is rejected.
        clientUser = 'usr@proton'
        mech = 'PLAIN'
        self.c1.user = clientUser
        self.c1.password = 'password'
        self.c1.hostname = 'localhost'
        ssl1 = _sslConnection(self.client_domain, self.t1, self.c1)
        ssl2 = _sslConnection(self.server_domain, self.t2, self.c2)
        _testSaslMech(self, mech, clientUser='usr@proton', encrypted=True, authenticated=False)
    def testSSLExternalSimple(self):
        """EXTERNAL: identity taken from the verified client certificate."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support SSL with SASL")
        extUser = 'O=Client,CN=127.0.0.1'
        mech = 'EXTERNAL'
        # Both sides present certificates; server verifies the client's.
        self.server_domain.set_credentials(_sslCertpath("server-certificate.pem"),
                                           _sslCertpath("server-private-key.pem"),
                                           "server-password")
        self.server_domain.set_trusted_ca_db(_sslCertpath("ca-certificate.pem"))
        self.server_domain.set_peer_authentication(SSLDomain.VERIFY_PEER,
                                                   _sslCertpath("ca-certificate.pem") )
        self.client_domain.set_credentials(_sslCertpath("client-certificate.pem"),
                                           _sslCertpath("client-private-key.pem"),
                                           "client-password")
        self.client_domain.set_trusted_ca_db(_sslCertpath("ca-certificate.pem"))
        self.client_domain.set_peer_authentication(SSLDomain.VERIFY_PEER)
        ssl1 = _sslConnection(self.client_domain, self.t1, self.c1)
        ssl2 = _sslConnection(self.server_domain, self.t2, self.c2)
        _testSaslMech(self, mech, clientUser=None, authUser=extUser, encrypted=True)
    def testSSLExternalSimpleFail(self):
        """EXTERNAL without a client certificate must fail authentication."""
        if "java" in sys.platform:
            raise Skipped("Proton-J does not support SSL with SASL")
        mech = 'EXTERNAL'
        # Client presents no credentials here, so EXTERNAL cannot succeed.
        self.server_domain.set_credentials(_sslCertpath("server-certificate.pem"),
                                           _sslCertpath("server-private-key.pem"),
                                           "server-password")
        self.server_domain.set_trusted_ca_db(_sslCertpath("ca-certificate.pem"))
        self.server_domain.set_peer_authentication(SSLDomain.VERIFY_PEER,
                                                   _sslCertpath("ca-certificate.pem") )
        self.client_domain.set_trusted_ca_db(_sslCertpath("ca-certificate.pem"))
        self.client_domain.set_peer_authentication(SSLDomain.VERIFY_PEER)
        ssl1 = _sslConnection(self.client_domain, self.t1, self.c1)
        ssl2 = _sslConnection(self.server_domain, self.t2, self.c2)
        _testSaslMech(self, mech, clientUser=None, authUser=None, encrypted=None, authenticated=False)
| |
"""Mail (SMTP) notification service."""
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import email.utils
import logging
import os
import smtplib
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import (
CONF_PASSWORD,
CONF_PORT,
CONF_RECIPIENT,
CONF_SENDER,
CONF_TIMEOUT,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import setup_reload_service
import homeassistant.util.dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Service-data keys accepted in a notify call.
ATTR_IMAGES = "images"  # optional embedded image file attachments
ATTR_HTML = "html"
# Platform configuration keys.
CONF_ENCRYPTION = "encryption"
CONF_DEBUG = "debug"
CONF_SERVER = "server"
CONF_SENDER_NAME = "sender_name"
# Defaults applied when the user omits an option.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 587
DEFAULT_TIMEOUT = 5
DEFAULT_DEBUG = False
DEFAULT_ENCRYPTION = "starttls"
# "tls" = implicit SSL socket, "starttls" = upgrade after EHLO, "none" = plaintext.
ENCRYPTION_OPTIONS = ["tls", "starttls", "none"]
# pylint: disable=no-value-for-parameter
# Voluptuous schema validating this notify platform's YAML configuration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # One or more destination addresses; sender must be a single address.
        vol.Required(CONF_RECIPIENT): vol.All(cv.ensure_list, [vol.Email()]),
        vol.Required(CONF_SENDER): vol.Email(),
        vol.Optional(CONF_SERVER, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_ENCRYPTION, default=DEFAULT_ENCRYPTION): vol.In(
            ENCRYPTION_OPTIONS
        ),
        # Credentials are optional; when both are set, login is performed.
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_SENDER_NAME): cv.string,
        vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
    }
)
def get_service(hass, config, discovery_info=None):
    """Get the mail notification service."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)
    service = MailNotificationService(
        config.get(CONF_SERVER),
        config.get(CONF_PORT),
        config.get(CONF_TIMEOUT),
        config.get(CONF_SENDER),
        config.get(CONF_ENCRYPTION),
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        config.get(CONF_RECIPIENT),
        config.get(CONF_SENDER_NAME),
        config.get(CONF_DEBUG),
    )
    # Only hand the service back if the SMTP configuration actually works.
    return service if service.connection_is_valid() else None
class MailNotificationService(BaseNotificationService):
    """Implement the notification service for E-mail messages."""
    def __init__(
        self,
        server,
        port,
        timeout,
        sender,
        encryption,
        username,
        password,
        recipients,
        sender_name,
        debug,
    ):
        """Initialize the SMTP service.

        encryption is one of "tls" (implicit SSL), "starttls" or "none";
        username/password may be None to skip authentication.
        """
        self._server = server
        self._port = port
        self._timeout = timeout
        self._sender = sender
        self.encryption = encryption
        self.username = username
        self.password = password
        self.recipients = recipients
        self._sender_name = sender_name
        self.debug = debug
        # Number of send attempts before giving up on a disconnected server.
        self.tries = 2
    def connect(self):
        """Connect/authenticate to SMTP Server."""
        if self.encryption == "tls":
            mail = smtplib.SMTP_SSL(self._server, self._port, timeout=self._timeout)
        else:
            mail = smtplib.SMTP(self._server, self._port, timeout=self._timeout)
        mail.set_debuglevel(self.debug)
        mail.ehlo_or_helo_if_needed()
        if self.encryption == "starttls":
            mail.starttls()
            mail.ehlo()
        if self.username and self.password:
            mail.login(self.username, self.password)
        return mail
    def connection_is_valid(self):
        """Check for valid config, verify connectivity.

        Returns True only when a connection (and login, if configured)
        succeeds.
        """
        server = None
        try:
            server = self.connect()
        except (smtplib.socket.gaierror, ConnectionRefusedError):
            _LOGGER.exception(
                "SMTP server not found or refused connection (%s:%s). "
                "Please check the IP address, hostname, and availability of your SMTP server",
                self._server,
                self._port,
            )
            # BUG FIX: previously this branch fell through and the method
            # returned True even though the server was unreachable.
            return False
        except smtplib.SMTPAuthenticationError:
            _LOGGER.exception(
                "Login not possible. "
                "Please check your setting and/or your credentials"
            )
            return False
        finally:
            if server:
                server.quit()
        return True
    def send_message(self, message="", **kwargs):
        """
        Build and send a message to a user.
        Will send plain text normally, or will build a multipart HTML message
        with inline image attachments if images config is defined, or will
        build a multipart HTML if html config is defined.
        """
        subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        data = kwargs.get(ATTR_DATA)
        if data:
            if ATTR_HTML in data:
                msg = _build_html_msg(
                    message, data[ATTR_HTML], images=data.get(ATTR_IMAGES, [])
                )
            else:
                msg = _build_multipart_msg(message, images=data.get(ATTR_IMAGES, []))
        else:
            msg = _build_text_msg(message)
        msg["Subject"] = subject
        msg["To"] = ",".join(self.recipients)
        if self._sender_name:
            msg["From"] = f"{self._sender_name} <{self._sender}>"
        else:
            msg["From"] = self._sender
        msg["X-Mailer"] = "Home Assistant"
        msg["Date"] = email.utils.format_datetime(dt_util.now())
        msg["Message-Id"] = email.utils.make_msgid()
        return self._send_email(msg)
    def _send_email(self, msg):
        """Send the message, reconnecting once if the server disconnected."""
        mail = self.connect()
        for _ in range(self.tries):
            try:
                mail.sendmail(self._sender, self.recipients, msg.as_string())
                break
            except smtplib.SMTPServerDisconnected:
                _LOGGER.warning(
                    "SMTPServerDisconnected sending mail: retrying connection"
                )
                mail.quit()
                mail = self.connect()
            except smtplib.SMTPException:
                _LOGGER.warning("SMTPException sending mail: retrying connection")
                mail.quit()
                mail = self.connect()
        mail.quit()
def _build_text_msg(message):
    """Build plaintext email.

    Returns a MIMEText part wrapping the message body.
    """
    _LOGGER.debug("Building plain text email")
    return MIMEText(message)
def _attach_file(atch_name, content_id):
    """Create a message attachment for the file atch_name, tagged with the
    given Content-ID; returns None when the file is missing."""
    # A missing attachment is skipped with a warning rather than aborting
    # the whole message.
    try:
        with open(atch_name, "rb") as handle:
            payload = handle.read()
    except FileNotFoundError:
        _LOGGER.warning("Attachment %s not found. Skipping", atch_name)
        return None
    # Prefer an inline image part; fall back to a generic application part
    # when the bytes are not a recognisable image.
    try:
        part = MIMEImage(payload)
    except TypeError:
        _LOGGER.warning(
            "Attachment %s has an unknown MIME type. Falling back to file",
            atch_name,
        )
        part = MIMEApplication(payload, Name=atch_name)
    part["Content-Disposition"] = f'attachment; filename="{atch_name}"'
    part.add_header("Content-ID", f"<{content_id}>")
    return part
def _build_multipart_msg(message, images):
    """Build Multipart message with in-line images."""
    _LOGGER.debug("Building multipart email with embedded attachment(s)")
    root = MIMEMultipart("related")
    alternative = MIMEMultipart("alternative")
    root.attach(alternative)
    # Plain-text alternative first, then an HTML body referencing each image
    # by its Content-ID.
    alternative.attach(MIMEText(message))
    html_parts = [f"<p>{message}</p><br>"]
    for index, image_name in enumerate(images):
        cid = f"image{index}"
        html_parts.append(f'<img src="cid:{cid}"><br>')
        part = _attach_file(image_name, cid)
        if part:
            root.attach(part)
    alternative.attach(MIMEText("".join(html_parts), "html"))
    return root
def _build_html_msg(text, html, images):
    """Build Multipart message with in-line images and rich HTML (UTF-8)."""
    _LOGGER.debug("Building HTML rich email")
    alternative = MIMEMultipart("alternative")
    # Plain-text part first so clients that cannot render HTML fall back to it.
    alternative.attach(MIMEText(text, _charset="utf-8"))
    alternative.attach(MIMEText(html, ATTR_HTML, _charset="utf-8"))
    root = MIMEMultipart("related")
    root.attach(alternative)
    # Attach each image using its base file name as the Content-ID.
    for image_path in images:
        part = _attach_file(image_path, os.path.basename(image_path))
        if part:
            root.attach(part)
    return root
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import unittest
import os
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from common import setup_module, with_seed, assertRaises
from copy import deepcopy
from nose.tools import raises, assert_raises
@with_seed()
@raises(RuntimeError)
def test_multi_trainer():
    """A sparse Parameter may only ever be owned by one Trainer."""
    x = gluon.Parameter('x', shape=(10,), stype='row_sparse')
    x.initialize()
    # test set trainer
    trainer0 = gluon.Trainer([x], 'sgd')
    assert(x._trainer is trainer0)
    # test unset trainer
    x._set_trainer(None)
    assert(x._trainer is None)
    x._set_trainer(trainer0)
    # multiple trainers for a sparse Parameter is not allowed
    # (this line is expected to raise the RuntimeError the decorator checks)
    trainer1 = gluon.Trainer([x], 'sgd')
@with_seed()
def test_trainer():
    """End-to-end Trainer test: SGD steps, lr_mult, state save/load and
    manual allreduce_grads/update with update_on_kvstore=False."""
    def dict_equ(a, b):
        # Two state dicts are equal iff they share keys and per-key arrays.
        assert set(a) == set(b)
        for k in a:
            assert (a[k].asnumpy() == b[k].asnumpy()).all()
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # NOTE(review): this compares the dict to itself and can never fail —
    # presumably it was meant to compare against another copy; confirm intent.
    assert trainer._optimizer.param_dict == trainer._optimizer.param_dict
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # Second step uses the halved learning rate via lr_mult.
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
    trainer.save_states('test_trainer.states')
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
        else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test_trainer.states')
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
        # invalid usage of update and allreduce_grads if update_on_kvstore
        assert_raises(AssertionError, trainer.update, 1)
        assert_raises(AssertionError, trainer.allreduce_grads)
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer
    # Fresh parameter/trainer to exercise the manual update path.
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer2 = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                             update_on_kvstore=False)
    with mx.autograd.record():
        for i, w in enumerate(x.list_data()):
            y = i*w
            y.backward()
    # Per-context gradients differ until allreduce_grads averages them.
    assert (x.grad(mx.cpu(0)).asnumpy() != x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.allreduce_grads()
    assert (x.grad(mx.cpu(0)).asnumpy() == x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.update(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy()
@with_seed()
def test_trainer_save_load():
    """After save/load of trainer states, lr_mult changes must still reach
    the optimizer (i.e. the param dict stays correctly associated)."""
    # NOTE(review): os.putenv does not update os.environ, so a later
    # os.getenv would not observe this value in-process — it only affects
    # native getenv()/child processes; confirm this is the intent here.
    previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
    os.putenv('MXNET_UPDATE_ON_KVSTORE', '1')
    x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_save_load.states')
    trainer.load_states('test_trainer_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
    os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore)
@with_seed()
def test_trainer_sparse_save_load():
    """Trainer state save/load must keep the optimizer associated with the
    parameter dict for a row_sparse parameter."""
    param = gluon.Parameter('x', shape=(10, 1), lr_mult=1.0, stype='row_sparse')
    param.initialize(ctx=[mx.cpu(0)], init='zeros')
    sgd = gluon.Trainer([param], 'sgd', {'learning_rate': 0.1})
    row_ids = mx.nd.arange(0, 10, ctx=mx.cpu(0))
    with mx.autograd.record():
        for weight in param.list_row_sparse_data(row_ids):
            out = weight * 1
            out.backward()
    sgd.step(1)
    assert sgd._kvstore._updater.optimizer._get_lr(0) == 0.1
    sgd.save_states('test_trainer_sparse_save_load.states')
    sgd.load_states('test_trainer_sparse_save_load.states')
    param.lr_mult = 2.0
    # After reload, the lr_mult change must be visible to the optimizer.
    assert sgd._kvstore._updater.optimizer._get_lr(0) == 0.2
@with_seed()
def test_trainer_multi_layer_init():
    """A network mixing a sparse parameter and a dense block must have all
    parameters initialized and updated correctly across contexts."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # sparse param
                self.embed_weight = self.params.get('embed_weight', stype='row_sparse',
                                                    shape=(4,3), grad_stype='row_sparse')
                # dense param from a hybrid block
                self.dense0 = nn.Dense(2)
        def forward(self, x):
            embed_weight = self.embed_weight.row_sparse_data(x)
            embed = mx.nd.Embedding(data=x, weight=embed_weight,
                                    input_dim=4, output_dim=3, sparse_grad=True)
            return self.dense0(embed)
    def check_init(ctxes):
        # One forward/backward/step cycle on the given contexts.
        net = Net(prefix='net_')
        net.initialize(mx.init.One(), ctx=ctxes)
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
        data = mx.nd.array([[0,2], [1,2]])
        xs = gluon.utils.split_and_load(data, ctxes)
        ys = []
        with mx.autograd.record():
            for x in xs:
                y = net(x)
                ys.append(y)
        for y in ys:
            y.backward()
        trainer.step(1)
        # all parameters should be initialized
        assert not trainer._params_to_init
        all_rows = mx.nd.arange(0, 4, ctx=mx.cpu(1))
        # check the updated weights
        weight = net.embed_weight.row_sparse_data(all_rows).asnumpy()
        assert (weight[0] == -1).all()
        assert (weight[1] == -1).all()
        assert (weight[2] == -3).all()
        assert (weight[3] == 1).all()
    check_init([mx.cpu(1), mx.cpu(2)])
    check_init([mx.cpu(1)])
@with_seed()
def test_trainer_reset_kv():
    """Loading parameters must reset the trainer's kvstore, and training
    must continue correctly from the loaded checkpoint."""
    def check_trainer_reset_kv(kv):
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,), lr_mult=1.0)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        params.save('test_trainer_reset_kv.params')
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params.load('test_trainer_reset_kv.params')
        if trainer._update_on_kvstore:
            # drop kvstore state if new parameters are loaded
            assert trainer._kvstore is None
            assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()
    # Exercise both kvstore flavours.
    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)
@with_seed()
def test_trainer_sparse_kv():
    """Check update_on_kvstore resolution for dense/sparse parameter and
    gradient storage types, including the invalid combination."""
    def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected):
        # 'expected' is either the resolved update_on_kvstore boolean or an
        # exception class the Trainer construction/step is expected to raise.
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,1), lr_mult=1.0, stype=stype, grad_stype=grad_stype)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1},
                                kvstore=kv, update_on_kvstore=update_on_kv)
        all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
        try:
            ws = x.list_data() if stype == 'default' else x.list_row_sparse_data(all_rows)
            with mx.autograd.record():
                for w in ws:
                    y = w + 1
                    y.backward()
            trainer.step(1)
            assert trainer._kvstore.type == kv
            assert trainer._kv_initialized
            assert trainer._update_on_kvstore is expected
            # the updated parameter should be based on the loaded checkpoint
            mx.nd.waitall()
            updated_w = x.data(mx.cpu(0)) if stype == 'default' else x.row_sparse_data(all_rows)
            assert (updated_w == -0.2).asnumpy().all()
        except Exception as err:
            # NOTE(review): when 'expected' is a bool this isinstance check
            # would itself raise; presumably only reached when 'expected' is
            # an exception class — confirm.
            assert isinstance(err, expected)
    kvs = ['local', 'device']
    global_update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    for kv in kvs:
        check_trainer_sparse_kv(kv, 'default', 'default', True, True)
        check_trainer_sparse_kv(kv, 'default', 'default', False, False)
        check_trainer_sparse_kv(kv, 'default', 'default', None, global_update_on_kvstore)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', None, False)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', True, True)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', False, False)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', None, True)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', False, ValueError)
@with_seed()
def test_trainer_lr_sched():
    """FactorScheduler must drive ``trainer.learning_rate`` both when updating
    on the kvstore and when not (``update_on_kvstore=False``)."""
    # FIX: the two code paths were verbatim copies; factored into one helper
    # parameterized by the extra Trainer kwargs.
    def check_lr_sched(**trainer_kwargs):
        x = gluon.Parameter('x', shape=(10,))
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        freq = 2
        factor = 0.1
        lr = 1
        lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
        trainer = gluon.Trainer([x], 'sgd', {'learning_rate': lr, 'lr_scheduler': lr_sched},
                                **trainer_kwargs)
        for i in range(10):
            with mx.autograd.record():
                for w in x.list_data():
                    y = w + 1
                    y.backward()
            trainer.step(1)
            if i % freq == 0:
                # the scheduler decays the rate by ``factor`` every ``freq`` updates
                assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
                lr *= factor
        mx.nd.waitall()

    check_lr_sched()
    # Update on kvstore = False
    check_lr_sched(update_on_kvstore=False)
@with_seed()
def test_gluon_trainer_param_order():
    """Trainer must keep parameters in the order they were added to the net."""
    net = mx.gluon.nn.Sequential()
    # layers may be added in a random order for all workers
    layers = {'ones_': 1, 'zeros_': 0}
    for prefix, const in layers.items():
        dense = mx.gluon.nn.Dense(10, in_units=10,
                                  weight_initializer=mx.init.Constant(const),
                                  use_bias=False, prefix=prefix)
        net.add(dense)
    params = net.collect_params()
    net.initialize()
    trainer = gluon.Trainer(params, 'sgd')
    for prefix in layers:
        idx = 0 if prefix == 'ones_' else 1
        # parameter name is '<prefix>weight' (no bias parameter was created)
        assert trainer._params[idx].name == prefix + 'weight'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.