from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from sqlalchemy import Column, Integer, String, ForeignKey, or_, and_, select, update, func, Unicode
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from flexget import db_schema, plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils import qualities
from flexget.utils.database import quality_requirement_property, with_session
from flexget.utils.imdb import extract_id
from flexget.utils.log import log_once
from flexget.utils.sqlalchemy_utils import table_exists, table_schema, table_add_column
try:
from flexget.plugins.filter import queue_base
except ImportError:
raise plugin.DependencyError(issued_by='movie_queue', missing='queue_base',
message='movie_queue requires the queue_base plugin')
log = logging.getLogger('movie_queue')
Base = db_schema.versioned_base('movie_queue', 4)
@event('manager.lock_acquired')
def migrate_imdb_queue(manager):
"""If imdb_queue table is found, migrate the data to movie_queue"""
session = Session()
try:
if table_exists('imdb_queue', session):
log.info('Migrating imdb_queue items to movie_queue')
old_table = table_schema('imdb_queue', session)
for row in session.execute(old_table.select()):
try:
queue_add(imdb_id=row['imdb_id'], quality=row['quality'], session=session)
except QueueError as e:
log.error('Unable to migrate %s from imdb_queue to movie_queue' % row['title'])
old_table.drop()
session.commit()
finally:
session.close()
@db_schema.upgrade('movie_queue')
def upgrade(ver, session):
if ver == 0:
# Translate old qualities into new quality requirements
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
# Webdl quality no longer has dash
new_qual = row['quality'].replace('web-dl', 'webdl')
if new_qual.lower() != 'any':
# Old behavior was to get specified quality or greater, approximate that with new system
new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': new_qual}))
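            # e.g. an old stored value of '720p web-dl' becomes '720p+ webdl+'
            # under the new quality-requirements syntax; 'any' is left untouched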
ver = 1
if ver == 1:
# Bad upgrade left some qualities as 'ANY+'
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
if row['quality'].lower() == 'any+':
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': 'ANY'}))
ver = 2
if ver == 2:
from flexget.utils.imdb import ImdbParser
# Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
movie_table = table_schema('movie_queue', session)
queue_base_table = table_schema('queue', session)
query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
query = query.where(movie_table.c.id == queue_base_table.c.id)
for row in session.execute(query):
if row['imdb_id'] and (not row['title'] or row['title'] == 'None' or '\n' in row['title']):
log.info('Fixing movie_queue title for %s' % row['imdb_id'])
parser = ImdbParser()
parser.parse(row['imdb_id'])
if parser.name:
session.execute(update(queue_base_table, queue_base_table.c.id == row['id'],
{'title': parser.name}))
ver = 3
if ver == 3:
        # adding queue_name column to movie_queue table and setting initial value to 'default'
table_add_column('movie_queue', 'queue_name', Unicode, session, default='default')
ver = 4
return ver
class QueuedMovie(queue_base.QueuedItem, Base):
__tablename__ = 'movie_queue'
__mapper_args__ = {'polymorphic_identity': 'movie'}
id = Column(Integer, ForeignKey('queue.id'), primary_key=True)
imdb_id = Column(String)
tmdb_id = Column(Integer)
quality = Column('quality', String)
quality_req = quality_requirement_property('quality')
queue_name = Column(Unicode)
def to_dict(self):
return {
'added': self.added,
'is_downloaded': True if self.downloaded else False,
'download_date': self.downloaded if self.downloaded else None,
'entry_original_url': self.entry_original_url,
'entry_title': self.entry_title,
'entry_url': self.entry_url,
'id': self.id,
'imdb_id': self.imdb_id,
'tmdb_id': self.tmdb_id,
'quality': self.quality,
'title': self.title,
            'queue_name': self.queue_name
}
class MovieQueue(queue_base.FilterQueueBase):
schema = {
'oneOf': [
{'type': 'string', 'enum': ['accept', 'add', 'remove', 'forget']},
{
'type': 'object',
'properties': {
'action': {'type': 'string', 'enum': ['accept', 'add', 'remove', 'forget']},
'quality': {'type': 'string', 'format': 'quality_requirements'},
'queue_name': {'type': 'string'}
},
'required': ['action'],
'additionalProperties': False,
'deprecated': 'movie_queue plugin is deprecated. Please switch to using movie_list'
}
]
}
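    # Illustrative config sketch (assumptions: the surrounding task layout; the
    # movie_queue keys themselves follow the schema above):
    #
    #   movie_queue:
    #     action: add
    #     quality: 720p+
    #     queue_name: default
    #
    # or, using the string shorthand:  movie_queue: accept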
def matches(self, task, config, entry):
if not config:
return
if not isinstance(config, dict):
config = {'action': config}
# only the accept action is applied in the 'matches' section
if config.get('action') != 'accept':
return
queue_name = config.get('queue_name', 'default')
        # Tell imdb_lookup and tmdb_lookup to add lazy lookup fields if not already present
try:
plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
except plugin.DependencyError:
log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
try:
plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
except plugin.DependencyError:
log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
conditions = []
# Check if a movie id is already populated before incurring a lazy lookup
for lazy in [False, True]:
if entry.get('imdb_id', eval_lazy=lazy):
conditions.append(QueuedMovie.imdb_id == entry['imdb_id'])
if entry.get('tmdb_id', eval_lazy=lazy and not conditions):
conditions.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
if conditions:
break
if not conditions:
log_once('IMDB and TMDB lookups failed for %s.' % entry['title'], log, logging.WARN)
return
quality = entry.get('quality', qualities.Quality())
movie = task.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None).filter(
QueuedMovie.queue_name == queue_name).filter(or_(*conditions)).first()
if movie and movie.quality_req.allows(quality):
return movie
def on_task_output(self, task, config):
if not config:
return
if not isinstance(config, dict):
config = {'action': config}
config.setdefault('queue_name', 'default')
for entry in task.accepted:
# Tell tmdb_lookup to add lazy lookup fields if not already present
try:
plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
except plugin.DependencyError:
log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
# Find one or both movie id's for this entry. See if an id is already populated before incurring lazy lookup
kwargs = {}
for lazy in [False, True]:
if entry.get('imdb_id', eval_lazy=lazy):
kwargs['imdb_id'] = entry['imdb_id']
if entry.get('tmdb_id', eval_lazy=lazy):
kwargs['tmdb_id'] = entry['tmdb_id']
if kwargs:
break
if not kwargs:
log.warning('Could not determine a movie id for %s, it will not be added to queue.' % entry['title'])
continue
# Provide movie title if it is already available, to avoid movie_queue doing a lookup
kwargs['title'] = (entry.get('imdb_name', eval_lazy=False) or
entry.get('tmdb_name', eval_lazy=False) or
entry.get('movie_name', eval_lazy=False))
log.debug('movie_queue kwargs: %s' % kwargs)
kwargs['queue_name'] = config.get('queue_name')
try:
action = config.get('action')
if action == 'add':
# since entries usually have unknown quality we need to ignore that ..
if entry.get('quality_req'):
kwargs['quality'] = qualities.Requirements(entry['quality_req'])
elif entry.get('quality'):
kwargs['quality'] = qualities.Requirements(entry['quality'].name)
else:
kwargs['quality'] = qualities.Requirements(config.get('quality', 'any'))
queue_add(**kwargs)
elif action == 'remove':
queue_del(**kwargs)
elif action == 'forget':
queue_forget(**kwargs)
except QueueError as e:
# Ignore already in queue errors
if e.errno != 1:
entry.fail('Error adding movie to queue: %s' % e.message)
class QueueError(Exception):
"""Exception raised if there is an error with a queue operation"""
    # TODO: I think message was removed from the exception base class and is now masked.
    # Some other custom exception (DependencyError) had to make some tweaks to make it work ..
def __init__(self, message, errno=0):
self.message = message
self.errno = errno
@with_session
def parse_what(what, lookup=True, session=None):
"""
Determines what information was provided by the search string `what`.
If `lookup` is true, will fill in other information from tmdb.
:param what: Can be one of:
<Movie Title>: Search based on title
imdb_id=<IMDB id>: search based on imdb id
tmdb_id=<TMDB id>: search based on tmdb id
:param bool lookup: Whether missing info should be filled in from tmdb.
:param session: An existing session that will be used for lookups if provided.
:rtype: dict
:return: A dictionary with 'title', 'imdb_id' and 'tmdb_id' keys
"""
tmdb_lookup = plugin.get_plugin_by_name('api_tmdb').instance.lookup
result = {'title': None, 'imdb_id': None, 'tmdb_id': None}
result['imdb_id'] = extract_id(what)
if not result['imdb_id']:
if isinstance(what, int):
result['tmdb_id'] = what
elif what.startswith('tmdb_id='):
result['tmdb_id'] = what[8:]
else:
result['title'] = what
if not lookup:
# If not doing an online lookup we can return here
return result
search_entry = Entry(title=result['title'] or '')
for field in ['imdb_id', 'tmdb_id']:
if result.get(field):
search_entry[field] = result[field]
# Put lazy lookup fields on the search entry
plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(search_entry)
plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(search_entry)
try:
# Both ids are optional, but if movie_name was populated at least one of them will be there
return {'title': search_entry['movie_name'], 'imdb_id': search_entry.get('imdb_id'),
'tmdb_id': search_entry.get('tmdb_id')}
except KeyError as e:
raise QueueError(e.message)
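# Minimal usage sketch for parse_what (illustrative; lookup=True additionally
# needs the api_tmdb / imdb_lookup / tmdb_lookup plugins for an online lookup):
#
#   parse_what('tt0114369', lookup=False)    # imdb id recognised in the string
#   parse_what('tmdb_id=603', lookup=False)  # explicit tmdb id
#   parse_what('Se7en', lookup=False)        # anything else is treated as a title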
# API functions to edit queue
@with_session
def queue_add(title=None, imdb_id=None, tmdb_id=None, quality=None, session=None, queue_name='default'):
"""
Add an item to the queue with the specified quality requirements.
One or more of `title` `imdb_id` or `tmdb_id` must be specified when calling this function.
:param title: Title of the movie. (optional)
:param imdb_id: IMDB id for the movie. (optional)
:param tmdb_id: TMDB id for the movie. (optional)
:param quality: A QualityRequirements object defining acceptable qualities.
:param queue_name: Name of movie queue to get items from
:param session: Optional session to use for database updates
"""
quality = quality or qualities.Requirements('any')
if not title or not (imdb_id or tmdb_id):
# We don't have all the info we need to add movie, do a lookup for more info
result = parse_what(imdb_id or title or tmdb_id, session=session)
title = result['title']
if not title:
raise QueueError('Could not parse movie info for given parameters: title=%s, imdb_id=%s, tmdb_id=%s' % (
title, imdb_id, tmdb_id))
imdb_id = result['imdb_id']
tmdb_id = result['tmdb_id']
# check if the item is already queued
item = session.query(QueuedMovie).filter(and_((func.lower(QueuedMovie.queue_name) == queue_name.lower()),
or_(and_(QueuedMovie.imdb_id != None, QueuedMovie.imdb_id == imdb_id),
and_(QueuedMovie.tmdb_id != None,
QueuedMovie.tmdb_id == tmdb_id)))).first()
if not item:
item = QueuedMovie(title=title, imdb_id=imdb_id, tmdb_id=tmdb_id, quality=quality.text, queue_name=queue_name)
session.add(item)
session.commit()
log.info('Adding %s to movie queue %s with quality=%s.', title, queue_name, quality)
return item.to_dict()
else:
if item.downloaded:
raise QueueError('ERROR: %s has already been queued and downloaded' % title, errno=1)
else:
raise QueueError('ERROR: %s is already in the queue %s' % (title, queue_name), errno=1)
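# Illustrative use of queue_add (a sketch; the id is a placeholder and the
# title/id lookup path requires the api_tmdb plugin):
#
#   try:
#       queue_add(imdb_id='tt0114369', quality=qualities.Requirements('720p+'))
#   except QueueError as err:
#       if err.errno != 1:  # errno 1 means the movie is already queued/downloaded
#           raise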
@with_session
def queue_del(title=None, imdb_id=None, tmdb_id=None, session=None, movie_id=None, queue_name='default'):
"""
Delete the given item from the queue.
:param title: Movie title
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:param queue_name: Name of movie queue to get items from
    :return: Title of deleted movie
:raises QueueError: If queued item could not be found with given arguments
"""
log.debug('queue_del - title=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s, queue_name=%s',
title, imdb_id, tmdb_id, movie_id, queue_name)
query = session.query(QueuedMovie).filter(func.lower(QueuedMovie.queue_name) == queue_name.lower())
if imdb_id:
query = query.filter(QueuedMovie.imdb_id == imdb_id)
elif tmdb_id:
query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
elif title:
query = query.filter(func.lower(QueuedMovie.title) == func.lower(title))
elif movie_id:
query = query.filter(QueuedMovie.id == movie_id)
try:
item = query.one()
title = item.title
session.delete(item)
return title
except NoResultFound as e:
raise QueueError(
'title=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s not found in queue %s' % (
title, imdb_id, tmdb_id, movie_id, queue_name))
except MultipleResultsFound:
raise QueueError('title=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s matches multiple results in queue %s' %
(title, imdb_id, tmdb_id, movie_id, queue_name))
@with_session
def queue_clear(queue_name='default', session=None):
"""Deletes waiting movies from queue"""
results = queue_get(downloaded=False, queue_name=queue_name, session=session)
for res in results:
session.delete(res)
@with_session
def queue_forget(title=None, imdb_id=None, tmdb_id=None, session=None, movie_id=None, queue_name='default'):
"""
Forget movie download from the queue.
:param title: Movie title
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:param queue_name: Name of movie queue to get items from
:return: Title of forgotten movie
:raises QueueError: If queued item could not be found with given arguments
"""
log.debug('queue_forget - title=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s, queue_name=%s', title, imdb_id, tmdb_id,
movie_id, queue_name)
query = session.query(QueuedMovie).filter(func.lower(QueuedMovie.queue_name) == queue_name.lower())
if imdb_id:
query = query.filter(QueuedMovie.imdb_id == imdb_id)
elif tmdb_id:
query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
elif title:
query = query.filter(func.lower(QueuedMovie.title) == func.lower(title))
elif movie_id:
query = query.filter(QueuedMovie.id == movie_id)
try:
item = query.one()
title = item.title
if not item.downloaded:
raise QueueError(message=('%s is not marked as downloaded' % title), errno=1)
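        # clearing the downloaded timestamp puts the movie back into the waiting (not downloaded) state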
item.downloaded = None
return item.to_dict()
except NoResultFound as e:
raise QueueError(message=('title=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s, queue_name=%s not found in queue' %
(title, imdb_id, tmdb_id, movie_id, queue_name)), errno=2)
@with_session
def queue_edit(quality, imdb_id=None, tmdb_id=None, session=None, movie_id=None, queue_name='default'):
"""
    Change the required quality for a movie already in the queue.

    :param quality: New quality requirement for the movie
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:param queue_name: Name of movie queue to get items from
:return: Title of edited item
:raises QueueError: If queued item could not be found with given arguments
"""
# check if the item is queued
log.debug('queue_edit - quality=%s, imdb_id=%s, tmdb_id=%s, movie_id=%s, queue_name=%s', quality, imdb_id, tmdb_id,
movie_id, queue_name)
query = session.query(QueuedMovie).filter(func.lower(QueuedMovie.queue_name) == queue_name.lower())
    if imdb_id:
        query = query.filter(QueuedMovie.imdb_id == imdb_id)
    elif tmdb_id:
        query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
    elif movie_id:
        query = query.filter(QueuedMovie.id == movie_id)
try:
item = query.one()
item.quality = quality
session.commit()
return item.to_dict()
except NoResultFound as e:
raise QueueError(
'imdb_id=%s, tmdb_id=%s, movie_id=%s not found in queue %s' % (imdb_id, tmdb_id, movie_id, queue_name))
@with_session
def queue_get(session=None, downloaded=None, queue_name='default'):
"""
Get the current movie queue.
    :param session: New session is used if not given
    :param bool downloaded: True returns only downloaded items, False only waiting items, None returns all items
:param queue_name: Name of movie queue to get items from
:return: List of QueuedMovie objects (detached from session)
"""
query = session.query(QueuedMovie).filter(func.lower(QueuedMovie.queue_name) == queue_name.lower())
if downloaded is False:
return query.filter(QueuedMovie.downloaded == None).all()
elif downloaded:
return query.filter(QueuedMovie.downloaded != None).all()
else:
return query.all()
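# Illustrative reads from the queue (a sketch):
#
#   waiting = queue_get(downloaded=False)         # movies still waiting
#   done = queue_get(downloaded=True)             # movies already downloaded
#   everything = queue_get(queue_name='default')  # all items in the queue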
@with_session
def get_movie_by_id(movie_id, session=None):
"""
Return movie item from movie_id
:param movie_id: ID of queued movie
:param session: Session
:return: Dict of movie details
"""
return session.query(QueuedMovie).filter(QueuedMovie.id == movie_id).one().to_dict()
@with_session
def delete_movie_by_id(movie_id, session=None):
"""
Deletes movie by its ID
:param movie_id: ID of queued movie
:param session: Session
"""
movie = session.query(QueuedMovie).filter(QueuedMovie.id == movie_id).one()
session.delete(movie)
@event('plugin.register')
def register_plugin():
plugin.register(MovieQueue, 'movie_queue', api_ver=2)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTObject
class NUNSGInfo(NURESTObject):
""" Represents a NSGInfo in the VSD
Notes:
Device information coming from the NSG. That includes the hardware type of the NSG (CPU, memory, family), the version of BIOS, build information, and software application packages installed.
"""
__rest_name__ = "nsginfo"
__resource_name__ = "nsginfos"
## Constants
CONST_FAMILY_NSG_C = "NSG_C"
CONST_FAMILY_NSG_E = "NSG_E"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"
CONST_PERSONALITY_NSGDUC = "NSGDUC"
CONST_FAMILY_NSG_V = "NSG_V"
CONST_BOOTSTRAP_STATUS_ACTIVE = "ACTIVE"
CONST_CMD_STATUS_RUNNING = "RUNNING"
CONST_FAMILY_NSG_X = "NSG_X"
CONST_FAMILY_NSG_DOCKER = "NSG_DOCKER"
CONST_FAMILY_NSG_E200 = "NSG_E200"
CONST_CMD_STATUS_COMPLETED = "COMPLETED"
CONST_PERSONALITY_NSG = "NSG"
CONST_CMD_STATUS_FAILED = "FAILED"
CONST_BOOTSTRAP_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"
CONST_FAMILY_NSG_AZ = "NSG_AZ"
CONST_FAMILY_ANY = "ANY"
CONST_CMD_STATUS_SKIPPED = "SKIPPED"
CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"
CONST_CMD_STATUS_UNKNOWN = "UNKNOWN"
CONST_FAMILY_NSG_X200 = "NSG_X200"
CONST_FAMILY_NSG_E300 = "NSG_E300"
CONST_CMD_TYPE_NSG_UPGRADE_TO_IMAGE = "NSG_UPGRADE_TO_IMAGE"
CONST_CMD_STATUS_STARTED = "STARTED"
CONST_FAMILY_NSG_AMI = "NSG_AMI"
CONST_CMD_STATUS_ABANDONED = "ABANDONED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_CMD_TYPE_NSG_DOWNLOAD_OS_IMAGE = "NSG_DOWNLOAD_OS_IMAGE"
CONST_BOOTSTRAP_STATUS_INACTIVE = "INACTIVE"
CONST_PERSONALITY_NSGBR = "NSGBR"
def __init__(self, **kwargs):
""" Initializes a NSGInfo instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> nsginfo = NUNSGInfo(id=u'xxxx-xxx-xxx-xxx', name=u'NSGInfo')
>>> nsginfo = NUNSGInfo(data=my_dict)
"""
super(NUNSGInfo, self).__init__()
# Read/Write Attributes
self._mac_address = None
self._ca_certificate = None
self._aar_application_release_date = None
self._aar_application_version = None
self._bios_release_date = None
self._bios_version = None
self._sku = None
self._tpm_status = None
self._tpm_version = None
self._cpu_core_allocation = None
self._cpu_type = None
self._nsg_version = None
self._uuid = None
self._name = None
self._family = None
self._patches_detail = None
self._serial_number = None
self._personality = None
self._certificate = None
self._libraries = None
self._cmd_detailed_status = None
self._cmd_detailed_status_code = None
self._cmd_download_progress = None
self._cmd_id = None
self._cmd_last_updated_date = None
self._cmd_status = None
self._cmd_type = None
self._enterprise_id = None
self._enterprise_name = None
self._entity_scope = None
self._bootstrap_status = None
self._product_name = None
self._associated_entity_type = None
self._associated_ns_gateway_id = None
self._huge_page_setting = None
self._external_id = None
self._system_id = None
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ca_certificate", remote_name="CACertificate", attribute_type=dict, is_required=False, is_unique=False)
self.expose_attribute(local_name="aar_application_release_date", remote_name="AARApplicationReleaseDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="aar_application_version", remote_name="AARApplicationVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bios_release_date", remote_name="BIOSReleaseDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bios_version", remote_name="BIOSVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="sku", remote_name="SKU", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="tpm_status", remote_name="TPMStatus", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="tpm_version", remote_name="TPMVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_core_allocation", remote_name="CPUCoreAllocation", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'NSG_AMI', u'NSG_AZ', u'NSG_C', u'NSG_DOCKER', u'NSG_E', u'NSG_E200', u'NSG_E300', u'NSG_V', u'NSG_X', u'NSG_X200'])
self.expose_attribute(local_name="patches_detail", remote_name="patchesDetail", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'NSG', u'NSGBR', u'NSGDUC'])
self.expose_attribute(local_name="certificate", remote_name="certificate", attribute_type=dict, is_required=False, is_unique=False)
self.expose_attribute(local_name="libraries", remote_name="libraries", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_detailed_status", remote_name="cmdDetailedStatus", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_detailed_status_code", remote_name="cmdDetailedStatusCode", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_download_progress", remote_name="cmdDownloadProgress", attribute_type=dict, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_id", remote_name="cmdID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_last_updated_date", remote_name="cmdLastUpdatedDate", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="cmd_status", remote_name="cmdStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ABANDONED', u'COMPLETED', u'FAILED', u'RUNNING', u'SKIPPED', u'STARTED', u'UNKNOWN'])
self.expose_attribute(local_name="cmd_type", remote_name="cmdType", attribute_type=str, is_required=False, is_unique=False, choices=[u'NSG_DOWNLOAD_OS_IMAGE', u'NSG_UPGRADE_TO_IMAGE'])
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="bootstrap_status", remote_name="bootstrapStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
self.expose_attribute(local_name="product_name", remote_name="productName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="huge_page_setting", remote_name="hugePageSetting", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)
self._compute_args(**kwargs)
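    # Illustrative sketch of the mapping set up above (the dict keys mirror the
    # remote_name values passed to expose_attribute; values are placeholders):
    #
    #   info = NUNSGInfo(data={'BIOSVersion': '5.11', 'family': 'NSG_E'})
    #   info.bios_version   # -> '5.11'
    #   info.family         # -> 'NSG_E'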
# Properties
@property
def mac_address(self):
""" Get mac_address value.
Notes:
                A comma-separated list of MAC addresses associated with the NSG's interfaces (e.g., port1, port2, port3).
This attribute is named `MACAddress` in VSD API.
"""
return self._mac_address
@mac_address.setter
def mac_address(self, value):
""" Set mac_address value.
Notes:
                A comma-separated list of MAC addresses associated with the NSG's interfaces (e.g., port1, port2, port3).
This attribute is named `MACAddress` in VSD API.
"""
self._mac_address = value
@property
def ca_certificate(self):
""" Get ca_certificate value.
Notes:
                The certificate metadata of the NSG's certificate authority.
This attribute is named `CACertificate` in VSD API.
"""
return self._ca_certificate
@ca_certificate.setter
def ca_certificate(self, value):
""" Set ca_certificate value.
Notes:
                The certificate metadata of the NSG's certificate authority.
This attribute is named `CACertificate` in VSD API.
"""
self._ca_certificate = value
@property
def aar_application_release_date(self):
""" Get aar_application_release_date value.
Notes:
Release Date of the AAR Application
This attribute is named `AARApplicationReleaseDate` in VSD API.
"""
return self._aar_application_release_date
@aar_application_release_date.setter
def aar_application_release_date(self, value):
""" Set aar_application_release_date value.
Notes:
Release Date of the AAR Application
This attribute is named `AARApplicationReleaseDate` in VSD API.
"""
self._aar_application_release_date = value
@property
def aar_application_version(self):
""" Get aar_application_version value.
Notes:
The AAR Application Version
This attribute is named `AARApplicationVersion` in VSD API.
"""
return self._aar_application_version
@aar_application_version.setter
def aar_application_version(self, value):
""" Set aar_application_version value.
Notes:
The AAR Application Version
This attribute is named `AARApplicationVersion` in VSD API.
"""
self._aar_application_version = value
@property
def bios_release_date(self):
""" Get bios_release_date value.
Notes:
Release Date of the NSG BiOS
This attribute is named `BIOSReleaseDate` in VSD API.
"""
return self._bios_release_date
@bios_release_date.setter
def bios_release_date(self, value):
""" Set bios_release_date value.
Notes:
Release Date of the NSG BiOS
This attribute is named `BIOSReleaseDate` in VSD API.
"""
self._bios_release_date = value
@property
def bios_version(self):
""" Get bios_version value.
Notes:
                NSG BIOS Version as received from the NSG during bootstrap or a reboot. If the information exceeds 255 characters, the extra characters will be truncated.
This attribute is named `BIOSVersion` in VSD API.
"""
return self._bios_version
@bios_version.setter
def bios_version(self, value):
""" Set bios_version value.
Notes:
                NSG BIOS Version as received from the NSG during bootstrap or a reboot. If the information exceeds 255 characters, the extra characters will be truncated.
This attribute is named `BIOSVersion` in VSD API.
"""
self._bios_version = value
@property
def sku(self):
""" Get sku value.
Notes:
The part number of the NSG
This attribute is named `SKU` in VSD API.
"""
return self._sku
@sku.setter
def sku(self, value):
""" Set sku value.
Notes:
The part number of the NSG
This attribute is named `SKU` in VSD API.
"""
self._sku = value
@property
def tpm_status(self):
""" Get tpm_status value.
Notes:
                TPM status code as reported by the NSG during bootstrapping. This information indicates whether TPM is being used in securing the private key/certificate of an NSG. Possible values are 0(Unknown), 1(Enabled_Not_Operational), 2(Enabled_Operational), 3(Disabled).
This attribute is named `TPMStatus` in VSD API.
"""
return self._tpm_status
@tpm_status.setter
def tpm_status(self, value):
""" Set tpm_status value.
Notes:
                TPM status code as reported by the NSG during bootstrapping. This information indicates whether TPM is being used in securing the private key/certificate of an NSG. Possible values are 0(Unknown), 1(Enabled_Not_Operational), 2(Enabled_Operational), 3(Disabled).
This attribute is named `TPMStatus` in VSD API.
"""
self._tpm_status = value
@property
def tpm_version(self):
""" Get tpm_version value.
Notes:
TPM (Trusted Platform Module) version as reported by the NSG.
This attribute is named `TPMVersion` in VSD API.
"""
return self._tpm_version
@tpm_version.setter
def tpm_version(self, value):
""" Set tpm_version value.
Notes:
TPM (Trusted Platform Module) version as reported by the NSG.
This attribute is named `TPMVersion` in VSD API.
"""
self._tpm_version = value
@property
def cpu_core_allocation(self):
""" Get cpu_core_allocation value.
Notes:
Current CPU allocation for network accelerated gateways. Displays total number of cores and those isolated.
This attribute is named `CPUCoreAllocation` in VSD API.
"""
return self._cpu_core_allocation
@cpu_core_allocation.setter
def cpu_core_allocation(self, value):
""" Set cpu_core_allocation value.
Notes:
Current CPU allocation for network accelerated gateways. Displays total number of cores and those isolated.
This attribute is named `CPUCoreAllocation` in VSD API.
"""
self._cpu_core_allocation = value
@property
def cpu_type(self):
""" Get cpu_type value.
Notes:
The NSG Processor Type based on information extracted during bootstrapping. This may refer to a type of processor manufactured by Intel, ARM, AMD, Cyrix, VIA, or others.
This attribute is named `CPUType` in VSD API.
"""
return self._cpu_type
@cpu_type.setter
def cpu_type(self, value):
""" Set cpu_type value.
Notes:
The NSG Processor Type based on information extracted during bootstrapping. This may refer to a type of processor manufactured by Intel, ARM, AMD, Cyrix, VIA, or others.
This attribute is named `CPUType` in VSD API.
"""
self._cpu_type = value
@property
def nsg_version(self):
""" Get nsg_version value.
Notes:
The NSG Version as reported during a bootstrap or a reboot of the NSG.
This attribute is named `NSGVersion` in VSD API.
"""
return self._nsg_version
@nsg_version.setter
def nsg_version(self, value):
""" Set nsg_version value.
Notes:
The NSG Version as reported during a bootstrap or a reboot of the NSG.
This attribute is named `NSGVersion` in VSD API.
"""
self._nsg_version = value
@property
def uuid(self):
""" Get uuid value.
Notes:
The Redhat/CentOS UUID of the NSG
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
The Redhat/CentOS UUID of the NSG
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def name(self):
""" Get name value.
Notes:
Name of the Gateway.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Gateway.
"""
self._name = value
@property
def family(self):
""" Get family value.
Notes:
The NSG Family type as it was returned by the NSG during bootstrapping.
"""
return self._family
@family.setter
def family(self, value):
""" Set family value.
Notes:
The NSG Family type as it was returned by the NSG during bootstrapping.
"""
self._family = value
@property
def patches_detail(self):
""" Get patches_detail value.
Notes:
Base64 Encoded JSON String of the extra details pertaining to each successfully installed patch
This attribute is named `patchesDetail` in VSD API.
"""
return self._patches_detail
@patches_detail.setter
def patches_detail(self, value):
""" Set patches_detail value.
Notes:
Base64 Encoded JSON String of the extra details pertaining to each successfully installed patch
This attribute is named `patchesDetail` in VSD API.
"""
self._patches_detail = value
@property
def serial_number(self):
""" Get serial_number value.
Notes:
The NSG's serial number as it is stored in the system's CMOS (Motherboard)
This attribute is named `serialNumber` in VSD API.
"""
return self._serial_number
@serial_number.setter
def serial_number(self, value):
""" Set serial_number value.
Notes:
The NSG's serial number as it is stored in the system's CMOS (Motherboard)
This attribute is named `serialNumber` in VSD API.
"""
self._serial_number = value
@property
def personality(self):
""" Get personality value.
Notes:
Personality of the Gateway.
"""
return self._personality
@personality.setter
def personality(self, value):
""" Set personality value.
Notes:
Personality of the Gateway.
"""
self._personality = value
@property
def certificate(self):
""" Get certificate value.
Notes:
                The certificate metadata of the NSG's current certificate.
"""
return self._certificate
@certificate.setter
def certificate(self, value):
""" Set certificate value.
Notes:
                The certificate metadata of the NSG's current certificate.
"""
self._certificate = value
@property
def libraries(self):
""" Get libraries value.
Notes:
Tracks RPM package installed for some libraries installed on the NSG.
"""
return self._libraries
@libraries.setter
def libraries(self, value):
""" Set libraries value.
Notes:
Tracks RPM package installed for some libraries installed on the NSG.
"""
self._libraries = value
@property
def cmd_detailed_status(self):
""" Get cmd_detailed_status value.
Notes:
Detailed status of the current running or last run command.
This attribute is named `cmdDetailedStatus` in VSD API.
"""
return self._cmd_detailed_status
@cmd_detailed_status.setter
def cmd_detailed_status(self, value):
""" Set cmd_detailed_status value.
Notes:
Detailed status of the current running or last run command.
This attribute is named `cmdDetailedStatus` in VSD API.
"""
self._cmd_detailed_status = value
@property
def cmd_detailed_status_code(self):
""" Get cmd_detailed_status_code value.
Notes:
Numerical value representing the code mapping to detailed status of the current or last command operation.
This attribute is named `cmdDetailedStatusCode` in VSD API.
"""
return self._cmd_detailed_status_code
@cmd_detailed_status_code.setter
def cmd_detailed_status_code(self, value):
""" Set cmd_detailed_status_code value.
Notes:
Numerical value representing the code mapping to detailed status of the current or last command operation.
This attribute is named `cmdDetailedStatusCode` in VSD API.
"""
self._cmd_detailed_status_code = value
@property
def cmd_download_progress(self):
""" Get cmd_download_progress value.
Notes:
DownloadProgress object representing the progress of Gateway image download.
This attribute is named `cmdDownloadProgress` in VSD API.
"""
return self._cmd_download_progress
@cmd_download_progress.setter
def cmd_download_progress(self, value):
""" Set cmd_download_progress value.
Notes:
DownloadProgress object representing the progress of Gateway image download.
This attribute is named `cmdDownloadProgress` in VSD API.
"""
self._cmd_download_progress = value
@property
def cmd_id(self):
""" Get cmd_id value.
Notes:
Identifier of the running or last Command.
This attribute is named `cmdID` in VSD API.
"""
return self._cmd_id
@cmd_id.setter
def cmd_id(self, value):
""" Set cmd_id value.
Notes:
Identifier of the running or last Command.
This attribute is named `cmdID` in VSD API.
"""
self._cmd_id = value
@property
def cmd_last_updated_date(self):
""" Get cmd_last_updated_date value.
Notes:
Time stamp when the command was last updated.
This attribute is named `cmdLastUpdatedDate` in VSD API.
"""
return self._cmd_last_updated_date
@cmd_last_updated_date.setter
def cmd_last_updated_date(self, value):
""" Set cmd_last_updated_date value.
Notes:
Time stamp when the command was last updated.
This attribute is named `cmdLastUpdatedDate` in VSD API.
"""
self._cmd_last_updated_date = value
@property
def cmd_status(self):
""" Get cmd_status value.
Notes:
Status of the current or last command.
This attribute is named `cmdStatus` in VSD API.
"""
return self._cmd_status
@cmd_status.setter
def cmd_status(self, value):
""" Set cmd_status value.
Notes:
Status of the current or last command.
This attribute is named `cmdStatus` in VSD API.
"""
self._cmd_status = value
@property
def cmd_type(self):
""" Get cmd_type value.
Notes:
                Specifies the type of command that is slated for execution on the system: a request for download or a request for upgrade.
This attribute is named `cmdType` in VSD API.
"""
return self._cmd_type
@cmd_type.setter
def cmd_type(self, value):
""" Set cmd_type value.
Notes:
                Specifies the type of command that is slated for execution on the system: a request for download or a request for upgrade.
This attribute is named `cmdType` in VSD API.
"""
self._cmd_type = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
The enterprise associated with this Gateway.
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
The enterprise associated with this Gateway.
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
Name of the Enterprise associated with this Gateway.
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
Name of the Enterprise associated with this Gateway.
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def bootstrap_status(self):
""" Get bootstrap_status value.
Notes:
                The bootstrap status of the NSG from which the information was collected.
This attribute is named `bootstrapStatus` in VSD API.
"""
return self._bootstrap_status
@bootstrap_status.setter
def bootstrap_status(self, value):
""" Set bootstrap_status value.
Notes:
                The bootstrap status of the NSG from which the information was collected.
This attribute is named `bootstrapStatus` in VSD API.
"""
self._bootstrap_status = value
@property
def product_name(self):
""" Get product_name value.
Notes:
NSG Product Name as reported when the device bootstraps.
This attribute is named `productName` in VSD API.
"""
return self._product_name
@product_name.setter
def product_name(self, value):
""" Set product_name value.
Notes:
NSG Product Name as reported when the device bootstraps.
This attribute is named `productName` in VSD API.
"""
self._product_name = value
@property
def associated_entity_type(self):
""" Get associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
return self._associated_entity_type
@associated_entity_type.setter
def associated_entity_type(self, value):
""" Set associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
self._associated_entity_type = value
@property
def associated_ns_gateway_id(self):
""" Get associated_ns_gateway_id value.
Notes:
                The ID of the NSG from which the information was collected.
This attribute is named `associatedNSGatewayID` in VSD API.
"""
return self._associated_ns_gateway_id
@associated_ns_gateway_id.setter
def associated_ns_gateway_id(self, value):
""" Set associated_ns_gateway_id value.
Notes:
                The ID of the NSG from which the information was collected.
This attribute is named `associatedNSGatewayID` in VSD API.
"""
self._associated_ns_gateway_id = value
@property
def huge_page_setting(self):
""" Get huge_page_setting value.
Notes:
                The size and number of huge pages for an NSG that is running in network accelerated mode. Hugepage values state the portion of memory reserved for network accelerated services.
This attribute is named `hugePageSetting` in VSD API.
"""
return self._huge_page_setting
@huge_page_setting.setter
def huge_page_setting(self, value):
""" Set huge_page_setting value.
Notes:
                The size and number of huge pages for an NSG that is running in network accelerated mode. Hugepage values state the portion of memory reserved for network accelerated services.
This attribute is named `hugePageSetting` in VSD API.
"""
self._huge_page_setting = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def system_id(self):
""" Get system_id value.
Notes:
System identifier of the Gateway.
This attribute is named `systemID` in VSD API.
"""
return self._system_id
@system_id.setter
def system_id(self, value):
""" Set system_id value.
Notes:
System identifier of the Gateway.
This attribute is named `systemID` in VSD API.
"""
self._system_id = value
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import json
import time
import mock
import mox
from oslo_config import cfg
import six
import testtools
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.common import timeutils
from heat.db import api as db_api
from heat.engine.clients.os import keystone
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import function
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import template
from heat.objects import raw_template as raw_template_object
from heat.objects import stack as stack_object
from heat.objects import stack_tag as stack_tag_object
from heat.objects import user_creds as ucreds_object
from heat.tests import common
from heat.tests import fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
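# empty_template above is the minimal CFN-style template; individual tests build
# richer templates inline where they need resources or parameters.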
class StackTest(common.HeatTestCase):
def setUp(self):
super(StackTest, self).setUp()
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
def test_stack_reads_tenant(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id='bar')
self.assertEqual('bar', self.stack.tenant_id)
def test_stack_reads_tenant_from_context_if_empty(self):
self.ctx.tenant_id = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id=None)
self.assertEqual('foo', self.stack.tenant_id)
def test_stack_reads_username(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username='bar')
self.assertEqual('bar', self.stack.username)
def test_stack_reads_username_from_context_if_empty(self):
self.ctx.username = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username=None)
self.assertEqual('foo', self.stack.username)
def test_stack_string_repr(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
expected = 'Stack "%s" [%s]' % (self.stack.name, self.stack.id)
observed = str(self.stack)
self.assertEqual(expected, observed)
def test_state_defaults(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertEqual(('CREATE', 'IN_PROGRESS'), self.stack.state)
self.assertEqual('', self.stack.status_reason)
def test_timeout_secs_default(self):
cfg.CONF.set_override('stack_action_timeout', 1000)
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsNone(self.stack.timeout_mins)
self.assertEqual(1000, self.stack.timeout_secs())
def test_timeout_secs(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
timeout_mins=10)
self.assertEqual(600, self.stack.timeout_secs())
@mock.patch.object(stack, 'datetime')
def test_time_elapsed(self, mock_dt):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# mock utcnow set to 10:10:00 (600s offset)
mock_dt.datetime.utcnow.return_value = datetime.datetime(2015, 7, 27,
10, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack, 'datetime')
def test_time_elapsed_with_updated_time(self, mock_dt):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
        # dummy updated time 11:00:00; elapsed time should be measured from this, not created_time
self.stack.updated_time = datetime.datetime(2015, 7, 27, 11, 0, 0)
# mock utcnow set to 11:10:00 (600s offset)
mock_dt.datetime.utcnow.return_value = datetime.datetime(2015, 7, 27,
11, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_time_remaining(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertEqual(3000, self.stack.time_remaining())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_has_timed_out(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.status = self.stack.IN_PROGRESS
# test with timed out stack
mock_te.return_value = 3601
# default stack timeout is 3600 seconds; stack should time out
self.assertTrue(self.stack.has_timed_out())
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertFalse(self.stack.has_timed_out())
# has_timed_out has no meaning when stack completes/fails;
# should return false
self.stack.status = self.stack.COMPLETE
self.assertFalse(self.stack.has_timed_out())
self.stack.status = self.stack.FAILED
self.assertFalse(self.stack.has_timed_out())
def test_no_auth_token(self):
ctx = utils.dummy_context()
ctx.auth_token = None
self.stub_auth()
self.m.ReplayAll()
self.stack = stack.Stack(ctx, 'test_stack', self.tmpl)
self.assertEqual('abcd1234',
self.stack.clients.client('keystone').auth_token)
self.m.VerifyAll()
def test_state(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.assertEqual((stack.Stack.CREATE, stack.Stack.IN_PROGRESS),
self.stack.state)
self.stack.state_set(stack.Stack.CREATE, stack.Stack.COMPLETE, 'test')
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.stack.state_set(stack.Stack.DELETE, stack.Stack.COMPLETE, 'test')
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_state_deleted(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.stack.id = '1234'
# Simulate a deleted stack
self.m.StubOutWithMock(stack_object.Stack, 'get_by_id')
stack_object.Stack.get_by_id(self.stack.context,
self.stack.id).AndReturn(None)
self.m.ReplayAll()
self.assertIsNone(self.stack.state_set(stack.Stack.CREATE,
stack.Stack.COMPLETE,
'test'))
self.m.VerifyAll()
def test_state_bad(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.assertEqual((stack.Stack.CREATE, stack.Stack.IN_PROGRESS),
self.stack.state)
self.assertRaises(ValueError, self.stack.state_set,
'baad', stack.Stack.COMPLETE, 'test')
self.assertRaises(ValueError, self.stack.state_set,
stack.Stack.CREATE, 'oops', 'test')
def test_status_reason(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='quux')
self.assertEqual('quux', self.stack.status_reason)
self.stack.state_set(stack.Stack.CREATE, stack.Stack.IN_PROGRESS,
'wibble')
self.assertEqual('wibble', self.stack.status_reason)
def test_load_nonexistant_id(self):
self.assertRaises(exception.NotFound, stack.Stack.load,
None, -1)
def test_total_resources_empty(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.stack.store()
self.assertEqual(0, self.stack.total_resources(self.stack.id))
self.assertEqual(0, self.stack.total_resources())
def test_total_resources_not_found(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources('1234'))
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_generic(self, sctr):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
sctr.return_value = 1
self.assertEqual(1, self.stack.total_resources(self.stack.id))
self.assertEqual(1, self.stack.total_resources())
def test_iter_resources(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
def get_more(nested_depth=0):
yield 'X'
yield 'Y'
yield 'Z'
self.stack['A'].nested = mock.MagicMock()
self.stack['A'].nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
resource_generator = self.stack.iter_resources()
self.assertIsNot(resource_generator, list)
first_level_resources = list(resource_generator)
self.assertEqual(2, len(first_level_resources))
all_resources = list(self.stack.iter_resources(1))
self.assertEqual(5, len(all_resources))
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_iter_resources_cached(self, mock_drg):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
cache_data = {'A': {'reference_id': 'A-id'},
'B': {'reference_id': 'B-id'}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg',
cache_data=cache_data)
def get_more(nested_depth=0):
yield 'X'
yield 'Y'
yield 'Z'
self.stack['A'].nested = mock.MagicMock()
self.stack['A'].nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
resource_generator = self.stack.iter_resources()
self.assertIsNot(resource_generator, list)
first_level_resources = list(resource_generator)
self.assertEqual(2, len(first_level_resources))
all_resources = list(self.stack.iter_resources(1))
self.assertEqual(5, len(all_resources))
# A cache supplied means we should never query the database.
self.assertFalse(mock_drg.called)
def test_load_parent_resource(self):
self.stack = stack.Stack(self.ctx, 'load_parent_resource', self.tmpl,
parent_resource='parent')
self.stack.store()
stk = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
t = template.Template.load(self.ctx, stk.raw_template_id)
self.m.StubOutWithMock(template.Template, 'load')
template.Template.load(
self.ctx, stk.raw_template_id, stk.raw_template
).AndReturn(t)
self.m.StubOutWithMock(stack.Stack, '__init__')
stack.Stack.__init__(self.ctx, stk.name, t, stack_id=stk.id,
action=stk.action, status=stk.status,
status_reason=stk.status_reason,
timeout_mins=stk.timeout, resolve_data=True,
disable_rollback=stk.disable_rollback,
parent_resource='parent', owner_id=None,
stack_user_project_id=None,
created_time=mox.IgnoreArg(),
updated_time=None,
user_creds_id=stk.user_creds_id,
tenant_id='test_tenant_id',
use_stored_context=False,
username=mox.IgnoreArg(),
convergence=False,
current_traversal=None,
tags=mox.IgnoreArg(),
prev_raw_template_id=None,
current_deps=None, cache_data=None)
self.m.ReplayAll()
stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.m.VerifyAll()
def test_identifier(self):
self.stack = stack.Stack(self.ctx, 'identifier_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.tenant_id, identifier.tenant)
self.assertEqual('identifier_test', identifier.stack_name)
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
def test_get_stack_abandon_data(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {'param1': {'Type': 'String'}},
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
resources = '''{"A": {"status": "COMPLETE", "name": "A",
"resource_data": {}, "resource_id": null, "action": "INIT",
"type": "GenericResourceType", "metadata": {}},
"B": {"status": "COMPLETE", "name": "B", "resource_data": {},
"resource_id": null, "action": "INIT", "type": "GenericResourceType",
"metadata": {}}}'''
env = environment.Environment({'parameters': {'param1': 'test'}})
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl, env=env),
tenant_id='123',
stack_user_project_id='234')
self.stack.store()
info = self.stack.prepare_abandon()
self.assertEqual('CREATE', info['action'])
self.assertIn('id', info)
self.assertEqual('stack_details_test', info['name'])
self.assertEqual(json.loads(resources), info['resources'])
self.assertEqual('IN_PROGRESS', info['status'])
self.assertEqual(tpl, info['template'])
self.assertEqual('123', info['project_id'])
self.assertEqual('234', info['stack_user_project_id'])
self.assertEqual(env.params, info['environment']['parameters'])
def test_set_param_id(self):
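        """The AWS::StackId pseudo parameter should resolve to the full
        stack ARN once the stack has been stored."""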
self.stack = stack.Stack(self.ctx, 'param_arn_test', self.tmpl)
exp_prefix = ('arn:openstack:heat::test_tenant_id'
':stacks/param_arn_test/')
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + 'None')
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(exp_prefix + self.stack.id,
self.stack.parameters['AWS::StackId'])
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
self.m.VerifyAll()
def test_set_param_id_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar': {'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'abc'}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_arn_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
stack_arn = self.stack.parameters['AWS::StackId']
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar':
{'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'xyz'}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.assertEqual(
stack_arn, self.stack['AResource'].metadata_get()['Bar'])
def test_load_param_id(self):
self.stack = stack.Stack(self.ctx, 'param_load_arn_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
newstack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(identifier.arn(), newstack.parameters['AWS::StackId'])
def test_load_reads_tenant_id(self):
self.ctx.tenant_id = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.tenant_id = None
self.stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', self.stack.tenant_id)
def test_load_reads_username_from_db(self):
self.ctx.username = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.username = None
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
self.ctx.username = 'not foobar'
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
def test_load_all(self):
stack1 = stack.Stack(self.ctx, 'stack1', self.tmpl)
stack1.store()
stack2 = stack.Stack(self.ctx, 'stack2', self.tmpl)
stack2.store()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
# Add another, nested, stack
stack3 = stack.Stack(self.ctx, 'stack3', self.tmpl,
owner_id=stack2.id)
stack3.store()
# Should still be 2 without show_nested
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
# A backup stack should not be returned
stack1._backup_stack()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
def test_created_time(self):
self.stack = stack.Stack(self.ctx, 'creation_time_test', self.tmpl)
self.assertIsNone(self.stack.created_time)
self.stack.store()
self.assertIsNotNone(self.stack.created_time)
def test_updated_time(self):
self.stack = stack.Stack(self.ctx, 'updated_time_test',
self.tmpl)
self.assertIsNone(self.stack.updated_time)
self.stack.store()
self.stack.create()
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
newstack = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl))
self.stack.update(newstack)
self.assertIsNotNone(self.stack.updated_time)
def test_access_policy_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1']
}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_access_policy_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'R2': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1', 'R2'],
}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
def test_abandon_nodelete_project(self):
self.stack = stack.Stack(self.ctx, 'delete_trust', self.tmpl)
stack_id = self.stack.store()
self.stack.set_stack_user_project_id(project_id='aproject456')
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNotNone(db_s)
self.stack.delete(abandon=True)
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNone(db_s)
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_suspend_resume(self):
self.m.ReplayAll()
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.assertIsNone(self.stack.updated_time)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
stack_suspend_time = self.stack.updated_time
self.assertIsNotNone(stack_suspend_time)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
self.assertNotEqual(stack_suspend_time, self.stack.updated_time)
self.m.VerifyAll()
def test_suspend_stack_suspended_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
        # Resource.suspend should not be called again; stub it with no
        # recorded expectations so any unexpected call fails verification.
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'suspend')
self.m.ReplayAll()
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
    def test_resume_stack_resumed_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
        # Resource.resume should not be called again; stub it with no
        # recorded expectations so any unexpected call fails verification.
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'resume')
self.m.ReplayAll()
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_suspend_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_suspend')
exc = Exception('foo')
generic_rsrc.GenericResource.handle_suspend().AndRaise(exc)
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'suspend_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource SUSPEND failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
self.m.VerifyAll()
def test_resume_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_resume')
generic_rsrc.GenericResource.handle_resume().AndRaise(Exception('foo'))
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'resume_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource RESUME failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
self.m.VerifyAll()
def test_suspend_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_suspend')
exc = scheduler.Timeout('foo', 0)
generic_rsrc.GenericResource.handle_suspend().AndRaise(exc)
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'suspend_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Suspend timed out', self.stack.status_reason)
self.m.VerifyAll()
def test_resume_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_resume')
exc = scheduler.Timeout('foo', 0)
generic_rsrc.GenericResource.handle_resume().AndRaise(exc)
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'resume_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resume timed out', self.stack.status_reason)
self.m.VerifyAll()
def _get_stack_to_check(self, name):
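        """Return a stored two-resource stack with handle_check mocked out
        on every resource."""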
tpl = {"HeatTemplateFormatVersion": "2012-12-12",
"Resources": {
"A": {"Type": "GenericResourceType"},
"B": {"Type": "GenericResourceType"}}}
self.stack = stack.Stack(self.ctx, name, template.Template(tpl),
status_reason=name)
self.stack.store()
        for res in six.itervalues(self.stack.resources):
            res.handle_check = mock.Mock()
return self.stack
def test_check_supported(self):
stack1 = self._get_stack_to_check('check-supported')
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
        for res in six.itervalues(stack1.resources):
            self.assertTrue(res.handle_check.called)
self.assertNotIn('not fully supported', stack1.status_reason)
def test_check_not_supported(self):
stack1 = self._get_stack_to_check('check-not-supported')
del stack1['B'].handle_check
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
self.assertTrue(stack1['A'].handle_check.called)
self.assertIn('not fully supported', stack1.status_reason)
def test_check_fail(self):
stk = self._get_stack_to_check('check-fail')
stk['A'].handle_check.side_effect = Exception('fail-A')
stk['B'].handle_check.side_effect = Exception('fail-B')
stk.check()
self.assertEqual(stk.FAILED, stk.status)
self.assertEqual(stk.CHECK, stk.action)
self.assertTrue(stk['A'].handle_check.called)
self.assertTrue(stk['B'].handle_check.called)
self.assertIn('fail-A', stk.status_reason)
self.assertIn('fail-B', stk.status_reason)
def test_adopt_stack(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {
"AResource": {
"status": "COMPLETE",
"name": "AResource",
"resource_data": {},
"metadata": {},
"resource_id": "test-res-id",
"action": "CREATE",
"type": "GenericResourceType"
}
}
}'''
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
template.Template(tmpl),
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
res = self.stack['AResource']
self.assertEqual(u'test-res-id', res.resource_id)
self.assertEqual('AResource', res.name)
self.assertEqual('COMPLETE', res.status)
self.assertEqual('ADOPT', res.action)
self.assertEqual((self.stack.ADOPT, self.stack.COMPLETE),
self.stack.state)
self.assertEqual('AResource', self.stack.output('TestOutput'))
loaded_stack = stack.Stack.load(self.ctx, self.stack.id)
self.assertEqual({}, loaded_stack['AResource']._stored_properties_data)
def test_adopt_stack_fails(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
tmpl,
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
self.assertEqual((self.stack.ADOPT, self.stack.FAILED),
self.stack.state)
expected = ('Resource ADOPT failed: Exception: resources.foo: '
'Resource ID was not provided.')
self.assertEqual(expected, self.stack.status_reason)
def test_adopt_stack_rollback(self):
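        """An adopt with no data for resource 'foo' fails; since rollback is
        enabled, the stack should be rolled back by deleting it with
        abandon=True."""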
adopt_data = '''{
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=False,
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
with mock.patch.object(self.stack, 'delete',
side_effect=self.stack.delete) as mock_delete:
self.stack.adopt()
self.assertEqual((self.stack.ROLLBACK, self.stack.COMPLETE),
self.stack.state)
mock_delete.assert_called_once_with(action=self.stack.ROLLBACK,
abandon=True)
def test_resource_by_refid(self):
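        """resource_by_refid should find the resource only while it is in a
        usable (INIT/CREATE/RESUME/UPDATE) state, while a cached reference id
        is honoured regardless of the resource state."""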
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'resource_by_refid_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
        self.assertIsNotNone(rsrc)
for action, status in (
(rsrc.INIT, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.assertEqual(rsrc, self.stack.resource_by_refid('aaaa'))
rsrc.state_set(rsrc.DELETE, rsrc.IN_PROGRESS)
try:
self.assertIsNone(self.stack.resource_by_refid('aaaa'))
self.assertIsNone(self.stack.resource_by_refid('bbbb'))
# if there is cached data, we should ignore the state
self.stack.cache_data = {'AResource': {'reference_id': 'aaaa'}}
self.assertEqual(rsrc, self.stack.resource_by_refid('aaaa'))
finally:
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
def test_create_failure_recovery(self):
        '''
        Check that a failed CREATE can be recovered by a subsequent update:
        the initial creation of AResource raises an exception, then an update
        with the same template deletes and re-creates it successfully.
        '''
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'OverwrittenFnGetRefIdType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.m.StubOutWithMock(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_create')
self.m.StubOutWithMock(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_delete')
# create
generic_rsrc.ResourceWithFnGetRefIdType.handle_create().AndRaise(
Exception)
# update
generic_rsrc.ResourceWithFnGetRefIdType.handle_delete()
generic_rsrc.ResourceWithFnGetRefIdType.handle_create()
self.m.ReplayAll()
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
self.assertEqual('ID-AResource',
self.stack['BResource'].properties['Foo'])
self.m.VerifyAll()
def test_create_bad_attribute(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['AResource',
'Foo']}}}}}
self.stack = stack.Stack(self.ctx, 'bad_attr_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.m.StubOutWithMock(generic_rsrc.ResourceWithProps,
'_update_stored_properties')
generic_rsrc.ResourceWithProps._update_stored_properties().AndRaise(
exception.InvalidTemplateAttribute(resource='a', key='foo'))
self.m.ReplayAll()
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('Resource CREATE failed: The Referenced Attribute '
'(a foo) is incorrect.', self.stack.status_reason)
self.m.VerifyAll()
@testtools.skipIf(six.PY3, "skipped until review 193726 is merged")
def test_stack_create_timeout(self):
self.m.StubOutWithMock(scheduler.DependencyTaskGroup, '__call__')
self.m.StubOutWithMock(timeutils, 'wallclock')
stk = stack.Stack(self.ctx, 's', self.tmpl)
def dummy_task():
while True:
yield
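        # Simulate the wall clock: two reads while the create starts, then a
        # third read that is already past the stack's timeout.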
start_time = time.time()
timeutils.wallclock().AndReturn(start_time)
timeutils.wallclock().AndReturn(start_time + 1)
scheduler.DependencyTaskGroup.__call__().AndReturn(dummy_task())
timeutils.wallclock().AndReturn(start_time + stk.timeout_secs() + 1)
self.m.ReplayAll()
stk.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), stk.state)
self.assertEqual('Create timed out', stk.status_reason)
self.m.VerifyAll()
def test_stack_name_valid(self):
stk = stack.Stack(self.ctx, 's', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'stack123', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test.stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'TEST', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test-stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
def test_stack_name_invalid(self):
stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack',
'^-^', '"stack"', '1234', 'cat|dog', '$(foo)',
                       'test/stack', 'test\\stack', 'test::stack',
                       'test;stack',
'test~stack', '#test']
for stack_name in stack_names:
self.assertRaises(exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
def test_resource_state_get_att(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('Foo'))
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.FAILED),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.FAILED),
(rsrc.UPDATE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.assertEqual('AResource', self.stack.output('TestOutput'))
for action, status in (
(rsrc.DELETE, rsrc.IN_PROGRESS),
(rsrc.DELETE, rsrc.FAILED),
(rsrc.DELETE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.assertIsNone(self.stack.output('TestOutput'))
def test_resource_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType',
'DependsOn': 'AResource'},
'CResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(['BResource'],
self.stack['AResource'].required_by())
self.assertEqual([],
self.stack['CResource'].required_by())
required_by = self.stack['BResource'].required_by()
self.assertEqual(2, len(required_by))
for r in ['CResource', 'DResource']:
self.assertIn(r, required_by)
def test_resource_multi_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': ['AResource',
'BResource',
'CResource']}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
for r in ['AResource', 'BResource', 'CResource']:
self.assertEqual(['DResource'],
self.stack[r].required_by())
def test_store_saves_owner(self):
"""
The owner_id attribute of Store is saved to the database when stored.
"""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
def test_init_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.stored_context().to_dict())
def test_load_reads_tags(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
def test_store_saves_tags(self):
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl)
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertIsNone(db_tags)
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertEqual('tag1', db_tags[0].tag)
self.assertEqual('tag2', db_tags[1].tag)
def test_store_saves_creds(self):
"""
        A user_creds entry storing the username/password is created on the
        first stack store when deferred_auth_method is 'password'.
"""
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the username/password in the context
user_creds = ucreds_object.UserCreds.get_by_id(user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
# Check the stored_context is as expected
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
def test_store_saves_creds_trust(self):
"""
        A user_creds entry storing the trust is created on the first stack
        store when deferred_auth_method is 'trusts'.
"""
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.m.StubOutWithMock(keystone.KeystoneClientPlugin, '_create')
keystone.KeystoneClientPlugin._create().AndReturn(
fakes.FakeKeystoneClient(user_id='auser123'))
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the trust_id and trustor_user_id returned from
# FakeKeystoneClient.create_trust_context, username/password should
# not have been stored
user_creds = ucreds_object.UserCreds.get_by_id(user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
# Check the stored_context is as expected
expected_context = context.RequestContext(
trust_id='atrust', trustor_user_id='auser123',
request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
def test_backup_copies_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
backup = self.stack._backup_stack()
self.assertEqual(creds.id, backup.user_creds_id)
def test_stored_context_err(self):
"""
Test stored_context error path.
"""
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, six.text_type(ex))
def test_store_gets_username_from_stack(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('foobar', db_stack.username)
def test_store_backup_true(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=True)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertTrue(db_stack.backup)
def test_store_backup_false(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=False)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertFalse(db_stack.backup)
def test_init_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store1', self.tmpl,
user_creds_id=creds.id,
use_stored_context=False)
ctx_expected = self.ctx.to_dict()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_init_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store2', self.tmpl,
user_creds_id=creds.id,
use_stored_context=True)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_load_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store3', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=False)
self.assertEqual(self.ctx.to_dict(), load_stack.context.to_dict())
def test_load_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store4', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=True)
self.assertEqual(ctx_expected, load_stack.context.to_dict())
def test_load_honors_owner(self):
"""
Loading a stack from the database will set the owner_id of the
resultant stack appropriately.
"""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
def test_requires_deferred_auth(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.assertFalse(self.stack.requires_deferred_auth())
self.stack['CResource'].requires_deferred_auth = True
self.assertTrue(self.stack.requires_deferred_auth())
def test_stack_user_project_id_default(self):
self.stack = stack.Stack(self.ctx, 'user_project_none', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertIsNone(db_stack.stack_user_project_id)
def test_stack_user_project_id_constructor(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init',
self.tmpl,
stack_user_project_id='aproject1234')
self.stack.store()
self.assertEqual('aproject1234', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject1234', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_stack_user_project_id_setter(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.set_stack_user_project_id(project_id='aproject456')
self.assertEqual('aproject456', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject456', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_stack_user_project_id_create(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.create_stack_user_project_id()
self.assertEqual('aprojectid', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aprojectid', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_preview_resources_returns_list_of_resource_previews(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'preview_stack',
template.Template(tmpl))
res = mock.Mock()
res.preview.return_value = 'foo'
self.stack._resources = {'r1': res}
resources = self.stack.preview_resources()
self.assertEqual(['foo'], resources)
def test_correct_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'def'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
        # According to the _resolve_attribute method in GenericResource, the
        # output value will equal the resource name, 'AResource'.
self.assertEqual('AResource', self.stack.output('Resource_attr'))
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIsNone(self.stack.output('Resource_attr'))
self.assertEqual('The Referenced Attribute (AResource Bar) is '
'incorrect.',
self.stack.outputs['Resource_attr']['error_msg'])
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_stack_load_no_param_value_validation(self):
'''
Test stack loading with disabled parameter value validation.
'''
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
flavor:
type: string
description: A flavor.
constraints:
- custom_constraint: nova.flavor
resources:
a_resource:
type: GenericResourceType
''')
# Mock objects so the query for flavors in server.FlavorConstraint
# works for stack creation
fc = fakes.FakeClient()
self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
nova.NovaClientPlugin._create().AndReturn(fc)
fc.flavors = self.m.CreateMockAnything()
        Flavor = collections.namedtuple("Flavor", ["id", "name"])
        flavor = Flavor(id="1234", name="dummy")
fc.flavors.list().AndReturn([flavor])
self.m.ReplayAll()
test_env = environment.Environment({'flavor': 'dummy'})
self.stack = stack.Stack(self.ctx, 'stack_with_custom_constraint',
template.Template(tmpl, env=test_env))
self.stack.validate()
self.stack.store()
self.stack.create()
stack_id = self.stack.id
self.m.VerifyAll()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_id, loaded_stack.parameters['OS::stack_id'])
# verify that fc.flavors.list() has not been called, i.e. verify that
# parameter value validation did not happen and FlavorConstraint was
# not invoked
self.m.VerifyAll()
def test_snapshot_delete(self):
snapshots = []
class ResourceDeleteSnapshot(generic_rsrc.ResourceWithProps):
def handle_delete_snapshot(self, data):
snapshots.append(data)
resource._register_class(
'ResourceDeleteSnapshot', ResourceDeleteSnapshot)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'ResourceDeleteSnapshot'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
data = self.stack.prepare_abandon()
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)
self.stack.delete_snapshot(fake_snapshot)
self.assertEqual([data['resources']['AResource']], snapshots)
def test_delete_snapshot_without_data(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(None)
self.assertIsNone(self.stack.delete_snapshot(fake_snapshot))
def test_incorrect_outputs_cfn_get_attr(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertEqual('Output validation error: '
'Outputs.Resource_attr.Value: '
'The Referenced Attribute '
'(AResource Bar) is incorrect.',
six.text_type(ex))
def test_incorrect_outputs_cfn_incorrect_reference(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Outputs:
Output:
Value:
Fn::GetAtt:
- Resource
- Foo
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "Resource" '
'(in unknown) is incorrect.', six.text_type(ex))
def test_incorrect_outputs_incorrect_reference(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
outputs:
output:
value: { get_attr: [resource, foo] }
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "resource" '
'(in unknown) is incorrect.', six.text_type(ex))
def test_incorrect_outputs_cfn_missing_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Description: the attr
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Each Output must contain a Value key.',
six.text_type(ex))
def test_incorrect_outputs_cfn_empty_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value: ''
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_none_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value:
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_string_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
This is wrong data
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Outputs must contain Output. '
'Found a [%s] instead' % six.text_type,
six.text_type(ex))
def test_prop_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
FooInt: notanint
""")
self.stack = stack.Stack(self.ctx, 'stack_with_bad_property',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("'notanint' is not an integer",
six.text_type(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_validate_property_getatt(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'ResourceWithPropsType'},
'R2': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': {'Fn::GetAtt': ['R1', 'Foo']}}}}
}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_param_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
foo:
Type: Number
""")
env1 = environment.Environment({'parameters': {'foo': 'abc'}})
self.stack = stack.Stack(self.ctx, 'stack_with_bad_param',
template.Template(tmpl, env=env1))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("Parameter 'foo' is invalid: could not convert "
"string to float:", six.text_type(ex))
self.assertIn("abc", six.text_type(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_list_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
- Data is not what it seems
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Outputs must contain Output. '
'Found a [%s] instead' % type([]), six.text_type(ex))
def test_incorrect_outputs_hot_get_attr(self):
tmpl = {'heat_template_version': '2013-05-23',
'resources': {
'AResource': {'type': 'ResourceWithPropsType',
'properties': {'Foo': 'abc'}}},
'outputs': {
'resource_attr': {
'value': {
'get_attr': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertEqual('Output validation error: '
'outputs.resource_attr.value: '
'The Referenced Attribute '
'(AResource Bar) is incorrect.',
six.text_type(ex))
def test_snapshot_save_called_first(self):
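        """The save_snapshot_func callback must be invoked while the stack is
        still in the SNAPSHOT/IN_PROGRESS state."""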
        def snapshotting_called_first(stk, action, status, reason):
            self.assertEqual(stk.IN_PROGRESS, stk.status)
            self.assertEqual(stk.SNAPSHOT, stk.action)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.stack.snapshot(save_snapshot_func=snapshotting_called_first)
def test_restore(self):
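        """Restoring a pre-update snapshot should re-create resource 'B',
        which the intervening update removed."""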
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
new_tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'A': {'Type': 'GenericResourceType'}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(new_tmpl))
self.stack.update(updated_stack)
self.assertEqual(1, len(self.stack.resources))
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(2, len(self.stack.resources))
def test_restore_with_original_env(self):
tmpl = {
'heat_template_version': '2013-05-23',
'parameters': {
'foo': {'type': 'string'}
},
'resources': {
'A': {
'type': 'ResourceWithPropsType',
'properties': {'Foo': {'get_param': 'foo'}}
}
}
}
self.stack = stack.Stack(self.ctx, 'stack_restore_test',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'abc'})))
self.stack.store()
self.stack.create()
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'xyz'})))
self.stack.update(updated_stack)
self.assertEqual('xyz',
self.stack.resources['A'].properties['Foo'])
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
def test_hot_restore(self):
tpl = {'heat_template_version': '2013-05-23',
'resources':
{'A': {'type': 'ResourceWithRestoreType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl))
self.stack.store()
self.stack.create()
data = self.stack.prepare_abandon()
data['resources']['A']['resource_data']['a_string'] = 'foo'
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'foo', self.stack.resources['A'].properties['a_string'])
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getatt(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['foo', 'bar']},
}
}
}
})
cache_data = {'foo': {'reference_id': 'foo-id',
'attrs': {'bar': 'baz'}},
'bar': {'reference_id': 'bar-id'}}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
cached_property = lightweight_stack['bar'].properties['Foo']
self.assertEqual(cached_property, 'baz')
# Make sure FnGetAtt returns the cached value.
attr_value = lightweight_stack['foo'].FnGetAtt('bar')
self.assertEqual('baz', attr_value)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getrefid(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
})
cache_data = {'foo': {'reference_id': 'physical-resource-id'},
'bar': {'reference_id': 'bar-id'}}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
cached_property = lightweight_stack['bar'].properties['Foo']
self.assertEqual(cached_property, 'physical-resource-id')
# Make sure FnGetRefId returns the cached value.
resource_id = lightweight_stack['foo'].FnGetRefId()
self.assertEqual('physical-resource-id', resource_id)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
def test_encrypt_parameters_false_parameters_stored_plaintext(self):
        '''
        Test that hidden parameters are stored in plain text when
        encrypt_parameters_and_properties is disabled.
        '''
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
        # Verify that hidden parameters are stored in plain text
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
params = db_stack.raw_template.environment['parameters']
self.assertEqual('foo', params['param1'])
self.assertEqual('bar', params['param2'])
def test_parameters_stored_encrypted_decrypted_on_load(self):
        '''
        Test that hidden parameters are stored encrypted and are decrypted
        when the stack is loaded.
        '''
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
# Verify that hidden parameters are stored encrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
        # Verify that the loaded stack has decrypted parameters
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
def test_parameters_stored_decrypted_successful_load(self):
        '''
        Test that parameters stored unencrypted (encryption disabled) load
        successfully.
        '''
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
        # Verify that hidden parameters are stored unencrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('bar', db_params['param2'])
# Verify that stack loads without error
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
def test_mark_complete_create(self, mock_tmpl_delete, mock_stack_delete):
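        """mark_complete after a CREATE has no previous raw template, so
        nothing should be deleted; the stack simply becomes COMPLETE."""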
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
tmpl_stack.action = tmpl_stack.CREATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.mark_complete('some-traversal')
        self.assertIsNone(tmpl_stack.prev_raw_template_id)
self.assertFalse(mock_tmpl_delete.called)
self.assertFalse(mock_stack_delete.called)
self.assertEqual(tmpl_stack.status, tmpl_stack.COMPLETE)
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
@mock.patch.object(stack.Stack, 'store')
def test_mark_complete_update(self, mock_store, mock_tmpl_delete,
mock_stack_delete):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.id = 2
tmpl_stack.t.id = 2
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.UPDATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.mark_complete('some-traversal')
        self.assertIsNone(tmpl_stack.prev_raw_template_id)
self.assertFalse(mock_stack_delete.called)
mock_tmpl_delete.assert_called_once_with(self.ctx, 1)
self.assertEqual(tmpl_stack.status, tmpl_stack.COMPLETE)
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
@mock.patch.object(stack.Stack, 'store')
def test_mark_complete_update_delete(self, mock_store, mock_tmpl_delete,
mock_stack_delete):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'Empty Template'
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.id = 2
tmpl_stack.t.id = 2
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.DELETE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.mark_complete('some-traversal')
        self.assertIsNone(tmpl_stack.prev_raw_template_id)
mock_tmpl_delete.assert_called_once_with(self.ctx, 1)
mock_stack_delete.assert_called_once_with(self.ctx, 2)
self.assertEqual(tmpl_stack.status, tmpl_stack.COMPLETE)
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
@mock.patch.object(stack.Stack, 'store')
def test_mark_complete_stale_traversal(self, mock_store, mock_tmpl_delete,
mock_stack_delete):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.current_traversal = 'new-traversal'
tmpl_stack.mark_complete('old-traversal')
self.assertFalse(mock_tmpl_delete.called)
self.assertFalse(mock_stack_delete.called)
self.assertIsNone(tmpl_stack.prev_raw_template_id)
self.assertFalse(mock_store.called)
@mock.patch.object(function, 'validate')
def test_validate_assertion_exception_rethrow(self, func_val):
expected_msg = 'Expected Assertion Error'
with mock.patch('heat.engine.stack.dependencies',
new_callable=mock.PropertyMock) as mock_dependencies:
mock_dependency = mock.MagicMock()
mock_dependency.validate.side_effect = AssertionError(expected_msg)
mock_dependencies.Dependencies.return_value = [mock_dependency]
stc = stack.Stack(self.ctx, utils.random_name(), self.tmpl)
expected_exception = self.assertRaises(AssertionError,
stc.validate)
self.assertEqual(expected_msg, six.text_type(expected_exception))
mock_dependency.validate.assert_called_once_with()
stc = stack.Stack(self.ctx, utils.random_name(), self.tmpl)
stc.outputs = {'foo': {'Value': 'bar'}}
func_val.side_effect = AssertionError(expected_msg)
expected_exception = self.assertRaises(AssertionError, stc.validate)
self.assertEqual(expected_msg, six.text_type(expected_exception))
def test_resolve_static_data_assertion_exception_rethrow(self):
tmpl = mock.MagicMock()
expected_message = 'Expected Assertion Error'
tmpl.parse.side_effect = AssertionError(expected_message)
stc = stack.Stack(self.ctx, utils.random_name(),
tmpl, resolve_data=False)
expected_exception = self.assertRaises(AssertionError,
stc.resolve_static_data,
None)
self.assertEqual(expected_message, six.text_type(expected_exception))
def update_exception_handler(self, exc, action=stack.Stack.UPDATE,
disable_rollback=False):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
update_task = mock.MagicMock()
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=disable_rollback)
self.stack.store()
self.m.ReplayAll()
res = self.stack._update_exception_handler(
exc=exc, action=action, update_task=update_task)
if isinstance(exc, exception.ResourceFailure):
if disable_rollback:
self.assertFalse(res)
else:
self.assertTrue(res)
elif isinstance(exc, stack.ForcedCancel):
update_task.updater.cancel_all.assert_called_once_with()
if exc.with_rollback or not disable_rollback:
self.assertTrue(res)
else:
self.assertFalse(res)
self.m.VerifyAll()
def test_update_exception_handler_resource_failure_no_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
self.update_exception_handler(exc, disable_rollback=True)
def test_update_exception_handler_resource_failure_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
self.update_exception_handler(exc, disable_rollback=False)
def test_update_exception_handler_force_cancel_with_rollback(self):
exc = stack.ForcedCancel(with_rollback=True)
self.update_exception_handler(exc, disable_rollback=False)
def test_update_exception_handler_force_cancel_no_rollback(self):
exc = stack.ForcedCancel(with_rollback=False)
self.update_exception_handler(exc, disable_rollback=True)
class StackKwargsForCloningTest(common.HeatTestCase):
scenarios = [
('default', dict(keep_status=False, only_db=False,
not_included=['action', 'status', 'status_reason'])),
('only_db', dict(keep_status=False, only_db=True,
not_included=['action', 'status', 'status_reason',
'strict_validate'])),
('keep_status', dict(keep_status=True, only_db=False,
not_included=[])),
('status_db', dict(keep_status=True, only_db=True,
not_included=['strict_validate'])),
]
def test_kwargs(self):
tmpl = template.Template(copy.deepcopy(empty_template))
ctx = utils.dummy_context()
test_data = dict(action='x', status='y',
status_reason='z', timeout_mins=33,
disable_rollback=True, parent_resource='fred',
owner_id=32, stack_user_project_id=569,
user_creds_id=123, tenant_id='some-uuid',
username='jo', nested_depth=3,
strict_validate=True, convergence=False,
current_traversal=45)
db_map = {'parent_resource': 'parent_resource_name',
'tenant_id': 'tenant', 'timeout_mins': 'timeout'}
test_db_data = {}
for key in test_data:
dbkey = db_map.get(key, key)
test_db_data[dbkey] = test_data[key]
self.stack = stack.Stack(ctx, utils.random_name(), tmpl,
**test_data)
res = self.stack.get_kwargs_for_cloning(keep_status=self.keep_status,
only_db=self.only_db)
for key in self.not_included:
self.assertNotIn(key, res)
for key in test_data:
if key not in self.not_included:
dbkey = db_map.get(key, key)
if self.only_db:
self.assertEqual(test_data[key], res[dbkey])
else:
self.assertEqual(test_data[key], res[key])
if not self.only_db:
# just make sure that the kwargs are valid
# (no exception should be raised)
stack.Stack(ctx, utils.random_name(), tmpl, **res)
|
|
"""
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
import logging
import os
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_PLAY,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, MEDIA_TYPE_PLAYLIST,
SUPPORT_SELECT_SOURCE, SUPPORT_CLEAR_PLAYLIST, SUPPORT_SHUFFLE_SET,
SUPPORT_SEEK, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING,
CONF_PORT, CONF_PASSWORD, CONF_HOST, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['python-mpd2==0.5.5']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'MPD'
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_VOLUME_MUTE | \
SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_SELECT_SOURCE | \
SUPPORT_CLEAR_PLAYLIST | SUPPORT_SHUFFLE_SET | SUPPORT_SEEK | \
SUPPORT_STOP | SUPPORT_TURN_OFF | SUPPORT_TURN_ON
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
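# Illustrative configuration.yaml entry (the IP address is a placeholder;
# only `host` is required, the other keys mirror the schema above):
#
#   media_player:
#     - platform: mpd
#       host: 192.168.1.2
#       port: 6600
#       name: MPD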
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the MPD platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
device = MpdDevice(host, port, password, name)
add_devices([device], True)
class MpdDevice(MediaPlayerDevice):
"""Representation of a MPD server."""
# pylint: disable=no-member
def __init__(self, server, port, password, name):
"""Initialize the MPD device."""
import mpd
self.server = server
self.port = port
self._name = name
self.password = password
self._status = None
self._currentsong = None
self._playlists = []
self._currentplaylist = None
self._is_connected = False
self._muted = False
self._muted_volume = 0
# set up MPD client
self._client = mpd.MPDClient()
self._client.timeout = 5
self._client.idletimeout = None
def _connect(self):
"""Connect to MPD."""
import mpd
try:
self._client.connect(self.server, self.port)
if self.password is not None:
self._client.password(self.password)
except mpd.ConnectionError:
return
self._is_connected = True
def _disconnect(self):
"""Disconnect from MPD."""
import mpd
try:
self._client.disconnect()
except mpd.ConnectionError:
pass
self._is_connected = False
self._status = None
def _fetch_status(self):
"""Fetch status from MPD."""
self._status = self._client.status()
self._currentsong = self._client.currentsong()
self._update_playlists()
@property
def available(self):
"""True if MPD is available and connected."""
return self._is_connected
def update(self):
"""Get the latest data and update the state."""
import mpd
try:
if not self._is_connected:
self._connect()
self._fetch_status()
except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
# Cleanly disconnect in case connection is not in valid state
self._disconnect()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self._status is None:
return STATE_OFF
elif self._status['state'] == 'play':
return STATE_PLAYING
elif self._status['state'] == 'pause':
return STATE_PAUSED
elif self._status['state'] == 'stop':
return STATE_OFF
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._currentsong.get('file')
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
# Time does not exist for streams
return self._currentsong.get('time')
@property
def media_title(self):
"""Return the title of current playing media."""
name = self._currentsong.get('name', None)
title = self._currentsong.get('title', None)
file_name = self._currentsong.get('file', None)
if name is None and title is None:
if file_name is None:
return "None"
else:
return os.path.basename(file_name)
elif name is None:
return title
elif title is None:
return name
return '{}: {}'.format(name, title)
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self._currentsong.get('artist')
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self._currentsong.get('album')
@property
def volume_level(self):
"""Return the volume level."""
return int(self._status['volume'])/100
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_MPD
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
def select_source(self, source):
"""Choose a different available playlist and play it."""
self.play_media(MEDIA_TYPE_PLAYLIST, source)
@Throttle(PLAYLIST_UPDATE_INTERVAL)
def _update_playlists(self, **kwargs):
"""Update available MPD playlists."""
self._playlists = []
for playlist_data in self._client.listplaylists():
self._playlists.append(playlist_data['playlist'])
def set_volume_level(self, volume):
"""Set volume of media player."""
self._client.setvol(int(volume * 100))
    def volume_up(self):
        """Service to send the MPD the command for volume up."""
        current_volume = int(self._status['volume'])
        if current_volume < 100:
            self._client.setvol(min(current_volume + 5, 100))
    def volume_down(self):
        """Service to send the MPD the command for volume down."""
        current_volume = int(self._status['volume'])
        if current_volume > 0:
            self._client.setvol(max(current_volume - 5, 0))
def media_play(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(0)
def media_pause(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(1)
def media_stop(self):
"""Service to send the MPD the command for stop."""
self._client.stop()
def media_next_track(self):
"""Service to send the MPD the command for next track."""
self._client.next()
def media_previous_track(self):
"""Service to send the MPD the command for previous track."""
self._client.previous()
def mute_volume(self, mute):
"""Mute. Emulated with set_volume_level."""
if mute is True:
self._muted_volume = self.volume_level
self.set_volume_level(0)
elif mute is False:
self.set_volume_level(self._muted_volume)
self._muted = mute
def play_media(self, media_type, media_id, **kwargs):
"""Send the media player the command for playing a playlist."""
        _LOGGER.debug("Playing playlist: %s", media_id)
if media_type == MEDIA_TYPE_PLAYLIST:
if media_id in self._playlists:
self._currentplaylist = media_id
else:
self._currentplaylist = None
                _LOGGER.warning("Unknown playlist name %s", media_id)
self._client.clear()
self._client.load(media_id)
self._client.play()
else:
self._client.clear()
self._client.add(media_id)
self._client.play()
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return bool(self._status['random'])
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._client.random(int(shuffle))
def turn_off(self):
"""Service to send the MPD the command to stop playing."""
self._client.stop()
def turn_on(self):
"""Service to send the MPD the command to start playing."""
self._client.play()
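        # Passing no_throttle=True bypasses the Throttle interval so the
        # playlist list is refreshed immediately when the player is turned on.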
self._update_playlists(no_throttle=True)
def clear_playlist(self):
"""Clear players playlist."""
self._client.clear()
def media_seek(self, position):
"""Send seek command."""
self._client.seekcur(position)
|
|
import django
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
User = get_user_model()
import denorm
from denorm import denorms
import models
# Use all denorms except those on the FailingTriggers models by default
failingdenorms = denorms.alldenorms
denorms.alldenorms = [
    d for d in failingdenorms
    if d.model not in (models.FailingTriggersModelA, models.FailingTriggersModelB)
]
class TestTriggers(TestCase):
def setUp(self):
denorms.drop_triggers()
def test_triggers(self):
"""Test potentially failing denorms.
"""
# save and restore alldenorms
# test will fail if it's raising an exception
alldenorms = denorms.alldenorms
denorms.alldenorms = failingdenorms
try:
denorms.install_triggers()
finally:
denorms.alldenorms = alldenorms
class TestCached(TestCase):
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def tearDown(self):
models.CachedModelA.objects.all().delete()
models.CachedModelB.objects.all().delete()
def test_depends_related(self):
models.CachedModelB.objects.create(data='Hello')
b = models.CachedModelB.objects.all()[0]
self.assertEqual('Hello', b.data)
models.CachedModelA.objects.create(b=b)
a = models.CachedModelA.objects.all()[0]
self.assertEqual("HELLO", a.cached_data['upper'])
self.assertEqual("hello", a.cached_data['lower'])
b.data = 'World'
self.assertEqual("HELLO", a.cached_data['upper'])
self.assertEqual("hello", a.cached_data['lower'])
b.save()
a = models.CachedModelA.objects.all()[0]
self.assertEqual("WORLD", a.cached_data['upper'])
self.assertEqual("world", a.cached_data['lower'])
class TestAbstract(TestCase):
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_abstract(self):
d1 = models.RealDenormModel.objects.create(text='onion')
self.assertEqual("Ham and onion", d1.ham)
self.assertEqual("Eggs and onion", d1.eggs)
class TestSkip(TestCase):
"""
Tests for the skip feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
post = models.SkipPost(text='Here be ponies.')
post.save()
self.post = post
# TODO: Enable and check!
# Unsure on how to test this behaviour. It results in an endless loop:
# update -> trigger -> update -> trigger -> ...
#
#def test_without_skip(self):
    #    # This results in an infinite loop on SQLite.
# comment = SkipCommentWithoutSkip(post=self.post, text='Oh really?')
# comment.save()
#
# denorm.flush()
    # TODO: Check if an infinite loop happens and stop it.
def test_with_skip(self):
# This should not result in an endless loop.
comment = models.SkipCommentWithSkip(post=self.post, text='Oh really?')
comment.save()
denorm.flush()
def test_meta_skip(self):
"""Test a model with the attribute listed under denorm_always_skip."""
comment = models.SkipCommentWithAttributeSkip(post=self.post, text='Yup, and they have wings!')
comment.save()
denorm.flush()
class TestDenormalisation(TestCase):
"""
Tests for the denormalisation fields.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
self.testuser = User.objects.create_user("testuser", "testuser", "testuser")
self.testuser.is_staff = True
ctype = ContentType.objects.get_for_model(models.Member)
self.testuser.save()
def tearDown(self):
# delete all model instances
self.testuser.delete()
models.Attachment.objects.all().delete()
models.Post.objects.all().delete()
models.Forum.objects.all().delete()
def test_depends_related(self):
"""
Test the DependsOnRelated stuff.
"""
# Make a forum, check it's got no posts
f1 = models.Forum.objects.create(title="forumone")
self.assertEqual(f1.post_count, 0)
# Check its database copy too
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Add a post
p1 = models.Post.objects.create(forum=f1)
# Has the post count updated?
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
denorm.flush()
# Check its title, in p1 and the DB
self.assertEqual(p1.forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
# Update the forum title
f1.title = "forumtwo"
f1.save()
denorm.flush()
# Has the post's title changed?
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumtwo")
# Add and remove some posts and check the post count
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
p1.delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
# Delete everything, check once more.
models.Post.objects.all().delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Make an orphaned post, see what its title is.
# Doesn't work yet - no support for null FKs
#p4 = Post.objects.create(forum=None)
#self.assertEqual(p4.forum_title, None)
def test_dependency_chains(self):
# create a forum, a member and a post
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "memberone")
# change the member's name
m1.name = "membertwo"
m1.save()
denorm.flush()
# check again
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "membertwo")
def test_trees(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo", parent_forum=f1)
f3 = models.Forum.objects.create(title="forumthree", parent_forum=f2)
denorm.flush()
self.assertEqual(f1.path, '/forumone/')
self.assertEqual(f2.path, '/forumone/forumtwo/')
self.assertEqual(f3.path, '/forumone/forumtwo/forumthree/')
f1.title = 'someothertitle'
f1.save()
denorm.flush()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
f3 = models.Forum.objects.get(id=f3.id)
self.assertEqual(f1.path, '/someothertitle/')
self.assertEqual(f2.path, '/someothertitle/forumtwo/')
self.assertEqual(f3.path, '/someothertitle/forumtwo/forumthree/')
def test_reverse_fk_null(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
models.Attachment.objects.create()
denorm.flush()
def test_bulk_update(self):
"""
Test the DependsOnRelated stuff.
"""
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
p1 = models.Post.objects.create(forum=f1)
p2 = models.Post.objects.create(forum=f2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumtwo")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.update(forum=f1)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Forum.objects.update(title="oneforall")
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "oneforall")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "oneforall")
def test_no_dependency(self):
m1 = models.Member.objects.create(first_name="first", name="last")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name, "first last")
models.Member.objects.filter(id=m1.id).update(first_name="second")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name, "second last")
def test_self_backward_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1, )
p2 = models.Post.objects.create(forum=f1, response_to=p1)
p3 = models.Post.objects.create(forum=f1, response_to=p1)
p4 = models.Post.objects.create(forum=f1, response_to=p2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).response_count, 3)
self.assertEqual(models.Post.objects.get(id=p2.id).response_count, 1)
self.assertEqual(models.Post.objects.get(id=p3.id).response_count, 0)
self.assertEqual(models.Post.objects.get(id=p4.id).response_count, 0)
def test_m2m_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1, title="post1")
m1 = models.Member.objects.create(first_name="first1", name="last1")
denorm.flush()
m1.bookmarks.add(p1)
denorm.flush()
self.assertTrue('post1' in models.Member.objects.get(id=m1.id).bookmark_titles)
p1.title = "othertitle"
p1.save()
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
p2 = models.Post.objects.create(forum=f1, title="thirdtitle")
m1.bookmarks.add(p2)
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
m1.bookmarks.remove(p1)
denorm.flush()
self.assertTrue('othertitle' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
def test_middleware(self):
# FIXME, this test currently does not work with a transactional
# database, so it's skipped for now.
return
# FIXME, set and de-set middleware values
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(first_name="first1", name="last1")
p1 = models.Post.objects.create(forum=f1, author=m1)
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last1")
self.client.login(username="testuser", password="testuser")
self.client.post("/admin/denorm_testapp/member/%s/" % (m1.pk), {
'name': 'last2',
'first_name': 'first2',
})
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last2")
def test_countfield(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
p2 = models.Post.objects.create(forum=f2)
p3 = models.Post.objects.create(forum=f2)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 2)
p2.forum = f1
p2.save()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.filter(pk=p3.pk).update(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
def test_foreignkey(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
m1 = models.Member.objects.create(first_name="first1", name="last1")
p1 = models.Post.objects.create(forum=f1, author=m1)
a1 = models.Attachment.objects.create(post=p1)
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f1)
a2 = models.Attachment.objects.create()
self.assertEqual(models.Attachment.objects.get(id=a2.id).forum, None)
# Change forum
p1.forum = f2
p1.save()
denorm.flush()
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f2)
# test denorm function returning object, not PK
models.Attachment.forum_as_object = True
a3 = models.Attachment.objects.create(post=p1)
models.Attachment.forum_as_object = False
def test_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
m2 = models.Member.objects.create(name="membertwo")
p2 = models.Post.objects.create(forum=f1, author=m2)
denorm.flush()
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
self.assertTrue(m2 in models.Forum.objects.get(id=f1.id).authors.all())
p2.delete()
denorm.flush()
self.assertTrue(m2 not in models.Forum.objects.get(id=f1.id).authors.all())
def test_denorm_rebuild(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
denorm.denorms.rebuildall()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.post_count, 1)
self.assertEqual(f1.authors.all()[0], m1)
def test_denorm_update(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
a1 = models.Attachment.objects.create(post=p1)
denorm.denorms.rebuildall()
f2 = models.Forum.objects.create(title="forumtwo")
p1.forum = f2
p1.save()
# BUG https://github.com/initcrash/django-denorm/issues/24
        # We have to update the Attachment.forum field first to trigger this bug. Simply doing rebuildall()
        # would trigger a1.save() at some earlier point during the update; by the time we get to updating the
        # forum field its value is already correct, so no update is done and the broken code is bypassed.
denorm.denorms.rebuildall(model_name='Attachment', field_name='forum')
def test_denorm_subclass(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
self.assertEqual(f1.tags_string, '')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagone', content_object=f1)
models.Tag.objects.create(name='tagtwo', content_object=f1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagthree', content_object=p1)
t4 = models.Tag.objects.create(name='tagfour', content_object=p1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagfour, tagthree')
t4.content_object = f1
t4.save()
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagfour, tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagthree')
def test_cache_key_field_backward(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.create(forum=f1)
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1, f1.cachekey)
self.assertEqual(ck2, f2.cachekey)
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.forum = f2
p1.save()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1, f1.cachekey)
self.assertNotEqual(ck2, f2.cachekey)
def test_cache_key_field_forward(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(title='initial_title', forum=f1)
a1 = models.Attachment.objects.create(post=p1)
a2 = models.Attachment.objects.create(post=p1)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey, a2.cachekey)
ck1 = a1.cachekey
ck2 = a2.cachekey
p1.title = 'new_title'
p1.save()
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(ck1, a1.cachekey)
self.assertNotEqual(ck2, a2.cachekey)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey, a2.cachekey)
def test_cache_key_field_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(title='initial_title', forum=f1)
m1 = models.Member.objects.get(id=m1.id)
ck1 = m1.cachekey
m1.bookmarks.add(p1)
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1, m1.cachekey)
ck1 = m1.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.title = 'new_title'
p1.save()
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1, m1.cachekey)
from django.db import connection
if connection.vendor != 'sqlite':
class TestFilterCount(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterCountModel.objects.create()
self.assertEqual(master.active_item_count, 0)
master.items.create(active=True, text='text')
master.items.create(active=True, text='')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created active item')
master.items.create(active=False)
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created inactive item')
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=False).delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=True, text='true')[0].delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
item = master.items.filter(active=True, text='text')[0]
item.active = False
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=False, text='text')[0]
item.active = True
item.text = ''
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=True, text='')[0]
item.text = '123'
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
class TestFilterCountM2M(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterCountModel.objects.create()
self.assertEqual(master.active_item_count, 0)
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created active item')
master.items.create(active=False, text='true')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created inactive item')
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=False).delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=True)[0].delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
item = master.items.filter(active=True)[0]
item.active = False
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=False)[0]
item.active = True
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
class TestFilterSum(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterSumModel.objects.create()
self.assertEqual(master.active_item_sum, 0)
master.counts.create(age=18, active_item_count=8)
master = models.FilterSumModel.objects.get(id=master.id)
self.assertEqual(master.active_item_sum, 8)
master.counts.create(age=16, active_item_count=10)
master = models.FilterSumModel.objects.get(id=master.id)
self.assertEqual(master.active_item_sum, 8, 'created inactive item')
master.counts.create(age=19, active_item_count=9)
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 17)
master.counts.filter(age__lt=18).delete()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 17)
master.counts.filter(age=19)[0].delete()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 8)
item = master.counts.filter(age=18)[0]
item.age = 15
item.save()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 0)
item = master.counts.filter(age=15)[0]
item.age = 18
item.save()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 8)
|
|
import os
import shutil
from kivy.event import EventDispatcher
from kivy.properties import StringProperty, ObjectProperty, \
ConfigParser, ConfigParserProperty, partial, Clock
from kivy.uix.popup import Popup
import designer
from designer.confirmation_dialog import ConfirmationDialog
class Builder(EventDispatcher):
'''Builder interface
'''
def __init__(self, profiler):
self.profiler = profiler
self.designer = self.profiler.designer
self.designer_settings = self.designer.designer_settings
self.proj_watcher = self.designer.project_watcher
self.proj_settings = self.designer.proj_settings
self.ui_creator = self.designer.ui_creator
self.run_command = self.ui_creator.kivy_console.run_command
        self.can_run = False  # whether the environment is OK to run the project
self.last_command = None # last method executed.
if not self.profiler.pro_mode:
self.profiler.pro_mode = 'Debug'
class Buildozer(Builder):
'''Class to handle Buildozer builder
'''
def __init__(self, profiler):
super(Buildozer, self).__init__(profiler)
self.buildozer_path = ''
def _initialize(self):
'''Try to get the buildozer path and check required variables
If there is something wrong shows an alert.
'''
# first, check if buildozer is set
self.buildozer_path = self.designer_settings.config_parser.getdefault(
'buildozer',
'buildozer_path',
''
)
if self.buildozer_path == '':
self.profiler.dispatch('on_error', 'Buildozer Path not specified.'
'\n\nUpdate it on \'File\' -> \'Settings\'')
self.can_run = False
return
envs = self.proj_settings.config_parser.getdefault(
'env variables',
'env',
''
)
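        # env is stored as space-separated KEY=value pairs
        # (e.g. "DEBUG=1 MY_VAR=foo"); each is added to the console environment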
for env in envs.split(' '):
self.ui_creator.kivy_console.environment[
env[:env.find('=')]] = env[env.find('=') + 1:]
# check if buildozer.spec exists
if not os.path.isfile(os.path.join(self.profiler.project_path,
'buildozer.spec')):
self._confirm_dlg = ConfirmationDialog(
message='buildozer.spec not found.\n'
'Do you want to create it now?')
self._popup = Popup(title='Buildozer', content=self._confirm_dlg,
size_hint=(None, None), size=('200pt', '150pt'),
auto_dismiss=False)
self._confirm_dlg.bind(on_ok=self._perform_create_spec,
on_cancel=self._popup.dismiss)
self._popup.open()
self.can_run = False
return
# TODO check if buildozer source.dir and main file exists
self.can_run = True
def _perform_create_spec(self, *args):
'''Creates the default buildozer.spec file
'''
_dir = os.path.dirname(designer.__file__)
_dir = os.path.split(_dir)[0]
templates_dir = os.path.join(_dir, 'new_templates')
shutil.copy(os.path.join(templates_dir, 'default.spec'),
os.path.join(self.profiler.project_path, 'buildozer.spec'))
self.designer.designer_content.update_tree_view(
self.designer.project_loader)
self._popup.dismiss()
self.last_command()
def _create_command(self, extra):
'''Generate the buildozer command
'''
self.proj_watcher.stop()
self._initialize()
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
cd = 'cd ' + self.profiler.project_path
args = []
args.append(self.buildozer_path)
if self.profiler.pro_verbose:
args.append('--verbose')
args.append(self.profiler.pro_target.lower()) # android or ios
args += extra
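        # Illustrative result (verbose Android build in Debug mode, with a
        # placeholder project path):
        #   ['cd /path/to/project', 'buildozer --verbose android debug']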
return [cd, " ".join(args)]
def build(self, *args):
'''Build the Buildozer project.
Will read the necessary information from the profile and build the app
'''
build_mode = self.profiler.pro_mode.lower()
cmd = self._create_command([build_mode])
if not self.can_run:
self.last_command = self.build
return
self.run_command(cmd)
self.profiler.dispatch('on_message', 'Building project...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_build)
def rebuild(self, *args):
'''Update project dependencies, and build it again
'''
self.clean()
self.profiler.bind(on_clean=self._rebuild)
def _rebuild(self, *args):
'''Perform the project rebuild
'''
cmd = self._create_command(['update'])
if not self.can_run:
self.last_command = self.rebuild
return
self.run_command(cmd)
self.profiler.dispatch('on_message',
'Updating project dependencies...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.build)
def run(self, *args, **kwargs):
'''Run the build command and then run the application on the device
'''
self.build()
if not self.can_run:
self.last_command = self.run
return
if not self.profiler.pro_install:
self.profiler.bind(on_build=self.deploy)
self.profiler.bind(on_deploy=self._run)
def _run(self, *args):
'''Perform the buildozer run
'''
cmds = ['run']
if self.profiler.pro_debug and self.profiler.pro_target == 'Android':
cmds.append('logcat')
cmd = self._create_command(cmds)
if not self.can_run:
return
self.run_command(cmd)
self.profiler.dispatch('on_message', 'Running on device...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_run)
def deploy(self, *args):
'''Perform the buildozer deploy
'''
cmd = self._create_command(['deploy'])
if not self.can_run:
return
self.run_command(cmd)
self.profiler.dispatch('on_message', 'Installing on device...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_deploy)
def clean(self, *args):
'''Clean the project directory
'''
cmd = self._create_command(['clean'])
if not self.can_run:
self.last_command = self.clean
return
self.run_command(cmd)
self.profiler.dispatch('on_message', 'Cleaning project...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_clean)
def on_clean(self, *args):
'''on_clean event handler
'''
self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_clean)
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', 'Project clean', 5)
self.profiler.dispatch('on_clean')
def on_build(self, *args):
'''on_build event handler
'''
self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_build)
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', 'Build complete', 5)
self.profiler.dispatch('on_build')
if self.profiler.pro_install:
self.deploy()
def on_deploy(self, *args):
        '''on_deploy event handler
'''
self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_deploy)
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', 'Installed on device', 5)
self.profiler.dispatch('on_deploy')
def on_stop(self, *args):
'''on_stop event handler
'''
self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_stop)
self.profiler.dispatch('on_stop')
def on_run(self, *args):
'''on_run event handler
'''
self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_run)
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', '', 1)
self.profiler.dispatch('on_run')
self.designer.ids.actn_btn_stop_proj.disabled = True
class Hanga(Builder):
'''Class to handle Hanga builder
'''
def __init__(self, profiler):
super(Hanga, self).__init__(profiler)
class Desktop(Builder):
'''Class to handle Desktop builder
'''
def __init__(self, profiler):
super(Desktop, self).__init__(profiler)
self.python_path = ''
self.args = ''
# TODO check if buildozer source.dir and main file is set, if so
# use this file
def _get_python(self):
'''Initialize python variables
If there is something wrong shows an alert
'''
self.python_path = self.designer_settings.config_parser.getdefault(
'global',
'python_shell_path',
''
)
if self.python_path == '':
self.profiler.dispatch('on_error', 'Python Shell Path not '
'specified.'
'\n\nUpdate it on \'File\' -> \'Settings\'')
self.can_run = False
return
self.args = self.proj_settings.config_parser.getdefault(
'arguments',
'arg',
''
)
envs = self.proj_settings.config_parser.getdefault(
'env variables',
'env',
''
)
for env in envs.split(' '):
self.ui_creator.kivy_console.environment[
env[:env.find('=')]] = env[env.find('=') + 1:]
self.can_run = True
def _perform_kill_run(self, *args):
'''Stop the running project/command and then run the project
'''
self._popup.dismiss()
self.stop()
Clock.schedule_once(self.run)
def run(self, *args, **kwargs):
'''Run the project using Python
'''
mod = kwargs.get('mod', '')
data = kwargs.get('data', [])
self._get_python()
if not self.can_run:
return
py_main = os.path.join(self.profiler.project_path, 'main.py')
if not os.path.isfile(py_main):
self.profiler.dispatch('on_error', 'Cannot find main.py')
return
cmd = ''
if mod == '':
cmd = '%s %s %s' % (self.python_path, py_main, self.args)
elif mod == 'screen':
cmd = '%s %s -m screen:%s %s' % (self.python_path, py_main,
data, self.args)
else:
cmd = '%s %s -m %s %s' % (self.python_path, py_main,
mod, self.args)
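        # Illustrative commands (paths and module names are placeholders):
        #   "python /project/main.py"               when mod == ''
        #   "python /project/main.py -m screen:foo" when mod == 'screen'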
status = self.run_command(cmd)
        # run_command returns False when another command is still running
if status is False:
self._confirm_dlg = ConfirmationDialog(
message="There is another command running.\n"
"Do you want to stop it to run your project?")
self._popup = Popup(title='Kivy Designer',
content=self._confirm_dlg,
size_hint=(None, None),
size=('300pt', '150pt'),
auto_dismiss=False)
self._confirm_dlg.bind(on_ok=self._perform_kill_run,
on_cancel=self._popup.dismiss)
self._popup.open()
return
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
self.profiler.dispatch('on_message', 'Running main.py...')
self.profiler.dispatch('on_run')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_stop)
def stop(self, *args):
'''If there is a process running, it'll be stopped
'''
self.ui_creator.kivy_console.kill_process()
self.profiler.dispatch('on_stop')
self.profiler.dispatch('on_message', '', 1)
def clean(self, *args):
'''Remove .pyc files and __pycache__ folder
'''
        # stop the project watcher while Python is removing files,
        # so our own deletions are not picked up as project changes
self.proj_watcher.stop()
for _file in os.listdir(self.profiler.project_path):
ext = _file.split('.')[-1]
if ext == 'pyc':
os.remove(os.path.join(self.profiler.project_path, _file))
__pycache__ = os.path.join(self.profiler.project_path, '__pycache__')
if os.path.exists(__pycache__):
shutil.rmtree(__pycache__)
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', 'Project cleaned', 5)
def build(self, *args):
'''Compile all .py to .pyc
'''
self._get_python()
if not self.can_run:
return
self.proj_watcher.stop()
proj_path = self.profiler.project_path
self.run_command(
'%s -m compileall -l %s' % (self.python_path, proj_path))
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
self.profiler.dispatch('on_message', 'Building project...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_build)
def rebuild(self, *args):
'''Clean and build the project
'''
self.clean()
self.build()
def on_build(self, *args):
'''on_build event handler
'''
self.proj_watcher.start_watching(self.profiler.project_path)
self.profiler.dispatch('on_message', 'Build complete', 5)
self.profiler.dispatch('on_build')
def on_stop(self, *args):
'''on_stop event handler
'''
self.profiler.dispatch('on_message', '', 1)
self.profiler.dispatch('on_stop')
class Profiler(EventDispatcher):
profile_path = StringProperty('')
''' Profile settings path
:class:`~kivy.properties.StringProperty` and defaults to ''.
'''
project_path = StringProperty('')
''' Project path
:class:`~kivy.properties.StringProperty` and defaults to ''.
'''
designer = ObjectProperty(None)
'''Reference of :class:`~designer.app.Designer`.
:data:`designer` is a :class:`~kivy.properties.ObjectProperty`
'''
profile_config = ObjectProperty(None)
'''Reference to a ConfigParser with the profile settings
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
pro_name = ConfigParserProperty('', 'profile', 'name', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile name
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_builder = ConfigParserProperty('', 'profile', 'builder', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile builder
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_target = ConfigParserProperty('', 'profile', 'target', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile target
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_mode = ConfigParserProperty('', 'profile', 'mode', 'profiler')
'''Reference to a ConfigParser with the profile settings
    Get the profile mode
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_install = ConfigParserProperty('', 'profile', 'install', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile install_on_device
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_debug = ConfigParserProperty('', 'profile', 'debug', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile debug mode
:class:`~kivy.properties.ConfigParserProperty`
'''
pro_verbose = ConfigParserProperty('', 'profile', 'verbose', 'profiler')
'''Reference to a ConfigParser with the profile settings
Get the profile verbose mode
:class:`~kivy.properties.ConfigParserProperty`
'''
builder = ObjectProperty(None)
'''Reference to the builder class. Can be Hanga, Buildozer or Desktop
:class:`~kivy.properties.ObjectProperty`
'''
__events__ = ('on_run', 'on_stop', 'on_error', 'on_message', 'on_build',
'on_deploy', 'on_clean')
def __init__(self, **kwargs):
super(Profiler, self).__init__(**kwargs)
self.profile_config = ConfigParser(name='profiler')
def run(self, *args, **kwargs):
'''Run project
'''
self.builder.run(*args, **kwargs)
def stop(self):
'''Stop project
'''
self.builder.stop()
def clean(self):
'''Clean project
'''
self.builder.clean()
def build(self):
'''Build project
'''
self.builder.build()
def rebuild(self):
'''Rebuild project
'''
self.builder.rebuild()
def load_profile(self, prof_path, proj_path):
'''Read the settings
'''
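        # Illustrative profile file (the section and keys follow the
        # ConfigParserProperty definitions above; values are examples only):
        #   [profile]
        #   name = Desktop Debug
        #   builder = Desktop
        #   target = Desktop
        #   mode = Debug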
self.profile_path = prof_path
self.project_path = proj_path
self.profile_config.read(self.profile_path)
if self.pro_target == 'Desktop':
self.builder = Desktop(self)
else:
if self.pro_builder == 'Buildozer':
self.builder = Buildozer(self)
elif self.pro_builder == 'Hanga':
# TODO implement hanga
self.builder = Desktop(self)
self.dispatch('on_error', 'Hanga Builder not yet implemented!\n'
'Using Desktop')
else:
self.builder = Desktop(self)
def on_error(self, *args):
'''on_error event handler
'''
pass
def on_message(self, *args):
'''on_message event handler
'''
pass
def on_run(self, *args):
'''on_run event handler
'''
pass
def on_stop(self, *args):
'''on_stop event handler
'''
pass
def on_build(self, *args):
'''on_build event handler
'''
pass
def on_deploy(self, *args):
'''on_deploy event handler
'''
pass
def on_clean(self, *args):
'''on_clean event handler
'''
pass
|
|
"""
Collection of postgres-specific extensions, currently including:
* Support for hstore, a key/value type storage
"""
import uuid
from peewee import *
from peewee import coerce_to_unicode
from peewee import Expression
from peewee import logger
from peewee import Node
from peewee import Param
from peewee import returns_clone
from peewee import QueryCompiler
from peewee import SelectQuery
from peewee import UUIDField # For backwards-compatibility.
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
from psycopg2 import extensions
from psycopg2.extensions import adapt
from psycopg2.extensions import AsIs
from psycopg2.extensions import register_adapter
from psycopg2.extras import register_hstore
try:
from psycopg2.extras import Json
except ImportError:
Json = None
class _LookupNode(Node):
def __init__(self, node, parts):
self.node = node
self.parts = parts
super(_LookupNode, self).__init__()
def clone_base(self):
return type(self)(self.node, list(self.parts))
class _JsonLookupBase(_LookupNode):
def __init__(self, node, parts, as_json=False):
super(_JsonLookupBase, self).__init__(node, parts)
self._as_json = as_json
def clone_base(self):
return type(self)(self.node, list(self.parts), self._as_json)
@returns_clone
def as_json(self, as_json=True):
self._as_json = as_json
class JsonLookup(_JsonLookupBase):
_node_type = 'json_lookup'
def __getitem__(self, value):
return JsonLookup(self.node, self.parts + [value], self._as_json)
class JsonPath(_JsonLookupBase):
_node_type = 'json_path'
class ObjectSlice(_LookupNode):
_node_type = 'object_slice'
@classmethod
def create(cls, node, value):
if isinstance(value, slice):
parts = [value.start or 0, value.stop or 0]
elif isinstance(value, int):
parts = [value]
else:
parts = map(int, value.split(':'))
return cls(node, parts)
def __getitem__(self, value):
return ObjectSlice.create(self, value)
class _Array(Node):
def __init__(self, field, items):
self.field = field
self.items = items
super(_Array, self).__init__()
def adapt_array(arr):
conn = arr.field.model_class._meta.database.get_conn()
items = adapt(arr.items)
items.prepare(conn)
return AsIs('%s::%s%s' % (
items,
arr.field.get_column_type(),
        '[]' * arr.field.dimensions))
register_adapter(_Array, adapt_array)
class IndexedField(Field):
default_index_type = 'GiST'
def __init__(self, index_type=None, *args, **kwargs):
kwargs.setdefault('index', True) # By default, use an index.
super(IndexedField, self).__init__(*args, **kwargs)
self.index_type = index_type or self.default_index_type
class ArrayField(IndexedField):
default_index_type = 'GIN'
def __init__(self, field_class=IntegerField, dimensions=1, *args,
**kwargs):
self.__field = field_class(*args, **kwargs)
self.dimensions = dimensions
self.db_field = self.__field.get_db_field()
super(ArrayField, self).__init__(*args, **kwargs)
def __ddl_column__(self, column_type):
sql = self.__field.__ddl_column__(column_type)
sql.value += '[]' * self.dimensions
return sql
def __getitem__(self, value):
return ObjectSlice.create(self, value)
def contains(self, *items):
return Expression(self, OP_ACONTAINS, _Array(self, list(items)))
def contains_any(self, *items):
return Expression(self, OP_ACONTAINS_ANY, _Array(self, list(items)))
class DateTimeTZField(DateTimeField):
db_field = 'datetime_tz'
class HStoreField(IndexedField):
db_field = 'hash'
def __getitem__(self, key):
return Expression(self, OP_HKEY, Param(key))
def keys(self):
return fn.akeys(self)
def values(self):
return fn.avals(self)
def items(self):
return fn.hstore_to_matrix(self)
def slice(self, *args):
return fn.slice(self, Param(list(args)))
def exists(self, key):
return fn.exist(self, key)
def defined(self, key):
return fn.defined(self, key)
def update(self, **data):
return Expression(self, OP_HUPDATE, data)
def delete(self, *keys):
return fn.delete(self, Param(list(keys)))
def contains(self, value):
if isinstance(value, dict):
return Expression(self, OP_HCONTAINS_DICT, Param(value))
elif isinstance(value, (list, tuple)):
return Expression(self, OP_HCONTAINS_KEYS, Param(value))
return Expression(self, OP_HCONTAINS_KEY, value)
    def contains_any(self, *keys):
        return Expression(self, OP_HCONTAINS_ANY_KEY, Param(list(keys)))
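    # Illustrative queries (model and field names are hypothetical, assuming
    # a model with `data = HStoreField()`):
    #   House.select().where(House.data.contains({'color': 'red'}))
    #   House.update(data=House.data.update(style='ranch')).execute()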
class JSONField(Field):
db_field = 'json'
def __init__(self, dumps=None, *args, **kwargs):
if Json is None:
raise Exception('Your version of psycopg2 does not support JSON.')
self.dumps = dumps
super(JSONField, self).__init__(*args, **kwargs)
def db_value(self, value):
return Json(value, dumps=self.dumps)
def __getitem__(self, value):
return JsonLookup(self, [value])
def path(self, *keys):
return JsonPath(self, keys)
class TSVectorField(IndexedField):
db_field = 'tsvector'
default_index_type = 'GIN'
def coerce(self, value):
return coerce_to_unicode(value or '')
def match(self, query):
return Expression(self, OP_TS_MATCH, fn.to_tsquery(query))
def Match(field, query):
return Expression(fn.to_tsvector(field), OP_TS_MATCH, fn.to_tsquery(query))
OP_HKEY = 'key'
OP_HUPDATE = 'H@>'
OP_HCONTAINS_DICT = 'H?&'
OP_HCONTAINS_KEYS = 'H?'
OP_HCONTAINS_KEY = 'H?|'
OP_HCONTAINS_ANY_KEY = 'H||'
OP_ACONTAINS = 'A@>'
OP_ACONTAINS_ANY = 'A||'
OP_TS_MATCH = 'T@@'
class PostgresqlExtCompiler(QueryCompiler):
def _create_index(self, model_class, fields, unique=False):
clause = super(PostgresqlExtCompiler, self)._create_index(
model_class, fields, unique)
# Allow fields to specify a type of index. HStore and Array fields
# may want to use GiST indexes, for example.
index_type = None
for field in fields:
if isinstance(field, IndexedField):
index_type = field.index_type
if index_type:
clause.nodes.insert(-1, SQL('USING %s' % index_type))
return clause
def _parse_object_slice(self, node, alias_map, conv):
sql, params = self.parse_node(node.node, alias_map, conv)
# Postgresql uses 1-based indexes.
parts = [str(part + 1) for part in node.parts]
sql = '%s[%s]' % (sql, ':'.join(parts))
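        # e.g. an ArrayField sliced with [0:2] becomes "column"[1:3] in SQL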
return sql, params
def _parse_json_lookup(self, node, alias_map, conv):
sql, params = self.parse_node(node.node, alias_map, conv)
lookups = [sql]
for part in node.parts:
part_sql, part_params = self.parse_node(
part, alias_map, conv)
lookups.append(part_sql)
params.extend(part_params)
if node._as_json:
sql = '->'.join(lookups)
else:
# The last lookup should be converted to text.
head, tail = lookups[:-1], lookups[-1]
sql = '->>'.join(('->'.join(head), tail))
return sql, params
def _parse_json_path(self, node, alias_map, conv):
sql, params = self.parse_node(node.node, alias_map, conv)
if node._as_json:
operand = '#>'
else:
operand = '#>>'
params.append('{%s}' % ','.join(map(str, node.parts)))
return operand.join((sql, self.interpolation)), params
def get_parse_map(self):
parse_map = super(PostgresqlExtCompiler, self).get_parse_map()
parse_map.update(
object_slice=self._parse_object_slice,
json_lookup=self._parse_json_lookup,
json_path=self._parse_json_path)
return parse_map
class PostgresqlExtDatabase(PostgresqlDatabase):
compiler_class = PostgresqlExtCompiler
def __init__(self, *args, **kwargs):
self.server_side_cursors = kwargs.pop('server_side_cursors', False)
self.register_hstore = kwargs.pop('register_hstore', True)
super(PostgresqlExtDatabase, self).__init__(*args, **kwargs)
def get_cursor(self, name=None):
return self.get_conn().cursor(name=name)
def execute_sql(self, sql, params=None, require_commit=True,
named_cursor=False):
logger.debug((sql, params))
use_named_cursor = (named_cursor or (
self.server_side_cursors and
sql.lower().startswith('select')))
with self.exception_wrapper():
if use_named_cursor:
cursor = self.get_cursor(name=str(uuid.uuid1()))
require_commit = False
else:
cursor = self.get_cursor()
try:
res = cursor.execute(sql, params or ())
except Exception as exc:
logger.exception('%s %s', sql, params)
if self.sql_error_handler(exc, sql, params, require_commit):
raise
else:
if require_commit and self.get_autocommit():
self.commit()
return cursor
def _connect(self, database, **kwargs):
conn = super(PostgresqlExtDatabase, self)._connect(database, **kwargs)
if self.register_hstore:
register_hstore(conn, globally=True)
return conn
class ServerSideSelectQuery(SelectQuery):
@classmethod
def clone_from_query(cls, query):
clone = ServerSideSelectQuery(query.model_class)
return query._clone_attributes(clone)
def _execute(self):
sql, params = self.sql()
return self.database.execute_sql(
sql, params, require_commit=False, named_cursor=True)
PostgresqlExtDatabase.register_fields({
'datetime_tz': 'timestamp with time zone',
'hash': 'hstore',
'json': 'json',
'tsvector': 'tsvector',
})
PostgresqlExtDatabase.register_ops({
OP_HCONTAINS_DICT: '@>',
OP_HCONTAINS_KEYS: '?&',
OP_HCONTAINS_KEY: '?',
OP_HCONTAINS_ANY_KEY: '?|',
OP_HKEY: '->',
OP_HUPDATE: '||',
OP_ACONTAINS: '@>',
OP_ACONTAINS_ANY: '&&',
OP_TS_MATCH: '@@',
})
def ServerSide(select_query):
# Flag query for execution using server-side cursors.
clone = ServerSideSelectQuery.clone_from_query(select_query)
with clone.database.transaction():
# Execute the query.
query_result = clone.execute()
# Patch QueryResultWrapper onto original query.
select_query._qr = query_result
# Expose generator for iterating over query.
for obj in query_result.iterator():
yield obj
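# Hedged usage sketch (not part of the original module): stream a large query through
# a server-side cursor so rows are fetched lazily instead of being materialized at
# once. `model_class` is assumed to be a peewee Model bound to a PostgresqlExtDatabase.
def _example_stream_rows(model_class):
    # ServerSide() wraps the select in a transaction, runs it on a named cursor and
    # yields rows one at a time.
    for row in ServerSide(model_class.select()):
        yield row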
|
|
from __future__ import division
import logging
log = logging.getLogger(__name__)
class PwmError(Exception):
pass
class BiDict(object):
def __init__(self, dic):
self.norm = dic
self.inv = dict([(v, k) for k, v in dic.items()])
base_divisor = {
3: 512,
5: 256,
6: 256,
9: 512,
10: 512,
11: 512,
}
_div1 = BiDict({
# 0: None,
1: 1,
2: 8,
3: 64,
4: 256,
5: 1024,
# 6: None,
# 7: None,
})
_div2 = BiDict({
# 0: None,
1: 1,
2: 8,
3: 32,
4: 64,
5: 128,
6: 256,
7: 1024,
})
divisor_mapping = {
3: _div2,
5: _div1,
6: _div1,
9: _div1,
10: _div1,
11: _div2,
}
TIMERS_A = ['NOT_ON_TIMER',
'TCCR0A',
'TCCR0A',
'TCCR1A',
'TCCR1A',
None, # TODO: atmega8
'TCCR2A',
'TCCR2A',
]
TIMERS_B = ['NOT_ON_TIMER',
'TCCR0B',
'TCCR0B',
'TCCR1B',
'TCCR1B',
'TCCR2',
'TCCR2B',
'TCCR2B',
]
timer_mask = 7 # 0b111
# TODO: pwm_mode read/write
# TODO: read mappings
class ArduinoPwmPin(object):
'''Object-oriented representation of the pin PWM functionality
'''
DEFAULT_DIVISOR = 64
def __init__(self, nr, define, register, core, api):
self.nr = nr
self.register = register
self.F_CPU = define.get('F_CPU')
self.api = api
self.core = core
def reset(self):
        '''Reset the pin to its PWM default state (restore the default divisor).
        '''
if not self.available:
return
self.write_divisor(self.DEFAULT_DIVISOR)
@property
def available(self):
"""PWM is available for this pin?"""
timer_id = self._timer_id()
return timer_id > 0 and timer_id < len(TIMERS_B)
# return self.nr in timer_register
def _check(self):
if not self.available:
raise PwmError('pwm not available for pin: %s' % self.nr)
def write_value(self, value):
"""same as analogWrite."""
self._check()
# assert self.mcu.pins.read_mode(self.nr) == OUTPUT
self.api.analogWrite(self.nr, value)
@property
def divisors_available(self):
"""list of available divisors."""
try:
return list(divisor_mapping[self.nr].norm.values())
except KeyError:
return []
def read_divisor(self):
"""read current divisor."""
if self.register is None:
return
self._check()
d = divisor_mapping[self.nr]
return d.norm[self.read_timer_mode()]
def write_divisor(self, value):
"""write current divisor."""
self._check()
d = divisor_mapping[self.nr]
self.write_timer_mode(d.inv[value])
divisor = property(read_divisor, write_divisor)
def _timer_id(self):
return self.core.digitalPinToTimer(self.nr)
def _timer_register_name(self, variant='B'):
self._check()
i = self._timer_id()
return dict(A=TIMERS_A, B=TIMERS_B)[variant][i]
@property
def timer_register_name_a(self):
return self._timer_register_name(variant='A')
@property
def timer_register_name_b(self):
return self._timer_register_name(variant='B')
def read_timer_mode(self):
if self.register is None:
return
reg_name = self.timer_register_name_b
return self.register.get(reg_name).read_value() & timer_mask
def write_timer_mode(self, value):
assert value <= 7
reg_name = self.timer_register_name_b
reg = self.register.get(reg_name)
old = reg.value & ~timer_mask
reg.value = (old | value)
timer_mode = property(read_timer_mode, write_timer_mode)
@property
def base_divisor(self):
self._check()
return base_divisor[self.nr]
def calculate_frequency(self, divisor):
if self.register is None:
return
return 1.0 * self.F_CPU / self.base_divisor / divisor
@property
def frequencies_available(self):
"""list of available frequencies."""
ls = sorted([self.calculate_frequency(x)
for x in self.divisors_available])
return ls
def read_frequency(self):
self._check()
wgm = self.read_wgm()
if wgm == 14:
# high freq mode
return self.F_CPU / self.register.get('ICR1').value
else:
return self.calculate_frequency(self.read_divisor())
def write_frequency(self, value):
self._check()
d = divisor_mapping[self.nr]
for x in self.divisors_available:
f = self.calculate_frequency(x)
if abs(f - value) <= 1:
self.write_timer_mode(d.inv[x])
return
frequency = property(read_frequency, write_frequency)
def read_wgm(self):
"""read waveform generation mode."""
if self.register is None:
return
self._check()
rega = self.timer_register_name_a
regb = self.timer_register_name_b
if regb == 'TCCR1B':
maskb = 24 # 0b00011000
else:
maskb = 8 # 0b00001000
maska = 3 # 0b00000011
a = self.register.get(rega).value & maska
b = self.register.get(regb).value & maskb
return a + (b >> 1)
wgm = property(read_wgm, None)
def _check_high_freq(self):
# TODO: read config
if self.nr not in [9, 10]:
raise PwmError('high freq pwm not available for pin: %s' % self.nr)
def set_high_freq_around_pwm(self, top, fill):
        '''Configure timer1 fast PWM with ICR1 as TOP:
        frequency ~ F_CPU / top, duty cycle ~ fill / top.
        '''
# TODO:
d = top
self._check_high_freq()
assert d >= 2
self.write_divisor(1)
self.write_value(128)
TCCR1A = self.register.get('TCCR1A')
TCCR1B = self.register.get('TCCR1B')
TCNT1 = self.register.get('TCNT1')
ICR1 = self.register.get('ICR1')
OCR1A = self.register.get('OCR1A')
OCR1B = self.register.get('OCR1B')
TCCR1A.value = 2 + \
(240 & TCCR1A.value) # 0b00000010 + (0b11110000 & reg.TCCR1A)
TCCR1B.value = 25 # 0b00011001
TCNT1.value = 0
ICR1.value = d
OCR1A.value = fill
OCR1B.value = fill
def set_high_freq_around(self, freq):
"""set high frequency mode, and try to set frequency as close as
possible.
available frequencies: F_CPU/N (N=2..65535)
Example: F_CPU=16000000 (default) -> frequencies=244Hz-8MHz
TODO: test it on more AVRs, read config from firmware
"""
top = int(self.F_CPU / freq + 0.5)
assert 1 < top < (1 << 16)
self.set_high_freq_around_pwm(top, int(top / 2))
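# Hedged worked example (assumes the 16 MHz default F_CPU): pin 9 has base_divisor
# 512, so the default divisor of 64 gives 16e6 / 512 / 64 ~= 488 Hz, while the fastest
# ordinary divisor (1) gives 16e6 / 512 ~= 31.25 kHz.
def _example_pwm_frequencies(pin):
    """Return (default_hz, fastest_hz) for an ArduinoPwmPin in normal PWM mode."""
    return (pin.calculate_frequency(pin.DEFAULT_DIVISOR),
            pin.calculate_frequency(min(pin.divisors_available)))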
|
|
from flask import Flask
from flask import jsonify, Response, render_template, abort, make_response
from flask import request
from cookbook_manager import CookbookManager
from barista import Barista
import json
import httplib
from threading import Thread
from utils import channel
from utils import json_config
# ===============================================================================
#
# Global Variables
#
# ===============================================================================
app = Flask(__name__)
cmgr = CookbookManager()
config = json_config.parse_json('config.json')
barista = Barista()
@app.route('/')
def index():
return render_template('index.jinja2')
# ===============================================================================
#
# Cookbook Manager API
#
# ===============================================================================
@app.route('/cookbooks', methods=['GET'])
def list_cookbooks():
"""
{
"cookbook1":{
"name": "cookbook1",
"description": "cookbook description"
}
}
"""
cookbooks = cmgr.list()
resp = {}
for cookbook in cookbooks:
resp[cookbook.name] = {
'name': cookbook.name,
'description': cookbook.description
}
return jsonify(resp)
@app.route('/cookbooks/<string:name>', methods=['GET'])
def read_cookbook(name):
"""
{
"name": "cookbook1",
"date": "",
"description": ""
}
"""
cookbook = cmgr.get(name)
data = {
'name': name,
'description': cookbook.description
}
return jsonify(data)
@app.route('/cookbooks/<string:name>', methods=['PUT'])
def update_cookbook(name):
"""
{
"name": "new_cookbook_name"
}
"""
if request.data:
params = json.loads(request.data)
else:
params = {}
if 'name' in params:
new_name = params['name']
cmgr.rename(name, new_name)
else:
        # If there is no 'name' in the params, create a new cookbook
cmgr.update(name)
resp = make_response()
resp.status_code = httplib.CREATED
return resp
@app.route('/cookbooks/<string:name>/content', methods=['GET'])
def read_cookbook_content(name):
cookbook = cmgr.get(name)
return cookbook.content
@app.route('/cookbooks/<string:name>/content', methods=['PUT'])
def update_cookbook_content(name):
new_content = request.data
cmgr.update(name, new_content)
resp = make_response()
resp.status_code = httplib.OK
return resp
@app.route('/cookbooks/<string:name>', methods=['DELETE'])
def delete_cookbook(name):
cmgr.delete(name)
resp = make_response()
resp.status_code = httplib.NO_CONTENT
return resp
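# Hedged client sketch (not part of the server): renaming a cookbook and replacing its
# content through the endpoints above. Host, port and names are assumptions.
def _example_edit_cookbook():
    import requests  # assumed to be available on the client side
    requests.put('http://localhost:5000/cookbooks/espresso',
                 json={'name': 'double_espresso'})
    return requests.put('http://localhost:5000/cookbooks/double_espresso/content',
                        data='{"steps": []}')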
# ===============================================================================
#
# Barista API
#
# ===============================================================================
@app.route('/barista', methods=['GET'])
def get_barista_status():
"""
{
"State": "Brewing",
"Now steps": "Step title",
"Now steps index": 3,
"Now process": "Process title",
"Now process index": 1,
"Now cookbook name": "Test",
"Temperature": 90,
"Is water full": true,
"Total commands": 1000,
"Progress": 834
}
"""
status = {
'State': barista.state,
'Now steps': barista.now_step,
'Now steps index': barista.now_step_index,
'Now process': barista.now_process,
'Now process index': barista.now_process_index,
'Now cookbook name': barista.now_cookbook_name,
'Temperature': barista.heater_temperature,
'Is water full': barista.is_water_full,
'Total commands': barista.total_cmd,
'Progress': barista.printer_progress
}
return jsonify(status)
@app.route('/barista', methods=['PUT'])
def brew():
"""
{
"Command": "Start|Pause|Resume|Stop",
"Name": "Cookbook"
}
"""
params = json.loads(request.data)
cmd = params['Command']
name = params['Name']
app.logger.debug('{} {} ...'.format(cmd, name))
if cmd == 'Start':
barista.brew(name)
elif cmd == 'Stop':
barista.stop_brew()
resp = make_response()
resp.status_code = httplib.OK
return resp
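# Hedged client sketch (not part of the server): starting and stopping a brew with the
# JSON body documented above. The cookbook name is an assumption.
def _example_start_and_stop_brew():
    import requests  # assumed to be available on the client side
    requests.put('http://localhost:5000/barista',
                 json={'Command': 'Start', 'Name': 'Test'})
    return requests.put('http://localhost:5000/barista',
                        json={'Command': 'Stop', 'Name': 'Test'})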
# ===============================================================================
#
# Printer API
#
# ===============================================================================
@app.route('/printer', methods=['GET'])
def get_printer_status():
"""
{
"state": "Printing",
"progress": 198,
"total": 3000
}
"""
status = {
'state': barista.printer_state_string,
'progress': barista.printer_progress,
'total': barista.total_cmd
}
return jsonify(status)
@app.route('/printer/home', methods=['PUT'])
def go_home():
barista.go_home()
resp = make_response()
resp.status_code = httplib.CREATED
return resp
@app.route('/printer/jog', methods=['PUT'])
def control_printer():
"""
{
"X": 0,
"Y": 0,
"Z": 0,
"E1": 100,
"E2": 100,
"F": 100
}
"""
params = json.loads(request.data)
barista.printer_jog(params.get('X', None),
params.get('Y', None),
params.get('Z', None),
params.get('E1', None),
params.get('E2', None),
params.get('F', None))
resp = make_response()
resp.status_code = httplib.CREATED
return resp
# ===============================================================================
#
# Heater API
#
# ===============================================================================
@app.route('/heater', methods=['GET'])
def get_heater_status():
"""
{
"duty_cycle": 100 ,
"set_point": 80,
"temperature": 24.32,
"update_time": 147998232.38,
"is_water_full": true
}
"""
status = {
'duty_cycle': barista.heater_duty_cycle,
'set_point': barista.heater_set_point,
'temperature': barista.heater_temperature,
'update_time': barista.heater_update_time,
'is_water_full': barista.is_water_full
}
return jsonify(status)
@app.route('/heater', methods=['PUT'])
def control_heater():
"""
{
"Set Point": 80
}
"""
if request.data:
params = json.loads(request.data)
else:
params = {}
set_point = params.get('Set Point', None)
if set_point is not None:
barista.set_temperature(float(set_point))
resp = make_response()
resp.status_code = httplib.OK
return resp
# ===============================================================================
#
# Refill API
#
# ===============================================================================
@app.route('/refill', methods=['GET'])
def get_refill_status():
return jsonify({'full': barista.is_water_full})
@app.route('/refill', methods=['PUT'])
def control_refill():
"""
{
"Command": "Start|Stop"
}
"""
    # Not implemented yet; return an empty OK response so the view stays valid.
    resp = make_response()
    resp.status_code = httplib.OK
    return resp
if __name__ == '__main__':
    # Silence the werkzeug logger (log errors only)
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.ERROR)
app.run(host='0.0.0.0')
|
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import unittest
import webtest
import cloudstorage as gcs
import main
import gcs_async
import gcs_async_test
write = gcs_async_test.write
app = webtest.TestApp(main.app)
JUNIT_SUITE = '''<testsuite tests="8" failures="0" time="1000.24">
<testcase name="First" classname="Example e2e suite" time="0">
<skipped/>
</testcase>
<testcase name="Second" classname="Example e2e suite" time="36.49"/>
<testcase name="Third" classname="Example e2e suite" time="96.49">
<failure>/go/src/k8s.io/kubernetes/test.go:123
Error Goes Here</failure>
</testcase>
</testsuite>'''
def init_build(build_dir, started=True, finished=True,
finished_has_version=False):
"""Create faked files for a build."""
start_json = {'timestamp': 1406535800}
finish_json = {'result': 'SUCCESS', 'timestamp': 1406536800}
(finish_json if finished_has_version else start_json)['version'] = 'v1+56'
if started:
write(build_dir + 'started.json', start_json)
if finished:
write(build_dir + 'finished.json', finish_json)
write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
class TestBase(unittest.TestCase):
def init_stubs(self):
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
# redirect GCS calls to the local proxy
gcs_async.GCS_API_URL = gcs.common.local_api_url()
class AppTest(TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
def test_index(self):
"""Test that the index works."""
response = app.get('/')
self.assertIn('kubernetes-e2e-gce', response)
def test_nodelog_missing_files(self):
"""Test that a missing all files gives a 404."""
build_dir = self.BUILD_DIR + 'nodelog?pod=abc'
response = app.get('/build' + build_dir, status=404)
self.assertIn('Unable to find', response)
def test_nodelog_kubelet(self):
"""Test for a kubelet file with junit file.
- missing the default kube-apiserver"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_apiserver(self):
"""Test for default apiserver file
- no kubelet file to find objrefdict
- no file with junit file"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_no_junit(self):
"""Test for when no junit in same folder
- multiple folders"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_nodelog_no_junit_apiserver(self):
"""Test for when no junit in same folder
- multiple folders
- no kube-apiserver.log"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/docker.log',
'Containers\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kubelet.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_no_failed_pod(self):
"""Test that filtering page still loads when no failed pod name is given"""
nodelog_url = self.BUILD_DIR + 'nodelog?junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"} failed)\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_parse_by_timestamp(self):
"""Test parse_by_timestamp and get_woven_logs
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
kubeapi_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, kubeapi_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(kubeapi_filepath,
'0101 01:01:01.000 kubeapi\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 kubeapi\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
print response
self.assertIn(expected, response)
def test_timestamp_no_apiserver(self):
"""Test parse_by_timestamp and get_woven_logs without an apiserver file
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats
- no kube-apiserver.log"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
proxy_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-proxy.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, proxy_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(proxy_filepath,
'0101 01:01:01.000 proxy\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 proxy\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
self.assertIn(expected, response)
|
|
#!/usr/bin/env python
"""
====================================
rsfMRI: ANTS, FS, FSL, NiPy, aCompCor
====================================
A preprocessing workflow for Siemens resting state data.
This workflow makes use of:
- ANTS
- FreeSurfer
- FSL
- NiPy
- CompCor
For example::
python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii
-s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')"
or
python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii
-t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011
--subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23
7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o .
This workflow takes resting timeseries and a Siemens dicom file corresponding
to it and preprocesses it to produce timeseries coordinates or grayordinates.
For non-Siemens dicoms, provide slice times instead, since the dicom extractor is not guaranteed to work.
This workflow also requires 2mm subcortical atlas and templates that are
available from:
http://mindboggle.info/data.html
specifically the 2mm versions of:
- `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm_v2.nii.gz>`_
- `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_
"""
import os
from nipype.interfaces.base import CommandLine
CommandLine.set_default_terminal_output('allatonce')
from dcmstack.extract import default_extractor
from dicom import read_file
from nipype.interfaces import (fsl, Function, ants, freesurfer,nipy)
from nipype.interfaces.c3 import C3dAffineTool
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
from nipype import Workflow, Node, MapNode
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.algorithms.misc import TSNR
from nipype.interfaces.utility import Rename, Merge, IdentityInterface
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.io import DataSink, FreeSurferSource
import nipype.interfaces.freesurfer as fs
import numpy as np
import scipy as sp
import nibabel as nb
imports = ['import os',
'import nibabel as nb',
'import numpy as np',
'import scipy as sp',
'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename',
'from scipy.special import legendre'
]
def get_info(dicom_files):
"""Given a Siemens dicom file return metadata
Returns
-------
RepetitionTime
Slice Acquisition Times
Spacing between slices
"""
meta = default_extractor(read_file(filename_to_list(dicom_files)[0],
stop_before_pixels=True,
force=True))
return (meta['RepetitionTime']/1000., meta['CsaImage.MosaicRefAcqTimes'],
meta['SpacingBetweenSlices'])
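# Hedged usage sketch: TR comes back in seconds, but the mosaic slice-acquisition
# times are in milliseconds, so they are converted before being handed to the
# realigner. The dicom path below is hypothetical.
def _example_slice_times(dicom_path='/data/rest_example.dcm'):
    tr, slice_times_ms, spacing = get_info(dicom_path)
    return tr, [t / 1000. for t in slice_times_ms], spacing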
def median(in_files):
"""Computes an average of the median of each realigned timeseries
Parameters
----------
in_files: one or more realigned Nifti 4D time series
Returns
-------
out_file: a 3D Nifti file
"""
average = None
for idx, filename in enumerate(filename_to_list(in_files)):
img = nb.load(filename)
data = np.median(img.get_data(), axis=3)
if average is None:
average = data
else:
average = average + data
median_img = nb.Nifti1Image(average/float(idx + 1),
img.get_affine(), img.get_header())
filename = os.path.join(os.getcwd(), 'median.nii.gz')
median_img.to_filename(filename)
return filename
def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
"""Bandpass filter the input files
Parameters
----------
files: list of 4d nifti files
lowpass_freq: cutoff frequency for the low pass filter (in Hz)
highpass_freq: cutoff frequency for the high pass filter (in Hz)
fs: sampling rate (in Hz)
"""
out_files = []
for filename in filename_to_list(files):
path, name, ext = split_filename(filename)
out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
img = nb.load(filename)
timepoints = img.shape[-1]
F = np.zeros((timepoints))
        lowidx = int(timepoints / 2) + 1
        if lowpass_freq > 0:
            lowidx = int(np.round(float(lowpass_freq) / fs * timepoints))
        highidx = 0
        if highpass_freq > 0:
            highidx = int(np.round(float(highpass_freq) / fs * timepoints))
F[highidx:lowidx] = 1
F = ((F + F[::-1]) > 0).astype(int)
data = img.get_data()
if np.all(F == 1):
filtered_data = data
else:
filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))
img_out = nb.Nifti1Image(filtered_data, img.get_affine(),
img.get_header())
img_out.to_filename(out_file)
out_files.append(out_file)
return list_to_filename(out_files)
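# Hedged worked example of the index arithmetic above (all values are assumptions):
# with TR = 2 s (fs = 0.5 Hz) and 240 volumes, a 0.01-0.1 Hz band keeps FFT bins
# round(0.01 / 0.5 * 240) = 5 through round(0.1 / 0.5 * 240) = 48, mirrored about DC.
def _example_band_indices(tr=2.0, timepoints=240, lowpass=0.1, highpass=0.01):
    fs = 1.0 / tr
    highidx = int(np.round(highpass / fs * timepoints))
    lowidx = int(np.round(lowpass / fs * timepoints))
    return highidx, lowidx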
def motion_regressors(motion_params, order=0, derivatives=1):
"""Compute motion regressors upto given order and derivative
motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic)
"""
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
out_params = params
for d in range(1, derivatives + 1):
cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0),
params))
out_params = np.hstack((out_params, np.diff(cparams, d, axis=0)))
out_params2 = out_params
for i in range(2, order + 1):
out_params2 = np.hstack((out_params2, np.power(out_params, i)))
filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx)
np.savetxt(filename, out_params2, fmt="%.10f")
out_files.append(filename)
return out_files
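# Hedged worked example (illustration only): with the usual 6 realignment parameters,
# derivatives=1 appends temporal differences for 12 columns per run, and order=2 would
# additionally append the squared terms for 24 columns.
def _example_motion_regressor_width(n_params=6, order=2, derivatives=1):
    base = n_params * (derivatives + 1)
    return base * max(order, 1)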
def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None):
"""Builds a regressor set comprisong motion parameters, composite norm and
outliers
The outliers are added as a single time point column for each outlier
Parameters
----------
motion_params: a text file containing motion parameters and its derivatives
comp_norm: a text file containing the composite norm
outliers: a text file containing 0-based outlier indices
detrend_poly: number of polynomials to add to detrend
Returns
-------
components_file: a text file containing all the regressors
"""
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx])
out_params = np.hstack((params, norm_val[:, None]))
try:
outlier_val = np.genfromtxt(filename_to_list(outliers)[idx])
except IOError:
outlier_val = np.empty((0))
for index in np.atleast_1d(outlier_val):
outlier_vector = np.zeros((out_params.shape[0], 1))
outlier_vector[index] = 1
out_params = np.hstack((out_params, outlier_vector))
if detrend_poly:
timepoints = out_params.shape[0]
X = np.empty((timepoints, 0))
for i in range(detrend_poly):
X = np.hstack((X, legendre(
i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
out_params = np.hstack((out_params, X))
filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx)
np.savetxt(filename, out_params, fmt="%.10f")
out_files.append(filename)
return out_files
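# Hedged usage sketch: combining motion regressors, the composite norm and outlier
# spike columns into one design file per run. Outside the workflow, the names in the
# `imports` list above (e.g. scipy's legendre) must be imported manually; the file
# names here are illustrative.
def _example_build_filter(motion_txt, norm_txt, outliers_txt):
    return build_filter1([motion_txt], [norm_txt], [outliers_txt], detrend_poly=2)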
def extract_noise_components(realigned_file, mask_file, num_components=5,
extra_regressors=None):
"""Derive components most reflective of physiological noise
Parameters
----------
realigned_file: a 4D Nifti file containing realigned volumes
mask_file: a 3D Nifti file containing white matter + ventricular masks
num_components: number of components to use for noise decomposition
extra_regressors: additional regressors to add
Returns
-------
components_file: a text file containing the noise components
"""
imgseries = nb.load(realigned_file)
components = None
for filename in filename_to_list(mask_file):
mask = nb.load(filename).get_data()
if len(np.nonzero(mask > 0)[0]) == 0:
continue
voxel_timecourses = imgseries.get_data()[mask > 0]
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
# remove mean and normalize by variance
# voxel_timecourses.shape == [nvoxels, time]
X = voxel_timecourses.T
stdX = np.std(X, axis=0)
stdX[stdX == 0] = 1.
stdX[np.isnan(stdX)] = 1.
stdX[np.isinf(stdX)] = 1.
X = (X - np.mean(X, axis=0))/stdX
u, _, _ = sp.linalg.svd(X, full_matrices=False)
if components is None:
components = u[:, :num_components]
else:
components = np.hstack((components, u[:, :num_components]))
if extra_regressors:
regressors = np.genfromtxt(extra_regressors)
components = np.hstack((components, regressors))
components_file = os.path.join(os.getcwd(), 'noise_components.txt')
np.savetxt(components_file, components, fmt="%.10f")
return components_file
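# Hedged usage sketch (aCompCor-style): noise components from a WM+CSF mask, stacked
# with the motion/outlier design produced by build_filter1. File names are
# illustrative.
def _example_compcor(realigned_nii, wm_csf_mask_nii, motion_filter_txt):
    return extract_noise_components(realigned_nii, [wm_csf_mask_nii],
                                    num_components=5,
                                    extra_regressors=motion_filter_txt)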
def rename(in_files, suffix=None):
from nipype.utils.filemanip import (filename_to_list, split_filename,
list_to_filename)
out_files = []
for idx, filename in enumerate(filename_to_list(in_files)):
_, name, ext = split_filename(filename)
if suffix is None:
out_files.append(name + ('_%03d' % idx) + ext)
else:
out_files.append(name + suffix + ext)
return list_to_filename(out_files)
def get_aparc_aseg(files):
"""Return the aparc+aseg.mgz file"""
for name in files:
if 'aparc+aseg.mgz' in name:
return name
raise ValueError('aparc+aseg.mgz not found')
def extract_subrois(timeseries_file, label_file, indices):
"""Extract voxel time courses for each subcortical roi index
Parameters
----------
timeseries_file: a 4D Nifti file
label_file: a 3D file containing rois in the same space/size of the 4D file
indices: a list of indices for ROIs to extract.
Returns
-------
out_file: a text file containing time courses for each voxel of each roi
The first four columns are: freesurfer index, i, j, k positions in the
label file
"""
img = nb.load(timeseries_file)
data = img.get_data()
roiimg = nb.load(label_file)
rois = roiimg.get_data()
prefix = split_filename(timeseries_file)[1]
out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
with open(out_ts_file, 'wt') as fp:
for fsindex in indices:
ijk = np.nonzero(rois == fsindex)
ts = data[ijk]
for i0, row in enumerate(ts):
fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
ijk[1][i0], ijk[2][i0]) +
','.join(['%.10f' % val for val in row]) + '\n')
return out_ts_file
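# Hedged usage sketch: voxelwise time courses for the left/right thalamus (FreeSurfer
# labels 10 and 49) from a 4D file already resampled to the 2mm atlas space. Outside
# the workflow, split_filename must be imported from nipype.utils.filemanip (the
# workflow injects it via the `imports` list).
def _example_extract_thalamus(ts_file, label_file):
    return extract_subrois(ts_file, label_file, indices=[10, 49])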
def combine_hemi(left, right):
"""Combine left and right hemisphere time series into a single text file
"""
lh_data = nb.load(left).get_data()
rh_data = nb.load(right).get_data()
indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
2000000 + np.arange(0, rh_data.shape[0])[:, None]))
all_data = np.hstack((indices, np.vstack((lh_data.squeeze(),
rh_data.squeeze()))))
filename = left.split('.')[1] + '_combined.txt'
np.savetxt(filename, all_data,
fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
return os.path.abspath(filename)
def create_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
name : name of workflow (default: 'registration')
    Inputs::
        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.subject_id : FreeSurfer subject id
        inputspec.subjects_dir : FreeSurfer SUBJECTS_DIR
        inputspec.target_image : registration target
    Outputs::
        outputspec.func2anat_transform : FSL-style affine transform from BBRegister
        outputspec.out_reg_file : BBRegister registration file
        outputspec.anat2target_transform : ANTS composite transform to the target
        outputspec.transforms : list of transforms applied to reach target space
        outputspec.transformed_mean : mean image in target space
        outputspec.segmentation_files : binarized tissue classes in functional space
        outputspec.anat2target : anatomical image warped to the target
        outputspec.aparc : aparc+aseg resampled into functional space
        outputspec.min_cost_file : BBRegister registration cost
"""
register = Workflow(name=name)
inputnode = Node(interface=IdentityInterface(fields=['source_files',
'mean_image',
'subject_id',
'subjects_dir',
'target_image']),
name='inputspec')
outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
'out_reg_file',
'anat2target_transform',
'transforms',
'transformed_mean',
'segmentation_files',
'anat2target',
'aparc',
'min_cost_file'
]),
name='outputspec')
# Get the subject's freesurfer source directory
fssource = Node(FreeSurferSource(),
name='fssource')
fssource.run_without_submitting = True
register.connect(inputnode, 'subject_id', fssource, 'subject_id')
register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')
convert = Node(freesurfer.MRIConvert(out_type='nii'),
name="convert")
register.connect(fssource, 'T1', convert, 'in_file')
# Coregister the median to the surface
bbregister = Node(freesurfer.BBRegister(),
name='bbregister')
bbregister.inputs.init = 'fsl'
bbregister.inputs.contrast_type = 't2'
bbregister.inputs.out_fsl_file = True
bbregister.inputs.epi_mask = True
register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
register.connect(inputnode, 'mean_image', bbregister, 'source_file')
register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
"""
Estimate the tissue classes from the anatomical image. But use aparc+aseg's brain mask
"""
binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), name="binarize_aparc")
register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, "in_file")
stripper = Node(fsl.ApplyMask(), name ='stripper')
register.connect(binarize, "binary_file", stripper, "mask_file")
register.connect(convert, 'out_file', stripper, 'in_file')
fast = Node(fsl.FAST(), name='fast')
register.connect(stripper, 'out_file', fast, 'in_files')
"""
Binarize the segmentation
"""
binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
iterfield=['in_file'],
name='binarize')
register.connect(fast, 'partial_volume_files', binarize, 'in_file')
"""
Apply inverse transform to take segmentations to functional space
"""
applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
iterfield=['target_file'],
name='inverse_transform')
register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
register.connect(binarize, 'out_file', applyxfm, 'target_file')
register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
"""
Apply inverse transform to aparc file
"""
aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
name='aparc_inverse_transform')
register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
aparcxfm, 'target_file')
register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
"""
Convert the BBRegister transformation to ANTS ITK format
"""
convert2itk = Node(C3dAffineTool(), name='convert2itk')
convert2itk.inputs.fsl2ras = True
convert2itk.inputs.itk_transform = True
register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
register.connect(stripper, 'out_file', convert2itk, 'reference_file')
"""
Compute registration between the subject's structural and MNI template
This is currently set to perform a very quick registration. However, the
registration can be made significantly more accurate for cortical
structures by increasing the number of iterations
All parameters are set using the example from:
#https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
"""
reg = Node(ants.Registration(), name='antsRegister')
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.initial_moving_transform_com = True
reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
reg.inputs.convergence_window_size = [20] * 2 + [5]
reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
reg.inputs.sigma_units = ['vox'] * 3
reg.inputs.shrink_factors = [[3, 2, 1]]*2 + [[4, 2, 1]]
reg.inputs.use_estimate_learning_rate_once = [True] * 3
reg.inputs.use_histogram_matching = [False] * 2 + [True]
reg.inputs.winsorize_lower_quantile = 0.005
reg.inputs.winsorize_upper_quantile = 0.995
reg.inputs.args = '--float'
reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
reg.inputs.num_threads = 4
reg.plugin_args = {'sbatch_args': '-c%d' % 4}
register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
"""
Concatenate the affine and ants transforms into a list
"""
pickfirst = lambda x: x[0]
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
register.connect(convert2itk, 'itk_transform', merge, 'in2')
register.connect(reg, ('composite_transform', pickfirst), merge, 'in1')
"""
Transform the mean image. First to anatomical and then to target
"""
warpmean = Node(ants.ApplyTransforms(), name='warpmean')
warpmean.inputs.input_image_type = 3
warpmean.inputs.interpolation = 'Linear'
warpmean.inputs.invert_transform_flags = [False, False]
warpmean.inputs.terminal_output = 'file'
warpmean.inputs.args = '--float'
warpmean.inputs.num_threads = 4
warpmean.plugin_args = {'sbatch_args': '-c%d' % 4}
    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
register.connect(inputnode, 'mean_image', warpmean, 'input_image')
register.connect(merge, 'out', warpmean, 'transforms')
"""
Assign all the output files
"""
register.connect(reg, 'warped_image', outputnode, 'anat2target')
register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
register.connect(applyxfm, 'transformed_file',
outputnode, 'segmentation_files')
register.connect(aparcxfm, 'transformed_file',
outputnode, 'aparc')
register.connect(bbregister, 'out_fsl_file',
outputnode, 'func2anat_transform')
register.connect(bbregister, 'out_reg_file',
outputnode, 'out_reg_file')
register.connect(reg, 'composite_transform',
outputnode, 'anat2target_transform')
register.connect(merge, 'out', outputnode, 'transforms')
register.connect(bbregister, 'min_cost_file',
outputnode, 'min_cost_file')
return register
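# Hedged usage sketch: the registration sub-workflow can be wired up standalone by
# filling in its inputspec fields (the paths and ids below are assumptions).
def _example_registration_setup():
    reg_wf = create_reg_workflow(name='registration')
    reg_wf.inputs.inputspec.subject_id = 'sub-01'
    reg_wf.inputs.inputspec.subjects_dir = '/data/fsdata'
    reg_wf.inputs.inputspec.mean_image = 'median.nii.gz'
    reg_wf.inputs.inputspec.target_image = (
        'OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
    return reg_wf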
"""
Creates the main preprocessing workflow
"""
def create_workflow(files,
target_file,
subject_id,
TR,
slice_times,
norm_threshold=1,
num_components=5,
vol_fwhm=None,
surf_fwhm=None,
lowpass_freq=-1,
highpass_freq=-1,
subjects_dir=None,
sink_directory=os.getcwd(),
target_subject=['fsaverage3', 'fsaverage4'],
name='resting'):
wf = Workflow(name=name)
# Rename files in case they are named identically
name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
iterfield=['in_file', 'run'],
name='rename')
name_unique.inputs.keep_ext = True
name_unique.inputs.run = range(1, len(files) + 1)
name_unique.inputs.in_file = files
realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign")
realign.inputs.slice_times = slice_times
realign.inputs.tr = TR
realign.inputs.slice_info = 2
realign.plugin_args = {'sbatch_args': '-c%d' % 4}
    # Compute TSNR on realigned data, regressing out polynomials up to order 2
tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
wf.connect(realign,"out_file", tsnr, "in_file")
# Compute the median image across runs
calc_median = Node(Function(input_names=['in_files'],
output_names=['median_file'],
function=median,
imports=imports),
name='median')
wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
"""Segment and Register
"""
registration = create_reg_workflow(name='registration')
wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
registration.inputs.inputspec.subject_id = subject_id
registration.inputs.inputspec.subjects_dir = subjects_dir
registration.inputs.inputspec.target_image = target_file
"""Quantify TSNR in each freesurfer ROI
"""
get_roi_tsnr = MapNode(fs.SegStats(default_color_table=True),
iterfield=['in_file'], name='get_aparc_tsnr')
get_roi_tsnr.inputs.avgwf_txt_file = True
wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, 'segmentation_file')
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = Node(interface=ArtifactDetect(), name="art")
art.inputs.use_differences = [True, True]
art.inputs.use_norm = True
art.inputs.norm_threshold = norm_threshold
art.inputs.zintensity_threshold = 9
art.inputs.mask_type = 'spm_global'
art.inputs.parameter_source = 'NiPy'
"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
voxel sizes.
"""
wf.connect([(name_unique, realign, [('out_file', 'in_file')]),
(realign, art, [('out_file', 'realigned_files')]),
(realign, art, [('par_file', 'realignment_parameters')]),
])
def selectindex(files, idx):
import numpy as np
from nipype.utils.filemanip import filename_to_list, list_to_filename
return list_to_filename(np.array(filename_to_list(files))[idx].tolist())
mask = Node(fsl.BET(), name='getmask')
mask.inputs.mask = True
wf.connect(calc_median, 'median_file', mask, 'in_file')
# get segmentation in normalized functional space
def merge_files(in1, in2):
out_files = filename_to_list(in1)
out_files.extend(filename_to_list(in2))
return out_files
# filter some noise
# Compute motion regressors
motreg = Node(Function(input_names=['motion_params', 'order',
'derivatives'],
output_names=['out_files'],
function=motion_regressors,
imports=imports),
name='getmotionregress')
wf.connect(realign, 'par_file', motreg, 'motion_params')
# Create a filter to remove motion and art confounds
createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
'outliers', 'detrend_poly'],
output_names=['out_files'],
function=build_filter1,
imports=imports),
name='makemotionbasedfilter')
createfilter1.inputs.detrend_poly = 2
wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
wf.connect(art, 'outlier_files', createfilter1, 'outliers')
filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii.gz',
out_pf_name='pF_mcart.nii.gz',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filtermotion')
wf.connect(realign, 'out_file', filter1, 'in_file')
wf.connect(realign, ('out_file', rename, '_filtermotart'),
filter1, 'out_res_name')
wf.connect(createfilter1, 'out_files', filter1, 'design')
createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file',
'num_components',
'extra_regressors'],
output_names=['out_files'],
function=extract_noise_components,
imports=imports),
iterfield=['realigned_file', 'extra_regressors'],
name='makecompcorrfilter')
createfilter2.inputs.num_components = num_components
wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
createfilter2, 'mask_file')
filter2 = MapNode(fsl.GLM(out_f_name='F.nii.gz',
out_pf_name='pF.nii.gz',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filter_noise_nosmooth')
wf.connect(filter1, 'out_res', filter2, 'in_file')
wf.connect(filter1, ('out_res', rename, '_cleaned'),
filter2, 'out_res_name')
wf.connect(createfilter2, 'out_files', filter2, 'design')
wf.connect(mask, 'mask_file', filter2, 'mask')
bandpass = Node(Function(input_names=['files', 'lowpass_freq',
'highpass_freq', 'fs'],
output_names=['out_files'],
function=bandpass_filter,
imports=imports),
name='bandpass_unsmooth')
bandpass.inputs.fs = 1./TR
bandpass.inputs.highpass_freq = highpass_freq
bandpass.inputs.lowpass_freq = lowpass_freq
wf.connect(filter2, 'out_res', bandpass, 'files')
"""Smooth the functional data using
:class:`nipype.interfaces.fsl.IsotropicSmooth`.
"""
smooth = MapNode(interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"])
smooth.inputs.fwhm = vol_fwhm
wf.connect(bandpass, 'out_files', smooth, 'in_file')
collector = Node(Merge(2), name='collect_streams')
wf.connect(smooth, 'out_file', collector, 'in1')
wf.connect(bandpass, 'out_files', collector, 'in2')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 3
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
warpall.inputs.reference_image = target_file
warpall.inputs.args = '--float'
warpall.inputs.num_threads = 2
warpall.plugin_args = {'sbatch_args': '-c%d' % 2}
# transform to target
wf.connect(collector, 'out', warpall, 'input_image')
wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')
mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')
wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')
maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
wf.connect(warpall, 'output_image', maskts, 'in_file')
wf.connect(mask_target, 'out_file', maskts, 'mask_file')
# map to surface
# extract aparc+aseg ROIs
# extract subcortical ROIs
# extract target space ROIs
# combine subcortical and cortical rois into a single cifti file
#######
# Convert aparc to subject functional space
# Sample the average time series in aparc ROIs
sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
iterfield=['in_file', 'summary_file',
'avgwf_txt_file'],
name='aparc_ts')
sampleaparc.inputs.segment_id = ([8] + range(10, 14) + [17, 18, 26, 47] +
range(49, 55) + [58] + range(1001, 1036) +
range(2001, 2036))
wf.connect(registration, 'outputspec.aparc',
sampleaparc, 'segmentation_file')
wf.connect(collector, 'out', sampleaparc, 'in_file')
def get_names(files, suffix):
"""Generate appropriate names for output files
"""
from nipype.utils.filemanip import (split_filename, filename_to_list,
list_to_filename)
import os
out_names = []
for filename in files:
path, name, _ = split_filename(filename)
out_names.append(os.path.join(path,name + suffix))
return list_to_filename(out_names)
wf.connect(collector, ('out', get_names, '_avgwf.txt'),
sampleaparc, 'avgwf_txt_file')
wf.connect(collector, ('out', get_names, '_summary.stats'),
sampleaparc, 'summary_file')
# Sample the time series onto the surface of the target surface. Performs
# sampling into left and right hemisphere
target = Node(IdentityInterface(fields=['target_subject']), name='target')
target.iterables = ('target_subject', filename_to_list(target_subject))
samplerlh = MapNode(freesurfer.SampleToSurface(),
iterfield=['source_file'],
name='sampler_lh')
samplerlh.inputs.sampling_method = "average"
samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
samplerlh.inputs.sampling_units = "frac"
samplerlh.inputs.interp_method = "trilinear"
samplerlh.inputs.smooth_surf = surf_fwhm
#samplerlh.inputs.cortex_mask = True
samplerlh.inputs.out_type = 'niigz'
samplerlh.inputs.subjects_dir = subjects_dir
samplerrh = samplerlh.clone('sampler_rh')
samplerlh.inputs.hemi = 'lh'
wf.connect(collector, 'out', samplerlh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
wf.connect(target, 'target_subject', samplerlh, 'target_subject')
samplerrh.set_input('hemi', 'rh')
wf.connect(collector, 'out', samplerrh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
wf.connect(target, 'target_subject', samplerrh, 'target_subject')
# Combine left and right hemisphere to text file
combiner = MapNode(Function(input_names=['left', 'right'],
output_names=['out_file'],
function=combine_hemi,
imports=imports),
iterfield=['left', 'right'],
name="combiner")
wf.connect(samplerlh, 'out_file', combiner, 'left')
wf.connect(samplerrh, 'out_file', combiner, 'right')
# Sample the time series file for each subcortical roi
ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
'indices'],
output_names=['out_file'],
function=extract_subrois,
imports=imports),
iterfield=['timeseries_file'],
name='getsubcortts')
ts2txt.inputs.indices = [8] + range(10, 14) + [17, 18, 26, 47] +\
range(49, 55) + [58]
ts2txt.inputs.label_file = \
os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
'2mm_v2.nii.gz'))
wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')
######
substitutions = [('_target_subject_', ''),
('_filtermotart_cleaned_bp_trans_masked', ''),
('_filtermotart_cleaned_bp', ''),
]
substitutions += [("_smooth%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_ts_masker%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_getsubcortts%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_combiner%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_filtermotion%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_filter_noise_nosmooth%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_makecompcorfilter%d" % i,"") for i in range(11)[::-1]]
substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1)) for i in range(11)[::-1]]
substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"),
("T1_out_brain_pve_1_maths_warped", "compcor_gm"),
("T1_out_brain_pve_2_maths_warped", "compcor_wm"),
("output_warped_image_maths", "target_brain_mask"),
("median_brain_mask", "native_brain_mask"),
("corr_", "")]
regex_subs = [('_combiner.*/sar', '/smooth/'),
('_combiner.*/ar', '/unsmooth/'),
('_aparc_ts.*/sar', '/smooth/'),
('_aparc_ts.*/ar', '/unsmooth/'),
('_getsubcortts.*/sar', '/smooth/'),
('_getsubcortts.*/ar', '/unsmooth/'),
('series/sar', 'series/smooth/'),
('series/ar', 'series/unsmooth/'),
('_inverse_transform./', ''),
]
# Save the relevant data into an output directory
datasink = Node(interface=DataSink(), name="datasink")
datasink.inputs.base_directory = sink_directory
datasink.inputs.container = subject_id
datasink.inputs.substitutions = substitutions
datasink.inputs.regexp_substitutions = regex_subs #(r'(/_.*(\d+/))', r'/run\2')
wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
wf.connect(registration, 'outputspec.min_cost_file', datasink, 'resting.qa.mincost')
wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map')
wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'resting.qa.tsnr'),
('summary_file', 'resting.qa.tsnr.@summary')])])
wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed')
wf.connect(createfilter1, 'out_files',
datasink, 'resting.regress.@regressors')
wf.connect(createfilter2, 'out_files',
datasink, 'resting.regress.@compcorr')
wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
wf.connect(sampleaparc, 'summary_file',
datasink, 'resting.parcellations.aparc')
wf.connect(sampleaparc, 'avgwf_txt_file',
datasink, 'resting.parcellations.aparc.@avgwf')
wf.connect(ts2txt, 'out_file',
datasink, 'resting.parcellations.grayo.@subcortical')
datasink2 = Node(interface=DataSink(), name="datasink2")
datasink2.inputs.base_directory = sink_directory
datasink2.inputs.container = subject_id
datasink2.inputs.substitutions = substitutions
datasink2.inputs.regexp_substitutions = regex_subs #(r'(/_.*(\d+/))', r'/run\2')
wf.connect(combiner, 'out_file',
datasink2, 'resting.parcellations.grayo.@surface')
return wf
"""
Creates the full workflow including getting information from dicom files
"""
def create_resting_workflow(args, name=None):
TR = args.TR
slice_times = args.slice_times
if args.dicom_file:
TR, slice_times, slice_thickness = get_info(args.dicom_file)
slice_times = (np.array(slice_times)/1000.).tolist()
if name is None:
name = 'resting_' + args.subject_id
kwargs = dict(files=[os.path.abspath(filename) for filename in args.files],
target_file=os.path.abspath(args.target_file),
subject_id=args.subject_id,
TR=TR,
slice_times=slice_times,
vol_fwhm=args.vol_fwhm,
surf_fwhm=args.surf_fwhm,
norm_threshold=2.,
subjects_dir=os.path.abspath(args.fsdir),
target_subject=args.target_surfs,
lowpass_freq=args.lowpass_freq,
highpass_freq=args.highpass_freq,
sink_directory=os.path.abspath(args.sink),
name=name)
wf = create_workflow(**kwargs)
return wf
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
defstr = ' (default %(default)s)'
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d", "--dicom_file", dest="dicom_file",
help="a SIEMENS example dicom file from the resting series")
parser.add_argument("-f", "--files", dest="files", nargs="+",
help="4d nifti files for resting state",
required=True)
parser.add_argument("-t", "--target", dest="target_file",
help=("Target in MNI space. Best to use the MindBoggle "
"template - "
"OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"),
required=True)
parser.add_argument("-s", "--subject_id", dest="subject_id",
help="FreeSurfer subject id", required=True)
parser.add_argument("--subjects_dir", dest="fsdir",
help="FreeSurfer subject directory", required=True)
parser.add_argument("--target_surfaces", dest="target_surfs", nargs="+",
default=['fsaverage5'],
help="FreeSurfer target surfaces" + defstr)
parser.add_argument("--TR", dest="TR", default=None, type=float,
help="TR if dicom not provided in seconds")
parser.add_argument("--slice_times", dest="slice_times", nargs="+",
type=float, help="Slice onset times in seconds")
parser.add_argument('--vol_fwhm', default=6., dest='vol_fwhm',
type=float, help="Spatial FWHM" + defstr)
parser.add_argument('--surf_fwhm', default=15., dest='surf_fwhm',
type=float, help="Spatial FWHM" + defstr)
parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq",
default=0.1, type=float,
help="Low pass frequency (Hz)" + defstr)
parser.add_argument("-u", "--highpass_freq", dest="highpass_freq",
default=0.01, type=float,
help="High pass frequency (Hz)" + defstr)
parser.add_argument("-o", "--output_dir", dest="sink",
help="Output directory base", required=True)
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
help="Plugin arguments")
args = parser.parse_args()
wf = create_resting_workflow(args)
if args.work_dir:
work_dir = os.path.abspath(args.work_dir)
else:
work_dir = os.getcwd()
wf.base_dir = work_dir
if args.plugin_args:
wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
wf.run(args.plugin)
|
|
"""
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
import ipaddress
import six
import socket
import requests
import warnings
import time
import logging
from sentry import options
from django.core.exceptions import SuspiciousOperation
from collections import namedtuple
from django.conf import settings
from requests.adapters import HTTPAdapter
from requests.exceptions import SSLError, RequestException, Timeout, ReadTimeout
from six.moves.urllib.parse import urlparse
from sentry.models import EventError
from sentry.exceptions import RestrictedIPAddress
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.strings import truncatechars
logger = logging.getLogger(__name__)
# TODO(dcramer): we want to change these to be constants so they are easier
# to translate/link again
# the maximum number of remote resources (i.e. source files) that should be
# fetched
MAX_URL_LENGTH = 150
# UrlResult.body **must** be bytes
UrlResult = namedtuple('UrlResult', ['url', 'headers', 'body', 'status', 'encoding'])
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError, Error as OpenSSLError
except ImportError:
class ZeroReturnError(Exception):
pass
class OpenSSLError(Exception):
pass
USER_AGENT = 'sentry/{version} (https://sentry.io)'.format(
version=sentry.VERSION,
)
DISALLOWED_IPS = {
ipaddress.ip_network(six.text_type(i), strict=False) for i in settings.SENTRY_DISALLOWED_IPS
}
class BadSource(Exception):
error_type = EventError.UNKNOWN_ERROR
def __init__(self, data=None):
if data is None:
data = {}
data.setdefault('type', self.error_type)
super(BadSource, self).__init__(data['type'])
self.data = data
class CannotFetch(BadSource):
error_type = EventError.FETCH_GENERIC_ERROR
def get_server_hostname():
return urlparse(options.get('system.url-prefix')).hostname
def is_valid_url(url):
"""
    Tests a URL to ensure it doesn't resolve to a blacklisted IP range.
"""
# If we have no disallowed ips, we can skip any further validation
# and there's no point in doing a DNS lookup to validate against
# an empty list.
if not DISALLOWED_IPS:
return True
parsed = urlparse(url)
if not parsed.hostname:
return False
server_hostname = get_server_hostname()
if parsed.hostname == server_hostname:
return True
# NOTE: The use of `socket.gethostbyname` is slightly flawed.
# `gethostbyname` doesn't handle octal IP addresses correctly, nor
# does it fetch all of the IP addresses for the record.
# `getaddrinfo` does the correct thing with octals here, and also returns all
# ip addresses for the hostname.
#
# WARNING: This behavior is only correct on Linux. On OSX, `getaddrinfo` also
# returns the wrong IP.
#
# The following should all technically resolve to `127.0.0.1`:
# Python 2.7.11 Linux
# >>> socket.gethostbyname('0177.0000.0000.0001')
# '177.0.0.1'
# >>> socket.getaddrinfo('0177.0000.0000.0001', 0)[0]
# (2, 1, 6, '', ('127.0.0.1', 0))
# Python 2.7.11 macOS
# >>> socket.gethostbyname('0177.0000.0000.0001')
# '177.0.0.1'
# >>> socket.getaddrinfo('0177.0000.0000.0001', None)[0]
# (2, 2, 17, '', ('177.0.0.1', 0))
try:
ip_addresses = set(addr for _, _, _, _, addr in socket.getaddrinfo(parsed.hostname, 0))
except socket.gaierror:
return False
for addr in ip_addresses:
ip_address = addr[0]
if ip_address == server_hostname:
return True
ip_address = ipaddress.ip_address(six.text_type(ip_address))
for ip_network in DISALLOWED_IPS:
if ip_address in ip_network:
return False
return True
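# Illustrative sketch (not part of the original module) of how is_valid_url
# behaves, assuming SENTRY_DISALLOWED_IPS contains the loopback network
# 127.0.0.0/8:
#
#   is_valid_url('http://example.com/app.js')  # True if example.com resolves
#                                               # outside every disallowed range
#   is_valid_url('http://127.0.0.1/secret')     # False: resolves into 127.0.0.0/8
#   is_valid_url('not a url')                   # False: no hostname to resolve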
class BlacklistAdapter(HTTPAdapter):
def send(self, request, *args, **kwargs):
if not is_valid_url(request.url):
raise RestrictedIPAddress('%s matches the URL blacklist' % (request.url, ))
return super(BlacklistAdapter, self).send(request, *args, **kwargs)
class Session(requests.Session):
def request(self, *args, **kwargs):
kwargs.setdefault('timeout', 30)
try:
response = requests.Session.request(self, *args, **kwargs)
# Our version of requests does not transform ZeroReturnError into an
# appropriately generically catchable exception
except ZeroReturnError as exc:
import sys
exc_tb = sys.exc_info()[2]
six.reraise(SSLError, exc, exc_tb)
del exc_tb
        # requests attempts to use chardet internally when no encoding is
        # found, and we want to avoid that slow behavior
if not response.encoding:
response.encoding = 'utf-8'
return response
class SafeSession(Session):
def __init__(self):
requests.Session.__init__(self)
self.headers.update({'User-Agent': USER_AGENT})
self.mount('https://', BlacklistAdapter())
self.mount('http://', BlacklistAdapter())
build_session = SafeSession
def safe_urlopen(
url,
method=None,
params=None,
data=None,
json=None,
headers=None,
allow_redirects=False,
timeout=30,
verify_ssl=True,
user_agent=None
):
"""
    A slightly safer version of ``urllib2.urlopen`` which prevents redirection
and ensures the URL isn't attempting to hit a blacklisted IP range.
"""
if user_agent is not None:
warnings.warn('user_agent is no longer used with safe_urlopen')
session = SafeSession()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
response = session.request(
method=method,
url=url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
return response
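# Illustrative sketch (not part of the original module): typical safe_urlopen
# usage; the URL is a placeholder. The request goes through SafeSession, so
# redirects are disabled and blacklisted IP ranges raise RestrictedIPAddress.
#
#   response = safe_urlopen('https://example.com/hook', json={'event': 'ping'})
#   body = safe_urlread(response)   # raw bytes of the response body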
def safe_urlread(response):
return response.content
def expose_url(url):
if url is None:
return u'<unknown>'
if url[:5] == 'data:':
return u'<data url>'
url = truncatechars(url, MAX_URL_LENGTH)
if isinstance(url, six.binary_type):
url = url.decode('utf-8', 'replace')
return url
def fetch_file(
url,
domain_lock_enabled=True,
outfile=None,
headers=None,
allow_redirects=True,
verify_ssl=False,
timeout=settings.SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT,
**kwargs
):
"""
Pull down a URL, returning a UrlResult object.
"""
# lock down domains that are problematic
if domain_lock_enabled:
domain = urlparse(url).netloc
domain_key = 'source:blacklist:v2:%s' % (md5_text(domain).hexdigest(), )
domain_result = cache.get(domain_key)
if domain_result:
domain_result['url'] = url
raise CannotFetch(domain_result)
logger.debug('Fetching %r from the internet', url)
http_session = build_session()
response = None
try:
try:
start = time.time()
response = http_session.get(
url,
allow_redirects=allow_redirects,
verify=verify_ssl,
headers=headers,
timeout=timeout,
stream=True,
**kwargs
)
try:
cl = int(response.headers['content-length'])
except (LookupError, ValueError):
cl = 0
if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE:
raise OverflowError()
return_body = False
if outfile is None:
outfile = six.BytesIO()
return_body = True
cl = 0
# Only need to even attempt to read the response body if we
# got a 200 OK
if response.status_code == 200:
for chunk in response.iter_content(16 * 1024):
if time.time() - start > settings.SENTRY_SOURCE_FETCH_TIMEOUT:
raise Timeout()
outfile.write(chunk)
cl += len(chunk)
if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE:
raise OverflowError()
except Exception as exc:
logger.debug('Unable to fetch %r', url, exc_info=True)
if isinstance(exc, RestrictedIPAddress):
error = {
'type': EventError.RESTRICTED_IP,
'url': expose_url(url),
}
elif isinstance(exc, SuspiciousOperation):
error = {
'type': EventError.SECURITY_VIOLATION,
'url': expose_url(url),
}
elif isinstance(exc, (Timeout, ReadTimeout)):
error = {
'type': EventError.FETCH_TIMEOUT,
'url': expose_url(url),
'timeout': settings.SENTRY_SOURCE_FETCH_TIMEOUT,
}
elif isinstance(exc, OverflowError):
error = {
'type': EventError.FETCH_TOO_LARGE,
'url': expose_url(url),
# We want size in megabytes to format nicely
'max_size': float(settings.SENTRY_SOURCE_FETCH_MAX_SIZE) / 1024 / 1024,
}
elif isinstance(exc, (RequestException, ZeroReturnError, OpenSSLError)):
error = {
'type': EventError.FETCH_GENERIC_ERROR,
'value': six.text_type(type(exc)),
'url': expose_url(url),
}
else:
logger.exception(six.text_type(exc))
error = {
'type': EventError.UNKNOWN_ERROR,
'url': expose_url(url),
}
# TODO(dcramer): we want to be less aggressive on disabling domains
if domain_lock_enabled:
cache.set(domain_key, error or '', 300)
logger.warning('source.disabled', extra=error)
raise CannotFetch(error)
headers = {k.lower(): v for k, v in response.headers.items()}
encoding = response.encoding
body = None
if return_body:
body = outfile.getvalue()
outfile.close() # we only want to close StringIO
result = (headers, body, response.status_code, encoding)
finally:
if response is not None:
response.close()
if result[2] != 200:
logger.debug('HTTP %s when fetching %r', result[2], url, exc_info=True)
error = {
'type': EventError.FETCH_INVALID_HTTP_CODE,
'value': result[2],
'url': expose_url(url),
}
raise CannotFetch(error)
return UrlResult(url, result[0], result[1], result[2], result[3])
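# Illustrative sketch (not part of the original module): fetching a remote
# source file and inspecting the UrlResult; the URL and error handling are
# placeholders.
#
#   try:
#       result = fetch_file('https://example.com/static/app.js.map')
#   except CannotFetch as exc:
#       print(exc.data)   # {'type': ..., 'url': ...}
#   else:
#       print(result.status, result.encoding, len(result.body))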
|
|
#!
from __future__ import division
from __future__ import print_function
import codecs
import datetime
import sys
import os
import shutil
import tabulate
from numpy import *
import numpy.linalg as LA
from sympy import *
from sympy import var as varsym
from sympy import printing
from sympy.core.alphabets import greeks
import once
import once.config as cfg
from once.calcheck import ModCheck
from once.calpdf import *
from once.calunit import *
try:
from PIL import Image as PImage
from PIL import ImageOps as PImageOps
except:
pass
__version__ = "0.9.0"
__author__ = 'rholland'
class CalcRST(object):
"""Write PDF file.
::
Arguments:
odict (ordered dict) : model dictionary
Files written:
_cfilepdf : PDF calc file name
_rstfile : reST file name
_texfile : tex file name
_auxfile : auxiliary file name
_outfile : out file name
_texmak2 : fls file name
_texmak3 : latexmk file name
Operation keys, number of parameters and associated tags:
_r + line number - 7 - [r] run
_i + line number - 6 - [i] insert
_v + line number - 4 - [v] value
_e + line number - 7 - [e] equation
_t + line number - 9 - [t] table
        _s + line number - 3 - [s] section
_~ + line number - 1 - blank line
_x + line number - 2 - pass-through text
_y + line number - 2 - value heading
_lt - 2 - license text [licensetext]
_# - 1 - control line
[r] p0 | p1 | p2 | p3 | p4 | p5
'os' command arg1 arg2 arg3 arg4
'py' script arg1 arg2 arg3 arg4
[i] p0 | p1 | p2 | p4 | p5
'fg' figure caption size location
'tx' text
'md' model
'fn' function var name reference
'rd' file var name vector or table
'wr' file var name
[v] p0 | p1 | p2 | p3
            var expr statement descrip
[e] p0 | p1 | p2 | p3 | p4 | p5 | p6
var expr statemnt descrip dec1 dec2 units
[t] p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7 | p8
var expr state1 desc range1 range2 dec1 un1 un2
[s] p0 | p1 | p2 | p3
left string calc number sect num toc flag
"""
def __init__(self, odict1):
"""Initialize parameters for UTF calc.
::
Arguments:
odict1 (dictionary): model dictionary
"""
# dicts and lists
self.vbos = cfg.verboseflag
self.el = ModCheck()
self.odict = odict1
self.symb = self.odict.keys()
self.symblist = []
# paths and files
self.rfile = cfg.rstfile
self.figpath = cfg.ipath
#print('rest file name', cfg.rstfile)
self.rf1 = codecs.open(self.rfile, 'w', encoding='utf8')
# parameters
self.fignum = 0
self.widthp = 70
self.xtraline = False
self.prfilename = ''
self.previous = ''
self.literalflag = 0
self.lastsect = ''
self.lastcalcnumber = ''
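    # Illustrative note (not part of the original class): the model dictionary
    # is keyed by an operation tag plus a line number; a hypothetical value
    # entry might look like
    #
    #   odict1['_v0012'] = ['b1', '12*IN', 'b1 = 12*IN', 'beam width']
    #
    # and gen_rst() below dispatches on the two-character tag prefix ('_v',
    # '_e', '_t', ...) to the matching _rst_* writer.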
def gen_rst(self):
""" Parse model dictionary and write rst file.
"""
self.xtraline = True
for _i in self.odict: # execute dictionary line by line
mtag = _i[0:2]
mvals = self.odict[_i]
#print('rstmtag', mtag, _i, mvals, mvals[0])
if mvals[2:9] == '#- page': #- add page break
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\newpage', file=self.rf1)
print(' ', file=self.rf1)
self.el.logwrite("pdf new page", self.vbos)
if mvals[2:4] == '#-':
                if str(mvals.strip())[-1].isdigit():  # add spaces
                    _numspace = int(str(mvals.strip())[-1])
for _i in range(_numspace):
self._rst_blank()
if mtag == '_r':
self._rst_run(self.odict[_i])
elif mtag == '_i':
self._rst_ins(self.odict[_i])
elif mtag == '_v':
self._rst_val2(self.odict[_i])
elif mtag == '_e':
self._rst_eq(self.odict[_i])
elif mtag == '_t':
self._rst_table(self.odict[_i])
elif mtag == '_s':
self._rst_sect(self.odict[_i])
self.xtraline = False
elif mtag == '_x':
self._rst_txt(self.odict[_i])
self.xtraline = False
elif mtag == '_y':
self._rst_val1(self.odict[_i])
self.xtraline = False
else:
pass
if mtag == '_~':
self._rst_blnk()
continue
if self.xtraline:
self._rst_blnk()
if '_lt' in self.odict: # add calc license
            self._rst_txt(self.odict['_lt'])
#for _i in self.odict: print(i, self.odict[i])
self._rst_terms() # add term definitions
self._rst_blnk()
self._rst_txt([' **[end of calc]**']) # end calc
self.rf1.close() # close rst file
self.el.logwrite("< reST file written >", self.vbos)
def _rst_txt(self, txt):
"""Print pass-through text.
arguments:
            txt (string): text that is not part of a tag
"""
#print('txt ',txt)
if txt[0][0:3] == ' |' : # handle lines
print(txt[0][2:].rstrip(), file=self.rf1)
self.xtraline = False
elif txt[0][0:3] == ' `' : # handle literal
if self.literalflag == 2:
self.literalflag = 0
if self.literalflag == 1:
self.literalflag = 2
print(' ', file=self.rf1)
self.xtraline = False
elif txt[0][0:4] == ' ::' : # handle literal
print(txt[0][2:].rstrip(), file=self.rf1)
self.literalflag = 1
self.xtraline = False
elif txt[0][0:4] == ' ..' : # handle raw
print(txt[0][2:].rstrip(), file=self.rf1)
self.xtraline = False
else:
print(txt[0][2:].rstrip(), file=self.rf1)
self.xtraline = True
def _rst_blnk(self):
"""Print blank line.
"""
if self.literalflag == 2: # handle literal
print(' ', file=self.rf1)
self.xtraline = False
else:
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{3mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
def _rst_run(self, dval2):
"""
[r] p0 | p1 | p2 | p3 | p4 | p5 | p6
'os' command arg1 arg2 arg3 arg4
'py' script arg1 arg2 arg3 arg4
"""
pass
def _rst_ins(self, dval2):
"""Insert file data into or from reST
[i] p0 | p1 | p2 | p3 | p4
'fig' file caption size location
'text' file reference
'lit' file reference
'csv' file
'mod' file
'func' file var name reference
'read' file var name vector or table
'write' file var name
'app' file var name
only the first three letters of p0 are read
"""
option = ""
fpath = ""
fp = ""
var2 = ""
var3 = "100"
var4 = "center"
option = dval2[0].strip()[0:3]
fname = dval2[1].strip()
fp = os.path.join(self.figpath,fname)
var2 = dval2[2].strip()
var3 = dval2[3].strip()
var4 = dval2[4].strip()
if option == 'fig':
if var4[0:1] == 'r' : var4 = 'right'
elif var4[0:1] == 'l' : var4 = 'left'
elif var4[0:1] == 'c' : var4 = 'center'
else: var4 = 'center'
print(' ', file=self.rf1)
print('.. figure:: ' + fp, file=self.rf1)
print(' :width: ' + var3 + ' %', file=self.rf1)
print(' :align: ' + var4, file=self.rf1)
print(' ', file=self.rf1)
print(' ' + var2, file=self.rf1)
print(' ', file=self.rf1)
self.el.logwrite("< figure "+fname+" added to TeX >", self.vbos)
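    # Illustrative sketch (not part of the original method): for a model line
    # such as  'fig' | beam.png | Beam elevation | 80 | c  the branch above
    # would emit roughly this reST:
    #
    #   .. figure:: <figure path>/beam.png
    #      :width: 80 %
    #      :align: center
    #
    #      Beam elevation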
def _rst_val1(self, dval2):
"""Print value description to reST.
key: values
_y : p0 | p1
block description eqnum
"""
#print('dval2', dval2)
descrip = dval2[0].strip()
eqnum = dval2[1].strip()
# equation reference line
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{7mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' \\hfill\\textbf{'+descrip+ ' ' +eqnum +'}', file=self.rf1)
print(' ', file=self.rf1)
def _rst_val2(self, dval2):
"""Print values to reST:
key: values
_v : p0 | p1 | p2 | p3
            var expr statement descrip
"""
val1 = eval(dval2[1])
ptype = type(val1)
var1 = dval2[0].strip()
state = var1 + ' = ' + str(val1)
shift = int(self.widthp / 2.0)
ref = dval2[3].strip().ljust(shift)
valuepdf = " "*4 + ref + ' | ' + state
if ptype == ndarray or ptype == list or ptype == tuple:
shift = int(self.widthp / 2.1)
ref = dval2[3].strip().ljust(shift)
tmp1 = str(val1)
if ptype == ndarray:
if '[[' in tmp1:
tmp2 = tmp1.replace(' [', '. [')
tmp1 = tmp2.replace('[[', '. [[')
else:
tmp1 = tmp1.replace('[', '. [')
valuepdf = '. ' + ref + ' | ' + var1 + ' = ' + tmp1
elif ptype == list or ptype == tuple:
if ']]' in tmp1:
tmp1 = tmp1.replace(']]', ']]\n')
else:
tmp1 = tmp1.replace(']', ']\n')
valuepdf = '. ' + ref + ' | ' + var1 + ' = ' + tmp1
print(' ', file=self.rf1)
print('::', file=self.rf1)
print(' ', file=self.rf1)
print(valuepdf, file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
def _rst_eq(self, dval):
"""Print equation to reST:
key : _e + line number
value: p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7
var expr state descrip dec1 dec2 unit eqnum
"""
try: # set decimal format
eformat, rformat = str(dval[4]).strip(), str(dval[5]).strip()
exec("set_printoptions(precision=" + eformat.strip() + ")")
exec("Unum.VALUE_FORMAT = '%." + eformat.strip() + "f'")
#print('eformat',eformat, rformat)
except:
eformat = '3'
rformat = '3'
set_printoptions(precision=3)
Unum.VALUE_FORMAT = "%.3f"
try:
eunit = dval[6].strip()
#print('eunit', eunit)
except:
eunit = ' '
var0 = dval[0].strip()
for k1 in self.odict: # evaluate values and equations
if k1[0:2] in ['_v','_e']:
try:
exec(self.odict[k1][2].strip())
except:
pass
descrip = dval[3].strip() # equation reference line
eqnum = dval[7].strip()
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{7mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' \\hfill\\textbf{'+descrip+ ' ' +eqnum +'}', file=self.rf1)
print(' ', file=self.rf1)
for _j in self.odict: # symbolic form
if _j[0:2] in ['_v','_e']:
#print(str(self.odict[_j][0]))
varsym(str(self.odict[_j][0]))
try: # try sympy processing
symeq = sympify(dval[1].strip())
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{3mm}', file=self.rf1)
print(' ', file=self.rf1)
print('.. math:: ', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(symeq, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{4mm}', file=self.rf1)
print(' ', file=self.rf1)
except: # otherwise write ASCII form
symeq = dval[1].strip()
print(' ', file=self.rf1)
print('::', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + symeq, file=self.rf1)
print(' ', file=self.rf1)
try: # substitute values for variables
symat = symeq.atoms(Symbol)
latexrep = latex(symeq, mul_symbol="dot")
#print('latex1', latexrep)
switch1 = []
for _n in symat: # rewrite latex equation with braces
#print('_n1', _n)
newlatex1 = str(_n).split('__')
if len(newlatex1) == 2:
newlatex1[1] += '}'
newlatex1 = '~d~'.join(newlatex1)
newlatex1 = str(_n).split('_')
if len(newlatex1) == 2:
newlatex1[1] += '}'
newlatex1 = '~s~'.join(newlatex1)
newlatex1 = ''.join(newlatex1)
newlatex1 = newlatex1.replace('~d~', '__{')
newlatex1 = newlatex1.replace('~s~', '_{')
#print('newlatex1', newlatex1)
switch1.append([str(_n), newlatex1])
for _n in switch1:
#print('_n2', _n)
expr1 = eval(_n[0])
if type(expr1) == float: # set sub decimal points
form = '{:.' + eformat.strip() +'f}'
symvar1 = '{' + form.format(expr1) + '}'
else:
symvar1 = '{' + str(expr1) + '}'
#print('replace',_n[1], symvar1)
latexrep = latexrep.replace(_n[1], symvar1)
latexrep = latexrep.replace("\{", "{")
#print('latex2', latexrep)
print(' ', file=self.rf1) # add equation to rst file
print('.. math:: ', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latexrep, file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{1mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
except:
pass
for j2 in self.odict: # restore units
try:
statex = self.odict[j2][2].strip()
exec(statex)
except:
pass
var3s = var0.split('_')
if var3s[0] in greeks: # result var to greek
var3g = "\\" + var0
else:
var3g = var0
typev = type(eval(var0))
#print('typev', typev)
print1 = 0 # format result printing
if typev == ndarray:
print1 = 1
tmp1 = str(eval(var0))
if '[[' in tmp1:
tmp2 = tmp1.replace(' [', '. [')
tmp1 = tmp2.replace('[[', '. [[')
else:
tmp1 = tmp1.replace('[', '. [')
elif typev == list or typev == tuple:
print1 = 1
tmp1 = str(eval(var0))
if '[[' in tmp1:
tmp2 = tmp1.replace(' [', '. [')
tmp1 = tmp2.replace('[[', '. [[')
tmp1 = tmp1.replace('],', '],\n')
else:
tmp1 = tmp1.replace('[', '. [')
tmp1 = tmp1.replace('],', '],\n')
elif typev == Unum:
print1 = 2
exec("Unum.VALUE_FORMAT = '%." + rformat.strip() + "f'")
if len(eunit) > 0:
tmp = eval(var0).au(eval(eunit))
else:
tmp = eval(var0)
tmp1 = tmp.strUnit()
tmp2 = tmp.asNumber()
chkunit = str(tmp).split()
#print('chkunit', tmp, chkunit)
if len(chkunit) < 2:
tmp1 = ''
resultform = "{:,."+rformat + "f}"
result1 = resultform.format(tmp2)
tmp3 = var0 +"="+ result1 + ' ' + tmp1
else:
print1 = 2
if type(eval(var0)) == float or type(eval(var0)) == float64:
resultform = "{:,."+rformat + "f}"
result1 = resultform.format(eval(var0))
tmp3 = var0 +"="+ result1
else:
tmp3 = var0 +"="+ str(eval(var0))
if print1 == 1: # for lists and arrays
print(' ', file=self.rf1)
print('::', file=self.rf1)
print(' ', file=self.rf1)
print('. ' + var0 + ' = ', file=self.rf1)
print(tmp1, file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{4mm}', file=self.rf1)
print(' ', file=self.rf1)
if print1 == 2: # add space at units
try:
result2 = latex(tmp3).split()
tmp3 = ''.join(result2[:-2]) + ' \ '.join(result2[-2:])
except:
pass
#print(' ', file=self.rf1)
#print('.. math:: ', file=self.rf1)
#print(' ', file=self.rf1)
#print(" {" + latex(tmp3, mode='plain') + "}", file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\hfill{\\underline{'+tmp3 +'}}', file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{8mm}', file=self.rf1)
print(' ', file=self.rf1)
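    # Added note: in the substitution step above, variable names containing an
    # underscore are rewritten through temporary '~s~'/'~d~' markers so that a
    # name such as 'f_y' becomes 'f_{y}' in the LaTeX string before each symbol
    # is replaced by its numeric value (e.g. 'f_y' -> 'f~s~y}' -> 'f_{y}').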
def _rst_table(self, dval):
"""Print table to reStructuredText:
_t + line number - 9 - [t] table
[t] p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7 | p8
            var expr state1 desc range1 range2 dec1 un1 un2
"""
try:
eformat, rformat = dval[6].split(',')
exec("set_printoptions(precision=" + eformat + ")")
exec("Unum.VALUE_FORMAT = '%." + eformat + "f'")
except:
eformat = '3'
rformat = '3'
set_printoptions(precision=3)
Unum.VALUE_FORMAT = "%.3f"
# table heading
vect = dval[1:]
eqnum = dval[10].strip()
tablehdr = 'Table ' + eqnum
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{7mm}', file=self.rf1)
print(' ', file=self.rf1)
print("aaxbb " + "**" + tablehdr + "**", file=self.rf1)
print(' ', file=self.rf1)
ref5 = dval[5].strip()
if ref5 != '':
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\hfill\\text{' + ref5 + '}', file=self.rf1)
print(' \\begin{flushleft}', file=self.rf1)
print(' ', file=self.rf1)
print(' \\end{flushleft}', file=self.rf1)
print(' ', file=self.rf1)
# draw horizontal line
#print(' ', file=self.rf1)
#print(".. raw:: latex", file=self.rf1)
#print(' ', file=self.rf1)
#print(' \\vspace{-1mm}', file=self.rf1)
#print(' ', file=self.rf1)
#print(' \\hrulefill', file=self.rf1)
#print(' ', file=self.rf1)
# symbolic forms
for _j in self.symb:
if str(_j)[0] != '_':
varsym(str(_j))
# range variables
try:
var1 = vect[2].strip()
var2 = vect[3].strip()
except:
pass
# equation
try:
var0 = vect[0].split('=')[0].strip()
symeq = vect[0].split('=')[1].strip()
except:
pass
# evaluate equation and array variables - keep units
for k1 in self.odict:
if k1[0] != '_' or k1[0:2] == '_a':
try:
exec(self.odict[k1][3].strip())
except:
pass
try:
exec(self.odict[k1][4].strip())
except:
pass
try:
exec(self.odict[k1][1].strip())
except:
pass
#print(k1, eval(k1))
# write explicit table
if len(str(vect[2])) == 0 and len(str(vect[3])) == 0:
ops = [' - ',' + ',' * ',' / ']
_z1 = vect[0].split('=')[0].strip()
cmd_str1 = _z1 + ' = array(' + vect[1] +')'
exec(cmd_str1)
cunit = dval[7]
print('cunit', cunit)
_rc = eval(_z1).tolist()
# evaluate variables with units
for inx in ndindex(eval(_z1).shape):
print(21, type(_rc[inx[0]][inx[1]]),_rc[inx[0]][inx[1]] )
try:
_fltn2a = _rc[inx[0]][inx[1]]
_fltn2b = _fltn2a.au(eval(cunit))
_fltn2c = _fltn2b.asNumber()
_rc[inx[0]][inx[1]] = str(_fltn2c)
except:
pass
# evaluate numbers
for inx in ndindex(eval(_z1).shape):
try:
_fltn1 = float(_rc[inx[0]][inx[1]])
_rc[inx[0]][inx[1]] = _fltn1
except:
pass
# evaluate expressions
for inx in ndindex(eval(_z1).shape):
for _k in ops:
if _k in str(_rc[inx[0]][inx[1]]) :
_fltn2 = _rc[inx[0]][inx[1]]
_rc[inx[0]][inx[1]] = eval(_fltn2)
break
# print table
table2 = tabulate
fltf = "." + eformat.strip() + "f"
ptable = table2.tabulate(_rc[1:], _rc[0], 'rst', floatfmt=fltf)
print(ptable, file=self.rf1)
print(' ', file=self.rf1)
return
# evaluate variables - strip units for arrays
for k1 in self.odict:
#print('k1', k1)
if k1[0] != '_':
try:
exec(self.odict[k1][1].strip())
except:
pass
try:
state = self.odict[k1][1].strip()
varx = state.split('=')
state2 = varx[0].strip()+'='\
+varx[0].strip() + '.asNumber()'
exec(state2)
#print('j1', k1)
except:
pass
if k1[0:2] == '_a':
#print('k1-2', k1)
try:
exec(self.odict[k1][3].strip())
exec(self.odict[k1][4].strip())
exec(self.odict[k1][1].strip())
except:
pass
# imported table
if len(str(vect[1])) == 0:
_a = eval(vect[0])
# print table
table2 = tabulate
flt1 = "." + eformat.strip() + "f"
ptable = table2.tabulate(_a[1:], _a[0], 'rst', floatfmt=flt1)
print(ptable, file=self.rf1)
print(' ', file=self.rf1)
# explicit table
elif len(str(vect[2])) == 0 and len(str(vect[3])) == 0:
ops = [' - ',' + ',' * ',' / ']
_a1 = vect[0].split('=')[0].strip()
cmd_str1 = _a1 + ' = array(' + vect[1] +')'
exec(cmd_str1)
_z1 = vect[0].split('=')[0].strip()
cmd_str1 = _z1 + ' = array(' + vect[1] +')'
#print(cmd_str1)
exec(cmd_str1)
_rc = eval(_z1).tolist()
# evaluate numbers
for inx in ndindex(eval(_z1).shape):
try:
_fltn1 = float(_rc[inx[0]][inx[1]])
_rc[inx[0]][inx[1]] = _fltn1
except:
pass
#print('chk1', inx, _a[inx[0]][inx[1]])
# evaluate expressions
for inx in ndindex(eval(_z1).shape):
for _k in ops:
if _k in str(_rc[inx[0]][inx[1]]) :
_fltn2 = _rc[inx[0]][inx[1]]
_rc[inx[0]][inx[1]] = eval(_fltn2)
break
# print table
table2 = tabulate
flt1 = "." + eformat.strip() + "f"
ptable = table2.tabulate(_rc[1:], _rc[0], 'rst', floatfmt=flt1)
print(ptable, file=self.rf1)
print(' ', file=self.rf1)
# single row vector - 1D table
elif len(str(vect[3])) == 0 and len(str(vect[0])) != 0:
# process range variable 1 and heading
symeq1 = sympify(symeq)
print(' ', file=self.rf1)
print('.. raw:: latex', file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{2mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' Variables:', file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{1mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(var1, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{1mm}', file=self.rf1)
print(' ', file=self.rf1)
print('.. math:: ', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(symeq1, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
rnge1 = vect[2]
exec(rnge1.strip())
rnge1a = rnge1.split('=')
rlist = [vect[6].strip() + ' = ' +
str(_r)for _r in eval(rnge1a[1])]
#process equation
equa1 = vect[0].strip()
#print(equa1)
exec(equa1)
var2 = equa1.split('=')[0]
etype = equa1.split('=')[1]
elist1 = eval(var2)
if etype.strip()[:1] == '[':
# data is in list form
#elist2 = []
elist2 = eval(equa1.split('=')[1])
# for _v in alist1:
# try:
# elist2.append(list(_v))
# except:
# elist2.append(_v)
else:
try:
elist2 = elist1.tolist()
except:
elist2 = elist1
elist2 = [elist2]
# create 1D table
ptable = tabulate.tabulate(elist2, rlist, 'rst',
floatfmt="."+eformat.strip()+"f")
print(ptable, file=self.rf1)
print(' ', file=self.rf1)
#print(ptable)
# 2D table
elif len(str(vect[3])) != 0 and len(str(vect[0])) != 0:
symeq1 = sympify(symeq)
print(' ', file=self.rf1)
print('.. raw:: latex', file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{2mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' Variables:', file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{1mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(var1, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{2mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(var2, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
print('.. math:: ', file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{4mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ' + latex(symeq1, mul_symbol="dot"), file=self.rf1)
print(' ', file=self.rf1)
# process range variable 1
rnge1 = vect[2]
exec(rnge1.strip())
rnge1a = rnge1.split('=')
rlist = [vect[6].strip() + ' = ' +
str(_r) for _r in eval(rnge1a[1])]
# process range variable 2
rnge2 = vect[3]
exec(rnge2.strip())
rnge2a = rnge2.split('=')
clist = [str(_r).strip() for _r in eval(rnge2a[1])]
rlist.insert(0, vect[7].strip())
# process equation
equa1 = vect[0].strip()
#print('equa1', equa1)
exec(equa1)
etype = equa1.split('=')[1]
if etype.strip()[:1] == '[':
# data is in list form
#alist = []
alist = eval(equa1.split('=')[1])
#print('alist1', alist1)
# for _v in alist1:
# for _x in _v:
#print('_x', _x)
# alist.append(list(_x))
#print('append', alist)
else:
# data is in equation form
equa1a = vect[0].strip().split('=')
equa2 = equa1a[1]
rngx = rnge1a[1]
rngy = rnge2a[1]
ascii1 = rnge1a[0].strip()
ascii2 = rnge2a[0].strip()
# format table
alist = []
for _y12 in eval(rngy):
alistr = []
for _x12 in eval(rngx):
eq2a = equa2.replace(ascii1, str(_x12))
eq2b = eq2a.replace(ascii2, str(_y12))
el = eval(eq2b)
alistr.append(el)
alist.append(alistr)
#print('append', alist)
for _n, _p in enumerate(alist): _p.insert(0, clist[_n])
# create 2D table
flt1 = "." + eformat.strip() + "f"
#print(alist)
ptable = tabulate.tabulate(alist, rlist, 'rst', floatfmt=flt1)
print(ptable, file=self.rf1)
print(' ', file=self.rf1)
def _rst_sect(self, dval):
"""Print section title to reST.
::
_s : p0 | p1 | p2
left string calc number sect num
"""
tleft = dval[0].strip()
tright = dval[1].strip() + dval[2].strip()
self.lastsect = int(dval[2].strip()[1:-1])
self.lastcalcnumber = dval[1].strip()
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{3mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
print(tleft.strip() + "aaxbb " + tright.strip(),file=self.rf1)
print("-" * self.widthp, file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{1mm}', file=self.rf1)
print(' ', file=self.rf1)
def _rst_terms(self):
"""Print section with term definitions to reST:
key: values
_v : p0 | p1 | p2 | p3
            var expr statement descrip
key : _e
value: p0 | p1 | p2 | p3 | p4 | p5 | p6 | p7
            var expr state descrip dec1 dec2 unit eqnum
"""
taglist =[]
for _i in self.odict:
mtag = _i[0:2]
taglist.append(mtag)
        if '_v' in taglist or '_e' in taglist:
tleft = "AST Variables and Definitions"
tright = self.lastcalcnumber + '['+str(self.lastsect+1)+']'
print(' ', file=self.rf1)
print(".. raw:: latex", file=self.rf1)
print(' ', file=self.rf1)
print(' \\vspace{3mm}', file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
print(tleft.strip() + "aaxbb " + tright.strip(),file=self.rf1)
print("-" * self.widthp, file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
print(".. math::", file=self.rf1)
print(' ', file=self.rf1)
print(' \\begin{align}', file=self.rf1)
cnt = 0
for _i in self.odict: # execute dictionary line by line
if _i[0:2] in ['_v','_e']:
cnt += 1
if cnt == 35:
print(' \\end{align}', file=self.rf1)
print(' ', file=self.rf1)
print(' ', file=self.rf1)
print(".. math::", file=self.rf1)
print(' ', file=self.rf1)
print(' \\begin{align}', file=self.rf1)
cnt = 0
mvals = self.odict[_i]
varstring1 = " \\bm{" + str(mvals[0]) + "} "
varstring2 = "&= \\textrm{" + str(mvals[3]) + "}\\\\"
print(varstring1 + varstring2, file=self.rf1)
#print('rstmtag', mtag, _i, mvals, mvals[0])
print(' \\end{align}', file=self.rf1)
print(' ', file=self.rf1)
else:
pass
class CalcPDF(object):
"""write PDF calc from rst file
"""
def __init__(self):
"""Initialize rst, tex and pdf file paths.
"""
self.vbos = cfg.verboseflag
self.el = ModCheck()
self.mfile = cfg.mfile
#print('mfile', self.mfile)
self.xpath = cfg.xpath
self.ppath = cfg.ppath
self.cpath = cfg.cpath
self.pdffile = cfg.cfilepdf
self.rfile = cfg.rstfile
self.texfile = cfg.texfile
self.rpath = cfg.rpath
self.calctitle = cfg.calctitle
self.texfile2 = os.path.join(cfg.xpath, self.texfile)
self.auxfile = os.path.join(cfg.xpath, cfg.mbase + '.aux')
self.outfile = os.path.join(cfg.xpath, cfg.mbase + '.out')
self.texmak2 = os.path.join(cfg.xpath, cfg.mbase + '.fls')
self.texmak3 = os.path.join(cfg.xpath, cfg.mbase + '.fdb_latexmk')
self.stylepathpdf = os.path.join(once.__path__[0],'once.sty')
def gen_tex(self):
"""Generate tex file and call mod_tex.
"""
#print("gen_tex1")
fixstylepath = self.stylepathpdf.replace('\\', '/')
try:
pypath = os.path.dirname(sys.executable)
rstexec = os.path.join(pypath,"Scripts","rst2latex.py")
with open(rstexec) as f1:
f1.close()
pythoncall = 'python '
#print("< rst2latex path 1> " + rstexec)
except:
try:
pypath = os.path.dirname(sys.executable)
rstexec = os.path.join(pypath,"rst2latex.py")
with open(rstexec) as f1:
f1.close()
pythoncall = 'python '
#print("< rst2latex path 2> " + rstexec)
except:
rstexec = "/usr/local/bin/rst2latex.py"
pythoncall = 'python '
#print("< rst2latex path 3> " + rstexec)
tex1 = "".join([pythoncall, rstexec
,
" --documentclass=report ",
" --documentoptions=12pt,notitle,letterpaper",
" --stylesheet=",
fixstylepath + " ", self.rfile + " ", self.texfile2])
self.el.logwrite("tex call:\n" + tex1, self.vbos)
try:
os.system(tex1)
self.el.logwrite("< TeX file written >", self.vbos)
except:
print()
self.el.logwrite("< error in docutils call >", self.vbos)
self.mod_tex(self.texfile2)
def mod_tex(self, tfile):
"""Modify TeX file to avoid problems with escapes:
- Replace marker "aaxbb " inserted by once with
\\hfill (not handled by reST).
- Delete inputenc package
- Modify title section and add table of contents
"""
with open(tfile, 'r') as texin:
texf = texin.read()
texf = texf.replace("""inputenc""", """ """)
texf = texf.replace("aaxbb ", """\\hfill""")
        texf = texf.replace("""\\begin{document}""",
                            """\\renewcommand{\\contentsname}{"""+
                            self.calctitle + "}\n"+
                            """\\begin{document}"""+"\n"+
                            """\\makeatletter"""+
                            """\\renewcommand\\@dotsep{10000}"""+
                            """\\makeatother"""+
                            """\\tableofcontents"""+
                            """\\listoftables"""+
                            """\\listoffigures""")
with open (tfile, 'w') as texout:
print(texf, file=texout)
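    # Illustrative sketch (not part of the original method): after mod_tex runs,
    # the region around \begin{document} in the TeX file reads roughly
    #
    #   \renewcommand{\contentsname}{<calc title>}
    #   \begin{document}
    #   \makeatletter\renewcommand\@dotsep{10000}\makeatother
    #   \tableofcontents\listoftables\listoffigures
    #
    # i.e. the calc title becomes the contents heading and the dot leaders in
    # the table of contents are suppressed.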
def gen_pdf(self):
"""Write PDF file from tex file.
"""
os.chdir(self.xpath)
if os.path.isfile(os.path.join(self.ppath,self.pdffile)):
os.remove(os.path.join(self.ppath,self.pdffile))
pdf1 ='latexmk -xelatex -quiet -f '+os.path.join(self.xpath,self.texfile)
#print("pdf call: ", pdf1)
self.el.logwrite("< PDF calc written >", self.vbos)
os.system(pdf1)
pdfname = self.pdffile
pdfname = list(pdfname)
pdfname[0]='m'
pdfname2 = "".join(pdfname)
pdfftemp = os.path.join(self.xpath, pdfname2)
pdffnew = os.path.join(self.cpath, self.pdffile)
try:
os.remove(pdffnew)
except:
pass
try:
os.rename(pdfftemp, pdffnew)
except:
self.el.logwrite("< PDF calc not moved from temp >", 1)
tocname2 = pdfname2.replace('.pdf','.toc')
toctemp = os.path.join(self.xpath, tocname2)
tocnew = os.path.join(self.rpath, tocname2)
try:
shutil.copyfile(toctemp, tocnew)
except:
self.el.logwrite("< TOC not moved from temp >", 1)
def reprt_list(self):
"""Append calc name to reportmerge.txt
"""
try:
filen1 = os.path.join(self.rpath, "reportmerge.txt")
print(filen1)
file1 = open(filen1, 'r')
mergelist = file1.readlines()
file1.close()
mergelist2 = mergelist[:]
except OSError:
print('< reportmerge.txt file not found in reprt folder >')
return
calnum1 = self.pdffile[0:5]
file2 = open(filen1, 'w')
newstr1 = 'c | ' + self.pdffile + ' | ' + self.calctitle
for itm1 in mergelist:
if calnum1 in itm1:
indx1 = mergelist2.index(itm1)
mergelist2[indx1] = newstr1
for j1 in mergelist2: file2.write(j1)
file2.close()
return
mergelist2.append("\n" + newstr1)
for j1 in mergelist2: file2.write(j1)
file2.close()
return
|
|
from contextlib import closing
import html
import json
import mimetypes
import os
import socket
import time
import traceback
import uuid
import webbrowser
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
from dateutil.parser import parse
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import jinja2
from jirafs import utils
from jirafs.plugin import CommandPlugin
SESSION_CONNECTED = None
class CountingEventHandler(FileSystemEventHandler):
counter = 0
def on_modified(self, event):
self.counter += 1
def get_converted_markup(folder, data):
url = folder.jira._get_url("../1.0/render")
response = folder.jira._session.post(
url,
headers={"Accept": "text/html, */*; q=0.01"},
data=json.dumps(
{
"issueKey": folder.ticket_number,
"rendererType": "atlassian-wiki-renderer",
"unrenderedMarkup": data,
}
),
)
return response.text
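# Illustrative sketch (not part of the original module): get_converted_markup
# posts raw Jira wiki markup to the instance's render endpoint and returns the
# HTML that Jira produces; `folder` is the jirafs ticket folder object.
#
#   html_fragment = get_converted_markup(folder, "h2. Description\n\n*bold*")
#
# The rendered fragment is what serve_preview_content embeds into base.html.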
class IssueRequestHandler(SimpleHTTPRequestHandler):
folder = None
field_data = None
def get_rendered_template(self, template_name, context):
loader = jinja2.FileSystemLoader(
searchpath=os.path.join(os.path.dirname(__file__), "templates")
)
templates = jinja2.Environment(loader=loader)
template = templates.get_template(template_name)
return template.render(context)
def get_all(self):
lines = []
lines.append(
"Jump to: [#Description] | [New Comment|#NewComment] | [#Comments]"
)
lines.append(
f"h1. {self.folder.issue.key}: {self.get_field_data('summary')}\n\n"
)
lines.append(f"h2. Description\n\n")
description_data = self.get_field_data("description")
if not description_data.strip():
lines.append("_Empty_")
else:
lines.append(description_data)
lines.append("\n")
lines.append(f"h2. New Comment\n\n")
comment_data = self.get_field_data("new_comment")
if not comment_data:
lines.append("_Empty_")
else:
lines.append(comment_data)
lines.append("\n")
lines.append(f"h2. Comments\n\n")
lines.append(self.get_comments())
return "\n".join(lines)
def get_comments(self):
lines = []
for comment in self.folder.issue.fields.comment.comments:
lines.append(
"h3. On %s, [~%s] wrote:\n\n"
% (
utils.format_date(self.folder, parse(comment.created)),
utils.get_comment_author_display(comment),
)
)
lines.append(comment.body.replace("\r\n", "\n"))
lines.append("\n")
return "\n".join(lines)
def get_field_data(self, dotpath):
special_fields = {
"": self.get_all,
"new_comment": self.folder.get_new_comment,
"comments": self.get_comments,
}
if dotpath in special_fields:
data = special_fields[dotpath]()
else:
data = self.folder.get_field_value_by_dotpath(dotpath)
return data
def get_local_file_escaped_field_data(self, dotpath):
data = self.get_field_data(dotpath)
if not data:
return {}, ""
local_files = os.listdir(self.folder.path)
referenced_files = utils.find_files_referenced_in_markup(data)
matches_in_reverse_order = sorted(
[
(
filename,
match_data,
)
for filename, match_data in referenced_files.items()
],
key=lambda match: -1 * match[1][2],
)
placeholders = {}
for filename, (full, start, end) in matches_in_reverse_order:
if filename not in local_files:
continue
id = uuid.uuid4()
placeholder = f"JIRAFS-PLACEHOLDER:{id}"
placeholders[placeholder] = (filename, full)
data = "".join([data[:start], placeholder, data[end:]])
return placeholders, data
def get_issue_title(self, html_title):
return f"[{self.folder.issue.key}]: {html_title}"
def replace_placeholders(self, placeholders, data):
for placeholder, (filename, full) in placeholders.items():
if full.startswith("!"):
data = data.replace(placeholder, f'<img src="files/{filename}" />')
elif full.startswith("[^"):
data = data.replace(
placeholder, f'<a href="files/{filename}">{filename}</a>'
)
return data
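    # Added note on the placeholder round trip: get_local_file_escaped_field_data
    # swaps locally attached file references (e.g. "!diagram.png!") for unique
    # JIRAFS-PLACEHOLDER tokens before the markup is sent to Jira for rendering,
    # and replace_placeholders above swaps each token back in as an <img> or <a>
    # tag pointing at the local /files/ route served by serve_file.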
def serve_preview_content(self, dotpath):
content_type = "text/html"
placeholders, data = self.get_local_file_escaped_field_data(dotpath)
html_title = dotpath
if not html_title:
html_title = self.get_field_data("summary")
if isinstance(data, str):
response = self.get_rendered_template(
"base.html",
{
"content": self.replace_placeholders(
placeholders, get_converted_markup(self.folder, data)
),
"title": self.get_issue_title(html_title),
},
)
else:
response = json.dumps(data)
content_type = "application/json"
self.send_response(200)
self.send_header("Content-type", content_type)
self.send_header("Content-length", len(response))
self.end_headers()
self.wfile.write(response.encode("utf-8"))
def serve_file(self, path):
if path not in os.listdir(self.folder.path):
self.send_response(404)
self.send_header("Content-length", 0)
self.end_headers()
return
with open(os.path.join(self.folder.path, path), "rb") as inf:
self.send_response(200)
inf.seek(0, 2)
content_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
self.send_header("Content-type", content_type)
self.send_header("Content-length", inf.tell())
self.end_headers()
inf.seek(0)
self.wfile.write(inf.read())
    def send_eventsource_message(self, message):
        # Chunked transfer encoding requires the chunk size in hexadecimal.
        payload = message.encode("utf-8") + "\r\n".encode("utf-8")
        self.wfile.write(format(len(payload), "x").encode("utf-8"))
        self.wfile.write("\r\n".encode("utf-8"))
        self.wfile.write(payload)
def serve_eventsource(self):
event_handler = CountingEventHandler()
observer = Observer()
observer.schedule(event_handler, path=self.folder.path, recursive=True)
observer.start()
self.send_response(200)
self.send_header("Transfer-encoding", "chunked")
self.send_header("Content-Type", "text/event-stream")
self.end_headers()
while True:
self.send_eventsource_message(
"event: counter\r\ndata: %s\r\n" % event_handler.counter
)
time.sleep(0.5)
def do_DELETE(self):
global SESSION_CONNECTED
if self.path == "/eventsource/":
SESSION_CONNECTED = False
self.send_response(200)
else:
self.send_response(404)
self.end_headers()
def do_GET(self):
        global SESSION_CONNECTED
self.folder.clear_cache()
try:
if self.path.startswith("/files/"):
self.serve_file(self.path[7:])
elif self.path == "/eventsource/":
                SESSION_CONNECTED = True
self.serve_eventsource()
else:
self.serve_preview_content(self.path[1:].replace("/", "."))
except BrokenPipeError:
pass
except Exception as e:
self.send_response(500)
response = self.get_rendered_template(
"traceback.html",
{
"content": html.escape(traceback.format_exc()),
"title": f"Error: {e}",
},
)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", len(response))
self.end_headers()
self.wfile.write(response.encode("utf-8"))
class Command(CommandPlugin):
"""Preview your Jira wiki markup"""
MIN_VERSION = "2.0.0"
MAX_VERSION = "3.0.0"
def add_arguments(self, parser):
parser.add_argument(
"--port",
"-p",
help=(
"Start a webserver on this port; defaults to asking "
"the operating system for any available port."
),
type=int,
default=0,
)
parser.add_argument(
"--no-browser",
"-n",
action="store_true",
default=False,
help=("Do not open a webbrowser to the created webserver."),
)
parser.add_argument(
"--serve-forever",
"-f",
action="store_true",
default=False,
help=(
"Do not automatically terminate preview session "
"when user navigates away from preview URL."
),
)
parser.add_argument("field_name", nargs="?")
def handle(self, args, folder, **kwargs):
return self.cmd(
folder,
args.field_name or "",
port=args.port,
open_browser=not args.no_browser,
serve_forever=args.serve_forever,
)
def continue_serving(self, serve_forever=True):
if serve_forever:
return True
if SESSION_CONNECTED is None or SESSION_CONNECTED:
return True
return False
def main(
self,
folder,
field_name,
port=0,
open_browser=True,
serve_forever=True,
**kwargs,
):
if os.path.isfile(field_name) and field_name.endswith(".jira"):
field_name = field_name.split(".")[0]
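        # Added note: binding a throwaway socket to port 0 asks the OS for any
        # free ephemeral port; the chosen port is read back via getsockname()
        # and reused below for the real HTTPServer.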
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = s.getsockname()[1]
path = field_name.replace(".", "/")
IssueRequestHandler.folder = folder
IssueRequestHandler.get_converted_markup = get_converted_markup
server = HTTPServer(("", port), IssueRequestHandler)
server.timeout = 0.1
print(f"Serving from http://127.0.0.1:{port}")
print("Press <Ctrl+C> to Exit")
if open_browser:
webbrowser.open(f"http://127.0.0.1:{port}/{path}")
try:
while self.continue_serving(serve_forever):
server.handle_request()
except KeyboardInterrupt:
pass
|
|
from __future__ import print_function
import numpy as np
from numpy.linalg import norm
import numpy.testing as npt
from dipy.testing.memory import get_type_refcount
from nose.tools import assert_true, assert_equal, assert_almost_equal
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, run_module_suite)
import dipy.tracking.utils as ut
from dipy.tracking.streamline import (set_number_of_points,
length as ds_length,
relist_streamlines,
unlist_streamlines,
center_streamlines,
transform_streamlines,
select_random_set_of_streamlines,
compress_streamlines,
select_by_rois,
orient_by_rois,
values_from_volume)
streamline = np.array([[82.20181274, 91.36505890, 43.15737152],
[82.38442230, 91.79336548, 43.87036514],
[82.48710632, 92.27861023, 44.56298065],
[82.53310394, 92.78545380, 45.24635315],
[82.53793335, 93.26902008, 45.94785309],
[82.48797607, 93.75003815, 46.64939880],
[82.35533142, 94.25181580, 47.32533264],
[82.15484619, 94.76634216, 47.97451019],
[81.90982819, 95.28792572, 48.60244370],
[81.63336945, 95.78153229, 49.23971176],
[81.35479736, 96.24868011, 49.89558792],
[81.08713531, 96.69807434, 50.56812668],
[80.81504822, 97.14285278, 51.24193192],
[80.52591705, 97.56719971, 51.92168427],
[80.26599884, 97.98269653, 52.61848068],
[80.04635620, 98.38131714, 53.33855820],
[79.84691620, 98.77052307, 54.06955338],
[79.57667542, 99.13599396, 54.78985596],
[79.23351288, 99.43207550, 55.51065063],
[78.84815979, 99.64141846, 56.24016571],
[78.47383881, 99.77347565, 56.99299240],
[78.12837219, 99.81330872, 57.76969528],
[77.80438995, 99.85082245, 58.55574799],
[77.49439240, 99.88065338, 59.34777069],
[77.21414185, 99.85343933, 60.15090561],
[76.96416473, 99.82772827, 60.96406937],
[76.74712372, 99.80519104, 61.78676605],
[76.52263641, 99.79122162, 62.60765076],
[76.03757477, 100.08692169, 63.24152374],
[75.44867706, 100.35265350, 63.79513168],
[74.78033447, 100.57255554, 64.27278900],
[74.11605835, 100.77330780, 64.76428986],
[73.51222992, 100.98779297, 65.32373047],
[72.97387695, 101.23387146, 65.93502045],
[72.47355652, 101.49151611, 66.57343292],
[71.99834442, 101.72480774, 67.23979950],
[71.56909180, 101.98665619, 67.92664337],
[71.18083191, 102.29483795, 68.61888123],
[70.81879425, 102.63343048, 69.31127167],
[70.47422791, 102.98672485, 70.00532532],
[70.10092926, 103.28502655, 70.70999908],
[69.69512177, 103.51667023, 71.42147064],
[69.27423096, 103.71351624, 72.13452911],
[68.91260529, 103.81676483, 72.89796448],
[68.60788727, 103.81982422, 73.69258118],
[68.34162903, 103.76619720, 74.49915314],
[68.08542633, 103.70635223, 75.30856323],
[67.83590698, 103.60187531, 76.11553955],
[67.56822968, 103.44821930, 76.90870667],
[67.28399658, 103.25878906, 77.68825531],
[67.00117493, 103.03740692, 78.45989227],
[66.72718048, 102.80329895, 79.23099518],
[66.46197510, 102.54130554, 79.99622345],
[66.20803833, 102.22305298, 80.74387360],
[65.96872711, 101.88980865, 81.48987579],
[65.72864532, 101.59316254, 82.25085449],
[65.47808075, 101.33383942, 83.02194214],
[65.21841431, 101.11295319, 83.80186462],
[64.95678711, 100.94080353, 84.59326935],
[64.71759033, 100.82022095, 85.40114594],
[64.48053741, 100.73490143, 86.21411896],
[64.24304199, 100.65074158, 87.02709198],
[64.01773834, 100.55318451, 87.84204865],
[63.83801651, 100.41996765, 88.66333008],
[63.70982361, 100.25119019, 89.48779297],
[63.60707855, 100.06730652, 90.31262207],
[63.46164322, 99.91001892, 91.13648224],
[63.26287842, 99.78648376, 91.95485687],
[63.03713226, 99.68377686, 92.76905823],
[62.81192398, 99.56619263, 93.58140564],
[62.57145309, 99.42708588, 94.38592529],
[62.32259369, 99.25592804, 95.18167114],
[62.07497787, 99.05770111, 95.97154236],
[61.82253647, 98.83877563, 96.75438690],
[61.59536743, 98.59293365, 97.53706360],
[61.46530151, 98.30503845, 98.32772827],
[61.39904785, 97.97928619, 99.11172485],
[61.33279419, 97.65353394, 99.89572906],
[61.26067352, 97.30914307, 100.67123413],
[61.19459534, 96.96743011, 101.44847107],
[61.19580460, 96.63417053, 102.23215485],
[61.26572037, 96.29887390, 103.01185608],
[61.39840698, 95.96297455, 103.78307343],
[61.57207870, 95.64262390, 104.55268097],
[61.78163528, 95.35540771, 105.32629395],
[62.06700134, 95.09746552, 106.08564758],
[62.39427185, 94.85724640, 106.83369446],
[62.74076462, 94.62278748, 107.57482147],
[63.11461639, 94.40107727, 108.30641937],
[63.53397751, 94.20418549, 109.02002716],
[64.00019836, 94.03809357, 109.71183777],
[64.43580627, 93.87523651, 110.42416382],
[64.84857941, 93.69993591, 111.14715576],
[65.26740265, 93.51858521, 111.86515808],
[65.69511414, 93.36718750, 112.58474731],
[66.10470581, 93.22719574, 113.31711578],
[66.45891571, 93.06028748, 114.07256317],
[66.78582001, 92.90560913, 114.84281921],
[67.11138916, 92.79004669, 115.62040710],
[67.44729614, 92.75711823, 116.40135193],
[67.75688171, 92.98265076, 117.16111755],
[68.02041626, 93.28012848, 117.91371155],
[68.25725555, 93.53466797, 118.69052124],
[68.46047974, 93.63263702, 119.51107788],
[68.62039948, 93.62007141, 120.34690094],
[68.76782227, 93.56475067, 121.18331909],
[68.90222168, 93.46326447, 122.01765442],
[68.99872589, 93.30039978, 122.84759521],
[69.04119873, 93.05428314, 123.66156769],
[69.05086517, 92.74394989, 124.45450592],
[69.02742004, 92.40427399, 125.23509979],
[68.95466614, 92.09059143, 126.02339935],
[68.84975433, 91.79674530, 126.81564331],
[68.72673798, 91.53726196, 127.61715698],
[68.60685730, 91.30300140, 128.42681885],
[68.50636292, 91.12481689, 129.25317383],
[68.39311218, 91.01572418, 130.08976746],
[68.25946808, 90.94654083, 130.92756653]],
dtype=np.float32)
streamline_64bit = streamline.astype(np.float64)
streamlines = [streamline[[0, 10]], streamline,
streamline[::2], streamline[::3],
streamline[::5], streamline[::6]]
streamlines_64bit = [streamline_64bit[[0, 10]], streamline_64bit,
streamline_64bit[::2], streamline_64bit[::3],
streamline_64bit[::4], streamline_64bit[::5]]
heterogeneous_streamlines = [streamline_64bit,
streamline_64bit.reshape((-1, 6)),
streamline_64bit.reshape((-1, 2))]
def length_python(xyz, along=False):
xyz = np.asarray(xyz, dtype=np.float64)
if xyz.shape[0] < 2:
if along:
return np.array([0])
return 0
dists = np.sqrt((np.diff(xyz, axis=0)**2).sum(axis=1))
if along:
return np.cumsum(dists)
return np.sum(dists)
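# Illustrative sketch (not part of the original tests): length_python sums the
# Euclidean distances between consecutive points, e.g.
#
#   length_python(np.array([[0, 0, 0], [3, 4, 0]]))              # -> 5.0
#   length_python(np.array([[0, 0, 0], [3, 4, 0]]), along=True)  # -> array([5.])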
def set_number_of_points_python(xyz, n_pols=3):
def _extrap(xyz, cumlen, distance):
''' Helper function for extrapolate '''
ind = np.where((cumlen-distance) > 0)[0][0]
len0 = cumlen[ind-1]
len1 = cumlen[ind]
Ds = distance-len0
Lambda = Ds/(len1-len0)
return Lambda*xyz[ind] + (1-Lambda)*xyz[ind-1]
cumlen = np.zeros(xyz.shape[0])
cumlen[1:] = length_python(xyz, along=True)
step = cumlen[-1] / (n_pols-1)
ar = np.arange(0, cumlen[-1], step)
if np.abs(ar[-1] - cumlen[-1]) < np.finfo('f4').eps:
ar = ar[:-1]
xyz2 = [_extrap(xyz, cumlen, distance) for distance in ar]
return np.vstack((np.array(xyz2), xyz[-1]))
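# Illustrative sketch (not part of the original tests): resampling a straight
# 3-point segment of total length 2 with the default n_pols=3 keeps the two
# endpoints and the midpoint, e.g.
#
#   set_number_of_points_python(np.array([[0., 0., 0.],
#                                         [1., 0., 0.],
#                                         [2., 0., 0.]]))
#   # -> points at x = 0.0, 1.0 and 2.0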
def test_set_number_of_points():
# Test resampling of only one streamline
nb_points = 12
modified_streamline_cython = set_number_of_points(
streamline, nb_points)
modified_streamline_python = set_number_of_points_python(
streamline, nb_points)
assert_equal(len(modified_streamline_cython), nb_points)
    # Using 5 digits of precision because the streamline is in float32.
assert_array_almost_equal(modified_streamline_cython,
modified_streamline_python, 5)
modified_streamline_cython = set_number_of_points(
streamline_64bit, nb_points)
modified_streamline_python = set_number_of_points_python(
streamline_64bit, nb_points)
assert_equal(len(modified_streamline_cython), nb_points)
assert_array_almost_equal(modified_streamline_cython,
modified_streamline_python)
res = []
simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
for nb_points in range(2, 200):
modified_streamline_cython = set_number_of_points(
simple_streamline, nb_points)
res.append(nb_points - len(modified_streamline_cython))
assert_equal(np.sum(res), 0)
# Test resampling of multiple streamlines of different nb_points
nb_points = 12
modified_streamlines_cython = set_number_of_points(
streamlines, nb_points)
for i, s in enumerate(streamlines):
modified_streamline_python = set_number_of_points_python(s, nb_points)
        # Using 5 digits of precision because the streamline is in float32.
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python, 5)
modified_streamlines_cython = set_number_of_points(
streamlines_64bit, nb_points)
for i, s in enumerate(streamlines_64bit):
modified_streamline_python = set_number_of_points_python(s, nb_points)
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python)
# Test streamlines with mixed dtype
streamlines_mixed_dtype = [streamline,
streamline.astype(np.float64),
streamline.astype(np.int32),
streamline.astype(np.int64)]
nb_points_mixed_dtype = [len(s) for s in set_number_of_points(
streamlines_mixed_dtype, nb_points)]
assert_array_equal(nb_points_mixed_dtype,
[nb_points] * len(streamlines_mixed_dtype))
# Test streamlines with different shape
modified_streamlines_cython = set_number_of_points(
heterogeneous_streamlines, nb_points)
for i, s in enumerate(heterogeneous_streamlines):
modified_streamline_python = set_number_of_points_python(s, nb_points)
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python)
# Test streamline with integer dtype
modified_streamline = set_number_of_points(streamline.astype(np.int32))
assert_true(modified_streamline.dtype == np.float32)
modified_streamline = set_number_of_points(streamline.astype(np.int64))
assert_true(modified_streamline.dtype == np.float64)
# Test empty list
assert_equal(set_number_of_points([]), [])
# Test streamline having only one point
assert_raises(ValueError, set_number_of_points, np.array([[1, 2, 3]]))
# We do not support list of lists, it should be numpy ndarray.
streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
assert_raises(AttributeError, set_number_of_points, streamline_unsupported)
    # Test setting the number of points of a numpy array with flag WRITABLE=False
streamline_readonly = streamline.copy()
streamline_readonly.setflags(write=False)
assert_equal(len(set_number_of_points(streamline_readonly, nb_points=42)),
42)
    # Test setting the number of points of numpy arrays with flag WRITABLE=False
streamlines_readonly = []
for s in streamlines:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
len(streamlines_readonly))
streamlines_readonly = []
for s in streamlines_64bit:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
len(streamlines_readonly))
# Test if nb_points is less than 2
assert_raises(ValueError, set_number_of_points, [np.ones((10, 3)),
np.ones((10, 3))], nb_points=1)
def test_set_number_of_points_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype)
for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
rstreamlines = set_number_of_points(streamlines, nb_points=2)
list_refcount_after = get_type_refcount()["list"]
del rstreamlines # Delete `rstreamlines` because it holds a reference
# to `list`.
# Calling `set_number_of_points` should increase the refcount of `list`
# by one since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
rstreamlines = set_number_of_points(streamlines, nb_points=2)
list_refcount_after = get_type_refcount()["list"]
# Calling `set_number_of_points` should increase the refcount of `list`
# by one since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
def test_length():
# Test length of only one streamline
length_streamline_cython = ds_length(streamline)
length_streamline_python = length_python(streamline)
assert_almost_equal(length_streamline_cython, length_streamline_python)
length_streamline_cython = ds_length(streamline_64bit)
length_streamline_python = length_python(streamline_64bit)
assert_almost_equal(length_streamline_cython, length_streamline_python)
# Test computing length of multiple streamlines of different nb_points
length_streamlines_cython = ds_length(streamlines)
for i, s in enumerate(streamlines):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
length_streamlines_cython = ds_length(streamlines_64bit)
for i, s in enumerate(streamlines_64bit):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
# Test streamlines having mixed dtype
streamlines_mixed_dtype = [streamline,
streamline.astype(np.float64),
streamline.astype(np.int32),
streamline.astype(np.int64)]
lengths_mixed_dtype = [ds_length(s)
for s in streamlines_mixed_dtype]
assert_array_equal(ds_length(streamlines_mixed_dtype),
lengths_mixed_dtype)
# Test streamlines with different shape
length_streamlines_cython = ds_length(
heterogeneous_streamlines)
for i, s in enumerate(heterogeneous_streamlines):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
# Test streamline having integer dtype
length_streamline = ds_length(streamline.astype('int'))
assert_true(length_streamline.dtype == np.float64)
# Test empty list
assert_equal(ds_length([]), 0.0)
# Test streamline having only one point
assert_equal(ds_length(np.array([[1, 2, 3]])), 0.0)
# We do not support list of lists, it should be numpy ndarray.
streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
assert_raises(AttributeError, ds_length,
streamline_unsupported)
    # Test computing the length of numpy arrays with flag WRITABLE=False
streamlines_readonly = []
for s in streamlines:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_array_almost_equal(ds_length(streamlines_readonly),
[length_python(s) for s in streamlines_readonly])
streamlines_readonly = []
for s in streamlines_64bit:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_array_almost_equal(ds_length(streamlines_readonly),
[length_python(s) for s in streamlines_readonly])
def test_length_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype)
for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
lengths = ds_length(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `ds_length` shouldn't increase the refcount of `list`
# since the return value is a numpy array.
assert_equal(list_refcount_after, list_refcount_before)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
lengths = ds_length(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `ds_length` shouldn't increase the refcount of `list`
# since the return value is a numpy array.
assert_equal(list_refcount_after, list_refcount_before)
def test_unlist_relist_streamlines():
streamlines = [np.random.rand(10, 3),
np.random.rand(20, 3),
np.random.rand(5, 3)]
points, offsets = unlist_streamlines(streamlines)
assert_equal(offsets.dtype, np.dtype('i8'))
assert_equal(points.shape, (35, 3))
assert_equal(len(offsets), len(streamlines))
streamlines2 = relist_streamlines(points, offsets)
assert_equal(len(streamlines), len(streamlines2))
for i in range(len(streamlines)):
assert_array_equal(streamlines[i], streamlines2[i])
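# A rough pure-NumPy sketch of the round-trip checked above (illustrative only;
# the real `unlist_streamlines`/`relist_streamlines` live in
# dipy.tracking.streamline and their exact offsets convention may differ). Here
# `offsets` is assumed to hold the cumulative end index of each streamline.
def _unlist_relist_sketch(streamlines):
    import numpy as np
    points = np.concatenate(streamlines, axis=0)
    offsets = np.cumsum([len(s) for s in streamlines]).astype('i8')
    starts = np.concatenate(([0], offsets[:-1]))
    relisted = [points[b:e] for b, e in zip(starts, offsets)]
    return points, offsets, relisted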
def test_center_and_transform():
A = np.array([[1, 2, 3], [1, 2, 3.]])
streamlines = [A for i in range(10)]
streamlines2, center = center_streamlines(streamlines)
B = np.zeros((2, 3))
assert_array_equal(streamlines2[0], B)
assert_array_equal(center, A[0])
affine = np.eye(4)
affine[0, 0] = 2
affine[:3, -1] = - np.array([2, 1, 1]) * center
streamlines3 = transform_streamlines(streamlines, affine)
assert_array_equal(streamlines3[0], B)
def test_select_random_streamlines():
streamlines = [np.random.rand(10, 3),
np.random.rand(20, 3),
np.random.rand(5, 3)]
new_streamlines = select_random_set_of_streamlines(streamlines, 2)
assert_equal(len(new_streamlines), 2)
new_streamlines = select_random_set_of_streamlines(streamlines, 4)
assert_equal(len(new_streamlines), 3)
def compress_streamlines_python(streamline, tol_error=0.01,
max_segment_length=10):
"""
Python version of the FiberCompression found on
https://github.com/scilus/FiberCompression.
"""
if streamline.shape[0] <= 2:
return streamline.copy()
# Euclidean distance
def segment_length(prev, next):
return np.sqrt(((prev-next)**2).sum())
# Projection of a 3D point on a 3D line, minimal distance
def dist_to_line(prev, next, curr):
return norm(np.cross(next-prev, curr-next)) / norm(next-prev)
nb_points = 0
compressed_streamline = np.zeros_like(streamline)
# Copy first point since it is always kept.
compressed_streamline[0, :] = streamline[0, :]
nb_points += 1
prev = streamline[0]
prev_id = 0
for next_id, next in enumerate(streamline[2:], start=2):
# Euclidean distance between last added point and current point.
if segment_length(prev, next) > max_segment_length:
compressed_streamline[nb_points, :] = streamline[next_id-1, :]
nb_points += 1
prev = streamline[next_id-1]
prev_id = next_id-1
continue
# Check that each point is not offset by more than `tol_error` mm.
for o, curr in enumerate(streamline[prev_id+1:next_id],
start=prev_id+1):
dist = dist_to_line(prev, next, curr)
if np.isnan(dist) or dist > tol_error:
compressed_streamline[nb_points, :] = streamline[next_id-1, :]
nb_points += 1
prev = streamline[next_id-1]
prev_id = next_id-1
break
# Copy last point since it is always kept.
compressed_streamline[nb_points, :] = streamline[-1, :]
nb_points += 1
# Make sure the array has the correct size
return compressed_streamline[:nb_points]
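# Minimal numeric sanity check for the `dist_to_line` criterion used above (an
# illustrative sketch, not part of the original suite): the point (1, 1, 0) lies
# exactly 1 mm away from the line through (0, 0, 0) and (2, 0, 0).
def _example_dist_to_line():
    import numpy as np
    from numpy.linalg import norm
    prev = np.array([0., 0., 0.])
    next_ = np.array([2., 0., 0.])
    curr = np.array([1., 1., 0.])
    return norm(np.cross(next_ - prev, curr - next_)) / norm(next_ - prev)  # -> 1.0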
def test_compress_streamlines():
for compress_func in [compress_streamlines_python, compress_streamlines]:
# Small streamlines (two points or fewer) cannot be compressed.
for small_streamline in [np.array([[]]),
np.array([[1, 1, 1]]),
np.array([[1, 1, 1], [2, 2, 2]])]:
c_streamline = compress_func(small_streamline)
assert_equal(len(c_streamline), len(small_streamline))
assert_array_equal(c_streamline, small_streamline)
# Compressing a straight streamline that is less than 10mm long
# should output a two-point streamline.
linear_streamline = np.linspace(0, 5, 100*3).reshape((100, 3))
c_streamline = compress_func(linear_streamline)
assert_equal(len(c_streamline), 2)
assert_array_equal(c_streamline, [linear_streamline[0],
linear_streamline[-1]])
# The distance between consecutive points must be less than or equal to
# `max_segment_length`.
max_segment_length = 10
linear_streamline = np.linspace(0, 100, 100*3).reshape((100, 3))
linear_streamline[:, 1:] = 0.
c_streamline = compress_func(linear_streamline,
max_segment_length=max_segment_length)
segments_length = np.sqrt((np.diff(c_streamline,
axis=0)**2).sum(axis=1))
assert_true(np.all(segments_length <= max_segment_length))
assert_equal(len(c_streamline), 12)
assert_array_equal(c_streamline, linear_streamline[::9])
# A small `max_segment_length` should keep all points.
c_streamline = compress_func(linear_streamline,
max_segment_length=0.01)
assert_array_equal(c_streamline, linear_streamline)
# Test we can set `max_segment_length` to infinity
# (like the C++ version)
compress_func(streamline, max_segment_length=np.inf)
# Uncompressable streamline when `tol_error` == 1.
simple_streamline = np.array([[0, 0, 0],
[1, 1, 0],
[1.5, np.inf, 0],
[2, 2, 0],
[2.5, 20, 0],
[3, 3, 0]])
# Because of np.inf, compressing that streamline causes a warning.
with np.errstate(invalid='ignore'):
c_streamline = compress_func(simple_streamline, tol_error=1)
assert_array_equal(c_streamline, simple_streamline)
# Create a special streamline where every other point is increasingly
# farther from a straight line formed by the streamline endpoints.
tol_errors = np.linspace(0, 10, 21)
orthogonal_line = np.array([[-np.sqrt(2)/2, np.sqrt(2)/2, 0]],
dtype=np.float32)
special_streamline = np.array([range(len(tol_errors)*2+1)] * 3,
dtype=np.float32).T
special_streamline[1::2] += orthogonal_line * tol_errors[:, None]
# # Uncomment to see the streamline.
# import pylab as plt
# plt.plot(special_streamline[:, 0], special_streamline[:, 1], '.-')
# plt.axis('equal'); plt.show()
# Test different values for `tol_error`.
for i, tol_error in enumerate(tol_errors):
cspecial_streamline = compress_streamlines(special_streamline,
tol_error=tol_error+1e-4,
max_segment_length=np.inf)
# First and last points should always be the same as the original ones.
assert_array_equal(cspecial_streamline[0], special_streamline[0])
assert_array_equal(cspecial_streamline[-1], special_streamline[-1])
assert_equal(len(cspecial_streamline),
len(special_streamline)-((i*2)+1))
# Make sure Cython and Python versions are the same.
cstreamline_python = compress_streamlines_python(
special_streamline,
tol_error=tol_error+1e-4,
max_segment_length=np.inf)
assert_equal(len(cspecial_streamline), len(cstreamline_python))
assert_array_almost_equal(cspecial_streamline, cstreamline_python)
def test_compress_streamlines_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype)
for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
cstreamlines = compress_streamlines(streamlines)
list_refcount_after = get_type_refcount()["list"]
del cstreamlines # Delete `cstreamlines` because it holds a reference
# to `list`.
# Calling `compress_streamlines` should increase the refcount of `list`
# by one since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
cstreamlines = compress_streamlines(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `compress_streamlines` should increase the refcount of `list` by
# one since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
def test_select_by_rois():
streamlines = [np.array([[0, 0., 0.9],
[1.9, 0., 0.]]),
np.array([[0.1, 0., 0],
[0, 1., 1.],
[0, 2., 2.]]),
np.array([[2, 2, 2],
[3, 3, 3]])]
# Make two ROIs:
mask1 = np.zeros((4, 4, 4), dtype=bool)
mask2 = np.zeros_like(mask1)
mask1[0, 0, 0] = True
mask2[1, 0, 0] = True
selection = select_by_rois(streamlines, [mask1], [True],
tol=1)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
tol=1)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, False])
npt.assert_array_equal(list(selection), [streamlines[1]])
# Setting tolerance too low gets overridden:
selection = select_by_rois(streamlines, [mask1, mask2], [True, False],
tol=0.1)
npt.assert_array_equal(list(selection), [streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
tol=0.87)
npt.assert_array_equal(list(selection), [streamlines[1]])
mask3 = np.zeros_like(mask1)
mask3[0, 2, 2] = 1
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False], tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
# Select using only one ROI
selection = select_by_rois(streamlines, [mask1], [True], tol=0.87)
npt.assert_array_equal(list(selection), [streamlines[1]])
selection = select_by_rois(streamlines, [mask1], [True], tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
# Use different modes:
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="all",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="either_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="both_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
mask2[0, 2, 2] = True
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="both_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
# Test with generator input:
def generate_sl(streamlines):
for sl in streamlines:
yield sl
selection = select_by_rois(generate_sl(streamlines), [mask1], [True],
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
def test_orient_by_rois():
streamlines = [np.array([[0, 0., 0],
[1, 0., 0.],
[2, 0., 0.]]),
np.array([[2, 0., 0.],
[1, 0., 0],
[0, 0, 0.]])]
# Make two ROIs:
mask1_vol = np.zeros((4, 4, 4), dtype=bool)
mask2_vol = np.zeros_like(mask1_vol)
mask1_vol[0, 0, 0] = True
mask2_vol[1, 0, 0] = True
mask1_coords = np.array(np.where(mask1_vol)).T
mask2_coords = np.array(np.where(mask2_vol)).T
# If there is an affine, we'll use it:
affine = np.eye(4)
affine[:, 3] = [-1, 100, -20, 1]
# Transform the streamlines:
x_streamlines = [sl + affine[:3, 3] for sl in streamlines]
for copy in [True, False]:
for sl, affine in zip([streamlines, x_streamlines], [None, affine]):
for mask1, mask2 in \
zip([mask1_vol, mask1_coords], [mask2_vol, mask2_coords]):
new_streamlines = orient_by_rois(sl, mask1, mask2,
affine=affine, copy=copy)
if copy:
flipped_sl = [sl[0], sl[1][::-1]]
else:
flipped_sl = [np.array([[0, 0., 0],
[1, 0., 0.],
[2, 0., 0.]]),
np.array([[0, 0., 0.],
[1, 0., 0],
[2, 0, 0.]])]
if affine is not None:
flipped_sl = [s + affine[:3, 3] for s in flipped_sl]
npt.assert_equal(new_streamlines, flipped_sl)
def test_values_from_volume():
decimal = 4
data3d = np.arange(2000).reshape(20, 10, 10)
# Test two cases of 4D data (handled differently)
# One where the last dimension is length 3:
data4d_3vec = np.arange(6000).reshape(20, 10, 10, 3)
# The other where the last dimension is not 3:
data4d_2vec = np.arange(4000).reshape(20, 10, 10, 2)
for dt in [np.float32, np.float64]:
for data in [data3d, data4d_3vec, data4d_2vec]:
sl1 = [np.array([[1, 0, 0],
[1.5, 0, 0],
[2, 0, 0],
[2.5, 0, 0]]).astype(dt),
np.array([[2, 0, 0],
[3.1, 0, 0],
[3.9, 0, 0],
[4.1, 0, 0]]).astype(dt)]
ans1 = [[data[1, 0, 0],
data[1, 0, 0] + (data[2, 0, 0] - data[1, 0, 0]) / 2,
data[2, 0, 0],
data[2, 0, 0] + (data[3, 0, 0] - data[2, 0, 0]) / 2],
[data[2, 0, 0],
data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.1,
data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.9,
data[4, 0, 0] + (data[5, 0, 0] - data[4, 0, 0]) * 0.1]]
vv = values_from_volume(data, sl1)
npt.assert_almost_equal(vv, ans1, decimal=decimal)
vv = values_from_volume(data, np.array(sl1))
npt.assert_almost_equal(vv, ans1, decimal=decimal)
affine = np.eye(4)
affine[:, 3] = [-100, 10, 1, 1]
x_sl1 = ut.move_streamlines(sl1, affine)
x_sl2 = ut.move_streamlines(sl1, affine)
vv = values_from_volume(data, x_sl1, affine=affine)
npt.assert_almost_equal(vv, ans1, decimal=decimal)
# The generator has already been consumed so needs to be
# regenerated:
x_sl1 = list(ut.move_streamlines(sl1, affine))
vv = values_from_volume(data, x_sl1, affine=affine)
npt.assert_almost_equal(vv, ans1, decimal=decimal)
# Test that the streamlines haven't mutated:
l_sl2 = list(x_sl2)
npt.assert_equal(x_sl1, l_sl2)
vv = values_from_volume(data, np.array(x_sl1), affine=affine)
npt.assert_almost_equal(vv, ans1, decimal=decimal)
npt.assert_equal(np.array(x_sl1), np.array(l_sl2))
# Test for lists of streamlines with different numbers of nodes:
sl2 = [sl1[0][:-1], sl1[1]]
ans2 = [ans1[0][:-1], ans1[1]]
vv = values_from_volume(data, sl2)
for ii, v in enumerate(vv):
npt.assert_almost_equal(v, ans2[ii], decimal=decimal)
# We raise an error if the streamlines passed in don't make sense: in this
# case, a tuple instead of a list, generator, or array.
nonsense_sl = (np.array([[1, 0, 0],
[1.5, 0, 0],
[2, 0, 0],
[2.5, 0, 0]]),
np.array([[2, 0, 0],
[3.1, 0, 0],
[3.9, 0, 0],
[4.1, 0, 0]]))
npt.assert_raises(RuntimeError, values_from_volume, data, nonsense_sl)
# For some use-cases we might have singleton streamlines (with only one
# node each):
data3D = np.ones((2, 2, 2))
streamlines = np.ones((10, 1, 3))
npt.assert_equal(values_from_volume(data3D, streamlines).shape, (10, 1))
data4D = np.ones((2, 2, 2, 2))
streamlines = np.ones((10, 1, 3))
npt.assert_equal(values_from_volume(data4D, streamlines).shape, (10, 1, 2))
if __name__ == '__main__':
run_module_suite()
|
|
from django.core.exceptions import FieldError
from django.db import connections
from django.db.backends.util import truncate_name
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_proxied_model, get_order_dir, \
select_related_descend, Query
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
self.pre_sql_setup()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
# ordering clause need to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
return ' '.join(result), tuple(params)
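    # For reference, the value returned above has the shape
    # (sql_string, params_tuple); with a hypothetical model it might look like
    #   ('SELECT "app_item"."id", "app_item"."name" FROM "app_item" WHERE "app_item"."id" = %s', (1,))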
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((field, []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (col, order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
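    # For reference (hypothetical table/column names), the (result, group_by)
    # pair returned above might look like:
    #   (['"app_item"."name" ASC'], [('"app_item"."name"', [])])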
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, last, extra = self.query.setup_joins(pieces,
opts, alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
self.query.promote_alias_chain(joins,
self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
if alias:
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return [(alias, col, order)]
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
def get_grouping(self):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
if len(self.query.model._meta.fields) == len(self.query.select) and \
self.connection.features.allows_group_by_pk:
self.query.group_by = [(self.query.model._meta.db_table, self.query.model._meta.pk.column)]
group_by = self.query.group_by or []
extra_selects = []
for extra_select, extra_params in self.query.extra_select.itervalues():
extra_selects.append(extra_select)
params.extend(extra_params)
for col in group_by + self.query.related_select_cols + extra_selects:
if isinstance(col, (list, tuple)):
result.append('%s.%s' % (qn(col[0]), qn(col[1])))
elif hasattr(col, 'as_sql'):
result.append(col.as_sql(qn))
else:
result.append('(%s)' % str(col))
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if self.query.aggregate_select:
aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def has_results(self):
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(('a',))
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Runs the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return empty_iter()
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(SQLInsertCompiler, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
from django.db.models.base import Model
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
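    # Illustrative shape of the two strategies above, with hypothetical names:
    # the fast path issues a single statement such as
    #   UPDATE "app_item" SET "flag" = %s WHERE "id" IN (SELECT U0."id" FROM ...)
    # while the pre-select path first runs the SELECT, collects the ids, and
    # then filters the update with pk__in=[1, 2, 3].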
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def empty_iter():
"""
Returns an iterator containing no results.
"""
yield iter([]).next()
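    # (The yield above is never actually reached: .next() on an empty iterator
    # raises StopIteration as soon as the generator is first advanced, so
    # callers simply receive an already-exhausted generator.)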
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
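# A minimal standalone sketch of the trimming performed above (illustrative
# only, not part of Django): the extra ordering columns appended to each row
# are sliced off before the rows reach the caller.
def _trim_ordering_columns(rows, trim):
    """Return `rows` with the last `trim` columns removed from each row."""
    return [row[:-trim] for row in rows]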
|
|
#!/usr/bin/python
# coding: UTF-8
# musicdata service to read from SPOP
# Written by: Ron Ritchey
from __future__ import unicode_literals
import threading, logging, Queue, time, sys, telnetlib, json, getopt
import musicdata
class musicdata_spop(musicdata.musicdata):
def __init__(self, q, server=u'localhost', port=6602, pwd=u''):
super(musicdata_spop, self).__init__(q)
self.server = server
self.port = port
self.pwd = pwd
self.connection_failed = 0
self.timeout = 20
self.idle_state = False
self.dataclient = None
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
data_t = threading.Thread(target=self.run)
data_t.daemon = True
data_t.start()
# Start the idle timer
idle_t = threading.Thread(target=self.idlealert)
idle_t.daemon = True
idle_t.start()
def idlealert(self):
while True:
# Generate a noidle event every timeout seconds
time.sleep(self.timeout)
# If blocked waiting for a response, allow idlealert to issue a notify to unblock the service
if self.idle_state:
try:
#self.dataclient.noidle()
self.dataclient.write(u"notify\n")
self.dataclient.read_until(u"\n")
except (IOError, AttributeError):
# If not idle (or not created yet) return to sleeping
pass
def connect(self):
# Try up to 10 times to connect to SPOP
self.connection_failed = 0
self.dataclient = None
logging.debug(u"Connecting to SPOP service on {0}:{1}".format(self.server, self.port))
while True:
if self.connection_failed >= 10:
logging.debug(u"Could not connect to SPOP")
break
try:
# Connect to SPOP
client = telnetlib.Telnet(self.server, self.port)
client.read_until("\n")
self.dataclient = client
break
except:
self.dataclient = None
self.connection_failed += 1
time.sleep(1)
if self.dataclient is None:
raise IOError(u"Could not connect to SPOP")
else:
logging.debug(u"Connected to SPOP service")
def run(self):
logging.debug(u"SPOP musicdata service starting")
while True:
if self.dataclient is None:
try:
# Try to connect
self.connect()
self.status()
self.sendUpdate()
except (IOError, RuntimeError):
self.dataclient = None
# On connection error, sleep 5 and then return to top and try again
time.sleep(5)
continue
try:
# Wait for notice that state has changed
self.idle_state = True
self.dataclient.write("idle\n")
msg = self.dataclient.read_until("\n")
self.idle_state = False
self.status()
self.sendUpdate()
time.sleep(.01)
except (IOError, RuntimeError):
self.dataclient = None
logging.debug(u"Could not get status from SPOP")
time.sleep(5)
continue
def status(self):
# Read musicplayer status and update musicdata
try:
self.dataclient.write(u"status\n")
msg = self.dataclient.read_until("\n").strip()
status = json.loads(msg)
except (IOError, ValueError):
logging.debug(u"Bad status message received. Contents were {0}".format(msg))
raise RuntimeError(u"Bad status message received.")
except:
# Caught something else. Report it and then inform calling function that the connection is bad
e = sys.exc_info()[0]
logging.debug(u"Caught {0} trying to get status from SPOP".format(e))
raise RuntimeError(u"Could not get status from SPOP")
state = status.get(u'status')
if state != u"playing":
self.musicdata[u'state'] = u"stop"
else:
self.musicdata[u'state'] = u"play"
# Update remaining variables
self.musicdata[u'artist'] = status[u'artist'] if u'artist' in status else u""
self.musicdata[u'title'] = status[u'title'] if u'title' in status else u""
self.musicdata[u'album'] = status[u'album'] if u'album' in status else u""
self.musicdata[u'volume'] = 0
self.musicdata[u'length'] = self.intn(status[u'duration']/1000) if u'duration' in status else 0
self.musicdata[u'elapsed'] = self.intn(status[u'position']) if u'position' in status else 0
self.musicdata[u'playlist_position'] = self.intn(status[u'current_track']) if u'current_track' in status else 0
self.musicdata[u'playlist_length'] = self.musicdata[u'playlist_count'] = self.intn(status[u'total_tracks']) if u'total_tracks' in status else 0
self.musicdata[u'uri'] = status[u'uri'] if u'uri' in status else u""
self.musicdata[u'repeat'] = status[u'repeat'] if u'repeat' in status else False
self.musicdata[u'random'] = status[u'shuffle'] if u'shuffle' in status else False
self.musicdata[u'single'] = False # Not supported in SPOP
self.musicdata[u'current'] = self.musicdata[u'elapsed']
self.musicdata[u'duration'] = self.musicdata[u'length']
self.musicdata[u'actPlayer'] = u"SPOP"
self.musicdata[u'musicdatasource'] = u"SPOP"
self.musicdata[u'bitrate'] = u""
self.musicdata[u'tracktype'] = u""
plp = self.musicdata[u'playlist_position']
plc = self.musicdata[u'playlist_length']
if self.musicdata[u'length'] > 0:
timepos = time.strftime(u"%-M:%S", time.gmtime(self.musicdata[u'elapsed'])) + "/" + time.strftime(u"%-M:%S", time.gmtime(self.musicdata[u'length']))
remaining = time.strftime(u"%-M:%S", time.gmtime(self.musicdata[u'length'] - self.musicdata[u'elapsed']))
else:
timepos = time.strftime(u"%-M:%S", time.gmtime(self.musicdata[u'elapsed']))
remaining = timepos
self.musicdata[u'remaining'] = remaining.decode()
self.musicdata[u'elapsed_formatted'] = self.musicdata[u'position'] = timepos.decode()
self.musicdata[u'playlist_display'] = u"{0}/{1}".format(plp, plc)
self.musicdata[u'tracktype'] = u"SPOP"
self.validatemusicvars(self.musicdata)
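# For reference, the JSON document parsed by status() above looks roughly like
# the following (an illustrative sketch; the exact fields vary with player state):
#   {"status": "playing", "artist": "...", "title": "...", "album": "...",
#    "duration": 215000, "position": 42.0, "current_track": 3,
#    "total_tracks": 12, "uri": "spotify:track:...", "repeat": false,
#    "shuffle": false}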
if __name__ == u'__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename=u'musicdata_spop.log', level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
try:
opts, args = getopt.getopt(sys.argv[1:],u"hs:p:w:",[u"server=",u"port=",u"pwd="])
except getopt.GetoptError:
print u'musicdata_spop.py -s <server> -p <port> -w <password>'
sys.exit(2)
# Set defaults
server = u'localhost'
port = 6602
pwd= u''
for opt, arg in opts:
if opt == u'-h':
print u'musicdata_spop.py -s <server> -p <port> -w <password>'
sys.exit()
elif opt in (u"-s", u"--server"):
server = arg
elif opt in (u"-p", u"--port"):
port = arg
elif opt in (u"-w", u"--pwd"):
pwd = arg
import sys
q = Queue.Queue()
mds = musicdata_spop(q, server, port, pwd)
try:
start = time.time()
while True:
if start+120 < time.time():
break;
try:
item = q.get(timeout=1000)
print u"+++++++++"
for k,v in item.iteritems():
print u"[{0}] '{1}' type {2}".format(k,v,type(v))
print u"+++++++++"
print
q.task_done()
except Queue.Empty:
pass
except KeyboardInterrupt:
print u''
pass
print u"Exiting..."
|
|
"""The Session is a wrapper around a Shotgun instance, proxying requests to
the server and applying additional logic on top of it. The Session instance is
designed to be used for a single task and then discarded, since it makes the
assumption that entity relationships do not change.
While not fully documented below, this object will proxy all attributes to the
underlying Shotgun instance, so you can treat this as you would a Shotgun
instance.
"""
from __future__ import with_statement, absolute_import
import errno
import functools
import itertools
import json
import logging
import os
import re
import threading
import urlparse
import warnings
from sgschema import Schema
from .entity import Entity
from .pool import ShotgunPool
from .utils import expand_braces, parse_isotime
log = logging.getLogger(__name__)
class EntityNotFoundWarning(UserWarning):
pass
class EntityNotFoundError(ValueError):
pass
def _asyncable(func):
"""Wrap a function, so that async=True will run it in a thread."""
@functools.wraps(func)
def _wrapped(self, *args, **kwargs):
if kwargs.pop('async', False):
return self._submit_concurrent(func, self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _wrapped
def _assert_ownership(func):
"""Wrap a function that takes a list of entities, and make sure that we own them."""
@functools.wraps(func)
def _wrapped(self, entities, *args, **kwargs):
entities = list(entities)
for e in entities:
if isinstance(e, Entity):
if e.session is not self:
raise ValueError('Entity not from this session', e, self)
else:
raise TypeError('Non-Entity passed as entity', e)
return func(self, entities, *args, **kwargs)
return _wrapped
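# Usage sketch for the two decorators above (illustrative only; the decorated
# method name is hypothetical):
#
#     class Session(object):
#         @_assert_ownership
#         @_asyncable
#         def fetch_core(self, entities):
#             ...
#
# A call like ``session.fetch_core(entities, async=True)`` then checks that
# every entity belongs to this session and runs the body in the session's
# thread pool.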
_recursion_sentinel = object()
class Session(object):
"""Shotgun wrapper.
:param shotgun: A Shotgun instance to wrap, or the name to be passed to
``shotgun_api3_registry.connect()`` in order to construct one.
If passed a name, the remaining args and kwargs will also be passed to the
api registry connector.
If passed a descendant of ``shotgun_api3.Shotgun`` (or one is constructed
via the registry), it will be wrapped in a :class:`~sgsession.pool.ShotgunPool` so that
it becomes thread-safe. Any other objects (e.g. mock servers) are used
unmodified.
If passed nothing, ``shotgun_api3_registry.connect`` will be called
the first time :attr:`shotgun` is accessed (which will happen on many
operations). To stop this behaviour, pass ``False``.
"""
#: Mapping of entity types to the field where their "parent" lives.
parent_fields = {
'Asset': 'project',
'Project': None,
'Sequence': 'project',
'Shot': 'sg_sequence',
'Task': 'entity',
'PublishEvent': 'sg_link',
'Version': 'entity',
}
#: Fields to always fetch for every entity.
important_fields_for_all = ['updated_at']
#: Fields to always fetch: maps entity type to a list of fields.
important_fields = {
'Asset': ['code', 'sg_asset_type'],
'HumanUser': ['firstname', 'lastname', 'email', 'login'],
'Project': ['name'],
'PublishEvent': ['code', 'sg_type', 'sg_version'],
'Sequence': ['code'],
'Shot': ['code'],
'Step': ['code', 'short_name', 'entity_type'],
'Task': ['step', 'content'],
'Version': ['code', 'sg_task'],
}
#: Links to always fetch: maps entity type to a mapping of field names to
#: a list of their potential entity types.
important_links = {
'Asset': {
'project': ['Project'],
},
'Sequence': {
'project': ['Project'],
},
'Shot': {
'project': ['Project'],
'sg_sequence': ['Sequence'],
},
'Task': {
'project': ['Project'],
'entity': ['Asset', 'Shot'],
'step': ['Step'],
},
'PublishEvent': {
'project': ['Project'],
'sg_link': ['Task'],
},
}
def __init__(self, shotgun=None, schema=None, *args, **kwargs):
# Lookup strings in the script registry.
if isinstance(shotgun, basestring):
import shotgun_api3_registry
shotgun = shotgun_api3_registry.connect(shotgun, *args, **kwargs)
# Wrap basic shotgun instances in our threader.
self._shotgun = ShotgunPool.wrap(shotgun)
self._shotgun_args = None if shotgun else args
self._shotgun_kwargs = None if shotgun else kwargs
self._schema = schema
self._cache = {}
self._thread_pool = None
@classmethod
def from_entity(cls, entity, *args, **kwargs):
if isinstance(entity, Entity) and entity.session:
return entity.session
else:
return cls(*args, **kwargs)
@property
def shotgun(self):
# Automatically generate Shotgun when we need one.
# We use False to track that there should be nothing set here.
if self._shotgun is None:
import shotgun_api3_registry
self._shotgun = ShotgunPool.wrap(shotgun_api3_registry.connect(
*self._shotgun_args, **self._shotgun_kwargs
))
return self._shotgun or None
@property
def schema(self):
# Automatically load the schema when we need one.
# We use False to track that there should be nothing set here.
if self._schema is None:
# Wait on caching a schema here until there is a Shotgun.
shotgun = self.shotgun
if not shotgun:
return
try:
self._schema = Schema.from_cache(shotgun)
except ValueError:
self._schema = False
return self._schema or None
def __getattr__(self, name):
return getattr(self.shotgun, name)
def __reduce__(self):
# We assume that the shotgun and sgcache will automatically regenerate.
# Generally, the user should be very careful when pickling sessions.
shotgun = False if self._shotgun is False else None
schema = False if self._schema is False else None
return self.__class__, (shotgun, schema)
def merge(self, data, over=None, created_at=None, _depth=0, _memo=None):
"""Import data containing raw entities into the session.
This will effectively return a copy of any nested structure of lists,
tuples, and dicts, while converting any dicts which look like entities
into an :class:`.Entity`. The returned structure is a copy of the
original.
:param dict data: The raw fields to convert into an :class:`~sgsession.entity.Entity`.
:param bool over: Control for merge behaviour with existing data.
``True`` results in the new data taking precedence, and ``False``
the old data. The default of ``None`` will automatically decide
based on the ``updated_at`` field.
:return: The :class:`~sgsession.entity.Entity`. This will not be a new instance if the
entity was already in the session, but it will have all the newly
merged data in it.
"""
# Track down where we are getting string created_at from.
if created_at and isinstance(created_at, basestring):
# This can be a huge message...
log.error('string created_at (%r) given to Session.merge at depth %d; data to merge: %r' % (
created_at, _depth, data,
))
created_at = parse_isotime(created_at)
# Since we are dealing with recursive structures, we need to memoize
# the outputs by all of the inputs as we create them.
if _memo is None:
_memo = {}
id_ = id(data)
if id_ in _memo:
return _memo[id_]
_memo[id_] = _recursion_sentinel
obj = self._merge(data, over, created_at, _depth, _memo)
# If something fails at setting up a recursive object before returning,
# then we want to fail very hard.
if obj is _recursion_sentinel:
raise RuntimeError('un-memoized recursion')
_memo[id_] = obj
return obj
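# A hedged usage sketch (not from the original source): merge() turns raw
# Shotgun-style dicts into session-owned Entity instances, de-duplicated by
# their (type, id) cache key, so merging the same payload twice returns the
# same object.
#
#     shot = session.merge({'type': 'Shot', 'id': 1234, 'code': 'SH001'})
#     again = session.merge({'type': 'Shot', 'id': 1234})
#     assert shot is again  # both names refer to the cached Entity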
def _merge(self, data, over, created_at, depth, memo):
# No need to worry about resolving schema here, since Entity.__setitem__
# will ultimately do it.
# Pass through entities if they are owned by us.
if isinstance(data, Entity) and data.session is self:
return data
# Contents of lists and tuples should get merged.
if isinstance(data, list):
# Lists can be cyclic; memoize them.
memo[id(data)] = new = type(data)()
new.extend(self.merge(x, over, created_at, depth + 1, memo) for x in data)
return new
if isinstance(data, tuple):
return type(data)(self.merge(x, over, created_at, depth + 1, memo) for x in data)
if not isinstance(data, dict):
return data
# Non-entity dicts have all their values merged.
if not ('type' in data and 'id' in data):
memo[id(data)] = new = type(data)() # Setup recursion block.
new.update((k, self.merge(v, over, created_at, depth + 1, memo)) for k, v in data.iteritems())
return new
# If it already exists, then merge this into the old one.
new = Entity(data['type'], data['id'], self)
key = new.cache_key
entity = self._cache.setdefault(new.cache_key, new)
memo[id(data)] = entity # Setup recursion block.
entity._update(data, over, created_at, depth + 1, memo)
return entity
def parse_user_input(self, spec, entity_types=None, fetch_project_from_page=False):
spec = spec.strip()
# JSON.
if spec.startswith('{') and spec.endswith('}'):
raw = json.loads(spec)
if 'type' not in raw or 'id' not in raw:
raise ValueError('incomplete JSON entity', spec)
if not isinstance(raw['type'], basestring) or not isinstance(raw['id'], int):
raise ValueError('malformed JSON entity', spec)
return self.merge(raw)
# Accept integer IDs if we know we want a specific type.
if spec.isdigit():
if isinstance(entity_types, basestring):
entity_types = [entity_types]
if entity_types and len(entity_types) == 1:
return self.merge({'type': entity_types[0], 'id': int(spec)})
else:
raise ValueError('int-only spec without single entity_types', spec, entity_types)
# Shotgun detail URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/detail/([A-Za-z]+)/(\d+)', spec)
if m:
return self.merge({'type': m.group(1), 'id': int(m.group(2))})
# Shotgun project overview URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/page/\d+#([A-Z][A-Za-z]+)_(\d+)_', spec)
if m:
return self.merge({'type': m.group(1), 'id': int(m.group(2))})
# Shotgun page URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/page/(\d+)$', spec)
if m:
if not fetch_project_from_page:
raise ValueError('page URL without fetch_project_from_page', spec)
page = self.get('Page', int(m.group(1)), ['project'])
if not page:
raise ValueError('Page entity not found for page URL', spec)
if page.get('project'):
return self.merge(page['project'])
raise ValueError('page URL has no project', spec)
# Direct entities. E.g. `shot:12345?code=whatever`
m = re.match(r'^([A-Za-z]{3,})[:_ -](\d+)(?:_|$|\?(\S*))', spec)
if m:
type_, id_, query = m.groups()
raw = {
'type': type_[0].upper() + type_[1:],
'id': int(id_),
}
if query:
for k, v in urlparse.parse_qsl(query, keep_blank_values=True):
raw.setdefault(k, v)
return self.merge(raw)
raise ValueError('could not parse entity spec', spec)
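# Illustrative inputs accepted above (a sketch based on the regexes; the URLs
# and ids are hypothetical):
#
#     session.parse_user_input('{"type": "Shot", "id": 1234}')
#     session.parse_user_input('1234', entity_types='Shot')
#     session.parse_user_input('https://studio.shotgunstudio.com/detail/Shot/1234')
#     session.parse_user_input('shot:1234?code=SH001')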
def _submit_concurrent(self, func, *args, **kwargs):
if not self._thread_pool:
from concurrent.futures import ThreadPoolExecutor
self._thread_pool = ThreadPoolExecutor(8)
return self._thread_pool.submit(func, *args, **kwargs)
@_asyncable
def create(self, type, data=None, return_fields=None, **kwargs):
"""Create an entity of the given type and data.
:return: The new :class:`~sgsession.entity.Entity`.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-create>`_
"""
if data is not None and kwargs:
# This isn't quite ideal, but it doesn't let much confusion get through.
raise TypeError('provide only one of data or **kwargs')
data = self._minimize_entities(data if data is not None else kwargs)
if self.schema:
type = self.schema.resolve_one_entity(type)
data = self.schema.resolve_structure(data, type)
return_fields = self.schema.resolve_field(type, return_fields) if return_fields else []
return_fields = self._add_default_fields(type, return_fields)
return self.merge(self.shotgun.create(type, data, return_fields))
@_asyncable
def update(self, *args, **kwargs):
"""Update the given entity with the given fields.
.. todo:: Add this to the Entity.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-update>`_
"""
# Grab the "type" or 1st argument.
if not (args or kwargs):
raise TypeError('no arguments')
type_ = kwargs.pop('type', None)
if type_ is None:
if not args:
raise TypeError('must provide "type" kwarg or positional type argument')
type_ = args[0]
args = args[1:]
# Figure out if we were given an Entity, or an entity type (string)
if isinstance(type_, Entity):
ids = [type_['id']]
type_ = type_['type']
do_batch = False
elif isinstance(type_, basestring):
ids = kwargs.pop('id', None) or args[0]
args = args[1:]
do_batch = not isinstance(ids, int)
ids = list(ids) if do_batch else [ids]
elif isinstance(type_, (list, tuple)):
do_batch = True
entities = list(type_)
if not entities:
raise ValueError('entity sequence is empty')
sentinel = object()
non_entity = next((e for e in entities if not isinstance(e, Entity)), sentinel)
if non_entity is not sentinel:
raise ValueError('entity sequence contains non-Entity', non_entity)
type_ = entities[0]['type']
mismatched = next((e for e in entities if e['type'] != type_), None)
if mismatched is not None:
raise ValueError('mismatched entity types', type_, mismatched['type'])
ids = [e['id'] for e in entities]
else:
raise TypeError('first argument must be an Entity, list of entities, or string (entity type)', type_)
data = {}
for arg in args:
data.update(arg)
data.update(kwargs)
if not data:
raise ValueError('no data provided')
data = self._minimize_entities(data)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
data = self.schema.resolve_structure(data, type_)
if do_batch:
return self.batch([{
'request_type': 'update',
'entity_type': type_,
'entity_id': id_,
'data': data,
} for id_ in ids])
else:
return self.merge(self.shotgun.update(type_, ids[0], data), over=True)
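# A hedged sketch of the call shapes update() accepts (the field names are
# illustrative only):
#
#     session.update(shot, sg_status_list='ip')               # single Entity
#     session.update('Shot', 1234, {'sg_status_list': 'ip'})  # type + id + data
#     session.update([shot_a, shot_b], sg_status_list='ip')   # batched update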
@_asyncable
def batch(self, requests):
"""Perform a series of requests in a transaction.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-batch>`_
"""
requests = self._minimize_entities(requests)
if self.schema:
requests = self.schema.resolve_structure(requests)
return [self.merge(x, over=True) if isinstance(x, dict) else x for x in self.shotgun.batch(requests)]
def _add_default_fields(self, type_, fields):
fields = set(fields or ['id'])
# Add important fields for this type.
fields.update(self.important_fields_for_all)
fields.update(self.important_fields.get(type_, []))
# Add parent.
parent_field = self.parent_fields.get(type_)
if parent_field:
fields.add(parent_field)
# Add implied owners of deep-fields.
implied = set()
for field in fields:
parts = field.split('.')
for i in xrange(2, len(parts) + 1, 2):
implied.add('.'.join(parts[:i]) + '.id')
fields.update(implied)
# Add important deep-fields for requested type.
for local_field, link_types in self.important_links.get(type_, {}).iteritems():
fields.add(local_field)
for link_type in link_types:
remote_fields = self.important_fields.get(link_type, [])
remote_links = self.important_links.get(link_type, {})
for remote_field in itertools.chain(self.important_fields_for_all, remote_fields, remote_links.iterkeys()):
fields.add('%s.%s.%s' % (local_field, link_type, remote_field))
return sorted(fields)
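# A worked example of the expansion above, using the important_* tables
# defined at the top of this class (a sketch, not captured output):
#
#     session._add_default_fields('Shot', ['sg_status_list'])
#     # -> also includes 'code', 'updated_at', the parent 'sg_sequence', and
#     #    deep fields such as 'sg_sequence.Sequence.code' and
#     #    'sg_sequence.Sequence.project'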
def _minimize_entities(self, data):
if isinstance(data, dict):
# Attachments must not be minimized, since they are often
# merged in with their own metadata. If we special-cased merging
# them, this could be a bit smarter and send only what is
# necessary.
if data.get('type') == 'Attachment':
return data
if 'type' in data and 'id' in data:
return dict(type=data['type'], id=data['id'])
return dict((k, self._minimize_entities(v)) for k, v in data.iteritems())
if isinstance(data, (list, tuple)):
return [self._minimize_entities(x) for x in data]
return data
@_asyncable
def find(self, type_, filters, fields=None, *args, **kwargs):
"""Find entities.
:return: :class:`list` of found :class:`~sgsession.entity.Entity`.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-find>`_
"""
merge = kwargs.pop('merge', True)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
if kwargs.pop('add_default_fields', True):
fields = self._add_default_fields(type_, fields)
# Expand braces in fields.
expanded_fields = set()
for field in fields:
expanded_fields.update(expand_braces(field))
fields = sorted(expanded_fields)
# Resolve names in fields.
if self.schema:
fields = self.schema.resolve_field(type_, fields) if fields else []
filters = self._minimize_entities(filters)
# Resolve names in filters.
if self.schema and isinstance(filters, (list, tuple)):
for i, old_filter in enumerate(filters):
filter_ = [self.schema.resolve_one_field(type_, old_filter[0])]
filter_.extend(old_filter[1:])
filters[i] = filter_
result = self.shotgun.find(type_, filters, fields, *args, **kwargs)
return [self.merge(x, over=True) for x in result] if merge else result
@_asyncable
def find_one(self, entity_type, filters, fields=None, order=None,
filter_operator=None, retired_only=False, **kwargs):
"""Find one entity.
:return: :class:`~sgsession.entity.Entity` or ``None``.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-find_one>`_
"""
results = self.find(entity_type, filters, fields, order,
filter_operator, 1, retired_only, **kwargs)
if results:
return results[0]
return None
def find_iter(self, *args, **kwargs):
limit = kwargs.pop('limit', None) or None
per_page = kwargs.pop('per_page', limit or 500) # this is the default
async_count = kwargs.pop('async_count', 1)
kwargs['limit'] = per_page
kwargs['async'] = True
page = 1
futures = []
done = False
while not done:
# extract all complete results; we wait for the first one, but
# then take as many others as are already done
rows = futures.pop(0).result() if futures else None
while rows and futures and futures[0].done():
rows.extend(futures.pop(0).result())
# determine if we are done yet
if rows is not None:
# print 'got', len(rows)
# we hit the end of results
if not rows or len(rows) < per_page:
done = True
# we hit the total requested
if limit is not None:
limit -= len(rows)
if limit <= 0:
done = True
# queue up the next queries
while not done and len(futures) < async_count:
# print 'queing', page
kwargs['page'] = page
futures.append(self.find(*args, **kwargs))
page += 1
# yield results
if rows is not None:
for x in rows:
yield x
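# A hedged usage sketch for find_iter (the filter and names are illustrative):
# pages are requested on the thread pool ahead of consumption, so results can
# be processed while the next page is still in flight.
#
#     for shot in session.find_iter('Shot', [('project', 'is', project)],
#                                   ['code'], per_page=100, async_count=2):
#         print shot['code']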
@_asyncable
def delete(self, entity, entity_id=None):
"""Delete one entity.
.. warning:: This session will **not** forget about the deleted entity,
and all links from other entities will remain intact.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-delete>`_
"""
if not isinstance(entity, Entity):
if self.schema:
entity = self.schema.resolve_one_entity(entity)
if not entity_id:
raise ValueError('must provide entity_id')
entity = self.merge({'type': entity, 'id': entity_id})
res = self.shotgun.delete(entity['type'], entity['id'])
entity._exists = False
return res
@_asyncable
def get(self, type_, id_, fields=None, fetch=True):
"""Get one entity by type and ID.
:param str type_: The entity type to lookup.
:param int id_: The entity ID to lookup. Accepts ``list`` or ``tuple``
of IDs, and returns the same.
:param bool fetch: Request this entity from the server if not cached?
"""
# Handle multiple IDs.
if isinstance(id_, (list, tuple)):
return type(id_)(self.get(type_, x) for x in id_)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
try:
entity = self._cache[(type_, id_)]
except KeyError:
    if not fetch:
        return None
    return self.find_one(type_, [('id', 'is', id_)], fields or [])
else:
if fetch and fields:
entity.fetch(fields)
return entity
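# Illustrative use of get() (a sketch): cached entities come back without a
# server round trip, while unknown ids fall through to find_one().
#
#     shot = session.get('Shot', 1234, fields=['code'])
#     shots = session.get('Shot', [1234, 5678])  # list in, list out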
def _fetch(self, entities, fields, force=False):
types = list(set(x['type'] for x in entities))
if len(types) > 1:
raise ValueError('can only fetch one type at once')
type_ = types[0]
ids_ = set()
for e in entities:
if force or any(f not in e for f in fields):
ids_.add(e['id'])
if ids_:
res = self.find(
type_,
[['id', 'in'] + list(ids_)],
fields,
)
missing = ids_.difference(e['id'] for e in res)
# Update _exists on the entities.
for e in entities:
e._exists = e['id'] not in missing
if missing:
raise EntityNotFoundError('%s %s not found' % (type_, ', '.join(map(str, sorted(missing)))))
@_assert_ownership
@_asyncable
def filter_exists(self, entities, check=True, force=False):
"""Return the subset of given entities which exist (non-retired).
:param list entities: An iterable of entities to check.
:param bool check: Should the server be consulted if we don't already know?
:param bool force: Should we always check the server?
:returns set: The entities which exist, or which we aren't sure about.
This will handle multiple entity-types in multiple requests.
"""
if check:
by_type = {}
for x in entities:
by_type.setdefault(x['type'], set()).add(x)
for type_, sub_entities in by_type.iteritems():
if force or any(e._exists is None for e in sub_entities):
found = self.find(type_, [['id', 'in'] + list(e['id'] for e in sub_entities)])
found_ids = set(e['id'] for e in found)
for e in sub_entities:
e._exists = e['id'] in found_ids
return set(e for e in entities if (e._exists or e._exists is None))
@_assert_ownership
@_asyncable
def fetch(self, to_fetch, fields, force=False):
"""Fetch the named fields on the given entities.
:param list to_fetch: Entities to fetch fields for.
:param list fields: The names of fields to fetch on those entities.
:param bool force: Perform a request even if we already have this data?
This will safely handle multiple entity types at the same time, and
by default will only make requests of the server if some of the data
does not already exist.
.. note:: This does not assert that all "important" fields exist. See
:meth:`fetch_core`.
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self._fetch(entities, fields, force=force)
@_assert_ownership
@_asyncable
def fetch_backrefs(self, to_fetch, backref_type, field):
"""Fetch requested backrefs on the given entities.
:param list to_fetch: Entities to get backrefs on.
:param str backref_type: The entity type to look for backrefs on.
:param str field: The name of the field to look for backrefs in.
::
# Find all tasks which refer to this shot.
>>> session.fetch_backrefs([shot], 'Task', 'entity')
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self.find(backref_type, [[field, 'in'] + [x.minimal for x in entities]])
@_assert_ownership
@_asyncable
def fetch_core(self, to_fetch):
"""Assert all "important" fields exist, and fetch them if they do not.
:param list to_fetch: The entities to get the core fields on.
This will populate all important fields, and important fields on linked
entities.
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self._fetch(entities, itertools.chain(
self.important_fields_for_all,
self.important_fields.get(type_) or (),
self.important_links.get(type_, {}).iterkeys(),
))
@_assert_ownership
@_asyncable
def fetch_heirarchy(self, to_fetch):
"""Populate the parents as far up as we can go, and return all involved.
With (new-ish) arbitrarily-deep links on Shotgun, this method could be
made quite a bit more efficient, since it should be able to request
the entire hierarchy for any given type at once.
See :attr:`parent_fields`.
"""
all_nodes = set()
to_resolve = set()
loop_count = 0
while to_fetch or to_resolve:
# Just in case (because we have messed this up a few times before).
if loop_count > 20:
raise RuntimeError('likely infinite loop')
loop_count += 1
# Go as far up as we already have for the specified entities.
for entity in to_fetch:
all_nodes.add(entity)
while entity.parent(fetch=False):
entity = entity.parent()
all_nodes.add(entity)
if entity['type'] != 'Project':
to_resolve.add(entity)
# There is nothing new to fetch; bail!
if not to_resolve:
break
# Find the type that we have the most entities of, and remove them
# from the list to resolve.
by_type = {}
for x in to_resolve:
all_nodes.add(x)
by_type.setdefault(x['type'], set()).add(x)
type_, to_fetch = max(by_type.iteritems(), key=lambda x: len(x[1]))
to_resolve.difference_update(to_fetch)
# Fetch the parent names.
ids = [x['id'] for x in to_fetch]
parent_name = self.parent_fields[type_]
found = self.find(type_, [['id', 'in'] + ids], [parent_name])
# Make sure we actually get something back for the parent field.
no_parent = [e['id'] for e in found if not e.get(parent_name)]
if no_parent:
raise ValueError('%s %s %s no %s' % (
type_,
', '.join(str(id_) for id_ in sorted(no_parent)),
'have' if len(no_parent) > 1 else 'has',
parent_name,
))
# Track those which didn't come back from the API. Normally, this
# wouldn't happen, but can result from a race condition OR from
# an error on the server side (or a caching layer).
missing = to_fetch.difference(found)
if missing:
raise EntityNotFoundError('%s %s %s not exist' % (
type_,
', '.join(str(id_) for id_ in sorted(e['id'] for e in missing)),
'do' if len(missing) > 1 else 'does',
))
return list(all_nodes)
_guessed_user_lock = threading.Lock()
@_asyncable
def guess_user(self, filter=('email', 'starts_with', '{login}@'), fields=(), fetch=True):
"""Guess Shotgun user from current login name.
Looks for $SHOTGUN_USER_ID in your environment, then a user with an
email that has the login name as the account.
:returns: ``dict`` of ``HumanUser``, or ``None``.
"""
with self._guessed_user_lock:
try:
user = self._guessed_user
except AttributeError:
user = self._guess_user(filter, fields, fetch)
if user:
Session._guessed_user = self.merge(user).as_dict()
else:
Session._guessed_user = None
if not user:
return
entity = self.merge(user)
if fields:
entity.fetch(fields)
return entity
def _guess_user(self, filter, fields, fetch):
# This envvar is used only for this purpose (at Western Post)
id_ = os.environ.get('SHOTGUN_USER_ID')
if id_:
return {'type': 'HumanUser', 'id': int(id_)}
if not fetch:
return
# This envvar is more general, and respected by shotgun_api3_registry.
login = os.environ.get('SHOTGUN_SUDO_AS_LOGIN')
if login:
return self.find_one('HumanUser', [
('login', 'is', login),
], fields or ())
# Finally, search for a user based on the current login.
try:
login = os.getlogin()
except OSError as e:
# this fails on the farm, so fall back onto the envvar
if e.errno != errno.ENOTTY:
raise
login = os.environ.get('USER')
filter_ = tuple(x.format(login=login) for x in filter)
return self.find_one('HumanUser', [filter_], fields)
|
|
# The MIT License
#
# Copyright (c) 2008 William T. Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""A simple RESTful status framework on Google App Engine
This app's API should be reasonably clean and easily targeted by other
clients, like a Flex app or a desktop program.
"""
__author__ = 'Kyle Conroy'
import string
import re
import os
import cgi
import logging
from datetime import timedelta
from datetime import date
from datetime import datetime
from datetime import time
from dateutil.parser import parse
from google.appengine.api import memcache
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from handlers import restful
from time import mktime
from utils import authorized
from utils import slugify
from models import List, Status, Event, Service, Image
from wsgiref.handlers import format_date_time
def invalidate_cache():
all_pages = memcache.get("__all_pages__")
if all_pages is not None:
for page,d in all_pages.items():
if not memcache.delete(page):
logging.error("Memcache delete failed on %s", page)
if not memcache.delete("__all_pages__"):
logging.error("Memcache delete failed on __all_pages__")
taskqueue.add(url='/', method="GET")
def aware_to_naive(d):
"""Convert an aware date to an naive date, in UTC"""
offset = d.utcoffset()
if offset:
d = d.replace(tzinfo=None)
d = d - offset
return d
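# A small sketch of the conversion above: an ISO timestamp with a UTC offset
# is shifted to UTC and stripped of its tzinfo, so it can be compared with the
# naive datetimes stored in the datastore.
#
#     aware_to_naive(parse("2016-01-01T12:00:00-05:00"))
#     # -> datetime.datetime(2016, 1, 1, 17, 0)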
class NotFoundHandler(restful.Controller):
def get(self):
self.error(404, "Can't find resource")
class ListsListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = List.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
data = {"lists": data}
self.json(data)
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
if not name or not description:
self.error(400, "Bad Data: Name: %s, Description: %s" \
% (name, description))
return
slug = slugify.slugify(name)
existing_s = List.get_by_slug(slug)
if existing_s:
self.error(404, "A list with this name already exists")
return
l = List(name=name, slug=slug, description=description)
l.put()
invalidate_cache()
self.response.set_status(201)
self.json(l.rest(self.base_url(version)))
class ListInstanceHandler(restful.Controller):
def get(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "List %s does not exist" % list_slug)
return
self.json(list.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "Service %s does not exist" % list_slug)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
if description:
list.description = description
if name:
list.name = name
if name or description:
invalidate_cache()
list.put()
self.json(list.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "List %s not found" % list_slug)
return
query = Service.all()
query.filter('list =', list)
if query:
for s in query:
s.list = None
s.put()
invalidate_cache()
list.delete()
self.json(list.rest(self.base_url(version)))
class ServicesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = Service.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
data = {"services": data}
self.json(data)
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
slist = self.request.get('list', default_value=None)
l = None
if slist:
l = List.all().filter("slug =", slist).get()
if not name:
self.error(400, "Bad name: %s" % name)
return
if not description:
self.error(400, "Bad description: %s" % description)
return
if slist and not l:
self.error(400, "Bad list slug: %s" % slist)
return
slug = slugify.slugify(name)
existing_s = Service.get_by_slug(slug)
if existing_s:
self.error(404, "A sevice with this name already exists")
return
s = Service(name=name, slug=slug, description=description, list=l)
s.put()
invalidate_cache()
self.response.set_status(201)
self.json(s.rest(self.base_url(version)))
class ServiceInstanceHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s does not exist" % service_slug)
return
self.json(service.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s does not exist" % service_slug)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
list = self.request.get('list', default_value=None)
if description:
service.description = description
if name:
service.name = name
if list:
l = List.all().filter("slug = ", list).get()
if l is None:
self.error(400, "Can't find list with slug %s" % list)
return
service.list = l
if "" == list:
service.list = None
list = "removed"
if name or description or list:
invalidate_cache()
service.put()
self.json(service.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
query = Event.all()
query.filter('service =', service)
if query:
for e in query:
e.delete()
invalidate_cache()
service.delete()
self.json(service.rest(self.base_url(version)))
class EventsListHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
start = self.request.get('start', default_value=None)
end = self.request.get('end', default_value=None)
query = Event.all()
query.filter('service =', service)
if start:
try:
_start = aware_to_naive(parse(start))
query.filter("start >= ", _start)
except (ValueError, TypeError):
self.error(400, "Invalid Date: %s" % start)
return
if end:
try:
_end = aware_to_naive(parse(end))
query.filter("start <=", _end)
except (ValueError, TypeError):
self.error(400, "Invalid Date: %s" % end)
return
query.order('-start')
data = [s.rest(self.base_url(version)) for s in query]
self.json({"events": data})
@authorized.api("admin")
def post(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status_slug = self.request.get("status", default_value=None)
message = self.request.get("message", default_value=None)
informational = self.request.get("informational", default_value=None)
if not message:
self.error(400, "Event message is required")
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
if not status_slug:
event = service.current_event()
if event:
status = event.status
else:
status = Status.get_default()
else:
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "Status %s not found" % status_slug)
return
e = Event(status=status, service=service, message=message)
e.informational = informational and informational == "true"
e.put()
# Queue up a task that calls the Twitter API to make a tweet.
if self.request.get('tweet'):
logging.info('Attempting to post a tweet for the latest event via async GAE task queue.')
taskqueue.add(url='/admin/tweet', params={'service_name': service.name, 'status_name': status.name, 'message': message})
invalidate_cache()
self.json(e.rest(self.base_url(version)))
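# A hypothetical client call against the handler above. The URL route is an
# assumption (routing is defined elsewhere in the app), and the request would
# still need to satisfy the @authorized.api("admin") check. The form fields
# match the ones read above: "message" (required), "status" (optional slug),
# plus the "informational"/"tweet" flags.
#
#     import urllib, urllib2
#     urllib2.urlopen('http://example.appspot.com/api/v1/services/web/events',
#                     urllib.urlencode({'message': 'Deploying fix', 'status': 'down'}))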
class CurrentEventHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
event = service.current_event()
if not event:
self.error(404, "No current event for Service %s" % service_slug)
return
self.json(event.rest(self.base_url(version)))
class EventInstanceHandler(restful.Controller):
def get(self, version, service_slug, sid):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
try:
event = Event.get(db.Key(sid))
except datastore_errors.BadKeyError:
self.error(404, "Event %s not found" % sid)
return
if not event or service.key() != event.service.key():
self.error(404, "No event for Service %s with sid = %s" \
% (service_slug, sid))
return
self.json(event.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, service_slug, sid):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
try:
event = Event.get(db.Key(sid))
except datastore_errors.BadKeyError:
self.error(404, "Event %s not found" % sid)
return
if not event or service.key() != event.service.key():
self.error(404, "No event for Service %s with sid = %s" \
% (service_slug, sid))
return
event.delete()
invalidate_cache()
# Why not JSON?
self.success(event.rest(self.base_url(version)))
class StatusesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = Status.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
self.json({"statuses": data})
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
image_slug = self.request.get('image', default_value=None)
default = self.request.get('default', default_value="false")
if default not in ["true", "false"]:
self.error(400, "Default must be true or false")
return
if not name or not description or not image_slug:
self.error(400, "Bad Data")
return
slug = slugify.slugify(name)
status = Status.get_by_slug(slug)
image = Image.get_by_slug(image_slug)
if status is not None:
self.error(400, "A Status with the slug %s already exists" % slug)
return
if image is None:
msg = "An Image with the slug %s doesn't exist" % image_slug
self.error(400, msg)
return
# Reset default status
if default == "true":
for stat in Status.all().filter("default", True):
stat.default = False
stat.put()
default = default == "true"
status = Status(name=name, slug=slug, description=description,
image=image.path, default=default)
status.put()
invalidate_cache()
self.response.set_status(201)
self.json(status.rest(self.base_url(version)))
class StatusInstanceHandler(restful.Controller):
def get(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "No status with the slug %s found" % status_slug)
return
self.json(status.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "No status with the slug %s found" % status_slug)
return
name = self.request.get('name', default_value=None)
image_slug = self.request.get('image', default_value=None)
image = None
default = self.request.get('default', default_value=None)
description = self.request.get('description', default_value=None)
if image_slug is not None:
image = Image.get_by_slug(image_slug)
if image is None:
self.error(400, "An Image with the "
"slug %s doesn't exist" % image_slug)
return
status.image = image.path
if description is not None:
status.description = description
if default is not None and default in ["false", "true"]:
# Reset default status
if default == "true":
for stat in Status.all().filter("default", True):
stat.default = False
stat.put()
status.default = default == "true"
if name is not None:
status.name = name
if description or name or image or default:
status.put()
invalidate_cache()
self.json(status.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "Status %s not found" % status_slug)
return
# We may want to think more about this
events = Event.all().filter('status =', status).fetch(1000)
for event in events:
event.delete()
status.delete()
self.json(status.rest(self.base_url(version)))
class LevelListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
self.json({"levels": ["NORMAL", "WARNING", "ERROR", "CRITICAL"]})
class ImagesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
host = self.request.headers.get('host', 'nohost')
images = []
for img in Image.all().fetch(1000):
image = {
"url": "http://" + host + "/images/" + img.path,
"icon_set": img.icon_set,
"name": img.slug,
}
images.append(image)
self.json({"images": images})
|
|
import os
import subprocess
import json
import matplotlib.pyplot as plt
import cv2
from numpy.linalg import inv
from numpy import loadtxt
from moving import userTypeNames
from storage import loadTrajectoriesFromSqlite
from cvutils import cvPlot, cvColors, cvGreen, imageBox
colors = cvColors['colorblind']
green = cvGreen['colorblind']
tracking_filename = "tracking.mp4"
highlight_filename = "highlight.mp4"
def clean_video_folder(video_folder):
delete_files(video_folder, excluded_files=[tracking_filename, highlight_filename])
def convert_path_extension(path, extension):
new_video_filename = ''.join(os.path.basename(path).split('.')[:-1])+'.'+extension
return os.path.join(os.path.dirname(path),new_video_filename)
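# A quick sketch of what this helper returns:
#     convert_path_extension('/tmp/final_videos/tracking.mp4', 'avi')
#     # -> '/tmp/final_videos/tracking.avi'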
def create_tracking_video(project_path, video_path):
db_filename = os.path.join(project_path, 'run', 'results.sqlite')
homography_path = os.path.join(project_path, 'homography', 'homography.txt')
videos_folder = os.path.join(project_path, 'final_videos')
output_path = convert_path_extension(os.path.join(videos_folder, tracking_filename), 'avi')
if not os.path.exists(videos_folder):
os.makedirs(videos_folder)
create_trajectory_video(video_path, db_filename, homography_path, output_path)
convert_to_mp4(output_path)
clean_video_folder(videos_folder)
def create_highlight_video(project_path, video_path, list_of_near_misses):
db_filename = os.path.join(project_path, 'run', 'results.sqlite')
homography_path = os.path.join(project_path, 'homography', 'homography.txt')
videos_folder = os.path.join(project_path, "final_videos")
output_path = convert_path_extension(os.path.join(videos_folder, highlight_filename), 'avi')
temp_video_prefix = "temp_highlight_video-"
# Make the videos folder if it doesn't exist
if not os.path.exists(videos_folder):
os.makedirs(videos_folder)
clean_video_folder(videos_folder)
# Slow down by 2x for highlight video
slowdown = 2.0
current_framerate = get_framerate(video_path)
upper_frame_limit = get_number_of_frames(video_path)
for i, near_miss in enumerate(list_of_near_misses):
start_frame, end_frame, object_id1, object_id2 = near_miss
# Create a short video snippet of the near miss interaction
snippet_number = 2*i + 1
print([object_id1, object_id2])
# Create a short tracking video
snippet_path = os.path.join(videos_folder, temp_video_prefix + str(snippet_number) + '.avi')
create_trajectory_video(video_path, db_filename, homography_path, snippet_path, first_frame=max(0, start_frame-30), last_frame=min(upper_frame_limit, end_frame+30), objects_to_label=[object_id1, object_id2])
width, height = get_resolution(snippet_path)
# create title slide image
slide_name = temp_video_prefix + str(2*i)
slide_path = os.path.join(videos_folder, slide_name + '.png')
create_title_slide(width, height, slide_path, object_id1, object_id2)
# create title slide video with 5 second duration
num_frames = int(current_framerate * 5.0 / slowdown)
create_video_from_image(videos_folder, slide_name+'.png', slide_name+'.avi', current_framerate, num_frames)
files = get_list_of_files(videos_folder, temp_video_prefix, 'avi')
combine_videos(files, output_path)
convert_to_mp4(output_path, slowdown=slowdown)
clean_video_folder(videos_folder)
def create_test_config_video(project_path, video_path, output_path, db_path, first_frame, last_frame, video_type):
videos_folder = os.path.dirname(output_path)
homography_path = os.path.join(project_path, 'homography', 'homography.txt')
avi_output_path = convert_path_extension(output_path, 'avi')
if not os.path.exists(videos_folder):
os.makedirs(videos_folder)
create_trajectory_video(video_path, db_path, homography_path, avi_output_path, first_frame=first_frame, last_frame=last_frame, video_type=video_type)
convert_to_mp4(avi_output_path)
## Helpers -- Internal use
def get_video_writer(output_path, framerate, width, height):
codec = 'DIVX'
fourcc = cv2.cv.CV_FOURCC(*codec)
writer = cv2.VideoWriter(output_path, fourcc, framerate, (width, height), True)
return writer
def create_trajectory_video(video_path, db_filename, homography_path, output_path, first_frame=0, last_frame=None, video_type='object', objects_to_label=None, bounding_boxes=False):
'''
Creates a video of tracked trajectories.
video_path: a path to the video on which to overlay
db_filename: path to the database of tracked objects and features
homography_path: the path to the homography.txt of the project
output_path: The path of the video to be created. Please be sure that the output video format works on your system! (mp4 doesn't work on Windows AFAIK)
first_frame: frame to start at
last_frame: frame to end at
video_type: either 'object' or 'feature'
bounding_boxes: boolean indicating whether to show bounding boxes for objects
'''
capture = cv2.VideoCapture(video_path)
width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, first_frame)
frame_num = first_frame
framerate = get_framerate(video_path)
print('Loading objects, please wait...')
objects = loadTrajectoriesFromSqlite(db_filename, video_type, objectNumbers=objects_to_label, withFeatures=bounding_boxes)
homography = inv(loadtxt(homography_path))
ret = True
objectToDeleteIds = []
out = get_video_writer(output_path, framerate, width, height)
while ret:
if last_frame is not None and frame_num > last_frame:
break
ret, img = capture.read()
if ret:
if frame_num % 100 == 0:
print('frame {0}'.format(frame_num))
if len(objectToDeleteIds) > 0:
objects = [o for o in objects if o.getNum() not in objectToDeleteIds]
objectToDeleteIds = []
# plot objects
for obj in objects:
if obj.existsAtInstant(frame_num):
# Only draw for objects that should be labeled, if passed in
if objects_to_label is not None and obj.getNum() not in objects_to_label:
continue
if obj.getLastInstant() == frame_num:
objectToDeleteIds.append(obj.getNum())
# Project the positions with homography
if not hasattr(obj, 'projectedPositions'):
obj.projectedPositions = obj.positions.project(homography)
# Plot its trajectory up to now
cvPlot(img, obj.projectedPositions, colors[obj.getNum()], frame_num-obj.getFirstInstant())
# Plot the object's bounds if it has features
if obj.hasFeatures():
imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frame_num, homography, width, height)
cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), green, 1)
# Put the object id and type if it's an object video.
# If it's a feature video, there are too many numbers, so skip the label.
if video_type == 'object':
objDescription = '{} '.format(obj.num)
if userTypeNames[obj.userType] != 'unknown':
objDescription += userTypeNames[obj.userType][0].upper()
cv2.putText(img, objDescription, obj.projectedPositions[frame_num-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 3, colors[obj.getNum()], thickness=4)
# Write image
out.write(img)
frame_num += 1
capture.release()
out.release()
cv2.destroyAllWindows()
def combine_videos(videos_list, output_path):
video_index = 0
if len(videos_list) == 0:
return
capture = cv2.VideoCapture(videos_list[video_index])
width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
framerate = get_framerate(videos_list[video_index])
out = get_video_writer(output_path, framerate, width, height)
ret = True
while ret:
ret, frame = capture.read()
if not ret:
print("End of video: " + str(videos_list[video_index]))
video_index += 1
if video_index >= len(videos_list):
break
capture = cv2.VideoCapture(videos_list[video_index])
ret, frame = capture.read()
if ret:
out.write(frame)
capture.release()
out.release()
cv2.destroyAllWindows()
def create_video_from_image(folder, image_filename, video_filename, framerate, num_frames):
print('Creating video from image')
input_path = os.path.join(folder, image_filename)
output_path = os.path.join(folder, video_filename)
image = cv2.imread(input_path)
height, width, _ = image.shape
out = get_video_writer(output_path, framerate, width, height)
for i in range(num_frames):
out.write(image)
out.release()
cv2.destroyAllWindows()
def convert_to_mp4(video_path, slowdown=None):
new_video_path = convert_path_extension(video_path, 'mp4')
cmd = ["ffmpeg",
"-y",
"-i", video_path,
"-c:v", "libx264",
"-crf", "23",
"-preset", "veryfast",
"-c:a", "aac",
"-b:a", "128k",
"-vf", "scale=-2:720,format=yuv420p"]
if slowdown is not None:
cmd.extend(["-filter:v", "setpts={:0.1f}*PTS".format(slowdown)])
cmd.append(new_video_path)
subprocess.call(cmd)
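# For reference, a sketch of the command assembled above for "clip.avi" with
# slowdown=2.0 (argument order follows the list construction):
#
#     ffmpeg -y -i clip.avi -c:v libx264 -crf 23 -preset veryfast -c:a aac \
#            -b:a 128k -vf scale=-2:720,format=yuv420p \
#            -filter:v setpts=2.0*PTS clip.mp4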
## File helpers
def delete_files(folder, prefix="", extensions=[], excluded_files=[]):
if os.path.exists(folder):
for file in os.listdir(folder):
if file.startswith(prefix):
s = file.split('.')
has_extension = len(s) == 2
extension_included = True
# If the file has an extension, only delete it when that extension is in the provided list (or no extensions were given)
if has_extension:
e = s[1]
if len(extensions) > 0 and e not in extensions:
extension_included = False
# Delete the file when its extension qualifies (or no extension filter was given) and it is not excluded
if extension_included:
if not file in excluded_files:
os.remove(os.path.join(folder, file))
def get_list_of_files(folder, prefix, extension):
count = 0
files = []
while os.path.exists(os.path.join(folder, prefix+str(count)+"."+extension)):
files.append(os.path.join(folder, prefix+str(count)+"."+extension))
count += 1
return files
### Video Helpers
#### Video Metadata
def get_number_of_frames(videopath):
""" fast cmdline way to get # of frames taken from
http://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg#comment-72336543
"""
if os.path.exists(videopath):
cmd = "ffmpeg -i %s -vcodec copy -acodec copy -f null /dev/null 2>&1 | grep -Eo 'frame= *[0-9]+ *' | grep -Eo '[0-9]+' | tail -1" % videopath
num = subprocess.check_output(cmd, shell=True)
return int(num)
def get_framerate(videopath):
if not os.path.exists(videopath):
print("ERROR: filename %s was not found!" % videopath)
return -1
out = subprocess.check_output(["ffprobe",videopath,"-v","0","-select_streams","v","-print_format","flat","-show_entries","stream=r_frame_rate"])
rate = out.split('=')[1].strip().strip('"').split('/')
if len(rate)==1:
return float(rate[0])
if len(rate)==2:
return float(rate[0])/float(rate[1])
return -1
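# A sketch of the parsing above: with -print_format flat, ffprobe emits a line
# of roughly the form
#     streams.stream.0.r_frame_rate="30000/1001"
# which this function reduces to ~29.97; a plain "25/1" comes back as 25.0.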
def get_resolution(videopath):
"""
Returns
-------
(width, height) in number of pixels
"""
capture = cv2.VideoCapture(videopath)
width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
return (width, height)
#### Video Creation
def create_title_slide(width, height, save_path, object_id1, object_id2, fontsize=None, textcolor='#FFFFFF', facecolor='#000000'):
"""
width: int, width in pixels of desired output
height: int, pixel height of desired output
Note: by default the font size is derived crudely from the video height.
If you want to control fontsize, provide it as a parameter
"""
# heights and widths in matplotlib are units of inches, not pixels
dpi = 100.0
mpl_width = width / dpi
mpl_height = height / dpi
# make figure without frame
fig = plt.figure(frameon=False)
fig.set_size_inches(mpl_width, mpl_height)
ax = fig.add_subplot(111)
# hide axis
ax.set_axis_off()
# set your axis size
ax.axis([0, mpl_width, 0, mpl_height])
# 20 is a good font size for a 400 height image...
# 40 is a good font size for a 800 height image
if not fontsize:
fontsize = height / 20
ax.text(0.2*mpl_width, 0.75*mpl_height, 'Near Miss between', style='italic', fontsize=fontsize, color=textcolor)
ax.text(0.2*mpl_width, 0.6*mpl_height, 'Object {}'.format(object_id1), fontsize=fontsize, color=textcolor)
ax.text(0.2*mpl_width, 0.4*mpl_height, 'Object {}'.format(object_id2), fontsize=fontsize, color=textcolor)
fig.savefig(save_path, dpi=dpi, bbox_inches=0, pad_inches=0, facecolor=facecolor)
|
|
#!/usr/bin/env python3
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2016, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""Generate BlenderSceneGen.h and BlenderScene.cpp from the
data structures in BlenderScene.h to map from *any* DNA to
*our* DNA"""
import sys
import os
import re
inputfile = os.path.join("..","..","code","BlenderScene.h")
outputfile_gen = os.path.join("..","..","code","BlenderSceneGen.h")
outputfile_src = os.path.join("..","..","code","BlenderScene.cpp")
template_gen = "BlenderSceneGen.h.template"
template_src = "BlenderScene.cpp.template"
# workaround for stackoverflowing when reading the linked list of scene objects
# with the usual approach. See embedded notes for details.
Structure_Convert_Base_fullcode = """
template <> void Structure::Convert<Base>( Base& dest, const FileDatabase& db ) const {
// note: as per https://github.com/assimp/assimp/issues/128,
// reading the Object linked list recursively is prone to stack overflow.
// This structure converter is therefore a hand-written exception that
// does it iteratively.
const int initial_pos = db.reader->GetCurrentPos();
std::pair<Base*, int> todo = std::make_pair(&dest, initial_pos);
Base* saved_prev = NULL;
while(true) {
Base& cur_dest = *todo.first;
db.reader->SetCurrentPos(todo.second);
// we know that this is a double-linked, circular list which we never
// traverse backwards, so don't bother resolving the back links.
cur_dest.prev = NULL;
ReadFieldPtr<ErrorPolicy_Warn>(cur_dest.object,"*object",db);
// just record the offset of the blob data and allocate storage.
// Does _not_ invoke Convert() recursively.
const int old = db.reader->GetCurrentPos();
// the return value of ReadFieldPtr indicates whether the object
// was already cached. In this case, we don't need to resolve
// it again.
if(!ReadFieldPtr<ErrorPolicy_Warn>(cur_dest.next,"*next",db, true) && cur_dest.next) {
todo = std::make_pair(&*cur_dest.next, db.reader->GetCurrentPos());
continue;
}
break;
}
db.reader->SetCurrentPos(initial_pos + size);
}
"""
Structure_Convert_decl = """
template <> void Structure :: Convert<{a}> (
{a}& dest,
const FileDatabase& db
) const
"""
Structure_Convert_ptrdecl = """
ReadFieldPtr<{policy}>({destcast}dest.{name_canonical},"{name_dna}",db);"""
Structure_Convert_rawptrdecl = """
{{
boost::shared_ptr<{type}> {name_canonical};
ReadFieldPtr<{policy}>({destcast}{name_canonical},"{name_dna}",db);
dest.{name_canonical} = {name_canonical}.get();
}}"""
Structure_Convert_arraydecl = """
ReadFieldArray<{policy}>({destcast}dest.{name_canonical},"{name_dna}",db);"""
Structure_Convert_arraydecl2d = """
ReadFieldArray2<{policy}>({destcast}dest.{name_canonical},"{name_dna}",db);"""
Structure_Convert_normal = """
ReadField<{policy}>({destcast}dest.{name_canonical},"{name_dna}",db);"""
DNA_RegisterConverters_decl = """
void DNA::RegisterConverters() """
DNA_RegisterConverters_add = """
converters["{a}"] = DNA::FactoryPair( &Structure::Allocate<{a}>, &Structure::Convert<{a}> );"""
map_policy = {
"" : "ErrorPolicy_Igno"
,"IGNO" : "ErrorPolicy_Igno"
,"WARN" : "ErrorPolicy_Warn"
,"FAIL" : "ErrorPolicy_Fail"
}
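# To make the classification concrete, a sketch of how hypothetical member
# lines from BlenderScene.h map onto the templates above (the real members
# differ):
#
#     "int totvert"                           -> ReadField<ErrorPolicy_Igno>
#     "float co[3]"                           -> ReadFieldArray<ErrorPolicy_Igno>
#     "float mat[4][4]"                       -> ReadFieldArray2<ErrorPolicy_Igno>
#     "boost::shared_ptr<Object> object WARN" -> ReadFieldPtr<ErrorPolicy_Warn>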
#
def main():
# -----------------------------------------------------------------------
# Parse structure definitions from BlenderScene.h
input = open(inputfile,"rt").read()
#flags = re.ASCII|re.DOTALL|re.MULTILINE
flags = re.DOTALL|re.MULTILINE
#stripcoms = re.compile(r"/\*(.*?)*\/",flags)
getstruct = re.compile(r"struct\s+(\w+?)\s*(:\s*ElemBase)?\s*\{(.*?)^\}\s*;",flags)
getsmartx = re.compile(r"(std\s*::\s*)?(vector)\s*<\s*(boost\s*::\s*)?shared_(ptr)\s*<\s*(\w+)\s*>\s*>\s*",flags)
getsmartp = re.compile(r"(boost\s*::\s*)?shared_(ptr)\s*<\s*(\w+)\s*>\s*",flags)
getrawp = re.compile(r"(\w+)\s*\*\s*",flags)
getsmarta = re.compile(r"(std\s*::\s*)?(vector)\s*<\s*(\w+)\s*>\s*",flags)
getpolicy = re.compile(r"\s*(WARN|FAIL|IGNO)",flags)
stripenum = re.compile(r"enum\s+(\w+)\s*{.*?\}\s*;",flags)
assert getsmartx and getsmartp and getsmarta and getrawp and getpolicy and stripenum
enums = set()
#re.sub(stripcoms," ",input)
#print(input)
hits = {}
while 1:
match = re.search(getstruct,input)
if match is None:
break
tmp = match.groups()[2]
while 1:
match2 = re.search(stripenum,tmp)
if match2 is None:
break
tmp = tmp[match2.end():]
enums.add(match2.groups()[0])
hits[match.groups()[0]] = list(
filter(lambda x:x[:2] != "//" and len(x),
map(str.strip,
re.sub(stripenum," ",match.groups()[2]).split(";")
)))
input = input[match.end():]
for e in enums:
print("Enum: "+e)
for k,v in hits.items():
out = []
for line in v:
policy = "IGNO"
py = re.search(getpolicy,line)
if not py is None:
policy = py.groups()[0]
line = re.sub(getpolicy,"",line)
ty = re.match(getsmartx,line) or re.match(getsmartp,line) or\
re.match(getsmarta,line) or re.match(getrawp,line)
if ty is None:
ty = line.split(None,1)[0]
else:
if len(ty.groups()) == 1:
ty = ty.groups()[-1] + "$"
elif ty.groups()[1] == "ptr":
ty = ty.groups()[2] + "*"
elif ty.groups()[1] == "vector":
ty = ty.groups()[-1] + ("*" if len(ty.groups()) == 3 else "**")
else:
assert False
#print(line)
sp = line.split(',')
out.append((ty,sp[0].split(None)[-1].strip(),policy))
for m in sp[1:]:
out.append((ty,m.strip(),policy))
v[:] = out
print("Structure {0}".format(k))
for elem in out:
print("\t"+"\t".join(elem))
print("")
output = open(outputfile_gen,"wt")
templt = open(template_gen,"rt").read()
s = ""
# -----------------------------------------------------------------------
# Structure::Convert<T> declarations for all supported structures
for k,v in hits.items():
s += Structure_Convert_decl.format(a=k)+";\n";
output.write(templt.replace("<HERE>",s))
output = open(outputfile_src,"wt")
templt = open(template_src,"rt").read()
s = ""
# -----------------------------------------------------------------------
# Structure::Convert<T> definitions for all supported structures
for k,v in hits.items():
s += "//" + "-"*80
if k == 'Base':
s += Structure_Convert_Base_fullcode
continue
s += Structure_Convert_decl.format(a=k)+ "{ \n";
for type, name, policy in v:
splits = name.split("[",1)
name_canonical = splits[0]
#array_part = "" if len(splits)==1 else "["+splits[1]
is_raw_ptr = not not type.count("$")
ptr_decl = "*"*(type.count("*") + (1 if is_raw_ptr else 0))
name_dna = ptr_decl+name_canonical #+array_part
#required = "false"
policy = map_policy[policy]
destcast = "(int&)" if type in enums else ""
# POINTER
if is_raw_ptr:
type = type.replace('$','')
s += Structure_Convert_rawptrdecl.format(**locals())
elif ptr_decl:
s += Structure_Convert_ptrdecl.format(**locals())
# ARRAY MEMBER
elif name.count('[')==1:
s += Structure_Convert_arraydecl.format(**locals())
elif name.count('[')==2:
s += Structure_Convert_arraydecl2d.format(**locals())
# NORMAL MEMBER
else:
s += Structure_Convert_normal.format(**locals())
s += "\n\n\tdb.reader->IncPtr(size);\n}\n\n"
# -----------------------------------------------------------------------
# DNA::RegisterConverters - collect all available converter functions
# in a std::map<name,converter_proc>
#s += "#if 0\n"
s += "//" + "-"*80 + DNA_RegisterConverters_decl + "{\n"
for k,v in hits.items():
s += DNA_RegisterConverters_add.format(a=k)
s += "\n}\n"
#s += "#endif\n"
output.write(templt.replace("<HERE>",s))
# we got here, so no error
return 0
if __name__ == "__main__":
sys.exit(main())
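# Illustrative note (not part of the generator): how the regexes above classify
# a single member line from BlenderScene.h. All names come from this script;
# the sample lines are assumptions for the sake of the example.
#
#   re.match(getsmartp, "boost::shared_ptr<Object> camera") -> type "Object*",
#       emitted through Structure_Convert_ptrdecl.
#   re.match(getrawp, "Object *parent") -> type "Object$" (raw pointer),
#       emitted through Structure_Convert_rawptrdecl.
#   re.search(getpolicy, "ListBase objects WARN") -> policy "WARN",
#       mapped to ErrorPolicy_Warn via map_policy.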
|
|
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from uuid import uuid1
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, OrganizationMemberType, Project, Team
)
from sentry.permissions import can_remove_project, can_set_public_projects
from sentry.plugins import plugins
from sentry.web.forms.fields import (
CustomTypedChoiceField, RangeField, OriginsField
)
from sentry.web.frontend.base import ProjectView
BLANK_CHOICE = [("", "")]
class EditProjectForm(forms.ModelForm):
name = forms.CharField(label=_('Project Name'), max_length=200,
widget=forms.TextInput(attrs={'placeholder': _('Production')}))
platform = forms.ChoiceField(choices=Project._meta.get_field('platform').get_choices(blank_choice=BLANK_CHOICE),
widget=forms.Select(attrs={'data-placeholder': _('Select a platform')}))
public = forms.BooleanField(required=False,
help_text=_('Imply public access to any event for this project.'))
team = CustomTypedChoiceField(choices=(), coerce=int, required=False)
origins = OriginsField(label=_('Allowed Domains'), required=False,
help_text=_('Separate multiple entries with a newline.'))
token = forms.CharField(label=_('Security token'), required=True,
help_text=_('Outbound requests matching Allowed Domains will have the header "X-Sentry-Token: {token}" appended.'))
resolve_age = RangeField(help_text=_('Treat an event as resolved if it hasn\'t been seen for this amount of time.'),
required=False, min_value=0, max_value=168, step_value=1)
scrub_data = forms.BooleanField(
label=_('Data Scrubber'),
help_text=_('Apply server-side data scrubbing to prevent things like passwords and credit cards from being stored.'),
required=False
)
sensitive_fields = forms.CharField(
label=_('Additional sensitive fields'),
help_text=_('Additional field names to match against when scrubbing data. Separate multiple entries with a newline.'),
widget=forms.Textarea(attrs={
'placeholder': mark_safe(_('e.g. email')),
'class': 'span8',
'rows': '3',
}),
required=False,
)
scrub_ip_address = forms.BooleanField(
label=_('Don\'t store IP Addresses'),
help_text=_('Prevent IP addresses from being stored for new events.'),
required=False
)
class Meta:
fields = ('name', 'platform', 'public', 'team', 'slug')
model = Project
def __init__(self, request, organization, team_list, data, instance, *args, **kwargs):
super(EditProjectForm, self).__init__(data=data, instance=instance, *args, **kwargs)
self.organization = organization
self.team_list = team_list
if not can_set_public_projects(request.user):
del self.fields['public']
self.fields['team'].choices = self.get_team_choices(team_list, instance.team)
self.fields['team'].widget.choices = self.fields['team'].choices
def get_team_label(self, team):
return '%s (%s)' % (team.name, team.slug)
def get_team_choices(self, team_list, default=None):
sorted_team_list = sorted(team_list, key=lambda x: x.name)
choices = []
for team in sorted_team_list:
# TODO: optimize queries
choices.append(
(team.id, self.get_team_label(team))
)
if default is None:
choices.insert(0, (-1, mark_safe('–' * 8)))
elif default not in sorted_team_list:
choices.insert(0, (default.id, self.get_team_label(default)))
return choices
def clean_sensitive_fields(self):
value = self.cleaned_data.get('sensitive_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_team(self):
value = self.cleaned_data.get('team')
if not value:
return
# TODO: why is this not already an int?
value = int(value)
if value == -1:
return
if self.instance.team and value == self.instance.team.id:
return self.instance.team
for team in self.team_list:
if value == team.id:
return team
raise forms.ValidationError('Unable to find chosen team')
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if not slug:
return
exists_qs = Project.objects.filter(
slug=slug,
organization=self.organization
).exclude(id=self.instance.id)
if exists_qs.exists():
raise forms.ValidationError('Another project is already using that slug')
return slug
class ProjectSettingsView(ProjectView):
required_access = OrganizationMemberType.ADMIN
def get_default_context(self, request, **kwargs):
context = super(ProjectSettingsView, self).get_default_context(request, **kwargs)
context.update({
'can_remove_project': can_remove_project(request.user, kwargs['project']),
})
return context
def has_permission(self, request, organization, team, project):
if project is None:
return False
if request.user.is_superuser:
return True
result = plugins.first('has_perm', request.user, 'edit_project', project)
if result is False:
return False
return True
def get_form(self, request, project):
organization = project.organization
if request.user.is_superuser:
accessing_user = organization.owner
else:
accessing_user = request.user
team_list = Team.objects.get_for_user(
organization=organization,
user=accessing_user,
access=OrganizationMemberType.ADMIN,
)
# TODO(dcramer): this update should happen within a lock
security_token = project.get_option('sentry:token', None)
if security_token is None:
security_token = uuid1().hex
project.update_option('sentry:token', security_token)
return EditProjectForm(
request, organization, team_list, request.POST or None,
instance=project, initial={
'origins': '\n'.join(project.get_option('sentry:origins', None) or []),
'token': security_token,
'resolve_age': int(project.get_option('sentry:resolve_age', 0)),
'scrub_data': bool(project.get_option('sentry:scrub_data', True)),
'sensitive_fields': '\n'.join(project.get_option('sentry:sensitive_fields', None) or []),
'scrub_ip_address': bool(project.get_option('sentry:scrub_ip_address', False)),
},
)
def handle(self, request, organization, team, project):
form = self.get_form(request, project)
if form.is_valid():
project = form.save()
for opt in ('origins', 'resolve_age', 'scrub_data', 'sensitive_fields',
'scrub_ip_address', 'token'):
value = form.cleaned_data.get(opt)
if value is None:
project.delete_option('sentry:%s' % (opt,))
else:
project.update_option('sentry:%s' % (opt,), value)
AuditLogEntry.objects.create(
organization=organization,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_EDIT,
data=project.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
_('Changes to your project were saved.'))
redirect = reverse('sentry-manage-project', args=[project.organization.slug, project.slug])
return HttpResponseRedirect(redirect)
context = {
'form': form,
'page': 'details',
}
return self.respond('sentry/projects/manage.html', context)
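# Hedged wiring sketch (assumption, not taken from the Sentry source): shows
# roughly how this class-based view could be mounted in a urlconf. The URL
# regex and helper name are illustrative; only ProjectSettingsView and the
# 'sentry-manage-project' URL name (used via reverse() above) come from this
# file.
def _example_urlpatterns():
    from django.conf.urls import url
    return [
        url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/$',
            ProjectSettingsView.as_view(),
            name='sentry-manage-project'),
    ]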
|
|
import ast
import os
import re
from ate import exception, utils
variable_regexp = r"\$([\w_]+)"
function_regexp = r"\$\{[\w_]+\([\$\w_ =,]*\)\}"
function_regexp_compile = re.compile(r"^\$\{([\w_]+)\(([\$\w_ =,]*)\)\}$")
def extract_variables(content):
""" extract all variable names from content, which is in format $variable
@param (str) content
@return (list) variable name list
e.g. $variable => ["variable"]
/blog/$postid => ["postid"]
/$var1/$var2 => ["var1", "var2"]
abc => []
"""
try:
return re.findall(variable_regexp, content)
except TypeError:
return []
def extract_functions(content):
""" extract all functions from string content, which are in format ${fun()}
@param (str) content
@return (list) functions list
e.g. ${func(5)} => ["${func(5)}"]
${func(a=1, b=2)} => ["${func(a=1, b=2)}"]
         /api/1000?_t=${get_timestamp()} => ["${get_timestamp()}"]
         /api/${add(1, 2)} => ["${add(1, 2)}"]
"/api/${add(1, 2)}?_t=${get_timestamp()}" => ["${add(1, 2)}", "${get_timestamp()}"]
"""
try:
return re.findall(function_regexp, content)
except TypeError:
return []
def parse_string_value(str_value):
""" parse string to number if possible
e.g. "123" => 123
"12.2" => 12.3
"abc" => "abc"
"$var" => "$var"
"""
try:
return ast.literal_eval(str_value)
except ValueError:
return str_value
except SyntaxError:
# e.g. $var, ${func}
return str_value
def parse_function(content):
""" parse function name and args from string content.
@param (str) content
@return (dict) function name and args
e.g. ${func()} => {'func_name': 'func', 'args': [], 'kwargs': {}}
${func(5)} => {'func_name': 'func', 'args': [5], 'kwargs': {}}
${func(1, 2)} => {'func_name': 'func', 'args': [1, 2], 'kwargs': {}}
${func(a=1, b=2)} => {'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}}
${func(1, 2, a=3, b=4)} => {'func_name': 'func', 'args': [1, 2], 'kwargs': {'a':3, 'b':4}}
"""
function_meta = {
"args": [],
"kwargs": {}
}
matched = function_regexp_compile.match(content)
function_meta["func_name"] = matched.group(1)
args_str = matched.group(2).replace(" ", "")
if args_str == "":
return function_meta
args_list = args_str.split(',')
for arg in args_list:
if '=' in arg:
key, value = arg.split('=')
function_meta["kwargs"][key] = parse_string_value(value)
else:
function_meta["args"].append(parse_string_value(arg))
return function_meta
class TestcaseParser(object):
def __init__(self, variables_binds={}, functions_binds={}, file_path=None):
self.bind_variables(variables_binds)
self.bind_functions(functions_binds)
self.file_path = file_path
def bind_variables(self, variables_binds):
""" bind variables to current testcase parser
@param (dict) variables_binds, variables binds mapping
{
"authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
"random": "A2dEx",
"data": {"name": "user", "password": "123456"},
"uuid": 1000
}
"""
self.variables_binds = variables_binds
def bind_functions(self, functions_binds):
""" bind functions to current testcase parser
@param (dict) functions_binds, functions binds mapping
{
"add_two_nums": lambda a, b=1: a + b
}
"""
self.functions_binds = functions_binds
def get_bind_item(self, item_type, item_name):
if item_type == "function":
if item_name in self.functions_binds:
return self.functions_binds[item_name]
elif item_type == "variable":
if item_name in self.variables_binds:
return self.variables_binds[item_name]
else:
raise exception.ParamsError("bind item should only be function or variable.")
try:
assert self.file_path is not None
return utils.search_conf_item(self.file_path, item_type, item_name)
except (AssertionError, exception.FunctionNotFound):
raise exception.ParamsError(
"{} is not defined in bind {}s!".format(item_name, item_type))
def eval_content_functions(self, content):
functions_list = extract_functions(content)
for func_content in functions_list:
function_meta = parse_function(func_content)
func_name = function_meta['func_name']
func = self.get_bind_item("function", func_name)
args = function_meta.get('args', [])
kwargs = function_meta.get('kwargs', {})
args = self.parse_content_with_bindings(args)
kwargs = self.parse_content_with_bindings(kwargs)
eval_value = func(*args, **kwargs)
if func_content == content:
# content is a variable
content = eval_value
else:
# content contains one or many variables
content = content.replace(
func_content,
str(eval_value), 1
)
return content
def eval_content_variables(self, content):
""" replace all variables of string content with mapping value.
@param (str) content
@return (str) parsed content
e.g.
variable_mapping = {
"var_1": "abc",
"var_2": "def"
}
$var_1 => "abc"
$var_1#XYZ => "abc#XYZ"
/$var_1/$var_2/var3 => "/abc/def/var3"
${func($var_1, $var_2, xyz)} => "${func(abc, def, xyz)}"
"""
variables_list = extract_variables(content)
for variable_name in variables_list:
variable_value = self.get_bind_item("variable", variable_name)
if "${}".format(variable_name) == content:
# content is a variable
content = variable_value
else:
# content contains one or many variables
content = content.replace(
"${}".format(variable_name),
str(variable_value), 1
)
return content
def parse_content_with_bindings(self, content):
""" parse content recursively, each variable and function in content will be evaluated.
@param (dict) content in any data structure
{
"url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1, 1)}",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "$authorization",
"random": "$random",
"sum": "${add_two_nums(1, 2)}"
},
"body": "$data"
}
@return (dict) parsed content with evaluated bind values
{
"url": "http://127.0.0.1:5000/api/users/1000/2",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
"random": "A2dEx",
"sum": 3
},
"body": {"name": "user", "password": "123456"}
}
"""
if isinstance(content, (list, tuple)):
return [
self.parse_content_with_bindings(item)
for item in content
]
if isinstance(content, dict):
evaluated_data = {}
for key, value in content.items():
eval_key = self.parse_content_with_bindings(key)
eval_value = self.parse_content_with_bindings(value)
evaluated_data[eval_key] = eval_value
return evaluated_data
if isinstance(content, (int, utils.long_type, float, complex)):
return content
# content is in string format here
content = "" if content is None else content.strip()
# replace functions with evaluated value
# Notice: eval_content_functions must be called before eval_content_variables
content = self.eval_content_functions(content)
# replace variables with binding value
content = self.eval_content_variables(content)
return content
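# Hedged usage sketch (not part of this module): an end-to-end call to
# parse_content_with_bindings with inline binds; the bind names and values
# below are illustrative.
def _example_testcase_parser():
    parser = TestcaseParser(
        variables_binds={"uid": 1000},
        functions_binds={"add_two_nums": lambda a, b=1: a + b},
    )
    # functions are evaluated first, then variables are substituted
    return parser.parse_content_with_bindings(
        "/api/users/$uid/${add_two_nums(1, 1)}"
    )  # -> "/api/users/1000/2"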
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import threading
import six
from six.moves import http_client
from apitools.base.py import buffered_stream
from apitools.base.py import compression
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import stream_slice
from apitools.base.py import util
__all__ = [
'Download',
'Upload',
'RESUMABLE_UPLOAD',
'SIMPLE_UPLOAD',
'DownloadProgressPrinter',
'DownloadCompletePrinter',
'UploadProgressPrinter',
'UploadCompletePrinter',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
def DownloadProgressPrinter(response, unused_download):
"""Print download progress based on response."""
if 'content-range' in response.info:
print('Received %s' % response.info['content-range'])
else:
print('Received %d bytes' % response.length)
def DownloadCompletePrinter(unused_response, unused_download):
"""Print information about a completed download."""
print('Download complete')
def UploadProgressPrinter(response, unused_upload):
"""Print upload progress based on response."""
print('Sent %s' % response.info['range'])
def UploadCompletePrinter(unused_response, unused_upload):
"""Print information about a completed upload."""
print('Upload complete')
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None, num_retries=5):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.__num_retries = 5
# Let the @property do validation
self.num_retries = num_retries
self.retry_func = (
http_wrapper.HandleExceptionsAndRebuildHttpConnections)
self.auto_transfer = auto_transfer
self.chunksize = chunksize or 1048576
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s' % self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s' % self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(craigcitro): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
http_client.OK,
http_client.NO_CONTENT,
http_client.PARTIAL_CONTENT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, stream, progress_callback=None, finish_callback=None,
**kwds):
total_size = kwds.pop('total_size', None)
super(Download, self).__init__(stream, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = total_size
self.__encoding = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
@property
def progress(self):
return self.__progress
@property
def encoding(self):
return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None,
client=None, **kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
if client is not None:
url = client.FinalizeTransferUrl(info['url'])
else:
url = info['url']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize( # pylint: disable=protected-access
http, url)
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
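    # Illustrative round trip (not part of the original module): an initialized
    # download can be serialized and later restored against the same stream,
    # e.g. data = json.dumps(download.serialization_data) now, and
    # Download.FromData(stream, data, http=http) later.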
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
# TODO(craigcitro): We need to send range requests because by
        # default httplib2 stores entire responses in memory. Override
# httplib2's download method (as gsutil does) so that this is not
# necessary.
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
"""Sets the total size based off info if possible otherwise 0."""
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(
self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
def __NormalizeStartEnd(self, start, end=None):
"""Normalizes start and end values based on total size."""
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index ' +
'[start=%d, end=%d]' % (start, end))
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size ' +
'[start=%d, total_size=%d]' % (start, self.total_size))
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size - 1
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None or end < start:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
        * the value for the last byte is 0-based, e.g. "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
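    # Worked example (illustrative): with chunksize=1048576, use_chunks=True
    # and total_size unknown, __ComputeEndByte(0) returns 1048575 (fetch one
    # chunk); once total_size is known, e.g. 1000, the same call returns
    # min(1048575, 999) == 999, the index of the last valid byte.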
def __GetChunk(self, start, end, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
def __ProcessResponse(self, response):
"""Process response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
# We distinguish errors that mean we made a mistake in setting
# up the transfer versus something we should attempt again.
if response.status_code in (http_client.FORBIDDEN,
http_client.NOT_FOUND):
raise exceptions.HttpError.FromResponse(response)
else:
raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK,
http_client.PARTIAL_CONTENT):
try:
self.stream.write(six.ensure_binary(response.content))
except TypeError:
self.stream.write(six.ensure_text(response.content))
self.__progress += response.length
if response.info and 'content-encoding' in response.info:
# TODO(craigcitro): Handle the case where this changes over a
# download.
self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None,
use_chunks=True):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
            additional_headers: (dict, optional) Any additional headers to
pass with the request.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and fetch this range in a single request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
end_byte = end
while (not progress_end_normalized or end_byte is None or
progress <= end_byte):
end_byte = self.__ComputeEndByte(progress, end=end_byte,
use_chunks=use_chunks)
response = self.__GetChunk(progress, end_byte,
additional_headers=additional_headers)
if not progress_end_normalized:
self.__SetTotal(response.info)
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
if response.length == 0:
if response.status_code == http_client.OK:
# There can legitimately be no Content-Length header sent
# in some cases (e.g., when there's a Transfer-Encoding
# header) and if this was a 200 response (as opposed to
# 206 Partial Content) we know we're done now without
# looping further on received length.
return
raise exceptions.TransferRetryError(
'Zero bytes unexpectedly returned in download response')
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True)
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream.
"""
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
end_byte = self.__ComputeEndByte(self.progress,
use_chunks=use_chunks)
response = self.__GetChunk(
self.progress, end_byte,
additional_headers=additional_headers)
if self.total_size is None:
self.__SetTotal(response.info)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
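# Hedged usage sketch (assumption, not part of this module): a typical download
# flow with an apitools-generated client. 'client', 'request' and the
# client.objects.Get service method are placeholders for objects generated
# elsewhere.
def _example_download(client, request, filename):
    download = Download.FromFile(filename, overwrite=True,
                                 progress_callback=DownloadProgressPrinter,
                                 finish_callback=DownloadCompletePrinter)
    # The generated service method configures and initializes the download;
    # with auto_transfer=True (the default) the bytes are streamed into the
    # file as part of this call.
    client.objects.Get(request, download=download)
    return download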
if six.PY3:
class MultipartBytesGenerator(email_generator.BytesGenerator):
"""Generates a bytes Message object tree for multipart messages
This is a BytesGenerator that has been modified to not attempt line
termination character modification in the bytes payload. Known to
work with the compat32 policy only. It may work on others, but not
tested. The outfp object must accept bytes in its write method.
"""
def _handle_text(self, msg):
# If the string has surrogates the original source was bytes, so
# just write it back out.
if msg._payload is None:
return
self.write(msg._payload)
def _encode(self, s):
return s.encode('ascii', 'surrogateescape')
# Default body handler
_writeBody = _handle_text
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True,
progress_callback=None, finish_callback=None,
gzip_encoded=False, **kwds):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http, **kwds)
self.__complete = False
self.__final_response = None
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.__total_size = None
self.__gzip_encoded = gzip_encoded
self.progress_callback = progress_callback
self.finish_callback = finish_callback
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True,
gzip_encoded=False, **kwds):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size,
close_stream=True, auto_transfer=auto_transfer,
gzip_encoded=gzip_encoded, **kwds)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
gzip_encoded=False, **kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
close_stream=False, auto_transfer=auto_transfer,
gzip_encoded=gzip_encoded, **kwds)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None,
gzip_encoded=False, client=None, **kwds):
"""Create a new Upload of stream from serialized json_data and http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
if 'total_size' in kwds:
raise exceptions.InvalidUserInputError(
'Cannot override total_size on serialized Upload')
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'),
gzip_encoded=gzip_encoded, **kwds)
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
if client is not None:
url = client.FinalizeTransferUrl(info['url'])
else:
url = info['url']
upload.strategy = RESUMABLE_UPLOAD
upload._Initialize( # pylint: disable=protected-access
http, url)
upload.RefreshResumableUploadState()
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
# Once the entire body is written, compress the body if configured
# to. Both multipart and media request uploads will read the
# entire stream into memory, which means full compression is also
# safe to perform. Because the strategy is set to SIMPLE_UPLOAD,
# StreamInChunks throws an exception, meaning double compression
# cannot happen.
if self.__gzip_encoded:
http_request.headers['Content-Encoding'] = 'gzip'
# Turn the body into a stream so that we can compress it, then
# read the compressed bytes. In the event of a retry (e.g. if
# our access token has expired), we need to be able to re-read
# the body, which we can't do with a stream. So, we consume the
# bytes from the stream now and store them in a re-readable
# bytes container.
http_request.body = (
compression.CompressStream(
six.BytesIO(http_request.body))[0].read())
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
http_request.loggable_body = '<media body>'
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# NOTE: We encode the body, but can't use
# `email.message.Message.as_string` because it prepends
# `> ` to `From ` lines.
fp = six.BytesIO()
if six.PY3:
generator_class = MultipartBytesGenerator
else:
generator_class = email_generator.Generator
g = generator_class(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
if isinstance(multipart_boundary, six.text_type):
multipart_boundary = multipart_boundary.encode('ascii')
body_components = http_request.body.split(multipart_boundary)
headers, _, _ = body_components[-2].partition(b'\n\n')
body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--'])
http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers[
'X-Upload-Content-Length'] = str(self.total_size)
def RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload.
Returns:
Response if the upload is complete.
"""
if self.strategy != RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT',
headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0,
retries=self.num_retries)
range_header = self._GetRangeHeaderFromResponse(refresh_response)
if refresh_response.status_code in (http_client.OK,
http_client.CREATED):
self.__complete = True
self.__progress = self.total_size
self.stream.seek(self.progress)
# If we're finished, the refresh response will contain the metadata
# originally requested. Cache it so it can be returned in
# StreamInChunks.
self.__final_response = refresh_response
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def _GetRangeHeaderFromResponse(self, response):
return response.info.get('Range', response.info.get('range'))
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != RESUMABLE_UPLOAD:
return
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request,
retries=self.num_retries)
if http_response.status_code != http_client.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
return http_response
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(craigcitro): Validate start == 0?
return int(end)
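    # Example (illustrative): a server response of "Range: bytes=0-12345" gives
    # __GetLastByte('bytes=0-12345') == 12345, so the next chunk starts at
    # byte 12346.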
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d' %
self.__server_chunk_granularity)
def __IsRetryable(self, response):
return (response.status_code >= 500 or
response.status_code == http_wrapper.TOO_MANY_REQUESTS or
response.retry_after)
def __StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Helper function for StreamMedia / StreamInChunks."""
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
# final_response is set if we resumed an already-completed upload.
response = self.__final_response
def CallSendChunk(start):
return self.__SendChunk(
start, additional_headers=additional_headers)
def CallSendMediaBody(start):
return self.__SendMediaBody(
start, additional_headers=additional_headers)
send_func = CallSendChunk if use_chunks else CallSendMediaBody
if not use_chunks and self.__gzip_encoded:
raise exceptions.InvalidUserInputError(
'Cannot gzip encode non-chunked upload')
if use_chunks:
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = send_func(self.stream.tell())
if response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
break
if response.status_code not in (
http_client.OK, http_client.CREATED,
http_wrapper.RESUME_INCOMPLETE):
# Only raise an exception if the error is something we can't
# recover from.
if (self.strategy != RESUMABLE_UPLOAD or
not self.__IsRetryable(response)):
raise exceptions.HttpError.FromResponse(response)
# We want to reset our state to wherever the server left us
# before this failed request, and then raise.
self.RefreshResumableUploadState()
self._ExecuteCallback(callback, response)
continue
self.__progress = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if self.progress + 1 != self.stream.tell():
# TODO(craigcitro): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at '
'byte %d' % self.progress)
self._ExecuteCallback(callback, response)
if self.__complete and hasattr(self.stream, 'seek'):
current_pos = self.stream.tell()
self.stream.seek(0, os.SEEK_END)
end_pos = self.stream.tell()
self.stream.seek(current_pos)
if current_pos != end_pos:
raise exceptions.TransferInvalidError(
'Upload complete with %s additional bytes left in stream' %
(int(end_pos) - int(current_pos)))
self._ExecuteCallback(finish_callback, response)
return response
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this resumable upload in a single request.
Args:
callback: Progress callback function with inputs
(http_wrapper.Response, transfer.Upload)
finish_callback: Final callback function with inputs
(http_wrapper.Response, transfer.Upload)
additional_headers: Dict of headers to include with the upload
http_wrapper.Request.
Returns:
http_wrapper.Response of final response.
"""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers, use_chunks=False)
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers)
def __SendMediaRequest(self, request, end):
"""Request helper function for SendMediaBody & SendChunk."""
def CheckResponse(response):
if response is None:
# Caller shouldn't call us if the response is None,
# but handle anyway.
raise exceptions.RequestError(
'Request to url %s did not return a response.' %
response.request_url)
response = http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries, check_response_func=CheckResponse)
if response.status_code == http_wrapper.RESUME_INCOMPLETE:
last_byte = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if last_byte + 1 != end:
self.stream.seek(last_byte + 1)
return response
def __SendMediaBody(self, start, additional_headers=None):
"""Send the entire media stream in a single request."""
self.EnsureInitialized()
if self.total_size is None:
raise exceptions.TransferInvalidError(
'Total size must be known for SendMediaBody')
body_stream = stream_slice.StreamSlice(
self.stream, self.total_size - start)
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if start == self.total_size:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, self.total_size)
def __SendChunk(self, start, additional_headers=None):
"""Send the specified chunk."""
self.EnsureInitialized()
no_log_body = self.total_size is None
request = http_wrapper.Request(url=self.url, http_method='PUT')
if self.__gzip_encoded:
request.headers['Content-Encoding'] = 'gzip'
body_stream, read_length, exhausted = compression.CompressStream(
self.stream, self.chunksize)
end = start + read_length
# If the stream length was previously unknown and the input stream
# is exhausted, then we're at the end of the stream.
if self.total_size is None and exhausted:
self.__total_size = end
elif self.total_size is None:
# For the streaming resumable case, we need to detect when
# we're at the end of the stream.
body_stream = buffered_stream.BufferedStream(
self.stream, start, self.chunksize)
end = body_stream.stream_end_position
if body_stream.stream_exhausted:
self.__total_size = end
# TODO: Here, change body_stream from a stream to a string object,
# which means reading a chunk into memory. This works around
# https://code.google.com/p/httplib2/issues/detail?id=176 which can
# cause httplib2 to skip bytes on 401's for file objects.
# Rework this solution to be more general.
body_stream = body_stream.read(self.chunksize)
else:
end = min(start + self.chunksize, self.total_size)
body_stream = stream_slice.StreamSlice(self.stream, end - start)
# TODO(craigcitro): Think about clearer errors on "no data in
# stream".
request.body = body_stream
request.headers['Content-Type'] = self.mime_type
if no_log_body:
# Disable logging of streaming body.
# TODO: Remove no_log_body and rework as part of a larger logs
# refactor.
request.loggable_body = '<media body>'
if self.total_size is None:
# Streaming resumable upload case, unknown total size.
range_string = 'bytes %s-%s/*' % (start, end - 1)
elif end == start:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
# Normal resumable upload case with known sizes.
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, end)
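# Hedged usage sketch (assumption, not part of this module): creating an Upload
# from a local file and attaching it to a generated client call. 'client',
# 'insert_request' and the client.objects.Insert service method are
# placeholders for objects generated elsewhere.
def _example_upload(client, insert_request, filename):
    upload = Upload.FromFile(filename, mime_type='application/octet-stream',
                             progress_callback=UploadProgressPrinter,
                             finish_callback=UploadCompletePrinter)
    # The generated client calls ConfigureRequest(), which picks SIMPLE_UPLOAD
    # or RESUMABLE_UPLOAD based on size and endpoint configuration; with
    # auto_transfer=True the bytes are sent as soon as the upload starts.
    return client.objects.Insert(insert_request, upload=upload)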
|
|
"""Device, context and memory management on CuPy.
Chainer uses CuPy (with a very thin wrapper) to exploit the speed of GPU
computation. The following modules and classes are imported into the
:mod:`cuda` module for convenience (refer to this table when reading Chainer's
source code).
============================ =================================
imported name original name
============================ =================================
``chainer.cuda.cupy`` :mod:`cupy`
``chainer.cuda.ndarray`` :class:`cupy.ndarray`
``chainer.cuda.cupy.cuda`` :mod:`cupy.cuda`
``chainer.cuda.Device`` :class:`cupy.cuda.Device`
``chainer.cuda.Event`` :class:`cupy.cuda.Event`
``chainer.cuda.Stream`` :class:`cupy.cuda.Stream`
============================ =================================
Chainer replaces the default allocator of CuPy with its own memory pool
implementation. This allows device memory to be reused across multiple
forward/backward computations, and temporary arrays to be reused across
consecutive elementwise operations.
"""
import functools
import os
import warnings
import numpy
available = False
cudnn_enabled = False
try:
import cupy
import cupy.cuda
import cupy.cuda.cublas
cuda = cupy.cuda
cublas = cuda.cublas
ndarray = cupy.ndarray
Device = cuda.Device
Event = cuda.Event
Stream = cuda.Stream
available = True
except Exception as e:
_resolution_error = e
class ndarray(object):
pass # for type testing
if available:
try:
import cupy.cudnn
cudnn = cupy.cudnn
cudnn_enabled = int(os.environ.get('CHAINER_CUDNN', '1')) != 0
except Exception as e:
_resolution_error = e
def init(arg=None):
warnings.warn(
'chainer.cuda.init is deprecated. You need to call nothing to '
'initialize your environment. Call chainer.cuda.check_cuda_available '
'to check availability of CUDA.',
DeprecationWarning)
check_cuda_available()
def check_cuda_available():
"""Checks if CUDA is available.
When CUDA is correctly set up, nothing happens.
Otherwise it raises ``RuntimeError``.
"""
if not available:
msg = ('CUDA environment is not correctly set up\n'
'(see https://github.com/pfnet/chainer#installation).')
msg += str(_resolution_error)
raise RuntimeError(msg)
if not cudnn_enabled:
warnings.warn(
'cuDNN is not enabled.\n'
'Please reinstall chainer after you install cudnn\n'
'(see https://github.com/pfnet/chainer#installation).')
class DummyDeviceType(object):
"""Dummy device class that does nothing with cupy.cuda.Device interface.
This class is used to represent CPU device.
"""
def __int__(self):
return -1
def __enter__(self):
return self
def __exit__(self, *args):
pass
def use(self):
pass
def synchronize(self):
pass
def __eq__(self, other):
return isinstance(other, DummyDeviceType)
def __ne__(self, other):
return not (self == other)
DummyDevice = DummyDeviceType()
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
if available:
memory_pool = cuda.MemoryPool()
cuda.set_allocator(memory_pool.malloc)
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
def get_device(*args):
"""Gets the device from an ID integer or an array object.
This is a convenient utility to select a correct device if the type of
``arg`` is unknown (i.e., one can use this function on arrays that may be
on CPU or GPU). The returned device object supports the context management
protocol of Python for the *with* statement.
Args:
args: Values to specify a GPU device. :class:`numpy.ndarray` objects
are skipped. If all arguments are :class:`numpy.ndarray` objects,
it returns a dummy device object. Otherwise, the first
non-:mod:`numpy` object is used to select a device. If it is a
:class:`cupy.ndarray` object, its device is returned. Otherwise,
the argument is passed to the initializer of
:class:`~cupy.cuda.Device` and it is returned.
Returns:
Device object specified by given ``args``.
.. seealso::
See :class:`cupy.cuda.Device` for the device selection not by arrays.
"""
for arg in args:
if arg is None:
continue
if not isinstance(arg, numpy.ndarray):
check_cuda_available()
if isinstance(arg, cupy.ndarray):
if arg.device is None:
continue
return arg.device
else:
return Device(arg)
return DummyDevice
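# Illustrative sketch (not part of the original module): get_device() returns
# DummyDevice for CPU inputs, so the same 'with' block can wrap code that
# handles both numpy and cupy arrays.
def _example_device_scope(x):
    with get_device(x):  # DummyDevice for numpy.ndarray, cupy Device otherwise
        return x * 2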
# ------------------------------------------------------------------------------
# cupy.ndarray allocation and copy
# ------------------------------------------------------------------------------
def to_gpu(array, device=None, stream=None):
"""Copies the given CPU array to specified device.
Args:
array: Array to be sent to GPU.
device: Device specifier.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Array on GPU.
    If ``array`` is already on the specified GPU device, then this function
    just returns ``array`` without performing any copy. If it resides on a
    different GPU device, it is copied over to the device given by ``device``.
"""
check_cuda_available()
assert stream is None # TODO(beam2d): FIX IT
with get_device(device):
dev_id = int(get_device(array))
if dev_id != -1 and dev_id != cupy.cuda.device.get_device_id():
# Need to make a copy when an array is copied to another device
return cupy.array(array, copy=True)
else:
return cupy.asarray(array)
def to_cpu(array, stream=None):
"""Copies the given GPU array to host CPU.
Args:
array: Array to be sent to CPU.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
numpy.ndarray: Array on CPU.
If given ``array`` is already on CPU, then this function just returns
``array`` without performing any copy.
"""
if isinstance(array, ndarray):
check_cuda_available()
with get_device(array):
return array.get(stream)
elif isinstance(array, numpy.ndarray):
return array
else:
raise TypeError(
'The array sent to cpu must be numpy.ndarray or cupy.ndarray.'
'\nActual type: {0}.'.format(type(array)))
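# Illustrative round-trip sketch (not part of the original module): to_cpu is a
# no-op for NumPy arrays, while to_gpu needs a working CUDA setup.
#
#     x = numpy.arange(6, dtype=numpy.float32)
#     assert to_cpu(x) is x
#     if available:
#         x_gpu = to_gpu(x)
#         numpy.testing.assert_array_equal(to_cpu(x_gpu), x)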
def empty(shape, dtype=numpy.float32):
"""Creates an uninitialized :class:`cupy.ndarray` object.
Args:
shape (tuple of ints): The shape of array.
dtype (numpy.dtype): Element type.
Returns:
cupy.ndarray: Uninitialized GPU array allocated by the memory pool.
"""
warnings.warn(
'chainer.cuda.empty is deprecated. Use cupy.empty instead.',
DeprecationWarning)
check_cuda_available()
return cupy.empty(shape, dtype)
def full(shape, fill_value, dtype=numpy.float32, stream=None):
"""Creates a constant-filled :class:`cupy.ndarray` object.
Args:
shape (tuple of ints): The shape of array.
fill_value: Constant to fill the array by.
dtype (numpy.dtype): Element type.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Constant-filled GPU array allocated by the memory pool.
"""
warnings.warn(
'chainer.cuda.full is deprecated. Use cupy.full instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
return cupy.full(shape, fill_value, dtype=dtype)
def zeros(shape, dtype=numpy.float32, stream=None):
"""Creates a zero-filled :class:`cupy.ndarray` object.
This function is equivalent to ``full(shape, 0, dtype, stream)``.
"""
warnings.warn(
'chainer.cuda.zeros is deprecated. Use cupy.zeros instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
return cupy.zeros(shape, dtype=dtype)
def ones(shape, dtype=numpy.float32, stream=None):
"""Creates a zero-filled :class:`cupy.ndarray` object.
This function is equivalent to ``full(shape, 1, dtype, stream)``.
"""
warnings.warn(
'chainer.cuda.ones is deprecated. Use cupy.ones instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
return cupy.ones(shape, dtype=dtype)
def empty_like(array):
"""Creates an uninitialized GPU array like the given one.
Args:
array (cupy.ndarray or numpy.ndarray): Base array.
Returns:
cupy.ndarray: GPU array of the same shape and dtype as `array`.
"""
warnings.warn(
'chainer.cuda.empty_like is deprecated. Use cupy.empty_like instead.',
DeprecationWarning)
check_cuda_available()
if isinstance(array, cupy.ndarray):
return cupy.empty_like(array)
return cupy.empty(array.shape, dtype=array.dtype)
def full_like(array, fill_value, stream=None):
"""Creates a constant-filled :class:`cupy.ndarray` object like the given array.
Args:
array (cupy.ndarray or numpy.ndarray): Base array.
fill_value: Constant value to fill the array by.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Constant-filled array.
"""
warnings.warn(
'chainer.cuda.full_like is deprecated. Use cupy.full_like instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
if isinstance(array, cupy.ndarray):
return cupy.full_like(array, fill_value)
return cupy.full(array.shape, fill_value, dtype=array.dtype)
def zeros_like(array, stream=None):
"""Creates a zero-filled :class:`cupy.ndarray` object like the given array.
Args:
array (cupy.ndarray or numpy.ndarray): Base array.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Zero-filled array.
"""
warnings.warn(
'chainer.cuda.zeros_like is deprecated. Use cupy.zeros_like instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
if isinstance(array, cupy.ndarray):
return cupy.zeros_like(array)
return cupy.zeros(array.shape, dtype=array.dtype)
def ones_like(array, stream=None):
"""Creates a one-filled :class:`cupy.ndarray` object like the given array.
Args:
array (cupy.ndarray or numpy.ndarray): Base array.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: One-filled array.
"""
warnings.warn(
'chainer.cuda.ones_like is deprecated. Use cupy.ones_like instead.',
DeprecationWarning)
check_cuda_available()
assert stream is None
if isinstance(array, cupy.ndarray):
return cupy.ones_like(array)
return cupy.ones(array.shape, dtype=array.dtype)
def copy(array, out=None, out_device=None, stream=None):
"""Copies a :class:`cupy.ndarray` object using the default stream.
This function can copy the device array to the destination array on another
device.
Args:
array (cupy.ndarray): Array to be copied.
out (cupy.ndarray): Destination array.
If it is not ``None``, then ``out_device`` argument is ignored.
out_device: Destination device specifier. Actual device object is
obtained by passing this value to :func:`get_device`.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Copied array.
If ``out`` is not specified, then the array is allocated on the device
specified by ``out_device`` argument.
"""
check_cuda_available()
assert stream is None # TODO(beam2d): FIX IT
if out is None:
if out_device is None:
out_device = array
with get_device(out_device):
out = cupy.empty_like(array)
with get_device(array):
cupy.copyto(out, array)
return out
# ------------------------------------------------------------------------------
# Function result memoization
# ------------------------------------------------------------------------------
def memoize(for_each_device=False):
"""Makes a function memoizing the result for each argument and device.
This is a similar version of :func:`cupy.memoize`. The difference is that
this function can be used in the global scope even if CUDA is not
available. In such case, this function does nothing.
.. note::
This decorator acts as a dummy if CUDA is not available. It cannot be
used for general purpose memoization even if ``for_each_device`` is set
to False.
"""
if available:
return cupy.memoize(for_each_device)
def dummy_decorator(f):
@functools.wraps(f)
def ret(*args, **kwargs):
return f(*args, **kwargs)
return ret
return dummy_decorator
# ------------------------------------------------------------------------------
# Kernel definition utility
# ------------------------------------------------------------------------------
@memoize(for_each_device=True)
def elementwise(in_params, out_params, operation, name, **kwargs):
"""Creates an elementwise kernel function.
This function uses :func:`~chainer.cuda.memoize` to cache the
kernel object, i.e. the resulting kernel object is cached for each argument
combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ElementwiseKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, **kwargs)
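# Illustrative sketch (assumes CUDA is available): the cached kernel follows the
# cupy.ElementwiseKernel calling convention, e.g.
#
#     squared_diff = elementwise(
#         'float32 x, float32 y', 'float32 z',
#         'z = (x - y) * (x - y)', 'squared_diff')
#     z = squared_diff(cupy.arange(5, dtype='f'), 2.0)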
@memoize(for_each_device=True)
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs):
"""Creates a global reduction kernel function.
This function uses :func:`~chainer.cuda.memoize` to cache the resulting
kernel object, i.e. the resulting kernel object is cached for each argument
combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ReductionKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ReductionKernel(
in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs)
# ------------------------------------------------------------------------------
# numpy/cupy compatible coding
# ------------------------------------------------------------------------------
def get_array_module(*args):
"""Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.
This is almost equivalent to :func:`cupy.get_array_module`. The only
difference is that this function can be used even if CUDA is not available.
Args:
args: Values to determine whether NumPy or CuPy should be used.
Returns:
module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
the arguments.
"""
if available:
return cupy.get_array_module(*args)
else:
return numpy
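# Illustrative sketch (not part of the original module): writing CPU/GPU
# agnostic code with get_array_module.
#
#     def softplus(x):
#         xp = get_array_module(x)       # numpy or cupy, depending on x
#         return xp.log1p(xp.exp(x))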
_max_workspace_size = 8 * 1024 * 1024
def get_max_workspace_size():
"""Gets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Returns:
int: The workspace size for cuDNN.
"""
return _max_workspace_size
def set_max_workspace_size(size):
"""Sets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Args:
size: The workspace size for cuDNN.
"""
global _max_workspace_size
_max_workspace_size = size
|
|
"""
A test file for LDpred.
Examples
--------
To run all tests:
$ python -m tests.test
To run a specific test:
$ python -m unittest tests.test.SimpleTests.test_ldpred_inf
"""
import pickle
import filecmp
import gzip
import h5py
from ldpred import coord_genotypes
from ldpred import ld
from ldpred import sum_stats_parsers
from ldpred import run
import numpy as np
import os
import tempfile
import unittest
import sys
np.set_printoptions(linewidth=int(os.environ.get('COLUMNS', 100)))
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
def run_test(mesg, cmd_str, error_mesg, *actual_and_golden_outputs):
print(mesg)
print(cmd_str + '\n')
cmd_args = cmd_str.split()
try:
run.main_with_args(cmd_args)
for i in range(0, len(actual_and_golden_outputs), 2):
actual_output = actual_and_golden_outputs[i]
golden_output = os.path.join(TEST_DIR, actual_and_golden_outputs[i + 1])
print('Diffing actual (%s) vs. golden (%s) outputs...' % (actual_output, golden_output))
assert_files_equal(actual_output, golden_output)
print('Diff passed!')
except:
print(error_mesg + '\n')
raise
def h5_node_walker(h5_node, key_prefix=''):
"""Generator function that walks an hdf5 File or Group object.
Args:
h5_node: an h5py.File or h5py.Group object
key_prefix: the '/' delimited string representing the name path of the
node within the .hdf5 file.
Yields:
(child_key, child_value)
"""
for k, v in h5_node.items():
v_type = type(v)
v_path = key_prefix + '/' + k
if v_type == h5py.Group:
for nested_key, nested_value in h5_node_walker(v, v_path):
yield nested_key, nested_value
elif v_type == h5py.Dataset:
yield v_path, v[...]
else:
assert False, 'Unexpected v_type: %s' % v_type
def h5_file_walker(h5_file):
"""Generator function that walks an hdf5 file.
Args:
h5_file: a string, the name of the .hdf5 file to walk.
Yields:
(child_key, child_value)
"""
with h5py.File(h5_file, 'r') as h5_root_node:
for k, v in h5_node_walker(h5_root_node):
yield k, v
def pkl_node_walker(pkl_node, key_prefix=''):
"""Generator function that walks a Python pickle node (i.e. a dict).
Args:
pkl_node: A dict coming from a depickled object.
key_prefix: the '/' delimited string representing the name path of the
node within the pickle file.
Yields:
(child_key, child_value)
"""
for k in sorted(pkl_node.keys()):
v = pkl_node[k]
v_type = type(v)
v_path = key_prefix + '/' + str(k)
if v_type == dict:
for nested_key, nested_value in pkl_node_walker(v, v_path):
yield nested_key, nested_value
elif v_type == list:
# Convert Python list to Numpy ndarray for assert_deep_equals.
yield v_path, np.array(v)
elif v_type in (float, np.float64, np.float32, int, str, np.ndarray):
yield v_path, v
else:
assert False, 'Unexpected v_type: %s' % v_type
def pkl_file_walker(pkl_file):
"""Generator function that walks a Python pickle file.
Args:
pkl_file: a string, the name of the .pkl.gz file to walk.
Yields:
(child_key, child_value)
"""
try:
with gzip.open(pkl_file) as f:
pkl_root_node = pickle.load(f)
except UnicodeDecodeError as e:
with gzip.open(pkl_file) as f:
pkl_root_node = pickle.load(f,encoding='latin1')
except Exception as e:
print('Unable to load data ', pkl_file, ':', e)
raise
for k, v in pkl_node_walker(pkl_root_node):
yield k, v
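# Illustrative sketch (not part of the test suite): the walkers flatten nested
# HDF5/pickle structures into (path, value) pairs, which is handy for manually
# inspecting a golden file, e.g.
#
#     for path, value in h5_file_walker('test_data/goldens/golden.coord.hdf5'):
#         print(path, getattr(value, 'shape', value))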
def assert_deep_equals(walker1, walker2):
"""Test function that does a deep comparison of two structure walkers."""
for (k1, v1), (k2, v2) in zip(walker1, walker2):
assert k1 == k2, 'Key mismatch: %s vs. %s' % (k1, k2)
assert type(v1) == type(v2), 'Type mismatch: %s vs. %s' % (type(v1), type(v2))
if isinstance(v1, str) or isinstance(v1, int):
assert v1 == v2, 'Value mismatch: %s vs. %s' % (v1, v2)
elif isinstance(v1, float) or isinstance(v1, np.float32):
assert np.isclose(v1, v2), 'Float mismatch: %s vs. %s' % (v1, v2)
elif isinstance(v1, np.ndarray):
assert v1.dtype == v2.dtype, 'dtype mismatch: %s vs. %s' % (v1.dtype, v2.dtype)
if np.issubdtype(v1.dtype, np.number):
assert np.allclose(v1, v2), 'ndarray number mismatch in key %s' % k1
else:
assert np.array_equal(v1, v2), 'ndarray non-number mismatch in key %s: v1=%s ; v2=%s' % (k1,str(v1),str(v2))
def assert_files_equal(file1, file2):
if file1.endswith('.hdf5'):
assert_deep_equals(h5_file_walker(file1), h5_file_walker(file2))
elif file1.endswith('.pkl.gz'):
assert_deep_equals(pkl_file_walker(file1), pkl_file_walker(file2))
else:
assert filecmp.cmp(file1, file2), "Mismatch between: %s and %s" % (file1, file2)
def make_p_dict(*args):
return vars(run.parser.parse_args(args))
class SimpleTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('Testing LDpred.\n')
print('Note that this test currently only tests the core functionality of LDpred.')
print('Please report bugs on github (https://github.com/bvilhjal/ldpred) or to Bjarni J Vilhjalmsson (bjarni.vilhjalmsson@gmail.com).\n')
def setUp(self):
self.tf = tempfile.NamedTemporaryFile()
self.tmp_file_prefix = next(tempfile._get_candidate_names())
def tearDown(self):
print('Cleaning up files: %s* ' % self.tmp_file_prefix)
cmd_str = 'rm -f %s*' % self.tmp_file_prefix
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems cleaning up test files! Testing stopped'
def test_parse_sum_stats(self):
p_dict = {
'ssf': os.path.join(TEST_DIR, 'test_data/sim1_0_ss.txt'),
'ssf_format': 'LDPRED',
'only_hm3': False,
'N': 10000,
'debug': True,
'z_from_se':False,
'match_genomic_pos': False,
'eff_type':'LINREG'}
bimfile = os.path.join(TEST_DIR, 'test_data/sim1_0_test.bim')
summary_dict = {}
out = '%s_parse_sum_stats.hdf5' % self.tmp_file_prefix
with h5py.File(out, 'w') as h5f:
sum_stats_parsers.parse_sum_stats(h5f, p_dict, bimfile, summary_dict)
self.assertEqual(len(h5f['sum_stats']['chrom_1']['betas']), 2000)
p_dict = {
'ssf': os.path.join(TEST_DIR, 'test_data/sim4_0_ss.txt'),
'ssf_format': 'LDPRED',
'only_hm3': False,
'N': None,
'debug': True,
'z_from_se':True,
'match_genomic_pos': False,}
bimfile = os.path.join(TEST_DIR, 'test_data/sim4_0_test.bim')
summary_dict = {}
out = '%s_parse_sum_stats.hdf5' % self.tmp_file_prefix
with h5py.File(out, 'w') as h5f:
sum_stats_parsers.parse_sum_stats(h5f, p_dict, bimfile, summary_dict)
self.assertEqual(len(h5f['sum_stats']['chrom_1']['betas']), 2000)
def test_coord_genotypes(self):
p_dict = make_p_dict(
'--debug',
'coord',
'--gf=%s/test_data/sim1_0_test' % TEST_DIR,
'--vgf=%s/test_data/sim1_0_test' % TEST_DIR,
'--ssf=%s/test_data/sim1_0_ss.txt' % TEST_DIR,
'--ssf-format=LDPRED',
'--out=%s_coord_genotypes.hdf5' % self.tmp_file_prefix,
)
summary_dict = coord_genotypes.main(p_dict)
# summary_dict[11]['value'], if present, is the count of non-matching nts.
# It should be 0.
self.assertEqual(summary_dict.get(11, {}).get('value', 0), 0)
with h5py.File(p_dict['out'], 'r') as h5f:
self.assertEqual(len(h5f['sum_stats']['chrom_1']['betas']), 2000)
def test_ld_calculation(self):
df = h5py.File('%s/test_data/goldens/golden.coord0.hdf5' % TEST_DIR, 'r')
g = df['cord_data']['chrom_1']
snps, n_raw_snps, n_snps = ld.extract_snps_from_cord_data_chrom(g)
first_10_snps = snps[:10]
self.assertEqual(len(first_10_snps), 10)
ld_dict_and_scores = ld.get_LDpred_ld_tables(first_10_snps)
ld_dict = ld_dict_and_scores['ld_dict']
ld_mat = np.vstack([ld_dict[i] for i in range(10)])
# np.savez(os.path.join(TEST_DIR, 'test_data/goldens/ld_data'),ld=ld_mat)
golden_ld_mat = np.load(os.path.join(TEST_DIR, 'test_data/goldens/ld_data.npz'))['ld']
self.assertTrue(np.allclose(ld_mat, golden_ld_mat))
def test_get_chromosome_herits(self):
p_dict = make_p_dict(
'--debug',
'inf',
'--cf=%s/test_data/goldens/golden.coord.hdf5' % TEST_DIR,
'--ldr=100',
'--ldf=' + self.tmp_file_prefix,
'--N=4000',
'--out=' + self.tmp_file_prefix,
)
summary_dict = {}
ld_dict = ld.get_ld_dict_using_p_dict(p_dict, summary_dict)
coord_file = os.path.join(TEST_DIR, 'test_data/goldens/golden.coord.hdf5')
df = h5py.File(coord_file, 'r')
herit_dict = ld.get_chromosome_herits(df['cord_data'], ld_dict['ld_scores_dict'], n=p_dict['N'])
print(herit_dict)
self.assertAlmostEqual(herit_dict['chrom_1']['h2'], 0.10640501626651437)
self.assertAlmostEqual(herit_dict['gw_h2_ld_score_est'], 0.10640501626651437)
def test_ldpred_coord0(self):
coord_file = self.tmp_file_prefix + '.coord0.hdf5'
run_test(
'Coordinating test data into file %s' % coord_file,
'coord --gf=%s/test_data/sim1_0_test --vgf=%s/test_data/sim1_0_test --ssf=%s/test_data/sim1_0_ss.txt --ssf-format=LDPRED --eff_type LINREG --out=%s' % (TEST_DIR, TEST_DIR, TEST_DIR, coord_file),
'Problems when coordinating data!',
coord_file,
'test_data/goldens/golden.coord0.hdf5'
)
def test_ldpred_coord(self):
coord_file = self.tmp_file_prefix + '.coord.hdf5'
run_test(
'Coordinating test data into file %s' % coord_file,
'--debug coord --gf=%s/test_data/sim2_0_test --vbim=%s/test_data/sim2_0_test.bim --ssf=%s/test_data/sim2_0_ss.txt --ssf-format=LDPRED --eff_type LINREG --out=%s' % (TEST_DIR, TEST_DIR, TEST_DIR, coord_file),
'Problems when coordinating data!',
coord_file,
'test_data/goldens/golden.coord.hdf5')
def test_ldpred_inf(self):
run_test(
'Running LDpred-inf with output file prefix: %s ' % self.tmp_file_prefix,
'--debug inf --cf=%s/test_data/goldens/golden.coord.hdf5 --ldr=100 --ldf=%s --out=%s' % (TEST_DIR, self.tmp_file_prefix, self.tmp_file_prefix),
'Problems when running LDpred_inf!',
self.tmp_file_prefix + '_ldradius100.pkl.gz',
'test_data/goldens/golden_inf_ldradius100.pkl.gz')
def test_ldpred_fast(self):
run_test(
            'Running LDpred-fast with output file prefix: %s ' % self.tmp_file_prefix,
'--debug fast --cf=%s/test_data/goldens/golden.coord.hdf5 --f 0.3 0.1 0.03 0.01 --ldr=100 --ldf=%s --out=%s' % (TEST_DIR, self.tmp_file_prefix, self.tmp_file_prefix),
'Problems when running LDpred_fast!')
def test_ldpred_gibbs(self):
run_test(
'Running LDpred with output file prefix: %s ' % self.tmp_file_prefix,
'--debug gibbs --cf=%s/test_data/goldens/golden.coord.hdf5 --ldr=100 --ldf=%s --f=0.001 --out=%s' % (TEST_DIR, self.tmp_file_prefix, self.tmp_file_prefix),
'Problems when running LDpred!')
def test_ldpred_p_plus_t(self):
run_test(
'Running P+T with coordinated file prefix: %s ' % self.tmp_file_prefix,
'--debug p+t --cf=%s/test_data/goldens/golden.coord.hdf5 --ldr=100 --p=0.001 --out=%s' % (TEST_DIR, self.tmp_file_prefix),
'Problems when running P+T!',
self.tmp_file_prefix + '_P+T_r0.20_p1.0000e-03.txt',
'test_data/goldens/golden_P+T_r0.20_p1.0000e-03.txt')
def test_ldpred_score_1(self):
prs_file_prefix = self.tmp_file_prefix
run_test(
'Validating results with output file prefix: %s' % prs_file_prefix,
'--debug score --gf=%s/test_data/sim2_0_test --rf=%s/test_data/goldens/golden --out=%s' % (TEST_DIR, TEST_DIR, prs_file_prefix),
'Problems with the validation step!',
prs_file_prefix + '_LDpred-inf.txt',
'test_data/goldens/goldenprs_LDpred-inf.txt',
prs_file_prefix + '_LDpred_p1.0000e-03.txt',
'test_data/goldens/goldenprs_LDpred_p1.0000e-03.txt')
def test_ldpred_score_2(self):
prs_file_prefix = self.tmp_file_prefix
run_test(
'Validating results with output file prefix: %s' % self.tmp_file_prefix,
'score --gf=%s/test_data/sim2_0_test --rf-format LDPRED --rf=%s/test_data/goldens/golden --out=%s' % (TEST_DIR, TEST_DIR, prs_file_prefix),
'Problems with the validation step!',
prs_file_prefix + '_LDpred-inf.txt',
'test_data/goldens/goldenprs_LDpred-inf.txt',
prs_file_prefix + '_LDpred_p1.0000e-03.txt',
'test_data/goldens/goldenprs_LDpred_p1.0000e-03.txt',)
def test_ldpred_score_3(self):
prs_file_prefix = self.tmp_file_prefix
run_test(
'Validating results with output file prefix: %s' % self.tmp_file_prefix,
'score --gf=%s/test_data/sim2_0_test --only-score --rf=%s/test_data/goldens/golden --rf-format=P+T --out=%s' % (TEST_DIR, TEST_DIR, prs_file_prefix),
'Problems with the P+T validation step!',
prs_file_prefix + '_P+T_r0.20_p1.0000e-03.txt',
'test_data/goldens/goldenprs_only_score_P+T_r0.20_p1.0000e-03.txt')
def test_ldpred_score_4(self):
prs_file_prefix = self.tmp_file_prefix
run_test(
'Validating results with output file prefix: %s' % self.tmp_file_prefix,
'score --gf=%s/test_data/sim2_0_test --rf=%s/test_data/goldens/golden --rf-format=P+T --out=%s' % (TEST_DIR, TEST_DIR, prs_file_prefix),
'Problems with the P+T validation step!',
prs_file_prefix + '_P+T_r0.20_p1.0000e-03.txt',
'test_data/goldens/goldenprs_P+T_r0.20_p1.0000e-03.txt')
class ComplexTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('Testing LDpred: Integration tests.\n')
print('Note that this test currently only tests the core functionality of LDpred.')
print('Please report bugs on github (https://github.com/bvilhjal/ldpred) or to Bjarni J Vilhjalmsson (bjarni.vilhjalmsson@gmail.com).\n')
def setUp(self):
self.tf = tempfile.NamedTemporaryFile()
self.tmp_file_prefix = next(tempfile._get_candidate_names())
def tearDown(self):
print('Cleaning up files: %s* ' % self.tmp_file_prefix)
cmd_str = 'rm -f %s*' % self.tmp_file_prefix
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems cleaning up test files! Testing stopped'
def test_mix1(self):
t_i = 0
label='mix1'
for sim_i in range(1,6):
td = '%s/test_data/sim%d'%(TEST_DIR, sim_i)
file_prefix = '%s_%s_sim%d_%d'%(self.tmp_file_prefix,label,sim_i,t_i)
df_prefix = '%s_%d'%(td,t_i)
coord_file = file_prefix+'.hdf5'
run_test(
                'Coordinating test data into file: %s' % coord_file,
'coord --gf=%s_test --vbim=%s_test.bim --ssf=%s_ss.txt --ssf-format=LDPRED --out=%s' % (df_prefix,df_prefix,df_prefix,coord_file),
'Problems when coordinating data!')
ld_file = file_prefix+'.ld'
weights_file = file_prefix+'.weights'
run_test(
'Running LDpred-fast with coordinated file prefix: %s ' % coord_file,
'--debug fast --cf=%s --f 0.3 0.1 0.03 0.01 --ldr=100 --ldf=%s --out=%s' % (coord_file, ld_file, weights_file),
'Problems when running LDpred_fast!')
run_test(
'Running LDpred with coordinated file prefix: %s ' % coord_file,
'gibbs --N 5500 --use-gw-h2 --n-burn-in 5 --n-iter 50 --cf=%s --ldr=100 --ldf=%s --f 1 0.3 0.1 --out=%s' % (coord_file, ld_file, weights_file),
'Problems when running LDpred!')
run_test(
'Running P+T with coordinated file prefix: %s ' % coord_file,
'p+t --cf=%s --ldr=100 --p 1 0.3 0.1 --out=%s' % (coord_file, weights_file),
'Problems when running P+T!')
prs_file_prefix = file_prefix+'.prs'
golden_prs_prefix = '%s/test_data/goldens/golden_%s_prs_%i_%i'%(TEST_DIR,label,sim_i,t_i)
golden_summary_file = '%s.summary.txt'%golden_prs_prefix
summary_file = file_prefix+'.summary.txt'
run_test(
'Validating results with output file prefix: %s' % prs_file_prefix,
'score --gf=%s_test --rf=%s --out=%s --summary-file=%s' % (df_prefix, weights_file, prs_file_prefix, summary_file),
'Problems with the validation step!',
summary_file,golden_summary_file)
def test_mix2(self):
t_i = 0
label='mix2'
for sim_i in range(1,6):
td = '%s/test_data/sim%d'%(TEST_DIR, sim_i)
file_prefix = '%s_%s_sim%d_%d'%(self.tmp_file_prefix,label,sim_i,t_i)
df_prefix = '%s_%d'%(td,t_i)
coord_file = file_prefix+'.hdf5'
run_test(
                'Coordinating test data into file: %s' % coord_file,
'coord --gf=%s_test --vbim=%s_test.bim --z-from-se --ssf=%s_ss.txt --ssf-format=LDPRED --out=%s' % (df_prefix,df_prefix,df_prefix,coord_file),
'Problems when coordinating data!')
ld_file = file_prefix+'.ld'
weights_file = file_prefix+'.weights'
run_test(
'Running LDpred-fast with coordinated file prefix: %s ' % coord_file,
'--debug fast --cf=%s --f 0.3 0.1 0.03 0.01 0.001 --ldr=150 --ldf=%s --out=%s' % (coord_file, ld_file, weights_file),
'Problems when running LDpred_fast!')
run_test(
'Running LDpred with coordinated file prefix: %s ' % coord_file,
'gibbs --n-burn-in 5 --n-iter 50 --cf=%s --ldr=150 --ldf=%s --f 1 0.1 0.01 0.001 --out=%s' % (coord_file, ld_file, weights_file),
'Problems when running LDpred!')
run_test(
'Running P+T with coordinated file prefix: %s ' % coord_file,
'p+t --cf=%s --ldr=150 --r2 0.5 0.2 0.1 --p 1 0.3 0.1 0.03 0.01 0.003 0.001 0.0003 0.0001 0.00001 --out=%s' % (coord_file, weights_file),
'Problems when running P+T!')
prs_file_prefix = file_prefix+'.prs'
golden_prs_prefix = '%s/test_data/goldens/golden_%s_prs_%i_%i'%(TEST_DIR,label,sim_i,t_i)
golden_summary_file = '%s.summary.txt'%golden_prs_prefix
summary_file = file_prefix+'.summary.txt'
run_test(
'Validating results with output file prefix: %s' % prs_file_prefix,
'score --gf=%s_test --r2 0.5 0.2 0.1 --p 1 0.3 0.1 0.03 0.01 0.003 0.001 0.0003 0.0001 0.00001 --rf=%s --out=%s --summary-file=%s' % (df_prefix, weights_file,
prs_file_prefix, summary_file),
'Problems with the validation step!',
summary_file,golden_summary_file)
def update_golden_files_mix1():
label = 'mix1'
tf = tempfile.NamedTemporaryFile()
tmp_file_prefix = next(tempfile._get_candidate_names())
for sim_i in range(1,6):
print('Updating golden results')
coord_file = '%s_%i_coord.hdf5'%(tmp_file_prefix,sim_i)
cmd_str = 'python -m ldpred --debug coord --gf %s/test_data/sim%i_0_test --vbim %s/test_data/sim%i_0_test.bim --ssf %s/test_data/sim%i_0_ss.txt --ssf-format LDPRED --out=%s' % (TEST_DIR,sim_i,TEST_DIR,sim_i,TEST_DIR,sim_i,coord_file)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
weights_prefix = '%s_%i_weights'%(tmp_file_prefix,sim_i)
ld_prefix = '%s_%i'%(tmp_file_prefix,sim_i)
cmd_str = 'python -m ldpred fast --cf %s --ldr 100 --f 0.3 0.1 0.03 0.01 --ldf %s --out %s' % (coord_file,ld_prefix,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
cmd_str = 'python -m ldpred gibbs --N 5500 --use-gw-h2 --n-burn-in 5 --n-iter 50 --cf %s --ldr 100 --ldf %s --f 1 0.3 0.1 --out %s' % (coord_file,ld_prefix,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
cmd_str = 'python -m ldpred p+t --cf %s --ldr 100 --p 1 0.3 0.1 --out %s' % (coord_file,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
prs_prefix = '%s_prs_%i_0'%(tmp_file_prefix,sim_i)
golden_summary_file = '%s/test_data/goldens/golden_%s_prs_%i_0.summary.txt'%(TEST_DIR, label,sim_i)
cmd_str = 'python -m ldpred --debug score --gf %s/test_data/sim%i_0_test --rf %s --out %s --summary-file %s' % (TEST_DIR, sim_i,weights_prefix,prs_prefix, golden_summary_file)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
print('Cleaning up files.')
cmd_str = 'rm %s*' % tmp_file_prefix
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems cleaning up test files! Testing stopped'
def update_golden_files_mix2():
label = 'mix2'
tf = tempfile.NamedTemporaryFile()
tmp_file_prefix = next(tempfile._get_candidate_names())
for sim_i in range(1,6):
print('Updating golden results')
coord_file = '%s_%i_coord.hdf5'%(tmp_file_prefix,sim_i)
cmd_str = 'python -m ldpred coord --gf %s/test_data/sim%i_0_test --vbim %s/test_data/sim%i_0_test.bim --z-from-se --ssf %s/test_data/sim%i_0_ss.txt --ssf-format LDPRED --out=%s' % (TEST_DIR,sim_i,TEST_DIR,sim_i,TEST_DIR,sim_i,coord_file)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
weights_prefix = '%s_%i_weights'%(tmp_file_prefix,sim_i)
ld_prefix = '%s_%i'%(tmp_file_prefix,sim_i)
cmd_str = 'python -m ldpred fast --cf %s --ldr 150 --f 0.3 0.1 0.03 0.01 0.001 --ldf %s --out %s' % (coord_file,ld_prefix,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
cmd_str = 'python -m ldpred gibbs --n-burn-in 5 --n-iter 50 --cf %s --ldr 150 --ldf %s --f 1 0.1 0.01 0.001 --out %s' % (coord_file,ld_prefix,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
cmd_str = 'python -m ldpred p+t --cf %s --ldr 150 --r2 0.5 0.2 0.1 --p 1 0.3 0.1 0.03 0.01 0.003 0.001 0.0003 0.0001 0.00001 --out %s' % (coord_file,weights_prefix)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
prs_prefix = '%s_prs_%i_0'%(tmp_file_prefix,sim_i)
golden_summary_file = '%s/test_data/goldens/golden_%s_prs_%i_0.summary.txt'%(TEST_DIR, label,sim_i)
cmd_str = 'python -m ldpred score --gf %s/test_data/sim%i_0_test --r2 0.5 0.2 0.1 --p 1 0.3 0.1 0.03 0.01 0.003 0.001 0.0003 0.0001 0.00001 --rf %s --out %s --summary-file %s' % (TEST_DIR,sim_i,weights_prefix,prs_prefix, golden_summary_file)
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems when updating golden files'
print('Cleaning up files.')
cmd_str = 'rm %s*' % tmp_file_prefix
print(cmd_str + '\n')
assert os.system(cmd_str) == 0, 'Problems cleaning up test files! Testing stopped'
def run_integration_tests():
complex_suite = unittest.TestLoader().loadTestsFromTestCase(ComplexTests)
unittest.TextTestRunner().run(complex_suite)
def run_unit_tests():
simple_suite = unittest.TestLoader().loadTestsFromTestCase(SimpleTests)
unittest.TextTestRunner().run(simple_suite)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import random
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 10, units="ns").start())
self.source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "s_axis"), dut.clk, dut.rst)
self.sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "m_axis"), dut.clk, dut.rst)
def set_idle_generator(self, generator=None):
if generator:
self.source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = cur_id
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_tuser_assert(dut):
tb = TB(dut)
await tb.reset()
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data, tuser=1)
await tb.source.send(test_frame)
if int(os.getenv("PARAM_DROP_BAD_FRAME")):
for k in range(64):
await RisingEdge(dut.clk)
else:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_init_sink_pause(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(64):
await RisingEdge(dut.clk)
tb.sink.pause = False
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_init_sink_pause_reset(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(64):
await RisingEdge(dut.clk)
await tb.reset()
tb.sink.pause = False
for k in range(64):
await RisingEdge(dut.clk)
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_overflow(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 2048))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(2048):
await RisingEdge(dut.clk)
tb.sink.pause = False
if int(os.getenv("PARAM_DROP_OVERSIZE_FRAME")):
for k in range(2048):
await RisingEdge(dut.clk)
else:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_stress_test(dut, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
byte_lanes = tb.source.byte_lanes
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
for k in range(128):
length = random.randint(1, byte_lanes*16)
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), length))
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = cur_id
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
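# Note (editorial, based on cocotbext-axi's pause-generator convention): the
# generator is consumed one value per clock cycle and a truthy value stalls the
# source or sink for that cycle, so cycle_pause() above allows at most one
# transfer every four cycles.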
def size_list():
data_width = len(cocotb.top.m_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
if cocotb.SIM_NAME:
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
for test in [
run_test_tuser_assert,
run_test_init_sink_pause,
run_test_init_sink_pause_reset,
run_test_overflow
]:
factory = TestFactory(test)
factory.generate_tests()
factory = TestFactory(run_stress_test)
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize(("frame_fifo", "drop_oversize_frame", "drop_bad_frame", "drop_when_full"),
[(0, 0, 0, 0), (1, 0, 0, 0), (1, 1, 0, 0), (1, 1, 1, 0)])
@pytest.mark.parametrize("data_width", [8, 16, 32, 64])
def test_axis_fifo(request, data_width, frame_fifo, drop_oversize_frame, drop_bad_frame, drop_when_full):
dut = "axis_fifo"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['DEPTH'] = 1024
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_ENABLE'] = int(parameters['DATA_WIDTH'] > 8)
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['LAST_ENABLE'] = 1
parameters['ID_ENABLE'] = 1
parameters['ID_WIDTH'] = 8
parameters['DEST_ENABLE'] = 1
parameters['DEST_WIDTH'] = 8
parameters['USER_ENABLE'] = 1
parameters['USER_WIDTH'] = 1
parameters['PIPELINE_OUTPUT'] = 2
parameters['FRAME_FIFO'] = frame_fifo
parameters['USER_BAD_FRAME_VALUE'] = 1
parameters['USER_BAD_FRAME_MASK'] = 1
parameters['DROP_OVERSIZE_FRAME'] = drop_oversize_frame
parameters['DROP_BAD_FRAME'] = drop_bad_frame
parameters['DROP_WHEN_FULL'] = drop_when_full
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
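# Usage note (illustrative; the exact test ids depend on pytest's
# parametrization naming): a single parameter combination can be selected from
# this file with something like
#
#     pytest -v -k "test_axis_fifo[8-"
#
# while running pytest with no -k sweeps every data width / drop-mode combination.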
|
|
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
    Tests the Aggregates API, which requires admin privileges.
"""
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def resource_setup(cls):
super(AggregatesAdminTestJSON, cls).resource_setup()
cls.client = cls.os_adm.aggregates_client
cls.aggregate_name_prefix = 'test_aggregate_'
cls.az_name_prefix = 'test_az_'
resp, hosts_all = cls.os_adm.hosts_client.list_hosts()
        hosts = [host['host_name'] for host in hosts_all
                 if host['service'] == 'compute']
        cls.host = hosts[0]
def _try_delete_aggregate(self, aggregate_id):
# delete aggregate, if it exists
try:
self.client.delete_aggregate(aggregate_id)
        # if the aggregate is not found, it was already deleted during the test
except exceptions.NotFound:
pass
@test.attr(type='gate')
def test_aggregate_create_delete(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, aggregate['name'])
self.assertIsNone(aggregate['availability_zone'])
resp, _ = self.client.delete_aggregate(aggregate['id'])
self.assertEqual(200, resp.status)
self.client.wait_for_resource_deletion(aggregate['id'])
@test.attr(type='gate')
def test_aggregate_create_delete_with_az(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
resp, _ = self.client.delete_aggregate(aggregate['id'])
self.assertEqual(200, resp.status)
self.client.wait_for_resource_deletion(aggregate['id'])
@test.attr(type='gate')
def test_aggregate_create_verify_entry_in_list(self):
# Create an aggregate and ensure it is listed.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, aggregates = self.client.list_aggregates()
self.assertEqual(200, resp.status)
self.assertIn((aggregate['id'], aggregate['availability_zone']),
map(lambda x: (x['id'], x['availability_zone']),
aggregates))
@test.attr(type='gate')
def test_aggregate_create_update_metadata_get_details(self):
# Create an aggregate and ensure its details are returned.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate['name'], body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertEqual({}, body["metadata"])
# set the metadata of the aggregate
meta = {"key": "value"}
resp, body = self.client.set_metadata(aggregate['id'], meta)
self.assertEqual(200, resp.status)
self.assertEqual(meta, body["metadata"])
# verify the metadata has been set
resp, body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(meta, body["metadata"])
@test.attr(type='gate')
def test_aggregate_create_update_with_az(self):
# Update an aggregate and ensure properties are updated correctly
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.assertIsNotNone(aggregate['id'])
aggregate_id = aggregate['id']
new_aggregate_name = aggregate_name + '_new'
new_az_name = az_name + '_new'
resp, resp_aggregate = self.client.update_aggregate(aggregate_id,
new_aggregate_name,
new_az_name)
self.assertEqual(200, resp.status)
self.assertEqual(new_aggregate_name, resp_aggregate['name'])
self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
resp, aggregates = self.client.list_aggregates()
self.assertEqual(200, resp.status)
self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
map(lambda x:
(x['id'], x['name'], x['availability_zone']),
aggregates))
@test.attr(type='gate')
def test_aggregate_add_remove_host(self):
        # Add a host to the given aggregate and then remove it.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, body = self.client.add_host(aggregate['id'], self.host)
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertIn(self.host, body['hosts'])
resp, body = self.client.remove_host(aggregate['id'], self.host)
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertNotIn(self.host, body['hosts'])
@test.attr(type='gate')
def test_aggregate_add_host_list(self):
        # Add a host to the given aggregate and list the aggregates.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
resp, aggregates = self.client.list_aggregates()
        aggs = [x for x in aggregates if x['id'] == aggregate['id']]
self.assertEqual(1, len(aggs))
agg = aggs[0]
self.assertEqual(aggregate_name, agg['name'])
self.assertIsNone(agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@test.attr(type='gate')
def test_aggregate_add_host_get_details(self):
        # Add a host to the given aggregate and get its details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
resp, body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(aggregate_name, body['name'])
self.assertIsNone(body['availability_zone'])
self.assertIn(self.host, body['hosts'])
@test.attr(type='gate')
def test_aggregate_add_host_create_server_with_az(self):
        # Add a host to the given aggregate and create a server.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
server_name = data_utils.rand_name('test_server_')
admin_servers_client = self.os_adm.servers_client
resp, server = self.create_test_server(name=server_name,
availability_zone=az_name,
wait_until='ACTIVE')
resp, body = admin_servers_client.get_server(server['id'])
self.assertEqual(self.host, body[self._host_key])
|
|
import json
from os import urandom
import urllib
import urlparse
import flask
import requests
from requests_oauthlib import OAuth1 as OAuth1Manager
from oauthlib.oauth1.rfc5849 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.oauth2.draft25 import tokens
from werkzeug.urls import url_decode
import config
from foauth import OAuthError
BEARER = 'BEARER'
BEARER_HEADER = 'HEADER'
BEARER_BODY = 'BODY'
BEARER_URI = 'URI'
BEARER_TYPES = (BEARER_HEADER, BEARER_BODY, BEARER_URI)
class Bearer(object):
def __init__(self, token, bearer_type=BEARER_HEADER):
self.token = token
if bearer_type in BEARER_TYPES or callable(bearer_type):
self.bearer_type = bearer_type
else:
raise ValueError('Unknown bearer type %s' % bearer_type)
def __call__(self, r):
if self.bearer_type == BEARER_HEADER:
r.headers = tokens.prepare_bearer_headers(self.token, r.headers)
elif self.bearer_type == BEARER_BODY:
r.data = tokens.prepare_bearer_body(self.token, r.data)
elif self.bearer_type == BEARER_URI:
r.url = tokens.prepare_bearer_uri(self.token, r.url)
elif callable(self.bearer_type):
r = self.bearer_type(self.token, r)
return r
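# Illustrative sketch (hypothetical URL and token, not part of this module):
# Bearer is a requests-style auth callable, so it can be attached directly to a
# request; with the default BEARER_HEADER type it injects an
# "Authorization: Bearer <token>" header.
#
#     req = requests.Request('GET', 'https://api.example.com/me',
#                            auth=Bearer('token123'))
#     req.prepare().headers['Authorization']   # 'Bearer token123'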
class OAuthMeta(type):
def __init__(cls, name, bases, attrs):
if 'alias' not in attrs:
cls.alias = cls.__name__.lower()
if 'api_domain' in attrs and 'api_domains' not in attrs:
cls.api_domains = [cls.api_domain]
if 'provider_url' in attrs and 'favicon_url' not in attrs:
# Use a favicon service when no favicon is supplied
primary = 'https://getfavicon.appspot.com/%s' % cls.provider_url
domain = urlparse.urlparse(cls.provider_url).netloc
backup = 'https://www.google.com/s2/favicons?domain=%s' % domain
cls.favicon_url = '%s?defaulticon=%s' % (primary, urllib.quote(backup))
if 'name' not in attrs:
cls.name = cls.__name__
class OAuth(object):
__metaclass__ = OAuthMeta
https = True
verify = True
signature_method = SIGNATURE_HMAC
signature_type = SIGNATURE_TYPE_AUTH_HEADER
permissions_widget = 'checkbox'
description = ''
disclaimer = ''
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
self.session = config.app.requests_session
def get_request_token_url(self):
return self.request_token_url
def get_access_token_url(self):
return self.access_token_url
def get_redirect_uri(self, url_name):
root = flask.request.url_root
path = flask.url_for(url_name, alias=self.alias)
return urlparse.urljoin(root, path).decode('utf8')
def get_scope_string(self, scopes):
return ''
def authorize(self, scopes):
redirect_uri = self.get_redirect_uri('callback')
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=scopes)
req = requests.Request(url=self.authorize_url, params=params)
return flask.redirect(req.prepare().url)
def login(self):
redirect_uri = self.get_redirect_uri('login_callback')
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=[])
req = requests.Request(url=self.authorize_url, params=params)
return flask.redirect(req.prepare().url)
# The remainder of the API must be implemented for each flavor of OAuth
def callback(self, data, url_name):
"""
Receives the full callback from the service and returns a 2-tuple
containing the user token and user secret (if applicable).
"""
raise NotImplementedError("callback() must be defined in a subclass")
def api(self, key, domain, path, method='GET', params=None, data=None):
"""
Passes along an API request to the service and returns the response.
"""
raise NotImplementedError("api() must be defined in a subclass")
class OAuth1(OAuth):
returns_token = True
def parse_token(self, content):
content = url_decode(content)
return {
'access_token': content['oauth_token'],
'secret': content['oauth_token_secret'],
}
def get_request_token_params(self, redirect_uri, scopes):
return {}
def get_request_token_response(self, redirect_uri, scopes):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
callback_uri=redirect_uri,
signature_method=self.signature_method,
signature_type=self.signature_type)
return self.session.post(self.get_request_token_url(), auth=auth,
params=self.get_request_token_params(redirect_uri, scopes),
verify=self.verify)
def get_authorize_params(self, redirect_uri, scopes):
resp = self.get_request_token_response(redirect_uri, scopes)
try:
data = self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
flask.session['%s_temp_secret' % self.alias] = data['secret']
if not self.returns_token:
redirect_uri += ('?oauth_token=%s' % data['access_token'])
return {
'oauth_token': data['access_token'],
'oauth_callback': redirect_uri,
}
def get_access_token_response(self, token, secret, verifier=None):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=token,
resource_owner_secret=secret,
verifier=verifier,
signature_method=self.signature_method,
signature_type=self.signature_type)
return self.session.post(self.get_access_token_url(),auth=auth,
verify=self.verify)
def callback(self, data, url_name):
token = data['oauth_token']
verifier = data.get('oauth_verifier', None)
secret = flask.session['%s_temp_secret' % self.alias]
del flask.session['%s_temp_secret' % self.alias]
resp = self.get_access_token_response(token, secret, verifier)
try:
return self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=key.access_token,
resource_owner_secret=key.secret,
signature_method=self.signature_method,
signature_type=self.signature_type)
return self.session.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
class OAuth2(OAuth):
token_type = BEARER
bearer_type = BEARER_HEADER
supports_state = True
auth = None
def parse_token(self, content):
return json.loads(content)
def get_scope_string(self, scopes):
return ' '.join(scopes)
def get_authorize_params(self, redirect_uri, scopes):
state = ''.join('%02x' % ord(x) for x in urandom(16))
flask.session['%s_state' % self.alias] = state
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
params = {
'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'state': state,
}
if any(scopes):
params['scope'] = self.get_scope_string(scopes)
return params
def get_access_token_response(self, redirect_uri, data):
return self.session.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': data['code'],
'redirect_uri': redirect_uri,
}, verify=self.verify, auth=self.auth)
def callback(self, data, url_name):
state = flask.session['%s_state' % self.alias]
if 'state' in data and state != data['state']:
flask.abort(403)
del flask.session['%s_state' % self.alias]
redirect_uri = self.get_redirect_uri(url_name)
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
resp = self.get_access_token_response(redirect_uri, data)
return self.parse_token(resp.content)
def refresh_token(self, token):
        resp = self.session.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': token,
}, verify=self.verify, auth=self.auth)
return self.parse_token(resp.content)
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
if self.token_type == BEARER:
auth = Bearer(key.access_token, bearer_type=self.bearer_type)
return self.session.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
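# Illustrative sketch (hypothetical provider, not part of this module): a
# concrete service subclasses OAuth1 or OAuth2 and fills in its endpoints;
# OAuthMeta then derives alias, api_domains and favicon_url automatically.
#
#     class ExampleService(OAuth2):
#         provider_url = 'https://example.com'
#         api_domain = 'api.example.com'
#         authorize_url = 'https://example.com/oauth/authorize'
#         access_token_url = 'https://example.com/oauth/token'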
|
|
import ctypes
import random
import importlib
import numpy as np
# Set up ctypes function bindings
libpath = importlib.find_loader("libfht").path
libfht = ctypes.CDLL(libpath)
_fht = libfht.fht
_shuffle_bigger_lfsr = libfht.shuffle_bigger_lfsr
_shuffle_smaller_lfsr = libfht.shuffle_smaller_lfsr
_shuffle_bigger_o = libfht.shuffle_bigger_o
_shuffle_smaller_o = libfht.shuffle_smaller_o
u32 = ctypes.c_uint32
u64 = ctypes.c_uint64
ptype = np.ctypeslib.ndpointer(dtype=np.double, ndim=1)
u8array = np.ctypeslib.ndpointer(dtype=np.uint8, ndim=1)
u32array = np.ctypeslib.ndpointer(dtype=np.uint32, ndim=1)
_fht.argtypes = [u32, ptype]
_shuffle_bigger_lfsr.argtypes = [u32, ptype, u32, ptype, u32]
_shuffle_smaller_lfsr.argtypes = [u32, ptype, u32, ptype, u32]
_shuffle_bigger_o.argtypes = [u32, ptype, u32, ptype, u32array]
_shuffle_smaller_o.argtypes = [u32, ptype, u32, ptype, u32array]
def FHTlfsr(N, n):
"""
Legacy LFSR based subsampled FHT
"""
assert (N & (N-1)) == 0
def Az(z, seed=1):
"""Computes A'.z, returns an Nx1 vector."""
zc = np.zeros(N)
_shuffle_bigger_lfsr(n, z.reshape(n), N, zc, seed)
_fht(N, zc)
return zc
def Ab(beta, seed=1):
"""Computes A.b, returns an nx1 vector."""
bc = beta.copy().reshape(N)
_fht(N, bc)
out = np.empty(n)
_shuffle_smaller_lfsr(N, bc, n, out, seed)
return out
return Az, Ab
def FHTo(N, n):
"""
Legacy ordering-specified subsampled FHT
"""
assert (N & (N-1)) == 0
def Az(z, seed=1):
rng = random.Random(seed)
order = np.array(rng.sample(range(1, N), n), dtype=np.uint32)
zc = np.zeros(N)
_shuffle_bigger_o(n, z.reshape(n), N, zc, order)
_fht(N, zc)
return zc
def Ab(beta, seed=1):
rng = random.Random(seed)
order = np.array(rng.sample(range(1, N), n), dtype=np.uint32)
bc = beta.copy().reshape(N)
_fht(N, bc)
out = np.empty(n)
_shuffle_smaller_o(N, bc, n, out, order)
return out
return Az, Ab
FHT = FHTo
def fht(x):
"""
Compute the Walsh-Hadamard transform of x,
which must be a 1d array whose length is a power of 2.
"""
assert len(x.shape) == 1, "x must be 1-dimensional"
assert x.size != 0, "x must not be empty"
assert x.size & (x.size - 1) == 0, "len(x) must be a power of 2"
out = x.copy()
_fht(out.size, out)
return out
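# Example, assuming the C routine computes the unnormalized Walsh-Hadamard
# transform (scaling may differ depending on how libfht was built):
#   fht(np.array([1.0, 0.0, 0.0, 0.0]))  ->  [1., 1., 1., 1.]
#   fht(np.array([1.0, 1.0, 1.0, 1.0]))  ->  [4., 0., 0., 0.]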
def fht_inplace(x):
"""
Compute the Walsh-Hadamard transform of x, in place.
x must be a 1d array with length a power of 2.
"""
_fht(x.size, x)
def sub_fht(n, m, seed=0, ordering=None):
"""
Returns functions to compute the sub-sampled Walsh-Hadamard transform,
i.e., operating with a wide rectangular matrix of random +/-1 entries.
n: number of rows
m: number of columns
It is most efficient (but not required) for max(m+1,n+1) to be a power of
two.
seed: determines choice of random matrix
ordering: optional length-n array of row indices in [1, w), where w is
max(m+1, n+1) rounded up to the next power of two; generated
from seed if not specified, but may be given to speed up
subsequent runs on the same matrix.
Returns (Ax, Ay, ordering):
Ax(x): computes A.x (of length n), with x having length m
Ay(y): computes A'.y (of length m), with y having length n
ordering: the ordering in use, which may have been generated from seed
"""
assert n > 0, "n must be positive"
assert m > 0, "m must be positive"
w = 2**int(np.ceil(np.log2(max(m+1, n+1))))
if ordering is not None:
assert ordering.shape == (n,)
else:
rng = np.random.RandomState(seed)
idxs = np.arange(1, w, dtype=np.uint32)
rng.shuffle(idxs)
ordering = idxs[:n]
def Ax(x):
assert x.size == m, "x must be m long"
y = np.zeros(w)
y[w-m:] = x.reshape(m)
fht_inplace(y)
return y[ordering]
def Ay(y):
assert y.size == n, "input must be n long"
x = np.zeros(w)
x[ordering] = y
fht_inplace(x)
return x[w-m:]
return Ax, Ay, ordering
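# Note: Ax and Ay are adjoint linear operators, so for any x of length m and
# y of length n, np.dot(Ax(x), y) equals np.dot(x, Ay(y)) up to floating-point
# error. A runnable check covering this and block_sub_fht is sketched at the
# end of the file.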
def block_sub_fht(n, m, l, seed=0, ordering=None):
"""
As `sub_fht`, but computes in `l` blocks of size `n` by `m`, potentially
offering substantial speed improvements.
n: number of rows
m: number of columns per block
l: number of blocks
It is most efficient (though not required) when max(m+1,n+1) is a power of
two.
seed: determines choice of random matrix
ordering: optional (l, n) shaped array of row indices in [1, w), where w is
max(m+1, n+1) rounded up to the next power of two; generated from
seed if not specified, but may be given to speed up subsequent
runs on the same matrix.
Returns (Ax, Ay, ordering):
Ax(x): computes A.x (of length n), with x having length l*m
Ay(y): computes A'.y (of length l*m), with y having length n
ordering: the ordering in use, which may have been generated from seed
"""
assert n > 0, "n must be positive"
assert m > 0, "m must be positive"
assert l > 0, "l must be positive"
if ordering is not None:
assert ordering.shape == (l, n)
else:
w = 2**int(np.ceil(np.log2(max(m+1, n+1))))
rng = np.random.RandomState(seed)
ordering = np.empty((l, n), dtype=np.uint32)
idxs = np.arange(1, w, dtype=np.uint32)
for ll in range(l):
rng.shuffle(idxs)
ordering[ll] = idxs[:n]
def Ax(x):
assert x.size == l*m
out = np.zeros(n)
for ll in range(l):
ax, ay, _ = sub_fht(n, m, ordering=ordering[ll])
out += ax(x[ll*m:(ll+1)*m])
return out
def Ay(y):
assert y.size == n
out = np.empty(l*m)
for ll in range(l):
ax, ay, _ = sub_fht(n, m, ordering=ordering[ll])
out[ll*m:(ll+1)*m] = ay(y)
return out
return Ax, Ay, ordering
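# Minimal self-check sketch, assuming the libfht shared library is built and
# importable (the module-level ctypes bindings above already require it).
# It verifies that Ax/Ay behave as adjoint operators for both variants.
if __name__ == "__main__":
    n, m, l = 16, 100, 3
    # Plain subsampled transform.
    Ax, Ay, _ = sub_fht(n, m, seed=1)
    x = np.random.randn(m)
    y = np.random.randn(n)
    assert np.allclose(np.dot(Ax(x), y), np.dot(x, Ay(y)))
    # Blocked variant: l blocks of m columns each.
    Bx, By, _ = block_sub_fht(n, m, l, seed=1)
    xb = np.random.randn(l * m)
    assert np.allclose(np.dot(Bx(xb), y), np.dot(xb, By(y)))
    print("sub_fht / block_sub_fht adjointness checks passed")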
|
|
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines common to all posix systems."""
import glob
import os
import signal
import sys
import time
from ._common import TimeoutExpired
from ._common import memoize
from ._common import sdiskusage
from ._common import usage_percent
from ._compat import PY3
from ._compat import ChildProcessError
from ._compat import FileNotFoundError
from ._compat import InterruptedError
from ._compat import PermissionError
from ._compat import ProcessLookupError
from ._compat import unicode
if sys.version_info >= (3, 4):
import enum
else:
enum = None
__all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map']
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid == 0:
# According to "man 2 kill" PID 0 has a special meaning:
# it refers to <<every process in the process group of the
# calling process>> so we don't want to go any further.
# If we get here it means this UNIX platform *does* have
# a process with id 0.
return True
try:
os.kill(pid, 0)
except ProcessLookupError:
return False
except PermissionError:
# EPERM clearly means there's a process to deny access to
return True
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
else:
return True
# Python 3.5 signals enum (contributed by me ^^):
# https://bugs.python.org/issue21076
if enum is not None and hasattr(signal, "Signals"):
Negsignal = enum.IntEnum(
'Negsignal', dict([(x.name, -x.value) for x in signal.Signals]))
def negsig_to_enum(num):
"""Convert a negative signal value to an enum."""
try:
return Negsignal(num)
except ValueError:
return num
else: # pragma: no cover
def negsig_to_enum(num):
return num
def wait_pid(pid, timeout=None, proc_name=None,
_waitpid=os.waitpid,
_timer=getattr(time, 'monotonic', time.time),
_min=min,
_sleep=time.sleep,
_pid_exists=pid_exists):
"""Wait for a process PID to terminate.
If the process terminated normally by calling exit(3) or _exit(2),
or by returning from main(), the return value is the positive integer
passed to *exit().
If it was terminated by a signal it returns the negated value of the
signal which caused the termination (e.g. -SIGTERM).
If PID is not a child of os.getpid() (the current process), just
wait until the process disappears and return None.
If PID does not exist at all return None immediately.
If *timeout* != None and process is still alive raise TimeoutExpired.
timeout=0 is also possible (either return immediately or raise).
"""
if pid <= 0:
raise ValueError("can't wait for PID 0") # see "man waitpid"
interval = 0.0001
flags = 0
if timeout is not None:
flags |= os.WNOHANG
stop_at = _timer() + timeout
def sleep(interval):
# Sleep for some time and return a new increased interval.
if timeout is not None:
if _timer() >= stop_at:
raise TimeoutExpired(timeout, pid=pid, name=proc_name)
_sleep(interval)
return _min(interval * 2, 0.04)
# See: https://linux.die.net/man/2/waitpid
while True:
try:
retpid, status = os.waitpid(pid, flags)
except InterruptedError:
interval = sleep(interval)
except ChildProcessError:
# This has two meanings:
# - PID is not a child of os.getpid() in which case
# we keep polling until it's gone
# - PID never existed in the first place
# In both cases we'll eventually return None as we
# can't determine its exit status code.
while _pid_exists(pid):
interval = sleep(interval)
return
else:
if retpid == 0:
# WNOHANG flag was used and PID is still running.
interval = sleep(interval)
continue
elif os.WIFEXITED(status):
# Process terminated normally by calling exit(3) or _exit(2),
# or by returning from main(). The return value is the
# positive integer passed to *exit().
return os.WEXITSTATUS(status)
elif os.WIFSIGNALED(status):
# Process exited due to a signal. Return the negative value
# of that signal.
return negsig_to_enum(-os.WTERMSIG(status))
# elif os.WIFSTOPPED(status):
# # Process was stopped via SIGSTOP or is being traced, and
# # waitpid() was called with WUNTRACED flag. PID is still
# # alive. From now on waitpid() will keep returning (0, 0)
# # until the process state doesn't change.
# # It may make sense to catch/enable this since stopped PIDs
# # ignore SIGTERM.
# interval = sleep(interval)
# continue
# elif os.WIFCONTINUED(status):
# # Process was resumed via SIGCONT and waitpid() was called
# # with WCONTINUED flag.
# interval = sleep(interval)
# continue
else:
# Should never happen.
raise ValueError("unknown process exit status %r" % status)
def disk_usage(path):
"""Return disk usage associated with path.
Note: UNIX usually reserves 5% disk space which is not accessible
by user. In this function "total" and "used" values reflect the
total and used disk space whereas "free" and "percent" represent
the "free" and "used percent" user disk space.
"""
if PY3:
st = os.statvfs(path)
else: # pragma: no cover
# os.statvfs() does not support unicode on Python 2:
# - https://github.com/giampaolo/psutil/issues/416
# - http://bugs.python.org/issue18695
try:
st = os.statvfs(path)
except UnicodeEncodeError:
if isinstance(path, unicode):
try:
path = path.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
pass
st = os.statvfs(path)
else:
raise
# Total space which is only available to root (unless changed
# at system level).
total = (st.f_blocks * st.f_frsize)
# Remaining free space usable by root.
avail_to_root = (st.f_bfree * st.f_frsize)
# Remaining free space usable by user.
avail_to_user = (st.f_bavail * st.f_frsize)
# Total space being used in general.
used = (total - avail_to_root)
# Total space which is available to user (same as 'total' but
# for the user).
total_user = used + avail_to_user
# User usage percent compared to the total amount of space
# the user can use. This number would be higher if compared
# to root's because the user has less space (usually -5%).
usage_percent_user = usage_percent(used, total_user, round_=1)
# NB: the percentage is -5% than what shown by df due to
# reserved blocks that we are currently not considering:
# https://github.com/giampaolo/psutil/issues/829#issuecomment-223750462
return sdiskusage(
total=total, used=used, free=avail_to_user, percent=usage_percent_user)
@memoize
def get_terminal_map():
"""Get a map of device-id -> path as a dict.
Used by Process.terminal()
"""
ret = {}
ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
for name in ls:
assert name not in ret, name
try:
ret[os.stat(name).st_rdev] = name
except FileNotFoundError:
pass
return ret
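# Illustrative usage sketch, assuming a POSIX system; shown as comments
# because this module is imported as part of the package rather than run
# directly:
#   pid = os.fork()
#   if pid == 0:              # child
#       os._exit(7)
#   wait_pid(pid)             # -> 7 (the status passed to _exit)
#   pid_exists(os.getpid())   # -> True
#   disk_usage('/').percent   # -> user-visible usage percent, e.g. 42.3
#   get_terminal_map()        # -> {device_id: '/dev/pts/0', ...}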
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from telemetry.story import story_set as story_set_module
from telemetry.page import page_test
from gpu_tests import gpu_test_base
class GpuProcessSharedPageState(gpu_test_base.GpuSharedPageState):
gpu_switches = ['--gpu-no-complete-info-collection',
'--gpu-testing-os-version',
'--gpu-testing-vendor-id',
'--gpu-testing-device-id',
'--gpu-testing-secondary-vendor-ids',
'--gpu-testing-secondary-device-ids',
'--gpu-testing-driver-date',
'--gpu-testing-gl-vendor',
'--gpu-testing-gl-renderer',
'--gpu-testing-gl-version']
def __init__(self, test, finder_options, story_set):
super(GpuProcessSharedPageState, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options.extra_browser_args
# Clear all existing gpu testing switches.
old_gpu_switches = []
for opt in options:
for gpu_switch in self.gpu_switches:
if opt.startswith(gpu_switch):
old_gpu_switches.append(opt)
options.difference_update(old_gpu_switches)
class IdentifyActiveGpuPageBase(gpu_test_base.PageBase):
def __init__(self, name=None, page_set=None, shared_page_state_class=None,
expectations=None, active_gpu=None, inactive_gpus=None):
super(IdentifyActiveGpuPageBase, self).__init__(
url='chrome:gpu',
name=name,
page_set=page_set,
shared_page_state_class=shared_page_state_class,
expectations=expectations)
self.active_gpu = active_gpu
self.inactive_gpus = inactive_gpus
def Validate(self, tab, results):
basic_infos = tab.EvaluateJavaScript('browserBridge.gpuInfo.basic_info')
active_gpu = []
inactive_gpus = []
index = 0
for info in basic_infos:
description = info['description']
value = info['value']
if description.startswith('GPU%d' % index) and value.startswith('VENDOR'):
if value.endswith('*ACTIVE*'):
active_gpu.append(value)
else:
inactive_gpus.append(value)
index += 1
if active_gpu != self.active_gpu:
raise page_test.Failure('Active GPU field is wrong %s' % active_gpu)
if inactive_gpus != self.inactive_gpus:
raise page_test.Failure('Inactive GPU field is wrong %s' % inactive_gpus)
class DriverBugWorkaroundsTestsPage(gpu_test_base.PageBase):
def __init__(self, page_set=None, name='',
shared_page_state_class=None,
expectations=None,
expected_workaround=None,
unexpected_workaround=None):
super(DriverBugWorkaroundsTestsPage, self).__init__(
url='chrome:gpu',
page_set=page_set,
name=name,
shared_page_state_class=shared_page_state_class,
expectations=expectations)
self.expected_workaround = expected_workaround
self.unexpected_workaround = unexpected_workaround
def _Validate(self, tab, process_kind, is_expected, workaround_name):
if process_kind == "browser_process":
gpu_driver_bug_workarounds = tab.EvaluateJavaScript( \
'GetDriverBugWorkarounds()')
elif process_kind == "gpu_process":
gpu_driver_bug_workarounds = tab.EvaluateJavaScript( \
'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()')
is_present = workaround_name in gpu_driver_bug_workarounds
failure = False
if is_expected and not is_present:
failure = True
error_message = "is missing"
elif not is_expected and is_present:
failure = True
error_message = "is not expected"
if failure:
print 'Test failed. Printing page contents:'
print tab.EvaluateJavaScript('document.body.innerHTML')
raise page_test.Failure('%s %s in Browser process workarounds: %s' \
% (workaround_name, error_message, gpu_driver_bug_workarounds))
def Validate(self, tab, results):
if not self.expected_workaround and not self.unexpected_workaround:
return
if self.expected_workaround:
self._Validate(tab, "browser_process", True, self.expected_workaround)
self._Validate(tab, "gpu_process", True, self.expected_workaround)
if self.unexpected_workaround:
self._Validate(tab, "browser_process", False, self.unexpected_workaround)
self._Validate(tab, "gpu_process", False, self.unexpected_workaround)
class GpuProcessTestsPage(gpu_test_base.PageBase):
def __init__(self, url, name, story_set, expectations):
super(GpuProcessTestsPage, self).__init__(url=url,
shared_page_state_class=gpu_test_base.GpuSharedPageState,
page_set=story_set,
name=name,
expectations=expectations)
class FunctionalVideoPage(GpuProcessTestsPage):
def __init__(self, story_set, expectations):
super(FunctionalVideoPage, self).__init__(
url='file://../../data/gpu/functional_video.html',
name='GpuProcess.video',
story_set=story_set,
expectations=expectations)
def RunNavigateSteps(self, action_runner):
super(FunctionalVideoPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'domAutomationController._finished', timeout_in_seconds=30)
class GpuInfoCompletePage(GpuProcessTestsPage):
def __init__(self, story_set, expectations):
super(GpuInfoCompletePage, self).__init__(
url='file://../../data/gpu/functional_3d_css.html',
name='GpuProcess.gpu_info_complete',
story_set=story_set,
expectations=expectations)
def Validate(self, tab, results):
# Regression test for crbug.com/454906
if not tab.browser.supports_system_info:
raise page_test.Failure('Browser must support system info')
system_info = tab.browser.GetSystemInfo()
if not system_info.gpu:
raise page_test.Failure('Target machine must have a GPU')
if not system_info.gpu.aux_attributes:
raise page_test.Failure('Browser must support GPU aux attributes')
if not 'gl_renderer' in system_info.gpu.aux_attributes:
raise page_test.Failure('Browser must have gl_renderer in aux attribs')
if len(system_info.gpu.aux_attributes['gl_renderer']) <= 0:
raise page_test.Failure('Must have a non-empty gl_renderer string')
class NoGpuProcessSharedPageState(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(NoGpuProcessSharedPageState, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
if sys.platform in ('cygwin', 'win32'):
# Hit id 34 from kSoftwareRenderingListJson.
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x5333')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x8811')
elif sys.platform.startswith('linux'):
# Hit id 50 from kSoftwareRenderingListJson.
options.AppendExtraBrowserArgs('--gpu-no-complete-info-collection')
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x10de')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0de1')
options.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=VMware')
options.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=softpipe')
options.AppendExtraBrowserArgs('--gpu-testing-gl-version="2.1 Mesa 10.1"')
elif sys.platform == 'darwin':
# Hit id 81 from kSoftwareRenderingListJson.
options.AppendExtraBrowserArgs('--gpu-testing-os-version=10.7')
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x15ad')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0393')
class NoGpuProcessPage(gpu_test_base.PageBase):
def __init__(self, story_set, expectations):
super(NoGpuProcessPage, self).__init__(
url='about:blank',
name='GpuProcess.no_gpu_process',
page_set=story_set,
shared_page_state_class=NoGpuProcessSharedPageState,
expectations=expectations)
def Validate(self, tab, results):
has_gpu_process_js = 'chrome.gpuBenchmarking.hasGpuProcess()'
has_gpu_process = tab.EvaluateJavaScript(has_gpu_process_js)
if has_gpu_process:
raise page_test.Failure('GPU process detected')
class SoftwareGpuProcessSharedPageState(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(SoftwareGpuProcessSharedPageState, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x10de')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0de1')
options.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=VMware')
options.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=SVGA3D')
options.AppendExtraBrowserArgs('--gpu-testing-gl-version="2.1 Mesa 10.1"')
class SoftwareGpuProcessPage(gpu_test_base.PageBase):
def __init__(self, story_set, expectations):
super(SoftwareGpuProcessPage, self).__init__(
url='about:blank',
name='GpuProcess.software_gpu_process',
page_set=story_set,
shared_page_state_class=SoftwareGpuProcessSharedPageState,
expectations=expectations)
class SkipGpuProcessSharedPageState(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(SkipGpuProcessSharedPageState, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
options.AppendExtraBrowserArgs('--disable-gpu')
options.AppendExtraBrowserArgs('--skip-gpu-data-loading')
class SkipGpuProcessPage(gpu_test_base.PageBase):
def __init__(self, story_set, expectations):
super(SkipGpuProcessPage, self).__init__(
url='chrome:gpu',
name='GpuProcess.skip_gpu_process',
page_set=story_set,
shared_page_state_class=SkipGpuProcessSharedPageState,
expectations=expectations)
def Validate(self, tab, results):
has_gpu_process_js = 'chrome.gpuBenchmarking.hasGpuProcess()'
has_gpu_process = tab.EvaluateJavaScript(has_gpu_process_js)
if has_gpu_process:
raise page_test.Failure('GPU process detected')
class DriverBugWorkaroundsShared(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(DriverBugWorkaroundsShared, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
options.AppendExtraBrowserArgs('--use_gpu_driver_workaround_for_testing')
class DriverBugWorkaroundsInGpuProcessPage(DriverBugWorkaroundsTestsPage):
def __init__(self, story_set, expectations):
super(DriverBugWorkaroundsInGpuProcessPage, self).__init__(
name='GpuProcess.driver_bug_workarounds_in_gpu_process',
page_set=story_set,
shared_page_state_class=DriverBugWorkaroundsShared,
expectations=expectations,
expected_workaround='use_gpu_driver_workaround_for_testing')
def Validate(self, tab, results):
super(DriverBugWorkaroundsInGpuProcessPage, self).Validate(tab, results)
class DriverBugWorkaroundsUponGLRendererShared(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(DriverBugWorkaroundsUponGLRendererShared, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
if sys.platform in ('cygwin', 'win32'):
# Hit id 51 and 87 from kGpuDriverBugListJson.
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x1002')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x6779')
options.AppendExtraBrowserArgs('--gpu-testing-driver-date=11-20-2014')
options.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=Google Inc.')
options.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=ANGLE ' \
'(AMD Radeon HD 6450 Direct3D11 vs_5_0 ps_5_0)')
options.AppendExtraBrowserArgs('--gpu-testing-gl-version=OpenGL ES 2.0 ' \
'(ANGLE 2.1.0.0c0d8006a9dd)')
elif sys.platform.startswith('linux'):
# Hit id 153 from kGpuDriverBugListJson.
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x0101')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0102')
options.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=Vivante ' \
'Corporation')
options.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=Vivante GC1000')
elif sys.platform == 'darwin':
# Currently on osx no workaround relies on gl-renderer.
pass
class DriverBugWorkaroundsUponGLRendererPage(DriverBugWorkaroundsTestsPage):
def __init__(self, story_set, expectations):
self.expected_workaround = None
self.unexpected_workaround = None
if sys.platform in ('cygwin', 'win32'):
self.expected_workaround = "texsubimage_faster_than_teximage"
self.unexpected_workaround = "disable_d3d11"
elif sys.platform.startswith('linux'):
self.expected_workaround = "disable_transparent_visuals"
elif sys.platform == 'darwin':
pass
super(DriverBugWorkaroundsUponGLRendererPage, self).__init__(
name='GpuProcess.driver_bug_workarounds_upon_gl_renderer',
page_set=story_set,
shared_page_state_class=DriverBugWorkaroundsUponGLRendererShared,
expectations=expectations,
expected_workaround=self.expected_workaround,
unexpected_workaround=self.unexpected_workaround)
def Validate(self, tab, results):
super(DriverBugWorkaroundsUponGLRendererPage, self).Validate(tab, results)
class IdentifyActiveGpuSharedPageState1(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(IdentifyActiveGpuSharedPageState1, self).__init__(
test, finder_options, story_set)
opts = finder_options.browser_options
opts.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x8086')
opts.AppendExtraBrowserArgs('--gpu-testing-device-id=0x040a')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-vendor-ids=0x10de')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-device-ids=0x0de1')
opts.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=nouveau')
class IdentifyActiveGpuPage1(IdentifyActiveGpuPageBase):
def __init__(self, story_set, expectations):
active_gpu = ['VENDOR = 0x10de, DEVICE= 0x0de1 *ACTIVE*']
inactive_gpus = ['VENDOR = 0x8086, DEVICE= 0x040a']
super(IdentifyActiveGpuPage1, self).__init__(
name='GpuProcess.identify_active_gpu1',
page_set=story_set,
shared_page_state_class=IdentifyActiveGpuSharedPageState1,
expectations=expectations,
active_gpu=active_gpu,
inactive_gpus=inactive_gpus)
def Validate(self, tab, results):
super(IdentifyActiveGpuPage1, self).Validate(tab, results)
class IdentifyActiveGpuSharedPageState2(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(IdentifyActiveGpuSharedPageState2, self).__init__(
test, finder_options, story_set)
opts = finder_options.browser_options
opts.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x8086')
opts.AppendExtraBrowserArgs('--gpu-testing-device-id=0x040a')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-vendor-ids=0x10de')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-device-ids=0x0de1')
opts.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=Intel')
class IdentifyActiveGpuPage2(IdentifyActiveGpuPageBase):
def __init__(self, story_set, expectations):
active_gpu = ['VENDOR = 0x8086, DEVICE= 0x040a *ACTIVE*']
inactive_gpus = ['VENDOR = 0x10de, DEVICE= 0x0de1']
super(IdentifyActiveGpuPage2, self).__init__(
name='GpuProcess.identify_active_gpu2',
page_set=story_set,
shared_page_state_class=IdentifyActiveGpuSharedPageState2,
expectations=expectations,
active_gpu=active_gpu,
inactive_gpus=inactive_gpus)
def Validate(self, tab, results):
super(IdentifyActiveGpuPage2, self).Validate(tab, results)
class IdentifyActiveGpuSharedPageState3(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(IdentifyActiveGpuSharedPageState3, self).__init__(
test, finder_options, story_set)
opts = finder_options.browser_options
opts.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x8086')
opts.AppendExtraBrowserArgs('--gpu-testing-device-id=0x040a')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-vendor-ids='
'0x10de;0x1002')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-device-ids='
'0x0de1;0x6779')
opts.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=X.Org')
opts.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=AMD R600')
class IdentifyActiveGpuPage3(IdentifyActiveGpuPageBase):
def __init__(self, story_set, expectations):
active_gpu = ['VENDOR = 0x1002, DEVICE= 0x6779 *ACTIVE*']
inactive_gpus = ['VENDOR = 0x8086, DEVICE= 0x040a', \
'VENDOR = 0x10de, DEVICE= 0x0de1']
super(IdentifyActiveGpuPage3, self).__init__(
name='GpuProcess.identify_active_gpu3',
page_set=story_set,
shared_page_state_class=IdentifyActiveGpuSharedPageState3,
expectations=expectations,
active_gpu=active_gpu,
inactive_gpus=inactive_gpus)
def Validate(self, tab, results):
super(IdentifyActiveGpuPage3, self).Validate(tab, results)
class IdentifyActiveGpuSharedPageState4(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(IdentifyActiveGpuSharedPageState4, self).__init__(
test, finder_options, story_set)
opts = finder_options.browser_options
opts.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x10de')
opts.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0de1')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-vendor-ids=')
opts.AppendExtraBrowserArgs('--gpu-testing-secondary-device-ids=')
opts.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=nouveau')
class IdentifyActiveGpuPage4(IdentifyActiveGpuPageBase):
def __init__(self, story_set, expectations):
active_gpu = ['VENDOR = 0x10de, DEVICE= 0x0de1 *ACTIVE*']
inactive_gpus = []
super(IdentifyActiveGpuPage4, self).__init__(
name='GpuProcess.identify_active_gpu4',
page_set=story_set,
shared_page_state_class=IdentifyActiveGpuSharedPageState4,
expectations=expectations,
active_gpu=active_gpu,
inactive_gpus=inactive_gpus)
def Validate(self, tab, results):
super(IdentifyActiveGpuPage4, self).Validate(tab, results)
class ReadbackWebGLGpuProcessSharedPageState(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(ReadbackWebGLGpuProcessSharedPageState, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
if sys.platform.startswith('linux'):
# Hit id 110 from kSoftwareRenderingListJson.
options.AppendExtraBrowserArgs('--gpu-testing-vendor-id=0x10de')
options.AppendExtraBrowserArgs('--gpu-testing-device-id=0x0de1')
options.AppendExtraBrowserArgs('--gpu-testing-gl-vendor=VMware')
options.AppendExtraBrowserArgs('--gpu-testing-gl-renderer=Gallium 0.4 ' \
'on llvmpipe (LLVM 3.4, 256 bits)')
options.AppendExtraBrowserArgs('--gpu-testing-gl-version="3.0 Mesa 11.2"')
class ReadbackWebGLGpuProcessPage(gpu_test_base.PageBase):
def __init__(self, story_set, expectations):
super(ReadbackWebGLGpuProcessPage, self).__init__(
url='chrome:gpu',
name='GpuProcess.readback_webgl_gpu_process',
page_set=story_set,
shared_page_state_class=ReadbackWebGLGpuProcessSharedPageState,
expectations=expectations)
def Validate(self, tab, results):
if sys.platform.startswith('linux'):
feature_status_js = 'browserBridge.gpuInfo.featureStatus.featureStatus'
feature_status_list = tab.EvaluateJavaScript(feature_status_js)
result = True
for name, status in feature_status_list.items():
if name == 'multiple_raster_threads':
result = result and status == 'enabled_on'
elif name == 'native_gpu_memory_buffers':
result = result and status == 'disabled_software'
elif name == 'webgl':
result = result and status == 'enabled_readback'
else:
result = result and status == 'unavailable_software'
if not result:
raise page_test.Failure('WebGL readback setup failed: %s' \
% feature_status_list)
class EqualBugWorkaroundsInBrowserAndGpuProcessPage(gpu_test_base.PageBase):
def __init__(self, story_set, expectations):
super(EqualBugWorkaroundsInBrowserAndGpuProcessPage, self).__init__(
url='chrome:gpu',
name='GpuProcess.equal_bug_workarounds_in_browser_and_gpu_process',
page_set=story_set,
shared_page_state_class=GpuProcessSharedPageState,
expectations=expectations)
def Validate(self, tab, results):
browser_list = tab.EvaluateJavaScript('GetDriverBugWorkarounds()')
gpu_list = tab.EvaluateJavaScript( \
'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()')
diff = set(browser_list).symmetric_difference(set(gpu_list))
if len(diff) > 0:
print 'Test failed. Printing page contents:'
print tab.EvaluateJavaScript('document.body.innerHTML')
raise page_test.Failure('Browser and GPU process list of driver bug ' \
'workarounds are not equal: %s != %s, diff: %s' % \
(browser_list, gpu_list, list(diff)))
class HasTransparentVisualsShared(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(HasTransparentVisualsShared, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
if sys.platform.startswith('linux'):
# Hit id 173 from kGpuDriverBugListJson.
options.AppendExtraBrowserArgs('--gpu-testing-gl-version=3.0 Mesa ' \
'12.1')
class HasTransparentVisualsGpuProcessPage(DriverBugWorkaroundsTestsPage):
def __init__(self, story_set, expectations):
super(HasTransparentVisualsGpuProcessPage, self).__init__(
name='GpuProcess.has_transparent_visuals_gpu_process',
page_set=story_set,
shared_page_state_class=HasTransparentVisualsShared,
expectations=expectations,
expected_workaround=None,
unexpected_workaround='disable_transparent_visuals')
def Validate(self, tab, results):
if sys.platform.startswith('linux'):
super(HasTransparentVisualsGpuProcessPage, self).Validate(tab, results)
class NoTransparentVisualsShared(GpuProcessSharedPageState):
def __init__(self, test, finder_options, story_set):
super(NoTransparentVisualsShared, self).__init__(
test, finder_options, story_set)
options = finder_options.browser_options
if sys.platform.startswith('linux'):
options.AppendExtraBrowserArgs('--disable_transparent_visuals=1')
class NoTransparentVisualsGpuProcessPage(DriverBugWorkaroundsTestsPage):
def __init__(self, story_set, expectations):
super(NoTransparentVisualsGpuProcessPage, self).__init__(
name='GpuProcess.no_transparent_visuals_gpu_process',
page_set=story_set,
shared_page_state_class=NoTransparentVisualsShared,
expectations=expectations,
expected_workaround='disable_transparent_visuals',
unexpected_workaround=None)
def Validate(self, tab, results):
if sys.platform.startswith('linux'):
super(NoTransparentVisualsGpuProcessPage, self).Validate(tab, results)
class GpuProcessTestsStorySet(story_set_module.StorySet):
""" Tests that accelerated content triggers the creation of a GPU process """
def __init__(self, expectations, is_platform_android):
super(GpuProcessTestsStorySet, self).__init__(
serving_dirs=set(['../../../../content/test/data']))
urls_and_names_list = [
('file://../../data/gpu/functional_canvas_demo.html',
'GpuProcess.canvas2d'),
('file://../../data/gpu/functional_3d_css.html',
'GpuProcess.css3d'),
('file://../../data/gpu/functional_webgl.html',
'GpuProcess.webgl')
]
for url, name in urls_and_names_list:
self.AddStory(GpuProcessTestsPage(url, name, self, expectations))
self.AddStory(FunctionalVideoPage(self, expectations))
self.AddStory(GpuInfoCompletePage(self, expectations))
self.AddStory(NoGpuProcessPage(self, expectations))
self.AddStory(SoftwareGpuProcessPage(self, expectations))
self.AddStory(DriverBugWorkaroundsInGpuProcessPage(self, expectations))
self.AddStory(IdentifyActiveGpuPage1(self, expectations))
self.AddStory(IdentifyActiveGpuPage2(self, expectations))
self.AddStory(IdentifyActiveGpuPage3(self, expectations))
self.AddStory(IdentifyActiveGpuPage4(self, expectations))
self.AddStory(ReadbackWebGLGpuProcessPage(self, expectations))
self.AddStory(DriverBugWorkaroundsUponGLRendererPage(self, expectations))
self.AddStory(EqualBugWorkaroundsInBrowserAndGpuProcessPage(self,
expectations))
if not is_platform_android:
self.AddStory(SkipGpuProcessPage(self, expectations))
self.AddStory(HasTransparentVisualsGpuProcessPage(self, expectations))
self.AddStory(NoTransparentVisualsGpuProcessPage(self, expectations))
@property
def allow_mixed_story_states(self):
# Return True here in order to be able to run pages with different browser
# command line arguments.
return True
|
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
# Author: Endre Karlson <endre.karlson@bouvet.no>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
from designate import exceptions
from designate.central import rpcapi as central_rpcapi
from designate.context import DesignateContext
from designate.i18n import _LW
from designate.objects import Record
from designate.objects import RecordSet
from designate.plugin import ExtensionPlugin
LOG = logging.getLogger(__name__)
class NotificationHandler(ExtensionPlugin):
"""Base class for notification handlers"""
__plugin_ns__ = 'designate.notification.handler'
__plugin_type__ = 'handler'
def __init__(self, *args, **kw):
super(NotificationHandler, self).__init__(*args, **kw)
self.central_api = central_rpcapi.CentralAPI()
@abc.abstractmethod
def get_exchange_topics(self):
"""
Returns a tuple of (exchange, list(topics)) this handler wishes
to receive notifications from.
"""
@abc.abstractmethod
def get_event_types(self):
"""
Returns a list of event types this handler is capable of processing
"""
@abc.abstractmethod
def process_notification(self, context, event_type, payload):
"""Processes a given notification"""
def get_zone(self, zone_id):
"""
Return the zone for this context
"""
context = DesignateContext.get_admin_context(all_tenants=True)
return self.central_api.get_zone(context, zone_id)
def _find_or_create_recordset(self, context, zone_id, name, type,
ttl=None):
name = name.encode('idna').decode('utf-8')
try:
# Attempt to create an empty recordset
values = {
'name': name,
'type': type,
'ttl': ttl,
}
recordset = self.central_api.create_recordset(
context, zone_id, RecordSet(**values))
except exceptions.DuplicateRecordSet:
# Fetch the existing recordset
recordset = self.central_api.find_recordset(context, {
'zone_id': zone_id,
'name': name,
'type': type,
})
return recordset
class BaseAddressHandler(NotificationHandler):
def _get_ip_data(self, addr_dict):
ip = addr_dict['address']
version = addr_dict['version']
data = {
'ip_version': version,
}
# TODO(endre): Add v6 support
if version == 4:
data['ip_address'] = ip.replace('.', '-')
ip_data = ip.split(".")
for i in [0, 1, 2, 3]:
data["octet%s" % i] = ip_data[i]
return data
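# Example of the data produced for an IPv4 address dict:
#   _get_ip_data({'address': '10.0.0.1', 'version': 4})
#   -> {'ip_version': 4, 'ip_address': '10-0-0-1',
#       'octet0': '10', 'octet1': '0', 'octet2': '0', 'octet3': '1'}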
def _create(self, addresses, extra, zone_id, managed=True,
resource_type=None, resource_id=None):
"""
Create a record from addresses
:param addresses: Address dicts like
{'version': 4, 'address': '10.0.0.1'}
:param extra: Extra data to use when formatting the record
:param managed: Is it a managed resource
:param resource_type: The managed resource type
:param resource_id: The managed resource ID
"""
if not managed:
LOG.warning(_LW(
'Deprecation notice: Unmanaged designate-sink records are '
'being deprecated please update the call '
'to remove managed=False'))
LOG.debug('Using Zone ID: %s', zone_id)
zone = self.get_zone(zone_id)
LOG.debug('Domain: %r', zone)
data = extra.copy()
LOG.debug('Event data: %s', data)
data['zone'] = zone['name']
context = DesignateContext().elevated()
context.all_tenants = True
context.edit_managed_records = True
for addr in addresses:
event_data = data.copy()
event_data.update(self._get_ip_data(addr))
for fmt in cfg.CONF[self.name].get('format'):
recordset_values = {
'zone_id': zone['id'],
'name': fmt % event_data,
'type': 'A' if addr['version'] == 4 else 'AAAA'}
recordset = self._find_or_create_recordset(
context, **recordset_values)
record_values = {
'data': addr['address']}
if managed:
record_values.update({
'managed': managed,
'managed_plugin_name': self.get_plugin_name(),
'managed_plugin_type': self.get_plugin_type(),
'managed_resource_type': resource_type,
'managed_resource_id': resource_id})
LOG.debug('Creating record in %s / %s with values %r',
zone['id'], recordset['id'], record_values)
self.central_api.create_record(context,
zone['id'],
recordset['id'],
Record(**record_values))
def _delete(self, zone_id, managed=True, resource_id=None,
resource_type='instance', criterion=None):
"""
Handle a generic delete of a fixed ip within a zone
:param criterion: Criterion to search and destroy records
"""
if not managed:
LOG.warning(_LW(
'Deprecation notice: Unmanaged designate-sink records are '
'being deprecated please update the call '
'to remove managed=False'))
criterion = criterion or {}
context = DesignateContext().elevated()
context.all_tenants = True
context.edit_managed_records = True
criterion.update({'zone_id': zone_id})
if managed:
criterion.update({
'managed': managed,
'managed_plugin_name': self.get_plugin_name(),
'managed_plugin_type': self.get_plugin_type(),
'managed_resource_id': resource_id,
'managed_resource_type': resource_type
})
records = self.central_api.find_records(context, criterion)
for record in records:
LOG.debug('Deleting record %s', record['id'])
self.central_api.delete_record(context,
zone_id,
record['recordset_id'],
record['id'])
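# Hypothetical handler sketch showing how a concrete subclass could tie the
# abstract interface above to _create()/_delete(). The exchange, topics,
# event-type names, payload keys and the zone_id option are assumptions for
# illustration, not the API of any shipped handler.
class SampleInstanceHandler(BaseAddressHandler):
    """Illustrative handler mapping instance lifecycle events to records."""

    def get_exchange_topics(self):
        # Single (exchange, topics) pair; names are assumed.
        return ('nova', ['notifications'])

    def get_event_types(self):
        return ['compute.instance.create.end',
                'compute.instance.delete.start']

    def process_notification(self, context, event_type, payload):
        # zone_id is assumed to come from this handler's config group.
        zone_id = cfg.CONF[self.name].zone_id
        if event_type == 'compute.instance.create.end':
            self._create(addresses=payload['fixed_ips'],
                         extra=payload,
                         zone_id=zone_id,
                         resource_id=payload['instance_id'],
                         resource_type='instance')
        elif event_type == 'compute.instance.delete.start':
            self._delete(zone_id=zone_id,
                         resource_id=payload['instance_id'],
                         resource_type='instance')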
|
|
# -*- coding: utf-8 -*-
import mysqlstmt
from .where_mixin import WhereMixin
from .join_mixin import JoinMixin
from .config import Config
import collections
class Select(mysqlstmt.Stmt, WhereMixin, JoinMixin):
"""SELECT statement.
Attributes:
cacheable (bool): See constructor.
calc_found_rows (bool): See constructor.
Examples: ::
>>> q = Select('t1')
>>> q.columns('t1c1').sql()
('SELECT `t1c1` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').sql()
('SELECT * FROM t1', None)
>>> q = Select(cacheable=True)
>>> q.from_table('t1').columns('t1c1').sql()
('SELECT SQL_CACHE `t1c1` FROM t1', None)
>>> q = Select(cacheable=False)
>>> q.from_table('t1').columns('t1c1').sql()
('SELECT SQL_NO_CACHE `t1c1` FROM t1', None)
>>> q = Select('t1')
>>> q.set_option('DISTINCT').columns('t1c1').sql()
('SELECT DISTINCT `t1c1` FROM t1', None)
"""
def __init__(self, table_name=None, having_predicate='OR', cacheable=None, calc_found_rows=False, **kwargs):
"""Constructor
Keyword Arguments:
table_name (string, optional): Table or tables to select from.
having_predicate (string, optional): The predicate for the outer HAVING condition, either 'AND' or 'OR'.
where_predicate (string, optional): The predicate for the outer WHERE condition, either 'AND' or 'OR'.
cacheable (bool, optional): Whether MySQL should cache query result.
Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
calc_found_rows (bool, optional): Whether MySQL should calculate number of found rows. Default is False.
**kwargs: Base class arguments.
"""
super(Select, self).__init__(**kwargs)
assert having_predicate == 'AND' or having_predicate == 'OR'
self._table_factors = []
self._select_col = []
self._select_expr = [] # tuple(name, value_params)
self._orderby_conds = []
self._groupby_conds = []
self._limit = None
self._having_cond_root = mysqlstmt.WhereCondition(self, where_predicate=having_predicate)
if cacheable is False or Config.select_cacheable is False:
self.cacheable = False
else:
self.cacheable = Config.select_cacheable if cacheable is None else cacheable
self.calc_found_rows = calc_found_rows
# Default first condition is 'AND'; will be ignored if having_or is called first
self.having_cond(where_predicate='AND')
if table_name:
self.from_table(table_name)
def from_table(self, list_or_name):
"""Add tables to select from.
Arguments:
list_or_name (string or list): Table name or list of table names.
Returns:
object: self
Examples: ::
>>> q = Select('t1')
>>> q.sql()
('SELECT * FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').sql()
('SELECT * FROM t1', None)
"""
if not isinstance(list_or_name, basestring):
for c in list_or_name:
self.from_table(c)
else:
self._table_factors.append(list_or_name)
return self
from_tables = from_table
"""Alias for :py:meth:`from_table`"""
def column(self, list_or_name, raw=False, value_params=None):
"""Add column names to select.
Arguments:
list_or_name (string or list): Column name or list of column names.
raw (bool, optional): Set to True for column name to be included in the SQL verbatim, default is False.
value_params (iterable, optional): List of value params if ``raw`` is True. Default is None.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').columns('t1c1').sql()
('SELECT `t1c1` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns('t1.t1c1').sql()
('SELECT t1.`t1c1` FROM t1', None)
>>> q = Select(quote_all_col_refs = False)
>>> q.from_table('t1').columns('t1.t1c1').sql()
('SELECT t1.t1c1 FROM t1', None)
>>> q = Select()
>>> q.from_table('t1 AS t1a').columns('t1c1').sql()
('SELECT `t1c1` FROM t1 AS t1a', None)
>>> q = Select()
>>> q.from_table('t1').columns('t1c1')()
('SELECT `t1c1` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').column('t1c1').column('t1c2').sql()
('SELECT `t1c1`, `t1c2` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns(['t1c1','t1c2']).sql()
('SELECT `t1c1`, `t1c2` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns(('t1c1','t1c2')).sql()
('SELECT `t1c1`, `t1c2` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns('`t1c1`').sql()
('SELECT `t1c1` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns('DATE(`t1c1`)').sql()
('SELECT DATE(`t1c1`) FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns('`t1c1` AS `t1a1`').sql()
('SELECT `t1c1` AS `t1a1` FROM t1', None)
"""
assert value_params is None or isinstance(value_params, collections.Iterable)
if not isinstance(list_or_name, basestring):
for c in list_or_name:
self.column(c, raw, value_params)
elif raw is True:
self._select_expr.append((list_or_name, value_params))
elif list_or_name not in self._select_col:
self._select_col.append(list_or_name)
return self
select = column
"""Alias for :py:meth:`column`"""
columns = column
"""Alias for :py:meth:`column`"""
def column_expr(self, list_or_expr, value_params=None):
"""Add expressions to select.
Arguments:
list_or_expr (string or list): Expression or list of expressions.
value_params (iterable, optional): List of value params. Default is None.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.column_expr('1+1').sql()
('SELECT 1+1', None)
>>> q = Select()
>>> q.column_expr('PASSWORD(?)', ['mypw']).sql()
('SELECT PASSWORD(?)', ['mypw'])
"""
return self.column(list_or_expr, True, value_params)
select_expr = column_expr
"""Alias for :py:meth:`column_expr`"""
columns_expr = column_expr
"""Alias for :py:meth:`column_expr`"""
def remove_column(self, list_or_name):
"""Remove column names from the select list.
Arguments:
list_or_name (string or list): Column name or list of column names.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').columns(('t1c1','t2c1')).remove_column('t2c1').sql()
('SELECT `t1c1` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns('t1c1').column_expr('1+1 AS t2c1').remove_column('t2c1').sql()
('SELECT `t1c1` FROM t1', None)
"""
if not isinstance(list_or_name, basestring):
for c in list_or_name:
self.remove_column(c)
else:
expr_alias = ' AS {0}'.format(list_or_name)
self._select_col = [c for c in self._select_col if c != list_or_name]
self._select_expr = [c for c in self._select_expr if not c[0].endswith(expr_alias)]
return self
def qualify_columns(self, table_name, qualify_cols=None):
"""Qualify column names with a table name.
Arguments:
table_name (string): Table name
qualify_cols (Iterable, optional): Column names to qualify,
or None to qualify all non-qualified columns. Default is None.
Returns:
object: self
Note:
Does not qualify expression columns.
Examples: ::
>>> q = Select()
>>> q.from_table('t1').columns(('t1c1', 't1c2')).qualify_columns('t1', ('t1c1',)).sql()
('SELECT t1.`t1c1`, `t1c2` FROM t1', None)
>>> q = Select()
>>> q.from_table('t1').columns(('t1c1', 't1c2')).qualify_columns('t1').sql()
('SELECT t1.`t1c1`, t1.`t1c2` FROM t1', None)
>>> q = Select()
>>> q.from_table(('t1', 't2')).columns(('t1c1', 't2.t2c1')).qualify_columns('t1').sql()
('SELECT t1.`t1c1`, t2.`t2c1` FROM t1, t2', None)
"""
for i, col in enumerate(self._select_col):
if qualify_cols is None or col in qualify_cols:
if '.' not in col:
self._select_col[i] = '{0}.{1}'.format(table_name, col)
return self
def order_by(self, list_or_name):
"""Add expressions to order by.
Arguments:
list_or_name (string or list): An expression or list of expressions to order by.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').order_by('t1c1').sql()
('SELECT * FROM t1 ORDER BY t1c1', None)
"""
if not isinstance(list_or_name, basestring):
for c in list_or_name:
self.order_by(c)
else:
self._orderby_conds.append(list_or_name)
return self
def group_by(self, list_or_name):
"""Add expressions to group by.
Arguments:
list_or_name (string or list): An expression or list of expressions to group by.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').group_by('t1c1').sql()
('SELECT * FROM t1 GROUP BY t1c1', None)
>>> q = Select()
>>> q.from_table('t1').group_by(['t1c1', 't1c2']).sql()
('SELECT * FROM t1 GROUP BY t1c1, t1c2', None)
"""
if not isinstance(list_or_name, basestring):
for c in list_or_name:
self.group_by(c)
else:
self._groupby_conds.append(list_or_name)
return self
def limit(self, row_count, offset=0):
"""Add limit clause expression.
Arguments:
row_count (int): Maximum number of rows to return.
offset (int, optional): Offset of the first row to return.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').limit(5).sql()
('SELECT * FROM t1 LIMIT 5', None)
"""
self._limit = (row_count, offset)
return self
def having_value(self, field_or_dict, value_or_tuple=None, operator='='):
"""Compare field to a value.
Field names may be escaped with backticks.
Use :py:meth:`having_expr` if you want field names to be
included in the SQL statement verbatim.
Values will be pickled by :py:meth:`mysqlstmt.stmt.Stmt.pickle`.
Use :py:meth:`having_raw_value` if you want values to be
included in the SQL statement verbatim.
Arguments:
field_or_dict (string or list): Name of field/column or :py:class:`dict` mapping fields to values.
value_or_tuple (mixed or tuple, optional): Value to compare with if ``field_or_dict`` is a field name.
Type can be anything that :py:meth:`mysqlstmt.stmt.Stmt.pickle` can handle (Iterable, Object, etc.).
Can also be a tuple ``(value, operator)``.
operator (string, optional): Comparison operator, default is '='.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').having_value('t1c1', 3).sql()
('SELECT * FROM t1 HAVING `t1c1` = 3', None)
>>> q = Select()
>>> q.from_table('t1').having_value('t1c1', 3).having_value('t1c2', 'string').sql()
('SELECT * FROM t1 HAVING (`t1c1` = 3 AND `t1c2` = ?)', ['string'])
>>> q = Select()
>>> q.from_table('t1').having_value(OrderedDict([('t1c1', 3), ('t1c2', 'string')])).sql()
('SELECT * FROM t1 HAVING (`t1c1` = 3 AND `t1c2` = ?)', ['string'])
>>> q = Select()
>>> q.from_table('t1').having_value('t1c1', 1).having_value('t1c2', 5).having_and().having_value('t1c1', 6).having_value('t1c2', 10).sql()
('SELECT * FROM t1 HAVING ((`t1c1` = 1 AND `t1c2` = 5) OR (`t1c1` = 6 AND `t1c2` = 10))', None)
"""
self.get_having_cond().where_value(field_or_dict, value_or_tuple, operator)
return self
having_values = having_value
"""Alias for :py:meth:`having_value`"""
def having_raw_value(self, field_or_dict, value_or_tuple=None, operator='=', value_params=None):
"""Compare field to an unmanipulated value.
Field names may be escaped with backticks.
Use :py:meth:`having_expr` if you want field names to be
included in the SQL statement verbatim.
Values will be included in the SQL statement verbatim.
Use :py:meth:`having_value` if you want values to be pickled.
Arguments:
field_or_dict (string or list): Name of field/column or :py:class:`dict` mapping fields to values.
Dictionary values can also be a tuple, as described below.
value_or_tuple (string or tuple, optional): Value to compare with if ``field_or_dict`` is a field name.
Can also be a tuple ``(value, operator, value_params)``.
operator (string, optional): Comparison operator, default is '='.
value_params (iterable, optional): List of value params. Default is None.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').having_raw_value('t1c1', 'NOW()').sql()
('SELECT * FROM t1 HAVING `t1c1` = NOW()', None)
>>> q = Select()
>>> q.from_table('t1').having_raw_value('t1c1', 'PASSWORD(?)', value_params=('mypw',)).sql()
('SELECT * FROM t1 HAVING `t1c1` = PASSWORD(?)', ['mypw'])
"""
self.get_having_cond().where_raw_value(field_or_dict, value_or_tuple, operator, value_params)
return self
having_raw_values = having_raw_value
"""Alias for :py:meth:`having_raw_value`"""
def having_expr(self, list_or_expr, expr_params=None):
"""Include a complex expression in conditional statement.
Expressions will be included in the SQL statement verbatim.
Use :py:meth:`having_value` or :py:meth:`having_raw_value` if you are
doing basic field comparisons and/or want the value to be pickled.
Arguments:
list_or_expr (string or list): An expression or :py:class:`list` of expressions.
Expression values can also be a tuple ``(expression, expr_params)``.
expr_params (iterable, optional): List of expression params. Default is None.
Returns:
object: self
Examples: ::
>>> q = Select()
>>> q.from_table('t1').having_expr('`t1c1` = NOW()').sql()
('SELECT * FROM t1 HAVING `t1c1` = NOW()', None)
>>> q = Select()
>>> q.from_table('t1').having_expr('`t1c1` = PASSWORD(?)', ('mypw',)).sql()
('SELECT * FROM t1 HAVING `t1c1` = PASSWORD(?)', ['mypw'])
"""
self.get_having_cond().where_expr(list_or_expr, expr_params)
return self
having_exprs = having_expr
"""Alias for :py:meth:`having_expr`"""
def get_having_cond(self, index=-1):
"""Returns a ``WhereCondition`` object from the list of conditions.
Arguments:
index (int): Index of condition, defaults to the active condition (-1).
Returns:
object: :py:class:`WhereCondition`
Note:
Conditions are typically created with ``having_and`` and ``having_or``,
so you should not need to use this function often.
See Also:
:py:class:`mysqlstmt.where_condition.WhereCondition` :py:meth:`having_cond`
"""
return self._having_cond_root.get_where_cond(index)
def having_cond(self, cond=None, where_predicate=None):
"""Activates a new ``WhereCondition``.
Arguments:
cond (mysqlstmt.WhereCondition, optional): A new condition; one will be created if not specified.
where_predicate (string): The predicate for the new condition if a new one is created, either 'AND' or 'OR'.
Returns:
object: self
Note:
Conditions are typically created with ``having_and`` and ``having_or``.
You should use this function when creating complex conditions with ``WhereCondition`` objects.
See Also:
:py:class:`mysqlstmt.where_condition.WhereCondition` :py:meth:`having_and` :py:meth:`having_or`
"""
self._having_cond_root.add_cond(cond, where_predicate)
return self
def having_and(self):
"""Activates a new ``WhereCondition`` with an 'AND' predicate.
Returns:
object: self
See Also:
:py:class:`mysqlstmt.where_condition.WhereCondition` :py:meth:`having_cond` :py:meth:`having_or`
Examples: ::
>>> q = Select()
>>> q.from_table('t1').having_and().having_value('t1c1', 1).having_value('t1c2', 5). \
having_and().having_value('t1c1', 6).having_value('t1c2', 10).sql()
('SELECT * FROM t1 HAVING ((`t1c1` = 1 AND `t1c2` = 5) OR (`t1c1` = 6 AND `t1c2` = 10))', None)
"""
self._having_cond_root.where_and()
return self
def having_or(self):
"""Activates a new ``WhereCondition`` with an 'OR' predicate.
Returns:
object: self
See Also:
:py:class:`mysqlstmt.where_condition.WhereCondition` :py:meth:`having_cond` :py:meth:`having_and`
Examples: ::
>>> q = Select()
>>> q.from_table('t1').having_or().having_value('t1c1', 3).having_value('t1c1', 5).sql()
('SELECT * FROM t1 HAVING (`t1c1` = 3 OR `t1c1` = 5)', None)
>>> q = Select(having_predicate='AND')
>>> q.from_table('t1').having_or().having_value('t1c1', 1).having_value('t1c1', 5). \
having_or().having_value('t1c1', 6).having_value('t1c1', 10).sql()
('SELECT * FROM t1 HAVING ((`t1c1` = 1 OR `t1c1` = 5) AND (`t1c1` = 6 OR `t1c1` = 10))', None)
"""
self._having_cond_root.where_or()
return self
def sql(self):
"""Build SELECT SQL statement.
Returns:
Either a tuple ``(SQL statement, parameterized values)`` if ``placeholder`` is set,
otherwise SQL statement as string.
Raises:
ValueError: The statement cannot be created with the given attributes.
"""
table_refs = []
param_values = []
cols = [self.quote_col_ref(c) for c in self._select_col]
for c in self._select_expr:
expr, val_params = c
cols.append(expr)
if val_params is not None and self.placeholder:
param_values.extend(val_params)
if self._table_factors:
table_refs.append(', '.join(self._table_factors))
if self._join_refs:
if not table_refs:
raise ValueError('A root table must be specified when using joins')
self._append_join_table_refs(self._table_factors[0], table_refs)
# MySQL SELECT syntax as of 5.7:
#
# SELECT
# [ALL | DISTINCT | DISTINCTROW ]
# [HIGH_PRIORITY]
# [STRAIGHT_JOIN]
# [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT]
# [SQL_CACHE | SQL_NO_CACHE] [SQL_CALC_FOUND_ROWS]
# select_expr [, select_expr ...]
# [FROM table_references
# [WHERE where_condition]
# [GROUP BY {col_name | expr | position}
# [ASC | DESC], ... [WITH ROLLUP]]
# [HAVING where_condition]
# [ORDER BY {col_name | expr | position}
# [ASC | DESC], ...]
# [LIMIT {[offset,] row_count | row_count OFFSET offset}]
# [PROCEDURE procedure_name(argument_list)]
# [INTO OUTFILE 'file_name' export_options
# | INTO DUMPFILE 'file_name'
# | INTO var_name [, var_name]]
# [FOR UPDATE | LOCK IN SHARE MODE]]
sql = ['SELECT']
if self.query_options:
sql.extend(self.query_options)
if self.cacheable is True:
sql.append('SQL_CACHE')
elif self.cacheable is False:
sql.append('SQL_NO_CACHE')
if self.calc_found_rows is True:
sql.append('SQL_CALC_FOUND_ROWS')
sql.append(', '.join(cols) if cols else '*')
if table_refs:
sql.append('FROM')
sql.append(' '.join(table_refs))
if self._where_cond_root.has_conds:
sql.append('WHERE')
sql.append(self._where_cond_root.sql(param_values))
if self._groupby_conds:
sql.append('GROUP BY')
sql.append(', '.join(self._groupby_conds))
if self._having_cond_root.has_conds:
sql.append('HAVING')
sql.append(self._having_cond_root.sql(param_values))
if self._orderby_conds:
sql.append('ORDER BY')
sql.append(', '.join(self._orderby_conds))
if self._limit is not None:
row_count, offset = self._limit
if offset > 0:
sql.append('LIMIT {0},{1}'.format(offset, row_count))
else:
sql.append('LIMIT {0}'.format(row_count))
if self.placeholder:
return ' '.join(sql), param_values if param_values else None
assert not param_values
return ' '.join(sql)
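# Combined usage sketch in the doctest style of the docstrings above; the
# expected output assumes the quoting behaviour shown in those examples:
#   >>> q = Select('t1')
#   >>> q.columns(['t1c1', 't1c2']).group_by('t1c1'). \
#   ...     having_value('t1c2', 'x').order_by('t1c1').limit(10, 20).sql()
#   ('SELECT `t1c1`, `t1c2` FROM t1 GROUP BY t1c1 HAVING `t1c2` = ? ORDER BY t1c1 LIMIT 20,10', ['x'])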
|
|
from PySide2 import QtWidgets, QtCore, QtGui
from pymel import core as pmc
from auri.auri_lib import AuriScriptView, AuriScriptController, AuriScriptModel, grpbox
from auri.scripts.Maya_Scripts import rig_lib
from auri.scripts.Maya_Scripts.rig_lib import RigController
reload(rig_lib)
class View(AuriScriptView):
def __init__(self, *args, **kwargs):
self.modules_cbbox = QtWidgets.QComboBox()
self.outputs_cbbox = QtWidgets.QComboBox()
self.refresh_btn = QtWidgets.QPushButton("Refresh")
self.prebuild_btn = QtWidgets.QPushButton("Prebuild")
self.side_cbbox = QtWidgets.QComboBox()
self.swimrotation_cbbox = QtWidgets.QComboBox()
self.how_many_jnts = QtWidgets.QSpinBox()
self.how_many_ctrls = QtWidgets.QSpinBox()
self.how_many_levels = QtWidgets.QSpinBox()
self.refresh_spaces_btn = QtWidgets.QPushButton("Refresh")
self.add_space_btn = QtWidgets.QPushButton("Add")
self.remove_space_btn = QtWidgets.QPushButton("Remove")
self.space_modules_cbbox = QtWidgets.QComboBox()
self.spaces_cbbox = QtWidgets.QComboBox()
self.selected_space_module = "No_space_module"
self.selected_space = "no_space"
self.space_list_view = QtWidgets.QListView()
self.space_list = QtGui.QStringListModel()
super(View, self).__init__(*args, **kwargs)
def set_controller(self):
self.ctrl = Controller(self.model, self)
def set_model(self):
self.model = Model()
def refresh_view(self):
self.side_cbbox.setCurrentText(self.model.side)
self.swimrotation_cbbox.setCurrentText(self.model.swimrotation)
self.how_many_ctrls.setValue(self.model.how_many_ctrls)
self.how_many_jnts.setValue(self.model.how_many_jnts)
self.ctrl.look_for_parent()
self.space_list.setStringList(self.model.space_list)
self.ctrl.look_for_parent(l_cbbox_stringlist=self.ctrl.modules_with_spaces,
l_cbbox_selection=self.selected_space_module,
l_cbbox=self.space_modules_cbbox, r_cbbox_stringlist=self.ctrl.spaces_model,
r_cbbox_selection=self.selected_space, r_cbbox=self.spaces_cbbox)
def setup_ui(self):
self.modules_cbbox.setModel(self.ctrl.modules_with_output)
self.modules_cbbox.currentTextChanged.connect(self.ctrl.on_modules_cbbox_changed)
self.outputs_cbbox.setModel(self.ctrl.outputs_model)
self.outputs_cbbox.currentTextChanged.connect(self.ctrl.on_outputs_cbbox_changed)
self.space_modules_cbbox.setModel(self.ctrl.modules_with_spaces)
self.space_modules_cbbox.currentTextChanged.connect(self.ctrl.on_space_modules_cbbox_changed)
self.spaces_cbbox.setModel(self.ctrl.spaces_model)
self.spaces_cbbox.currentTextChanged.connect(self.ctrl.on_spaces_cbbox_changed)
self.space_list_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.space_list.setStringList(self.model.space_list)
self.space_list_view.setModel(self.space_list)
self.add_space_btn.clicked.connect(self.ctrl.add_space_to_list)
self.remove_space_btn.clicked.connect(self.ctrl.remove_space_from_list)
self.refresh_spaces_btn.clicked.connect(self.ctrl.look_for_spaces)
self.side_cbbox.insertItems(0, ["Left", "Right"])
self.side_cbbox.currentTextChanged.connect(self.ctrl.on_side_cbbox_changed)
self.swimrotation_cbbox.insertItems(0, ["X", "Y", "Z"])
self.swimrotation_cbbox.currentTextChanged.connect(self.ctrl.on_swimrotation_cbbox_changed)
self.how_many_jnts.setMinimum(1)
self.how_many_jnts.valueChanged.connect(self.ctrl.on_how_many_jnts_changed)
self.how_many_ctrls.setMinimum(2)
self.how_many_ctrls.setMaximum(self.model.how_many_jnts)
self.how_many_ctrls.valueChanged.connect(self.ctrl.on_how_many_ctrls_changed)
self.refresh_btn.clicked.connect(self.ctrl.look_for_parent)
self.prebuild_btn.clicked.connect(self.ctrl.prebuild)
main_layout = QtWidgets.QVBoxLayout()
select_parent_layout = QtWidgets.QVBoxLayout()
select_parent_grp = grpbox("Select parent", select_parent_layout)
cbbox_layout = QtWidgets.QHBoxLayout()
cbbox_layout.addWidget(self.modules_cbbox)
cbbox_layout.addWidget(self.outputs_cbbox)
select_parent_layout.addLayout(cbbox_layout)
select_parent_layout.addWidget(self.refresh_btn)
select_spaces_layout = QtWidgets.QVBoxLayout()
select_spaces_grp = grpbox("Select local spaces :", select_spaces_layout)
spaces_cbbox_layout = QtWidgets.QHBoxLayout()
spaces_cbbox_layout.addWidget(self.space_modules_cbbox)
spaces_cbbox_layout.addWidget(self.spaces_cbbox)
btn_layout = QtWidgets.QVBoxLayout()
btn_layout.addWidget(self.refresh_spaces_btn)
btn_layout.addWidget(self.add_space_btn)
select_spaces_layout.addLayout(spaces_cbbox_layout)
select_spaces_layout.addLayout(btn_layout)
space_list_layout = QtWidgets.QVBoxLayout()
space_list_grp = grpbox("local spaces :", space_list_layout)
space_list_layout.addWidget(self.space_list_view)
space_list_layout.addWidget(self.remove_space_btn)
options_layout = QtWidgets.QVBoxLayout()
options_grp = grpbox("Options", options_layout)
side_layout = QtWidgets.QVBoxLayout()
side_grp = grpbox("Side", side_layout)
side_layout.addWidget(self.side_cbbox)
swimrotation_layout = QtWidgets.QVBoxLayout()
swimrotation_grp = grpbox("Choose swim rotation", swimrotation_layout)
swimrotation_layout.addWidget(self.swimrotation_cbbox)
how_many_layout = QtWidgets.QVBoxLayout()
jnts_layout = QtWidgets.QVBoxLayout()
jnts_text = QtWidgets.QLabel("How many jnts :")
jnts_layout.addWidget(jnts_text)
jnts_layout.addWidget(self.how_many_jnts)
ctrls_layout = QtWidgets.QVBoxLayout()
ctrls_text = QtWidgets.QLabel("How many ctrls :")
ctrls_layout.addWidget(ctrls_text)
ctrls_layout.addWidget(self.how_many_ctrls)
how_many_layout.addLayout(jnts_layout)
how_many_layout.addLayout(ctrls_layout)
options_layout.addLayout(how_many_layout)
main_layout.addWidget(select_parent_grp)
main_layout.addWidget(side_grp)
main_layout.addWidget(swimrotation_grp)
main_layout.addWidget(options_grp)
main_layout.addWidget(select_spaces_grp)
main_layout.addWidget(space_list_grp)
main_layout.addWidget(self.prebuild_btn)
self.setLayout(main_layout)
class Controller(RigController):
def __init__(self, model, view):
"""
Args:
model (Model):
view (View):
"""
self.guide_names = []
self.guides = []
self.guides_grp = None
self.side = {}
self.side_coef = 0
self.swimrotation = {}
self.created_fin_jnts = []
self.created_fin_ctrl = []
self.option_ctrl = None
self.jnt_const_group = None
RigController.__init__(self, model, view)
def on_how_many_jnts_changed(self, value):
self.model.how_many_jnts = value
self.view.how_many_ctrls.setMaximum(self.model.how_many_jnts)
def prebuild(self):
self.guide_names = ["{0}_fin_start_GUIDE".format(self.model.module_name),
"{0}_fin_end_GUIDE".format(self.model.module_name)]
self.side = {"Left": 1, "Right": -1}
self.side_coef = self.side.get(self.model.side)
if self.guide_check(self.guide_names):
self.guides = pmc.ls(self.guide_names)
self.guides_grp = pmc.ls("{0}_guides".format(self.model.module_name))[0]
self.guides_grp.setAttr("visibility", 1)
self.view.refresh_view()
pmc.select(d=1)
return
fin_start_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guide_names[0])
fin_end_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guide_names[1])
fin_start_guide.setAttr("translate", (2*self.side_coef, 2, 0))
fin_end_guide.setAttr("translate", (3*self.side_coef, 2, 0))
self.guides = [fin_start_guide, fin_end_guide]
self.guides_grp = self.group_guides(self.guides)
self.view.refresh_view()
pmc.select(d=1)
def execute(self):
self.prebuild()
self.delete_existing_objects()
self.connect_to_parent()
self.create_jnts()
self.create_option_ctrl()
self.create_ctrl()
self.create_local_spaces()
self.create_autoswim()
self.clean_rig()
self.create_outputs()
pmc.select(d=1)
def create_jnts(self):
fin_jnts_crv = pmc.curve(d=1, p=[pmc.xform(self.guides[0], q=1, ws=1, translation=1),
pmc.xform(self.guides[1], q=1, ws=1, translation=1)],
n="{0}_fin_curve".format(self.model.module_name))
fin_jnts_rebuilded = pmc.rebuildCurve(fin_jnts_crv, rpo=0, rt=0, end=1, kr=0, kep=1, kt=0,
s=self.model.how_many_jnts, d=1, ch=0, replaceOriginal=1)[0]
if self.model.how_many_jnts == 2:
pmc.delete(fin_jnts_rebuilded.cv[-2])
pmc.delete(fin_jnts_rebuilded.cv[1])
vertex_list = fin_jnts_rebuilded.cv[:]
self.created_fin_jnts = rig_lib.create_jnts_from_cv_list_and_return_jnts_list(vertex_list,
"{0}_fin".format(
self.model.module_name))
rig_lib.change_jnt_chain_suffix(self.created_fin_jnts, new_suffix="SKN")
pmc.select(d=1)
fin_jnts_offset_name = str(self.created_fin_jnts[0]).replace("_SKN", "_jnt_OFS")
fin_jnts_offset = pmc.joint(p=pmc.xform(self.created_fin_jnts[0], q=1, ws=1, translation=1),
o=pmc.xform(self.created_fin_jnts[0], q=1, ws=1, rotation=1),
n=fin_jnts_offset_name)
pmc.parent(self.created_fin_jnts[0], fin_jnts_offset, r=0)
pmc.parent(fin_jnts_offset, self.jnt_input_grp, r=0)
pmc.delete(fin_jnts_rebuilded)
def create_ctrl(self):
duplicate_jnts = pmc.duplicate(self.created_fin_jnts)
rig_lib.change_jnt_chain_suffix(duplicate_jnts, new_suffix="CTRL")
for i, ctrl in enumerate(duplicate_jnts):
ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
n="{0}_shape".format(ctrl))[0]
pmc.parent(ctrl_shape.getShape(), ctrl, shape=1, r=1)
ctrl.setAttr("radius", 0)
ctrl.setAttr("overrideEnabled", 1)
ctrl.getShape().rename(str(ctrl) + "Shape")
pmc.delete(ctrl_shape)
self.created_fin_ctrl = duplicate_jnts
pmc.select(d=1)
fin_ctrl_offset_name = str(self.created_fin_ctrl[0]).replace("_CTRL", "_ctrl_OFS")
fin_ctrl_offset = pmc.joint(p=pmc.xform(self.created_fin_ctrl[0], q=1, ws=1, translation=1),
o=pmc.xform(self.created_fin_ctrl[0], q=1, ws=1, rotation=1),
n=fin_ctrl_offset_name, radius=0)
pmc.parent(self.created_fin_ctrl[0], fin_ctrl_offset, r=0)
pmc.parent(fin_ctrl_offset, self.ctrl_input_grp, r=0)
for i, ctrl in enumerate(self.created_fin_ctrl):
pmc.parentConstraint(ctrl, self.created_fin_jnts[i], maintainOffset=1)
ctrl.jointOrient >> self.created_fin_jnts[i].jointOrient
ctrl.scale >> self.created_fin_jnts[i].scale
def create_option_ctrl(self):
option_ctrl_shape = rig_lib.little_cube("{0}_option_ctrl_shape".format(self.model.module_name))
self.option_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_option_CTRL".format(self.model.module_name),
shape=option_ctrl_shape)
option_ofs = pmc.group(self.option_ctrl, n="{0}_option_ctrl_OFS".format(self.model.module_name), r=1)
pmc.parent(option_ofs, self.ctrl_input_grp)
rig_lib.matrix_constraint(self.created_fin_jnts[-1], option_ofs, srt="trs")
ctrl_shape = self.option_ctrl.getShape()
pmc.move(ctrl_shape, [-1 * self.side_coef, 0, -1], relative=1, objectSpace=1, worldSpaceDistance=1)
self.option_ctrl.addAttr("ctrlsPrincipaux", attributeType="bool", defaultValue=1, hidden=0, keyable=1)
self.option_ctrl.addAttr("ctrlsSecondaires", attributeType="bool", defaultValue=0, hidden=0, keyable=1)
self.option_ctrl.addAttr("autoSwim", attributeType="float", defaultValue=0, hidden=0, keyable=1, hasMaxValue=1,
hasMinValue=1, maxValue=1, minValue=0)
self.option_ctrl.addAttr("amplitude", attributeType="float", defaultValue=2, hidden=0, keyable=1,
hasMaxValue=0, hasMinValue=1, minValue=0)
self.option_ctrl.addAttr("speed", attributeType="float", defaultValue=1, hidden=0, keyable=1,
hasMaxValue=0, hasMinValue=1, minValue=-1)
self.option_ctrl.addAttr("delay", attributeType="float", defaultValue=-0.5, hidden=0, keyable=1,
hasMaxValue=0, hasMinValue=0)
i = 1
while i <= self.model.how_many_ctrls:
name = "level{0}".format(i)
self.option_ctrl.addAttr("{0}Attributes".format(name), attributeType="enum", enumName="_________", hidden=0,
keyable=0)
self.option_ctrl.setAttr("{0}Attributes".format(name), lock=1, channelBox=1)
self.option_ctrl.addAttr("{0}Offset".format(name), attributeType="float", defaultValue=0, hidden=0,
keyable=1,
hasMinValue=1, minValue=0,
hasMaxValue=1, maxValue=self.model.how_many_jnts + 2)
self.option_ctrl.addAttr("{0}Amplitude".format(name), attributeType="float", defaultValue=1, hidden=0,
keyable=1,
hasMaxValue=0, hasMinValue=1, minValue=0)
self.option_ctrl.addAttr("{0}Frequence".format(name), attributeType="float", defaultValue=1, hidden=0,
keyable=1,
hasMaxValue=0, hasMinValue=0)
self.option_ctrl.addAttr("{0}Delay".format(name), attributeType="float", defaultValue=1, hidden=0,
keyable=1,
hasMaxValue=0, hasMinValue=0)
i += 1
def create_autoswim(self):
if pmc.objExists("{0}_autoswim_EXP".format(self.model.module_name)):
pmc.delete("{0}_autoswim_EXP".format(self.model.module_name))
exp = pmc.createNode("expression", n="{0}_autoswim_EXP".format(self.model.module_name))
exp_text = ""
for i, ctrl in enumerate(self.created_fin_ctrl):
autoswim_ctrl_name = str(ctrl).replace("_CTRL", "_ctrl_AUTOSWIM")
autoswim_ctrl = pmc.joint(p=pmc.xform(ctrl, q=1, ws=1, translation=1), o=pmc.xform(ctrl, q=1, ws=1, rotation=1),
n=autoswim_ctrl_name)
pmc.parent(autoswim_ctrl, ctrl.getParent())
pmc.parent(ctrl, autoswim_ctrl)
autoswim_ctrl.setAttr("drawStyle", 2)
if pmc.objExists("{0}_autoswim_merge_PMA".format(ctrl)):
pmc.delete("{0}_autoswim_merge_PMA".format(ctrl))
add = pmc.createNode("plusMinusAverage", n="{0}_autoswim_merge_PMA".format(ctrl))
n = 0
while n < self.model.how_many_ctrls:
if pmc.objExists("{0}_level{1}_offset_clamp".format(ctrl, n + 1)):
pmc.delete("{0}_level{1}_offset_clamp".format(ctrl, n + 1))
clamp = pmc.createNode("clamp", n="{0}_level{1}_offset_clamp".format(ctrl, n + 1))
if pmc.objExists("{0}_level{1}_offset_percent_range".format(ctrl, n + 1)):
pmc.delete("{0}_level{1}_offset_percent_range".format(ctrl, n + 1))
range = pmc.createNode("setRange", n="{0}_level{1}_offset_percent_range".format(ctrl, n + 1))
if pmc.objExists("{0}_level{1}_offset_invert_percent".format(ctrl, n + 1)):
pmc.delete("{0}_level{1}_offset_invert_percent".format(ctrl, n + 1))
plus = pmc.createNode("plusMinusAverage", n="{0}_level{1}_offset_invert_percent".format(ctrl, n + 1))
if pmc.objExists("{0}_level{1}_offset_multiply".format(ctrl, n + 1)):
pmc.delete("{0}_level{1}_offset_multiply".format(ctrl, n + 1))
mult = pmc.createNode("multDoubleLinear", n="{0}_level{1}_offset_multiply".format(ctrl, n + 1))
level_attr_prefix = "level{0}".format(n + 1)
self.option_ctrl.connectAttr("{0}Offset".format(level_attr_prefix), clamp + ".inputR")
clamp.setAttr("minR", i)
clamp.setAttr("maxR", i + 1)
clamp.outputR >> range.valueX
range.setAttr("oldMinX", i)
range.setAttr("oldMaxX", i + 1)
range.setAttr("minX", 0)
range.setAttr("maxX", 1)
plus.setAttr("operation", 2)
plus.setAttr("input1D[0]", 1)
range.outValueX >> plus.input1D[1]
plus.output1D >> mult.input1
# exp_text += "\n{0}.input2 = ( {2}/float({4}) * {1}.amplitude * {1}.{3}Amplitude * {1}.autoSwim ) * sin((time * {1}.speed)) ;".format(mult, self.option_ctrl, i + 1, level_attr_prefix, self.model.how_many_jnts)
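                # The expression below drives each AUTOSWIM joint with a sine wave: the
                # joint index scales the amplitude along the fin, "speed" scales time, and
                # the per-level delay phase-shifts successive joints so the fin ripples.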
exp_text += "\n{0}.input2 = ( {2}/float({4}) * {1}.amplitude * {1}.{3}Amplitude * {1}.autoSwim ) * sin( ( time* {1}.speed + {2} * {1}.delay * {1}.{3}Delay )* {1}.{3}Frequence" \
" ) ;".format(mult, self.option_ctrl, i + 1, level_attr_prefix, self.model.how_many_jnts)
mult.output >> add.input1D[n]
n += 1
if self.model.swimrotation == "Z":
add.output1D >> autoswim_ctrl.rotateZ
elif self.model.swimrotation == "X":
add.output1D >> autoswim_ctrl.rotateX
else:
add.output1D >> autoswim_ctrl.rotateY
exp.setExpression(exp_text)
def create_local_spaces(self):
spaces_names = []
space_locs = []
for space in self.model.space_list:
name = str(space).replace("_OUTPUT", "")
if "local_ctrl" in name:
name = "world"
spaces_names.append(name)
if pmc.objExists("{0}_{1}_SPACELOC".format(self.model.module_name, name)):
pmc.delete("{0}_{1}_SPACELOC".format(self.model.module_name, name))
space_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_{1}_SPACELOC".format(self.model.module_name, name))
space_locs.append(space_loc)
spaces_names.append("local")
fk_ctrls = self.created_fin_ctrl
fk_ctrls[0].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
for i, space in enumerate(self.model.space_list):
space_locs[i].setAttr("translate", pmc.xform(self.created_fin_jnts[0], q=1, ws=1, translation=1))
pmc.parent(space_locs[i], space)
fk_space_const = pmc.parentConstraint(space_locs[i], fk_ctrls[0].getParent(), maintainOffset=1)
rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(fk_space_const, space_locs[i], i),
fk_ctrls[0].space, i,
"{0}_{1}_COND".format(fk_ctrls[0], spaces_names[i]))
def clean_rig(self):
self.jnt_input_grp.setAttr("visibility", 0)
self.parts_grp.setAttr("visibility", 0)
self.guides_grp.setAttr("visibility", 0)
self.created_fin_ctrl[-1].setAttr("visibility", 0)
rig_lib.clean_ctrl(self.option_ctrl, 9, trs="trs")
if self.model.side == "Left":
color_value = 6
else:
color_value = 13
for ctrl in self.created_fin_ctrl:
rig_lib.clean_ctrl(ctrl, color_value, trs="ts")
second_ctrl_list = self.created_fin_ctrl[:]
princ_ctrl_list = []
step_princ_ctrl = int(self.model.how_many_jnts / self.model.how_many_ctrls)
for i in range(0, self.model.how_many_jnts, step_princ_ctrl):
princ_ctrl_list.append(second_ctrl_list[i])
for jnt in princ_ctrl_list:
second_ctrl_list.remove(jnt)
for ctrl in princ_ctrl_list:
self.option_ctrl.ctrlsPrincipaux >> ctrl.getShape().visibility
for ctrl in second_ctrl_list:
self.option_ctrl.ctrlsSecondaires >> ctrl.getShape().visibility
def create_outputs(self):
for i, jnt in enumerate(self.created_fin_jnts):
name = "{0}_jnt_{1}_OUTPUT".format(self.model.module_name, i)
rig_lib.create_output(name=name, parent=jnt)
class Model(AuriScriptModel):
def __init__(self):
AuriScriptModel.__init__(self)
self.selected_module = None
self.selected_output = None
self.side = "Left"
self.swimrotation = "X"
self.how_many_jnts = 6
self.how_many_ctrls = 2
self.space_list = []
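# Illustrative sketch only (no Maya dependency): the principal/secondary split that
# clean_rig() performs above. The function name and plain-list inputs are hypothetical.
def split_principal_controls(ctrls, how_many_principal):
    step = int(len(ctrls) / how_many_principal)
    principal = [ctrls[i] for i in range(0, len(ctrls), step)]
    secondary = [c for c in ctrls if c not in principal]
    return principal, secondary
# e.g. split_principal_controls(['c0', 'c1', 'c2', 'c3', 'c4', 'c5'], 2)
#      -> (['c0', 'c3'], ['c1', 'c2', 'c4', 'c5'])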
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import time
import signal
from os.path import basename, dirname, join
from random import shuffle
from swift import gettext_ as _
from contextlib import closing
from eventlet import Timeout
from swift.obj import diskfile, replicator
from swift.common.utils import (
get_logger, ratelimit_sleep, dump_recon_cache, list_from_csv, listdir,
unlink_paths_older_than, readconf, config_auto_int_value)
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist,\
DiskFileDeleted, DiskFileExpired
from swift.common.daemon import Daemon
from swift.common.storage_policy import POLICIES
class AuditorWorker(object):
"""Walk through file system to audit objects"""
def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0):
self.conf = conf
self.logger = logger
self.devices = devices
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
try:
            # ideally, unless ops overrides rsync_tempfile_timeout in the
            # auditor section, we base our behavior on whatever they
            # configure for their replicator
replicator_config = readconf(self.conf['__file__'],
'object-replicator')
except (KeyError, ValueError, IOError):
# if we can't parse the real config (generally a KeyError on
# __file__, or ValueError on no object-replicator section, or
# IOError if reading the file failed) we use
# a very conservative default for rsync_timeout
default_rsync_timeout = 86400
else:
replicator_rsync_timeout = int(replicator_config.get(
'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
# Here we can do some light math for ops and use the *replicator's*
# rsync_timeout (plus 15 mins to avoid deleting local tempfiles
            # before the remote replicator kills its rsync)
default_rsync_timeout = replicator_rsync_timeout + 900
# there's not really a good reason to assume the replicator
# section's reclaim_age is more appropriate than the reconstructor
# reclaim_age - but we're already parsing the config so we can set
# the default value in our config if it's not already set
if 'reclaim_age' in replicator_config:
conf.setdefault('reclaim_age',
replicator_config['reclaim_age'])
self.rsync_tempfile_timeout = config_auto_int_value(
self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
self.auditor_type = 'ALL'
self.zero_byte_only_at_fps = zero_byte_only_at_fps
if self.zero_byte_only_at_fps:
self.max_files_per_second = float(self.zero_byte_only_at_fps)
self.auditor_type = 'ZBF'
self.log_time = int(conf.get('log_time', 3600))
self.last_logged = 0
self.files_running_time = 0
self.bytes_running_time = 0
self.bytes_processed = 0
self.total_bytes_processed = 0
self.total_files_processed = 0
self.passes = 0
self.quarantines = 0
self.errors = 0
self.rcache = rcache
self.stats_sizes = sorted(
[int(s) for s in list_from_csv(conf.get('object_size_stats'))])
self.stats_buckets = dict(
[(s, 0) for s in self.stats_sizes + ['OVER']])
def create_recon_nested_dict(self, top_level_key, device_list, item):
if device_list:
device_key = ''.join(sorted(device_list))
return {top_level_key: {device_key: item}}
else:
return {top_level_key: item}
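        # e.g. create_recon_nested_dict('object_auditor_stats_ALL', ['sdb', 'sda'], stats)
        #      returns {'object_auditor_stats_ALL': {'sdasdb': stats}}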
def audit_all_objects(self, mode='once', device_dirs=None):
description = ''
if device_dirs:
device_dir_str = ','.join(sorted(device_dirs))
if self.auditor_type == 'ALL':
description = _(' - parallel, %s') % device_dir_str
else:
description = _(' - %s') % device_dir_str
self.logger.info(_('Begin object audit "%(mode)s" mode (%(audi_type)s'
'%(description)s)') %
{'mode': mode, 'audi_type': self.auditor_type,
'description': description})
begin = reported = time.time()
self.total_bytes_processed = 0
self.total_files_processed = 0
total_quarantines = 0
total_errors = 0
time_auditing = 0
# TODO: we should move audit-location generation to the storage policy,
# as we may (conceivably) have a different filesystem layout for each.
# We'd still need to generate the policies to audit from the actual
# directories found on-disk, and have appropriate error reporting if we
# find a directory that doesn't correspond to any known policy. This
# will require a sizable refactor, but currently all diskfile managers
# can find all diskfile locations regardless of policy -- so for now
# just use Policy-0's manager.
all_locs = (self.diskfile_router[POLICIES[0]]
.object_audit_location_generator(
device_dirs=device_dirs,
auditor_type=self.auditor_type))
for location in all_locs:
loop_time = time.time()
self.failsafe_object_audit(location)
self.logger.timing_since('timing', loop_time)
self.files_running_time = ratelimit_sleep(
self.files_running_time, self.max_files_per_second)
self.total_files_processed += 1
now = time.time()
if now - self.last_logged >= self.log_time:
self.logger.info(_(
'Object audit (%(type)s). '
'Since %(start_time)s: Locally: %(passes)d passed, '
'%(quars)d quarantined, %(errors)d errors, '
'files/sec: %(frate).2f, bytes/sec: %(brate).2f, '
'Total time: %(total).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f') % {
'type': '%s%s' % (self.auditor_type, description),
'start_time': time.ctime(reported),
'passes': self.passes, 'quars': self.quarantines,
'errors': self.errors,
'frate': self.passes / (now - reported),
'brate': self.bytes_processed / (now - reported),
'total': (now - begin), 'audit': time_auditing,
'audit_rate': time_auditing / (now - begin)})
cache_entry = self.create_recon_nested_dict(
'object_auditor_stats_%s' % (self.auditor_type),
device_dirs,
{'errors': self.errors, 'passes': self.passes,
'quarantined': self.quarantines,
'bytes_processed': self.bytes_processed,
'start_time': reported, 'audit_time': time_auditing})
dump_recon_cache(cache_entry, self.rcache, self.logger)
reported = now
total_quarantines += self.quarantines
total_errors += self.errors
self.passes = 0
self.quarantines = 0
self.errors = 0
self.bytes_processed = 0
self.last_logged = now
time_auditing += (now - loop_time)
# Avoid divide by zero during very short runs
elapsed = (time.time() - begin) or 0.000001
self.logger.info(_(
'Object audit (%(type)s) "%(mode)s" mode '
'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
'Total errors: %(errors)d, Total files/sec: %(frate).2f, '
'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f') % {
'type': '%s%s' % (self.auditor_type, description),
'mode': mode, 'elapsed': elapsed,
'quars': total_quarantines + self.quarantines,
'errors': total_errors + self.errors,
'frate': self.total_files_processed / elapsed,
'brate': self.total_bytes_processed / elapsed,
'audit': time_auditing, 'audit_rate': time_auditing / elapsed})
if self.stats_sizes:
self.logger.info(
_('Object audit stats: %s') % json.dumps(self.stats_buckets))
# Unset remaining partitions to not skip them in the next run
diskfile.clear_auditor_status(self.devices, self.auditor_type)
def record_stats(self, obj_size):
"""
Based on config's object_size_stats will keep track of how many objects
fall into the specified ranges. For example with the following:
object_size_stats = 10, 100, 1024
and your system has 3 objects of sizes: 5, 20, and 10000 bytes the log
will look like: {"10": 1, "100": 1, "1024": 0, "OVER": 1}
"""
for size in self.stats_sizes:
if obj_size <= size:
self.stats_buckets[size] += 1
break
else:
self.stats_buckets["OVER"] += 1
def failsafe_object_audit(self, location):
"""
Entrypoint to object_audit, with a failsafe generic exception handler.
"""
try:
self.object_audit(location)
except (Exception, Timeout):
self.logger.increment('errors')
self.errors += 1
self.logger.exception(_('ERROR Trying to audit %s'), location)
def object_audit(self, location):
"""
Audits the given object location.
:param location: an audit location
(from diskfile.object_audit_location_generator)
"""
def raise_dfq(msg):
raise DiskFileQuarantined(msg)
diskfile_mgr = self.diskfile_router[location.policy]
# this method doesn't normally raise errors, even if the audit
# location does not exist; if this raises an unexpected error it
# will get logged in failsafe
df = diskfile_mgr.get_diskfile_from_audit_location(location)
reader = None
try:
with df.open(modernize=True):
metadata = df.get_metadata()
obj_size = int(metadata['Content-Length'])
if self.stats_sizes:
self.record_stats(obj_size)
if obj_size and not self.zero_byte_only_at_fps:
reader = df.reader(_quarantine_hook=raise_dfq)
if reader:
with closing(reader):
for chunk in reader:
chunk_len = len(chunk)
self.bytes_running_time = ratelimit_sleep(
self.bytes_running_time,
self.max_bytes_per_second,
incr_by=chunk_len)
self.bytes_processed += chunk_len
self.total_bytes_processed += chunk_len
except DiskFileQuarantined as err:
self.quarantines += 1
self.logger.error(_('ERROR Object %(obj)s failed audit and was'
' quarantined: %(err)s'),
{'obj': location, 'err': err})
except DiskFileExpired:
pass # ignore expired objects
except DiskFileDeleted:
# If there is a reclaimable tombstone, we'll invalidate the hash
# to trigger the replicator to rehash/cleanup this suffix
ts = df._ondisk_info['ts_info']['timestamp']
if (not self.zero_byte_only_at_fps and
(time.time() - float(ts)) > df.manager.reclaim_age):
df.manager.invalidate_hash(dirname(df._datadir))
except DiskFileNotExist:
pass
self.passes += 1
# _ondisk_info attr is initialized to None and filled in by open
ondisk_info_dict = df._ondisk_info or {}
if 'unexpected' in ondisk_info_dict:
is_rsync_tempfile = lambda fpath: (
diskfile.RE_RSYNC_TEMPFILE.match(basename(fpath)))
rsync_tempfile_paths = filter(is_rsync_tempfile,
ondisk_info_dict['unexpected'])
mtime = time.time() - self.rsync_tempfile_timeout
unlink_paths_older_than(rsync_tempfile_paths, mtime)
class ObjectAuditor(Daemon):
"""Audit objects."""
def __init__(self, conf, logger=None, **options):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-auditor')
self.devices = conf.get('devices', '/srv/node')
self.concurrency = int(conf.get('concurrency', 1))
self.conf_zero_byte_fps = int(
conf.get('zero_byte_files_per_second', 50))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, "object.recon")
self.interval = int(conf.get('interval', 30))
def _sleep(self):
time.sleep(self.interval)
def clear_recon_cache(self, auditor_type):
"""Clear recon cache entries"""
dump_recon_cache({'object_auditor_stats_%s' % auditor_type: {}},
self.rcache, self.logger)
def run_audit(self, **kwargs):
"""Run the object audit"""
mode = kwargs.get('mode')
zero_byte_only_at_fps = kwargs.get('zero_byte_fps', 0)
device_dirs = kwargs.get('device_dirs')
worker = AuditorWorker(self.conf, self.logger, self.rcache,
self.devices,
zero_byte_only_at_fps=zero_byte_only_at_fps)
worker.audit_all_objects(mode=mode, device_dirs=device_dirs)
def fork_child(self, zero_byte_fps=False, **kwargs):
"""Child execution"""
pid = os.fork()
if pid:
return pid
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
if zero_byte_fps:
kwargs['zero_byte_fps'] = self.conf_zero_byte_fps
try:
self.run_audit(**kwargs)
except Exception as e:
self.logger.exception(
_("ERROR: Unable to run auditing: %s") % e)
finally:
sys.exit()
def audit_loop(self, parent, zbo_fps, override_devices=None, **kwargs):
"""Parallel audit loop"""
self.clear_recon_cache('ALL')
self.clear_recon_cache('ZBF')
once = kwargs.get('mode') == 'once'
kwargs['device_dirs'] = override_devices
if parent:
kwargs['zero_byte_fps'] = zbo_fps
self.run_audit(**kwargs)
else:
pids = set()
if self.conf_zero_byte_fps:
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.add(zbf_pid)
if self.concurrency == 1:
# Audit all devices in 1 process
pids.add(self.fork_child(**kwargs))
else:
# Divide devices amongst parallel processes set by
# self.concurrency. Total number of parallel processes
# is self.concurrency + 1 if zero_byte_fps.
parallel_proc = self.concurrency + 1 if \
self.conf_zero_byte_fps else self.concurrency
device_list = list(override_devices) if override_devices else \
listdir(self.devices)
shuffle(device_list)
while device_list:
pid = None
if len(pids) == parallel_proc:
pid = os.wait()[0]
pids.discard(pid)
if self.conf_zero_byte_fps and pid == zbf_pid and once:
# If we're only running one pass and the ZBF scanner
# finished, don't bother restarting it.
zbf_pid = -100
elif self.conf_zero_byte_fps and pid == zbf_pid:
# When we're running forever, the ZBF scanner must
# be restarted as soon as it finishes.
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.add(zbf_pid)
else:
kwargs['device_dirs'] = [device_list.pop()]
pids.add(self.fork_child(**kwargs))
while pids:
pid = os.wait()[0]
# ZBF scanner must be restarted as soon as it finishes
# unless we're in run-once mode
if self.conf_zero_byte_fps and pid == zbf_pid and \
len(pids) > 1 and not once:
kwargs['device_dirs'] = override_devices
# sleep between ZBF scanner forks
self._sleep()
zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
pids.add(zbf_pid)
pids.discard(pid)
def run_forever(self, *args, **kwargs):
"""Run the object audit until stopped."""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
parent = False
if zbo_fps:
# only start parent
parent = True
kwargs = {'mode': 'forever'}
while True:
try:
self.audit_loop(parent, zbo_fps, **kwargs)
except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s'), err)
self._sleep()
def run_once(self, *args, **kwargs):
"""Run the object audit once"""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
override_devices = list_from_csv(kwargs.get('devices'))
# Remove bogus entries and duplicates from override_devices
override_devices = list(
set(listdir(self.devices)).intersection(set(override_devices)))
parent = False
if zbo_fps:
# only start parent
parent = True
kwargs = {'mode': 'once'}
try:
self.audit_loop(parent, zbo_fps, override_devices=override_devices,
**kwargs)
except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s'), err)
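# Illustrative sketch only (not part of Swift): the size bucketing performed by
# AuditorWorker.record_stats() above, reproduced with plain Python values.
if __name__ == '__main__':
    stats_sizes = sorted([10, 100, 1024])
    stats_buckets = dict([(s, 0) for s in stats_sizes + ['OVER']])
    for obj_size in (5, 20, 10000):
        for size in stats_sizes:
            if obj_size <= size:
                stats_buckets[size] += 1
                break
        else:
            stats_buckets['OVER'] += 1
    print(stats_buckets)  # counts: 10 -> 1, 100 -> 1, 1024 -> 0, OVER -> 1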
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
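# Worked example (reference only): with param=1.0, g_t=0.1, t=0, m=v=0 and the
# defaults above, m_t = 0.01, v_t = 1e-5 and lr_t = 0.001*sqrt(0.001)/0.1 ~= 3.16e-4,
# so param_t ~= 1.0 - 3.16e-4 * 0.01 / (sqrt(1e-5) + 1e-7) ~= 0.999: the first Adam
# step has magnitude ~= lr regardless of the gradient's scale.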
def adam_update_numpy_amsgrad(param,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
vhat_t = np.maximum(vhat, v_t)
param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
return param_t, m_t, v_t, vhat_t
def adam_sparse_update_numpy_amsgrad(param,
indices,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat),
np.copy(param))
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
v_hat_t = np.maximum(vhat_t, v_t)
v_hat_t_slice = v_hat_t[indices]
param_t_slice = param[indices] - (
lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon)))
param_t[indices] = param_t_slice
return param_t, m_t, v_t, vhat_t
def get_beta_accumulators(opt, dtype):
local_step = math_ops.cast(opt.iterations + 1, dtype)
beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = math_ops.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
class AdamOptimizerTest(test.TestCase):
@test_util.run_deprecated_v1
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.Adam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
variables.global_variables_initializer().run()
minimize_op.run()
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.Adam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.Adam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic()
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_callable_params=True)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.Adam(amsgrad=True)
opt_aggregated = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
@test_util.run_deprecated_v1
def testBasicWithLearningRateDecay(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.Adam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testBasicWithLearningRateInverseTimeDecay(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.Adam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.Adam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be the iteration count plus two unique slot variables
      # (m and v) for each of v1 and v2, i.e. 5 variables in total.
self.assertEqual(
5, len(set([v.experimental_ref() for v in opt.variables()])))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizers.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration))
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
if __name__ == "__main__":
test.main()
|
|
# Source Generated with Decompyle++
# File: internal_parameter.pyc (Python 2.5)
from __future__ import absolute_import
from Live import DeviceParameter
from ableton.v2.base import listenable_property, liveobj_valid, nop, Slot, SlotManager, Subject, SlotError
def identity(value, _parent):
return value
def to_percentage_display(value):
percentage = 100 * value
percentage_str = '100'
    if percentage < 100:
        # Decompyle++ mangled a conditional expression here; the original most
        # likely used a finer precision for values below 10.
        precision = 2 if percentage < 10 else 1
        format_str = '%.' + str(precision) + 'f'
        percentage_str = format_str % percentage
return unicode(percentage_str + ' %')
class InternalParameterBase(Subject):
is_enabled = True
is_quantized = False
    def __init__(self, name=None, *a, **k):
        if name is None:
            raise AssertionError
        super(InternalParameterBase, self).__init__(*a, **k)
self._name = name
def _has_valid_parent(self):
return liveobj_valid(self._parent)
def canonical_parent(self):
raise NotImplementedError
canonical_parent = property(canonical_parent)
def display_value(self):
raise NotImplementedError
display_value = property(display_value)
def min(self):
raise NotImplementedError
min = property(min)
def max(self):
raise NotImplementedError
max = property(max)
def value(self):
raise NotImplementedError
value = property(value)
def name(self):
return self._name
name = property(name)
def original_name(self):
return self._name
original_name = property(original_name)
def default_value(self):
return self.min
default_value = property(default_value)
def automation_state(self):
return DeviceParameter.AutomationState.none
automation_state = listenable_property(automation_state)
def state(self):
return DeviceParameter.ParameterState.enabled
state = listenable_property(state)
def _live_ptr(self):
return id(self)
_live_ptr = property(_live_ptr)
def __str__(self):
return self.display_value
class InternalParameter(InternalParameterBase):
'''
Class implementing the DeviceParameter interface. Using instances of this class,
we can mix script-internal values with DeviceParameter instances.
'''
__events__ = ('value',)
def __init__(self, parent = None, display_value_conversion = None, *a, **k):
        super(InternalParameter, self).__init__(*a, **k)
self._value = 0
self._parent = parent
self.set_display_value_conversion(display_value_conversion)
self.set_scaling_functions(None, None)
def set_display_value_conversion(self, display_value_conversion):
        # fall back to the percentage display when no conversion is supplied
        self._display_value_conversion = display_value_conversion or to_percentage_display
self.notify_value()
def set_scaling_functions(self, to_internal, from_internal):
        self._to_internal = to_internal or identity
        self._from_internal = from_internal or identity
def canonical_parent(self):
return self._parent
canonical_parent = property(canonical_parent)
def _get_value(self):
        # Decompyle++ dropped the true branch; the original most likely converts
        # the stored linear value back through the scaling function.
        if self._has_valid_parent():
            return self._from_internal(self.linear_value, self._parent)
        return self.min
def _set_value(self, new_value):
        if not (self.min <= new_value <= self.max):
            raise AssertionError('Invalid value %f' % new_value)
self.linear_value = self._to_internal(new_value, self._parent)
value = property(_get_value, _set_value)
def _get_linear_value(self):
return self._value
def _set_linear_value(self, new_value):
if new_value != self._value:
self._value = new_value
self.notify_value()
linear_value = property(_get_linear_value, _set_linear_value)
def min(self):
return 0
min = property(min)
def max(self):
return 1
max = property(max)
def display_value(self):
return self._display_value_conversion(self.value)
display_value = property(display_value)
class WrappingParameter(InternalParameter, SlotManager):
def __init__(self, source_property = None, from_property_value = None, to_property_value = None, display_value_conversion = nop, value_items = [], *a, **k):
        if source_property is None:
            raise AssertionError
        super(WrappingParameter, self).__init__(display_value_conversion=display_value_conversion, *a, **k)
        if not hasattr(self._parent, source_property) and source_property not in dir(self._parent):
            raise AssertionError
self._source_property = source_property
self._value_items = value_items
self.set_scaling_functions(to_property_value, from_property_value)
self._property_slot = self.register_slot(Slot(listener = self.notify_value, event = source_property))
self.connect()
def connect(self):
self._property_slot.subject = None
self._property_slot.subject = self._parent
def _get_property_value(self):
        # Decompyle++ dropped the true branch; the original reads the wrapped
        # property off the parent when it is valid.
        if self._has_valid_parent():
            return getattr(self._parent, self._source_property)
        return self.min
def _get_value(self):
        try:
            # Decompyle++ dropped the true branch; the original most likely runs
            # the wrapped property value back through the scaling function.
            if self._has_valid_parent():
                return self._from_internal(self._get_property_value(), self._parent)
            return self.min
except RuntimeError:
return self.min
def _set_value(self, new_value):
        if not (self.min <= new_value <= self.max):
            raise AssertionError('Invalid value %f' % new_value)
try:
setattr(self._parent, self._source_property, self._to_internal(new_value, self._parent))
except RuntimeError:
pass
linear_value = property(_get_value, _set_value)
value = property(_get_value, _set_value)
def display_value(self):
try:
value = self._get_property_value()
return unicode(self._display_value_conversion(value))
except RuntimeError:
return unicode()
display_value = property(display_value)
def is_quantized(self):
return len(self._value_items) > 0
is_quantized = property(is_quantized)
def value_items(self):
return self._value_items
value_items = property(value_items)
class EnumWrappingParameter(InternalParameterBase, SlotManager):
is_enabled = True
is_quantized = True
def __init__(self, parent = None, values_property = None, index_property = None, value_type = int, to_index_conversion = None, from_index_conversion = None, *a, **k):
        assert parent is not None
        assert values_property is not None
        assert index_property is not None
        super(EnumWrappingParameter, self).__init__(*a, **k)
        self._parent = parent
        self._values_property = values_property
        self._index_property = index_property
        self._to_index = to_index_conversion or (lambda x: x)
        self._from_index = from_index_conversion or (lambda x: x)
self.value_type = value_type
self._index_property_slot = self.register_slot(self._parent, self.notify_value, index_property)
try:
self.register_slot(self._parent, self.notify_value_items, values_property)
except SlotError:
pass
def connect(self):
self._index_property_slot.subject = None
self._index_property_slot.subject = self._parent
def display_value(self):
index = self._get_index()
values = self._get_values()
if index < len(values):
return unicode(values[index])
else:
return unicode()
display_value = property(display_value)
def value_items(self):
return self._get_values()
value_items = listenable_property(value_items)
    @listenable_property
    def value(self):
        return self._get_index()
    @value.setter
    def value(self, new_value):
        self._set_index(new_value)
    def _get_values(self):
        if self._has_valid_parent():
            return getattr(self._parent, self._values_property)
        return []
    def _get_index(self):
        if self._has_valid_parent():
            return int(getattr(self._parent, self._index_property))
        return 0
def _set_index(self, index):
index = self._to_index(index)
setattr(self._parent, self._index_property, self.value_type(index))
def canonical_parent(self):
pass
canonical_parent = property(canonical_parent)
def max(self):
return len(self.value_items) - 1
max = property(max)
def min(self):
return 0
min = property(min)
class RelativeInternalParameter(InternalParameter):
__events__ = ('delta',)
def default_value(self):
return 0.5
default_value = property(default_value)
def _get_value(self):
return self.default_value
def _set_value(self, new_value):
delta = new_value - self.value
if delta != 0:
self.notify_value()
self.notify_delta(delta)
value = property(_get_value, _set_value)
linear_value = property(_get_value, _set_value)
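# --- Illustrative sketch (not part of the framework code above) -----------------------
# WrappingParameter exposes a host object's property as a normalized 0..1 parameter:
# from_property_value maps the property's native value into the 0..1 range and
# to_property_value maps back. The converter pair below assumes a hypothetical integer
# property spanning 0..127; the function names and the 'send_level' property are made
# up for illustration only.
def _example_from_property_value(native_value, parent=None):
    # native 0..127 -> normalized 0..1
    return native_value / 127.0
def _example_to_property_value(normalized, parent=None):
    # normalized 0..1 -> native 0..127
    return int(round(normalized * 127))
# A control surface script could then wrap such a property roughly like this (requires
# the surrounding framework and a parent object exposing a listenable 'send_level'):
#   WrappingParameter(parent=track, source_property='send_level',
#                     from_property_value=_example_from_property_value,
#                     to_property_value=_example_to_property_value)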
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_info
short_description: Gather information about PostgreSQL servers
description:
- Gathers information about PostgreSQL servers.
version_added: '2.8'
options:
filter:
description:
- Limit the collected information by comma separated string or YAML list.
- Allowable values are C(version),
C(databases), C(settings), C(tablespaces), C(roles),
C(replications), C(repl_slots).
- By default, collects all subsets.
- You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples).
- You can use '!' before value (for example, C(!settings)) to exclude it from the information.
- If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
the excluding values will be ignored.
type: list
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- login_user or session_role must be able to read from pg_authid.
- To avoid "Peer authentication failed for user postgres" error,
use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
# Display info from postgres hosts.
# ansible postgres -m postgresql_info
# Display only databases and roles info from all hosts using shell-style wildcards:
# ansible all -m postgresql_info -a 'filter=dat*,rol*'
# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
# ansible standby -m postgresql_info -a 'filter=repl*'
# Display all info from databases hosts except settings:
# ansible databases -m postgresql_info -a 'filter=!settings'
- name: Collect PostgreSQL version and extensions
become: yes
become_user: postgres
postgresql_info:
filter: ver*,ext*
- name: Collect all info except settings and roles
become: yes
become_user: postgres
postgresql_info:
filter: "!settings,!roles"
# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
# and pass "postgres" as a database to connect to
- name: Collect tablespaces and repl_slots info
become: yes
become_user: pgsql
postgresql_info:
db: postgres
filter:
- tablesp*
- repl_sl*
- name: Collect all info except databases
become: yes
become_user: postgres
postgresql_info:
filter:
- "!databases"
'''
RETURN = r'''
version:
description: Database server version U(https://www.postgresql.org/support/versioning/).
returned: always
type: dict
sample: { "version": { "major": 10, "minor": 6 } }
contains:
major:
description: Major server version.
returned: always
type: int
sample: 11
minor:
description: Minor server version.
returned: always
type: int
sample: 1
databases:
description: Information about databases.
returned: always
type: dict
sample:
- { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
"ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
contains:
database_name:
description: Database name.
returned: always
type: dict
sample: template1
contains:
access_priv:
description: Database access privileges.
returned: always
type: str
sample: "=c/postgres_npostgres=CTc/postgres"
collate:
description:
- Database collation U(https://www.postgresql.org/docs/current/collation.html).
returned: always
type: str
sample: en_US.UTF-8
ctype:
description:
- Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
returned: always
type: str
sample: en_US.UTF-8
encoding:
description:
- Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
returned: always
type: str
sample: UTF8
owner:
description:
- Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
returned: always
type: str
sample: postgres
size:
description: Database size in bytes.
returned: always
type: str
sample: 8189415
extensions:
description:
- Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
returned: always
type: dict
sample:
- { "plpgsql": { "description": "PL/pgSQL procedural language",
"extversion": { "major": 1, "minor": 0 } } }
contains:
extdescription:
description: Extension description.
returned: if existent
type: str
sample: PL/pgSQL procedural language
extversion:
        description: Extension version.
returned: always
type: dict
contains:
major:
description: Extension major version.
returned: always
type: int
sample: 1
minor:
description: Extension minor version.
returned: always
type: int
sample: 0
nspname:
description: Namespace where the extension is.
returned: always
type: str
sample: pg_catalog
languages:
description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
returned: always
type: dict
sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
contains:
lanacl:
description:
- Language access privileges
U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
returned: always
type: str
sample: "{postgres=UC/postgres,=U/postgres}"
lanowner:
description:
- Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
returned: always
type: str
sample: postgres
namespaces:
description:
- Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
returned: always
type: dict
sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
contains:
nspacl:
description:
- Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
returned: always
type: str
sample: "{postgres=UC/postgres,=U/postgres}"
nspowner:
description:
- Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
returned: always
type: str
sample: postgres
repl_slots:
description:
- Replication slots (available in 9.4 and later)
U(https://www.postgresql.org/docs/current/catalog-pg-replication-slots.html).
returned: if existent
type: dict
sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
contains:
active:
description:
- True means that a receiver has connected to it, and it is currently reserving archives.
returned: always
type: bool
sample: true
database:
description: Database name this slot is associated with, or null.
returned: always
type: str
sample: acme
plugin:
description:
- Base name of the shared object containing the output plugin
this logical slot is using, or null for physical slots.
returned: always
type: str
sample: pgoutput
slot_type:
description: The slot type - physical or logical.
returned: always
type: str
sample: logical
replications:
description:
- Information about the current replications by process PIDs
U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
returned: if pg_stat_replication view existent
type: dict
sample:
- { 76580: { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
"client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
contains:
usename:
description:
- Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
returned: always
type: str
sample: replication_user
app_name:
description: Name of the application that is connected to this WAL sender.
returned: if existent
type: str
sample: acme_srv
client_addr:
description:
- IP address of the client connected to this WAL sender.
- If this field is null, it indicates that the client is connected
via a Unix socket on the server machine.
returned: always
type: str
sample: 10.0.0.101
client_hostname:
description:
- Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
- This field will only be non-null for IP connections, and only when log_hostname is enabled.
returned: always
type: str
sample: dbsrv1
backend_start:
description: Time when this process was started, i.e., when the client connected to this WAL sender.
returned: always
type: str
sample: "2019-02-03 00:14:33.908593+03"
state:
description: Current WAL sender state.
returned: always
type: str
sample: streaming
tablespaces:
description:
- Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
returned: always
type: dict
sample:
- { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
"spcowner": "postgres" } }
contains:
spcacl:
description: Tablespace access privileges.
returned: always
type: str
sample: "{postgres=C/postgres,andreyk=C/postgres}"
spcoptions:
description: Tablespace-level options.
returned: always
type: list
sample: [ "seq_page_cost=1" ]
spcowner:
description: Owner of the tablespace.
returned: always
type: str
sample: test_user
roles:
description:
- Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
returned: always
type: dict
sample:
- { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
"valid_until": "9999-12-31T23:59:59.999999+00:00" } }
contains:
canlogin:
description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
returned: always
type: bool
sample: true
member_of:
description:
- Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
returned: always
type: list
sample: [ "read_only_users" ]
superuser:
description: User is a superuser or not.
returned: always
type: bool
sample: false
valid_until:
description:
- Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
returned: always
type: str
sample: "9999-12-31T23:59:59.999999+00:00"
pending_restart_settings:
description:
- List of settings that are pending restart to be set.
returned: always
type: list
sample: [ "shared_buffers" ]
settings:
description:
- Information about run-time server parameters
U(https://www.postgresql.org/docs/current/view-pg-settings.html).
returned: always
type: dict
sample:
- { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
"min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
"unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
contains:
setting:
description: Current value of the parameter.
returned: always
type: str
sample: 49152
unit:
description: Implicit unit of the parameter.
returned: always
type: str
sample: kB
boot_val:
description:
- Parameter value assumed at server startup if the parameter is not otherwise set.
returned: always
type: str
sample: 4096
min_val:
description:
- Minimum allowed value of the parameter (null for non-numeric values).
returned: always
type: str
sample: 64
max_val:
description:
- Maximum allowed value of the parameter (null for non-numeric values).
returned: always
type: str
sample: 2147483647
sourcefile:
description:
- Configuration file the current value was set in.
- Null for values set from sources other than configuration files,
      or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
- Helpful when using include directives in configuration files.
returned: always
type: str
sample: /var/lib/pgsql/10/data/postgresql.auto.conf
context:
description:
- Context required to set the parameter's value.
- For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
returned: always
type: str
sample: user
vartype:
description:
- Parameter type (bool, enum, integer, real, or string).
returned: always
type: str
sample: integer
val_in_bytes:
description:
- Current value of the parameter in bytes.
returned: if supported
type: int
sample: 2147483647
pretty_val:
description:
- Value presented in the pretty form.
returned: always
type: str
sample: 2MB
pending_restart:
description:
- True if the value has been changed in the configuration file but needs a restart; or false otherwise.
- Returns only if C(settings) is passed.
returned: always
type: bool
sample: false
'''
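# Illustrative usage of the returned facts (not part of the module documentation above).
# Once the task result is registered, the keys documented in RETURN are available
# directly on it, e.g.:
#
#   - name: Collect PostgreSQL version only
#     become: yes
#     become_user: postgres
#     postgresql_info:
#       filter: version
#     register: pg
#
#   - debug:
#       msg: "Running PostgreSQL {{ pg.version.major }}.{{ pg.version.minor }}"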
from fnmatch import fnmatch
try:
    import psycopg2
    import psycopg2.extras  # needed for the DictCursor factory used below
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgDbConn(object):
def __init__(self, module, params_dict, session_role):
self.params_dict = params_dict
self.module = module
self.db_conn = None
self.session_role = session_role
self.cursor = None
def connect(self):
try:
self.db_conn = psycopg2.connect(**self.params_dict)
self.cursor = self.db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Switch role, if specified:
if self.session_role:
try:
self.cursor.execute('SET ROLE %s' % self.session_role)
except Exception as e:
self.module.fail_json(msg="Could not switch role: %s" % to_native(e))
return self.cursor
except TypeError as e:
if 'sslrootcert' in e.args[0]:
self.module.fail_json(msg='PostgreSQL server must be at least version 8.4 '
'to support sslrootcert')
self.module.fail_json(msg="Unable to connect to database: %s" % to_native(e))
except Exception as e:
self.module.fail_json(msg="Unable to connect to database: %s" % to_native(e))
def reconnect(self, dbname):
self.db_conn.close()
self.params_dict['database'] = dbname
return self.connect()
class PgClusterInfo(object):
def __init__(self, module, db_conn_obj):
self.module = module
self.db_obj = db_conn_obj
self.cursor = db_conn_obj.connect()
self.pg_info = {
"version": {},
"tablespaces": {},
"databases": {},
"replications": {},
"repl_slots": {},
"settings": {},
"roles": {},
"pending_restart_settings": [],
}
def collect(self, val_list=False):
subset_map = {
"version": self.get_pg_version,
"tablespaces": self.get_tablespaces,
"databases": self.get_db_info,
"replications": self.get_repl_info,
"repl_slots": self.get_rslot_info,
"settings": self.get_settings,
"roles": self.get_role_info,
}
incl_list = []
excl_list = []
# Notice: incl_list and excl_list
# don't make sense together, therefore,
# if incl_list is not empty, we collect
# only values from it:
if val_list:
for i in val_list:
if i[0] != '!':
incl_list.append(i)
else:
excl_list.append(i.lstrip('!'))
if incl_list:
for s in subset_map:
for i in incl_list:
if fnmatch(s, i):
subset_map[s]()
break
elif excl_list:
found = False
# Collect info:
for s in subset_map:
for e in excl_list:
if fnmatch(s, e):
found = True
if not found:
subset_map[s]()
else:
found = False
# Default behaviour, if include or exclude is not passed:
else:
# Just collect info for each item:
for s in subset_map:
subset_map[s]()
return self.pg_info
def get_tablespaces(self):
"""
Get information about tablespaces.
"""
# Check spcoption exists:
opt = self.__exec_sql("SELECT column_name "
"FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'")
if not opt:
query = ("SELECT s.spcname, a.rolname, s.spcacl "
"FROM pg_tablespace AS s "
"JOIN pg_authid AS a ON s.spcowner = a.oid")
else:
query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
"FROM pg_tablespace AS s "
"JOIN pg_authid AS a ON s.spcowner = a.oid")
res = self.__exec_sql(query)
ts_dict = {}
for i in res:
ts_name = i[0]
ts_info = dict(
spcowner=i[1],
spcacl=i[2] if i[2] else '',
)
if opt:
ts_info['spcoptions'] = i[3] if i[3] else []
ts_dict[ts_name] = ts_info
self.pg_info["tablespaces"] = ts_dict
def get_ext_info(self):
"""
Get information about existing extensions.
"""
# Check that pg_extension exists:
res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
"information_schema.tables "
"WHERE table_name = 'pg_extension')")
if not res[0][0]:
return True
query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
"FROM pg_catalog.pg_extension AS e "
"LEFT JOIN pg_catalog.pg_namespace AS n "
"ON n.oid = e.extnamespace "
"LEFT JOIN pg_catalog.pg_description AS c "
"ON c.objoid = e.oid "
"AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
res = self.__exec_sql(query)
ext_dict = {}
for i in res:
ext_ver = i[1].split('.')
ext_dict[i[0]] = dict(
extversion=dict(
major=int(ext_ver[0]),
minor=int(ext_ver[1]),
),
nspname=i[2],
description=i[3],
)
return ext_dict
def get_role_info(self):
"""
Get information about roles (in PgSQL groups and users are roles).
"""
query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
"r.rolvaliduntil, "
"ARRAY(SELECT b.rolname "
"FROM pg_catalog.pg_auth_members AS m "
"JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
"WHERE m.member = r.oid) AS memberof "
"FROM pg_catalog.pg_roles AS r "
"WHERE r.rolname !~ '^pg_'")
res = self.__exec_sql(query)
rol_dict = {}
for i in res:
rol_dict[i[0]] = dict(
superuser=i[1],
canlogin=i[2],
valid_until=i[3] if i[3] else '',
member_of=i[4] if i[4] else [],
)
self.pg_info["roles"] = rol_dict
def get_rslot_info(self):
"""
Get information about replication slots if exist.
"""
# Check that pg_replication_slots exists:
res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
"information_schema.tables "
"WHERE table_name = 'pg_replication_slots')")
if not res[0][0]:
return True
query = ("SELECT slot_name, plugin, slot_type, database, "
"active FROM pg_replication_slots")
res = self.__exec_sql(query)
# If there is no replication:
if not res:
return True
rslot_dict = {}
for i in res:
rslot_dict[i[0]] = dict(
plugin=i[1],
slot_type=i[2],
database=i[3],
active=i[4],
)
self.pg_info["repl_slots"] = rslot_dict
def get_settings(self):
"""
Get server settings.
"""
# Check pending restart column exists:
pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_settings' "
"AND column_name = 'pending_restart'")
if not pend_rest_col_exists:
query = ("SELECT name, setting, unit, context, vartype, "
"boot_val, min_val, max_val, sourcefile "
"FROM pg_settings")
else:
query = ("SELECT name, setting, unit, context, vartype, "
"boot_val, min_val, max_val, sourcefile, pending_restart "
"FROM pg_settings")
res = self.__exec_sql(query)
set_dict = {}
for i in res:
val_in_bytes = None
setting = i[1]
if i[2]:
unit = i[2]
else:
unit = ''
if unit == 'kB':
val_in_bytes = int(setting) * 1024
elif unit == '8kB':
val_in_bytes = int(setting) * 1024 * 8
elif unit == 'MB':
val_in_bytes = int(setting) * 1024 * 1024
if val_in_bytes is not None and val_in_bytes < 0:
val_in_bytes = 0
setting_name = i[0]
pretty_val = self.__get_pretty_val(setting_name)
pending_restart = None
if pend_rest_col_exists:
pending_restart = i[9]
set_dict[setting_name] = dict(
setting=setting,
unit=unit,
context=i[3],
vartype=i[4],
boot_val=i[5] if i[5] else '',
min_val=i[6] if i[6] else '',
max_val=i[7] if i[7] else '',
sourcefile=i[8] if i[8] else '',
pretty_val=pretty_val,
)
if val_in_bytes is not None:
set_dict[setting_name]['val_in_bytes'] = val_in_bytes
if pending_restart is not None:
set_dict[setting_name]['pending_restart'] = pending_restart
if pending_restart:
self.pg_info["pending_restart_settings"].append(setting_name)
self.pg_info["settings"] = set_dict
def get_repl_info(self):
"""
Get information about replication if the server is a master.
"""
# Check that pg_replication_slots exists:
res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
"information_schema.tables "
"WHERE table_name = 'pg_stat_replication')")
if not res[0][0]:
return True
query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
"r.client_hostname, r.backend_start::text, r.state "
"FROM pg_stat_replication AS r "
"JOIN pg_authid AS a ON r.usesysid = a.oid")
res = self.__exec_sql(query)
# If there is no replication:
if not res:
return True
repl_dict = {}
for i in res:
repl_dict[i[0]] = dict(
usename=i[1],
app_name=i[2] if i[2] else '',
client_addr=i[3],
client_hostname=i[4] if i[4] else '',
backend_start=i[5],
state=i[6],
)
self.pg_info["replications"] = repl_dict
def get_lang_info(self):
"""
Get information about current supported languages.
"""
query = ("SELECT l.lanname, a.rolname, l.lanacl "
"FROM pg_language AS l "
"JOIN pg_authid AS a ON l.lanowner = a.oid")
res = self.__exec_sql(query)
lang_dict = {}
for i in res:
lang_dict[i[0]] = dict(
lanowner=i[1],
lanacl=i[2] if i[2] else '',
)
return lang_dict
def get_namespaces(self):
"""
Get information about namespaces.
"""
query = ("SELECT n.nspname, a.rolname, n.nspacl "
"FROM pg_catalog.pg_namespace AS n "
"JOIN pg_authid AS a ON a.oid = n.nspowner")
res = self.__exec_sql(query)
nsp_dict = {}
for i in res:
nsp_dict[i[0]] = dict(
nspowner=i[1],
nspacl=i[2] if i[2] else '',
)
return nsp_dict
def get_pg_version(self):
query = "SELECT version()"
raw = self.__exec_sql(query)[0][0]
raw = raw.split()[1].split('.')
self.pg_info["version"] = dict(
major=int(raw[0]),
minor=int(raw[1]),
)
def get_db_info(self):
# Following query returns:
# Name, Owner, Encoding, Collate, Ctype, Access Priv, Size
query = ("SELECT d.datname, "
"pg_catalog.pg_get_userbyid(d.datdba), "
"pg_catalog.pg_encoding_to_char(d.encoding), "
"d.datcollate, "
"d.datctype, "
"pg_catalog.array_to_string(d.datacl, E'\n'), "
"CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
"THEN pg_catalog.pg_database_size(d.datname)::text "
"ELSE 'No Access' END, "
"t.spcname "
"FROM pg_catalog.pg_database AS d "
"JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
"WHERE d.datname != 'template0'")
res = self.__exec_sql(query)
db_dict = {}
for i in res:
db_dict[i[0]] = dict(
owner=i[1],
encoding=i[2],
collate=i[3],
ctype=i[4],
access_priv=i[5] if i[5] else '',
size=i[6],
)
for datname in db_dict:
self.cursor = self.db_obj.reconnect(datname)
db_dict[datname]['namespaces'] = self.get_namespaces()
db_dict[datname]['extensions'] = self.get_ext_info()
db_dict[datname]['languages'] = self.get_lang_info()
self.pg_info["databases"] = db_dict
def __get_pretty_val(self, setting):
return self.__exec_sql("SHOW %s" % setting)[0][0]
def __exec_sql(self, query):
try:
self.cursor.execute(query)
res = self.cursor.fetchall()
if res:
return res
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
self.cursor.close()
except psycopg2.ProgrammingError as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
self.cursor.close()
return False
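# Illustrative sketch (not part of the original module): how PgClusterInfo.collect()
# interprets the ``filter`` option. Subset names are matched with fnmatch-style
# wildcards, a leading '!' excludes a subset, and include patterns win over excludes.
# The helper below mirrors that selection logic for the subset names used above; it is
# never called by the module itself.
def _demo_filter_selection(patterns,
                           subsets=('version', 'tablespaces', 'databases',
                                    'replications', 'repl_slots', 'settings', 'roles')):
    """Return the subset names that collect() would gather for the given patterns."""
    includes = [p for p in patterns if not p.startswith('!')]
    excludes = [p.lstrip('!') for p in patterns if p.startswith('!')]
    if includes:
        return [s for s in subsets if any(fnmatch(s, p) for p in includes)]
    if excludes:
        return [s for s in subsets if not any(fnmatch(s, p) for p in excludes)]
    return list(subsets)
# Examples:
#   _demo_filter_selection(['dat*', 'rol*'])      -> ['databases', 'roles']
#   _demo_filter_selection(['!settings'])         -> every subset except 'settings'
#   _demo_filter_selection(['!settings', 'ver*']) -> ['version']  (includes win)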
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
port=dict(type='int', default=5432, aliases=['login_port']),
filter=dict(type='list'),
ssl_mode=dict(type='str', default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
ca_cert=dict(type='str', aliases=['ssl_rootcert']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_PSYCOPG2:
module.fail_json(msg="The python psycopg2 module is required")
filter_ = module.params["filter"]
sslrootcert = module.params["ca_cert"]
session_role = module.params["session_role"]
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert:
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order '
                         'to use the ca_cert parameter')
db_conn_obj = PgDbConn(module, kw, session_role)
# Do job:
pg_info = PgClusterInfo(module, db_conn_obj)
module.exit_json(**pg_info.collect(filter_))
if __name__ == '__main__':
main()
|
|
class BaseStemmer(object):
def __init__(self):
self.set_current("")
self.maxCacheSize = 10000
self._cache = {}
self._counter = 0
def set_current(self, value):
'''
Set the self.current string.
'''
self.current = value
self.cursor = 0
self.limit = len(self.current)
self.limit_backward = 0
self.bra = self.cursor
self.ket = self.limit
def get_current(self):
'''
Get the self.current string.
'''
return self.current
def copy_from(self, other):
self.current = other.current
self.cursor = other.cursor
self.limit = other.limit
self.limit_backward = other.limit_backward
self.bra = other.bra
self.ket = other.ket
def in_grouping(self, s, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor += 1
return True
def in_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor -= 1
return True
def out_grouping(self, s, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
self.cursor += 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor += 1
return True
return False
def out_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
self.cursor -= 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor -= 1
return True
return False
def in_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
self.cursor += 1
return True
def in_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
self.cursor -= 1
return True
def out_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if not (ch > max or ch < min):
return False
self.cursor += 1
return True
def out_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if not (ch > max or ch < min):
return False
self.cursor -= 1
return True
def eq_s(self, s_size, s):
if self.limit - self.cursor < s_size:
return False
if self.current[self.cursor:self.cursor + s_size] != s:
return False
self.cursor += s_size
return True
def eq_s_b(self, s_size, s):
if self.cursor - self.limit_backward < s_size:
return False
if self.current[self.cursor - s_size:self.cursor] != s:
return False
self.cursor -= s_size
return True
def eq_v(self, s):
return self.eq_s(len(s), s)
def eq_v_b(self, s):
return self.eq_s_b(len(s), s)
def find_among(self, v, v_size):
i = 0
j = v_size
c = self.cursor
l = self.limit
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
            common = min(common_i, common_j)  # smaller of common_i and common_j
w = v[k]
for i2 in range(common, w.s_size):
if c + common == l:
diff = -1
break
diff = ord(self.current[c + common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break # v->s has been inspected
if j == i:
break # only one item in v
# - but now we need to go round once more to get
# v->s inspected. self looks messy, but is actually
# the optimal approach.
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c + w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c + w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
def find_among_b(self, v, v_size):
'''
find_among_b is for backwards processing. Same comments apply
'''
i = 0
j = v_size
c = self.cursor
        lb = self.limit_backward
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
common = min(common_i, common_j)
w = v[k]
for i2 in range(w.s_size - 1 - common, -1, -1):
if c - common == lb:
diff = -1
break
diff = ord(self.current[c - 1 - common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break
if j == i:
break
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c - w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c - w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
def replace_s(self, c_bra, c_ket, s):
'''
to replace chars between c_bra and c_ket in self.current by the
chars in s.
@type c_bra int
@type c_ket int
@type s: string
'''
adjustment = len(s) - (c_ket - c_bra)
self.current = self.current[0:c_bra] + s + self.current[c_ket:]
self.limit += adjustment
if self.cursor >= c_ket:
self.cursor += adjustment
elif self.cursor > c_bra:
self.cursor = c_bra
return adjustment
def slice_check(self):
if self.bra < 0 or self.bra > self.ket or self.ket > self.limit or self.limit > len(self.current):
return False
return True
def slice_from(self, s):
'''
@type s string
'''
result = False
if self.slice_check():
self.replace_s(self.bra, self.ket, s)
result = True
return result
def slice_del(self):
return self.slice_from("")
def insert(self, c_bra, c_ket, s):
'''
@type c_bra int
@type c_ket int
@type s: string
'''
adjustment = self.replace_s(c_bra, c_ket, s)
if c_bra <= self.bra:
self.bra += adjustment
if c_bra <= self.ket:
self.ket += adjustment
def slice_to(self, s):
'''
Copy the slice into the supplied StringBuffer
@type s: string
'''
result = ''
if self.slice_check():
result = self.current[self.bra:self.ket]
return result
def assign_to(self, s):
'''
@type s: string
'''
return self.current[0:self.limit]
def _stem_word(self, word):
cache = self._cache.get(word)
if cache is None:
self.set_current(word)
self._stem()
result = self.get_current()
self._cache[word] = [result, self._counter]
else:
cache[1] = self._counter
result = cache[0]
self._counter += 1
return result
def _clear_cache(self):
removecount = int(len(self._cache) - self.maxCacheSize * 8 / 10)
oldcaches = sorted(self._cache.items(), key=lambda cache: cache[1][1])[0:removecount]
for key, value in oldcaches:
del self._cache[key]
def stemWord(self, word):
result = self._stem_word(word)
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
def stemWords(self, words):
result = [self._stem_word(word) for word in words]
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
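# Minimal usage sketch (not part of the original stemmer runtime above). A concrete
# Snowball stemmer subclasses BaseStemmer and implements _stem() with the cursor /
# bra / ket slice primitives. The toy subclass below is illustrative only -- it merely
# strips a trailing "s" -- but it exercises set_current(), slice_del() and the
# stemWord() result cache.
class _ToyPluralStemmer(BaseStemmer):
    def _stem(self):
        # Mark the last character for deletion when the word ends in "s".
        if self.limit > 1 and self.current[self.limit - 1] == 's':
            self.bra = self.limit - 1
            self.ket = self.limit
            self.slice_del()
        return True

if __name__ == '__main__':
    stemmer = _ToyPluralStemmer()
    print(stemmer.stemWords(['cats', 'cat', 'glass']))  # -> ['cat', 'cat', 'glas']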
|
|
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipes for PNaCl toolchain packages.
Recipes consist of specially-structured dictionaries, with keys for package
name, type, commands to execute, etc. The structure is documented in the
PackageBuilder docstring in toolchain_main.py.
The real entry plumbing and CLI flags are also in toolchain_main.py.
"""
import fnmatch
import logging
import os
import shutil
import sys
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.file_tools
import pynacl.gsd_storage
import pynacl.platform
import pynacl.repo_tools
import command
import pnacl_commands
import pnacl_sandboxed_translator
import pnacl_targetlibs
import toolchain_main
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_DIR = os.path.dirname(SCRIPT_DIR)
# Use the argparse from third_party to ensure it's the same on all platforms
python_lib_dir = os.path.join(os.path.dirname(NACL_DIR), 'third_party',
'python_libs', 'argparse')
sys.path.insert(0, python_lib_dir)
import argparse
PNACL_DRIVER_DIR = os.path.join(NACL_DIR, 'pnacl', 'driver')
NACL_TOOLS_DIR = os.path.join(NACL_DIR, 'tools')
# Scons tests can check this version number to decide whether to enable tests
# for toolchain bug fixes or new features. This allows tests to be enabled on
# the toolchain buildbots/trybots before the new toolchain version is pinned
# (i.e. before the tests would pass on the main NaCl buildbots/trybots).
# If you are adding a test that depends on a toolchain change, you can
# increment this version number manually.
FEATURE_VERSION = 12
# For backward compatibility, these key names match the directory names
# previously used with gclient
GIT_REPOS = {
'binutils': 'nacl-binutils.git',
'clang': 'pnacl-clang.git',
'llvm': 'pnacl-llvm.git',
'gcc': 'pnacl-gcc.git',
'libcxx': 'pnacl-libcxx.git',
'libcxxabi': 'pnacl-libcxxabi.git',
'nacl-newlib': 'nacl-newlib.git',
'llvm-test-suite': 'pnacl-llvm-testsuite.git',
'compiler-rt': 'pnacl-compiler-rt.git',
'subzero': 'pnacl-subzero.git',
'binutils-x86': 'nacl-binutils.git',
}
GIT_BASE_URL = 'https://chromium.googlesource.com/native_client/'
GIT_PUSH_URL = 'https://chromium.googlesource.com/native_client/'
GIT_DEPS_FILE = os.path.join(NACL_DIR, 'pnacl', 'COMPONENT_REVISIONS')
ALT_GIT_BASE_URL = 'https://chromium.googlesource.com/a/native_client/'
KNOWN_MIRRORS = [('http://git.chromium.org/native_client/', GIT_BASE_URL)]
PUSH_MIRRORS = [('http://git.chromium.org/native_client/', GIT_PUSH_URL),
(ALT_GIT_BASE_URL, GIT_PUSH_URL),
(GIT_BASE_URL, GIT_PUSH_URL),
('ssh://gerrit.chromium.org/native_client/', GIT_PUSH_URL)]
PACKAGE_NAME = 'Native Client SDK [%(build_signature)s]'
BUG_URL = 'http://gonacl.com/reportissue'
# TODO(dschuff): Some of this mingw logic duplicates stuff in command.py
BUILD_CROSS_MINGW = False
# Path to the mingw cross-compiler libs on Ubuntu
CROSS_MINGW_LIBPATH = '/usr/lib/gcc/i686-w64-mingw32/4.6'
# Path and version of the native mingw compiler to be installed on Windows hosts
MINGW_PATH = os.path.join(NACL_DIR, 'mingw32')
MINGW_VERSION = 'i686-w64-mingw32-4.8.1'
CHROME_CLANG = os.path.join(os.path.dirname(NACL_DIR), 'third_party',
'llvm-build', 'Release+Asserts', 'bin', 'clang')
CHROME_CLANGXX = CHROME_CLANG + '++'
# Redirectors are small shims acting like sym links with optional arguments.
# For mac/linux we simply use a shell script which create small redirector
# shell scripts. For windows we compile an executable which redirects to
# the target using a compiled in table.
REDIRECTOR_SCRIPT = os.path.join(NACL_TOOLS_DIR, 'create_redirector.sh')
REDIRECTOR_WIN32_SRC = os.path.join(NACL_TOOLS_DIR, 'redirector')
TOOL_X64_I686_REDIRECTS = [
#Toolname, Tool Args
('as', '--32'),
('ld', '-melf_i386_nacl'),
]
BINUTILS_PROGS = ['addr2line', 'ar', 'as', 'c++filt', 'elfedit', 'ld',
'ld.bfd', 'ld.gold', 'nm', 'objcopy', 'objdump', 'ranlib',
'readelf', 'size', 'strings', 'strip']
TRANSLATOR_ARCHES = ('x86-32', 'x86-64', 'arm', 'mips32',
'x86-32-nonsfi', 'arm-nonsfi')
SANDBOXED_TRANSLATOR_ARCHES = ('x86-32', 'x86-64', 'arm', 'mips32')
# MIPS32 doesn't use biased bitcode, and nonsfi targets don't need it.
BITCODE_BIASES = tuple(
bias for bias in ('le32', 'i686_bc', 'x86_64_bc', 'arm_bc'))
DIRECT_TO_NACL_ARCHES = ['x86_64', 'i686', 'arm']
MAKE_DESTDIR_CMD = ['make', 'DESTDIR=%(abs_output)s']
def TripleIsWindows(t):
return fnmatch.fnmatch(t, '*-mingw32*')
def TripleIsCygWin(t):
return fnmatch.fnmatch(t, '*-cygwin*')
def TripleIsLinux(t):
return fnmatch.fnmatch(t, '*-linux*')
def TripleIsMac(t):
return fnmatch.fnmatch(t, '*-darwin*')
def TripleIsX8664(t):
return fnmatch.fnmatch(t, 'x86_64*')
def HostIsDebug(options):
return options.host_flavor == 'debug'
def ProgramPath(program):
"""Returns the path for the given program, or None if it doesn't exist."""
try:
return pynacl.file_tools.Which(program)
except pynacl.file_tools.ExecutableNotFound:
pass
return None
# Return a tuple (C compiler, C++ compiler, ar, ranlib) of the compilers and
# tools to compile the host toolchains.
def CompilersForHost(host):
compiler = {
# For now we only do native builds for linux and mac
# treat 32-bit linux like a native build
'i686-linux': (CHROME_CLANG, CHROME_CLANGXX, 'ar', 'ranlib'),
'x86_64-linux': (CHROME_CLANG, CHROME_CLANGXX, 'ar', 'ranlib'),
'x86_64-apple-darwin': (CHROME_CLANG, CHROME_CLANGXX, 'ar', 'ranlib'),
# Windows build should work for native and cross
'i686-w64-mingw32': (
'i686-w64-mingw32-gcc', 'i686-w64-mingw32-g++', 'ar', 'ranlib'),
# TODO: add arm-hosted support
'i686-pc-cygwin': ('gcc', 'g++', 'ar', 'ranlib'),
}
if host == 'le32-nacl':
nacl_sdk = os.environ.get('NACL_SDK_ROOT')
assert nacl_sdk, 'NACL_SDK_ROOT not set'
pnacl_bin_dir = os.path.join(nacl_sdk, 'toolchain/linux_pnacl/bin')
compiler.update({
'le32-nacl': (os.path.join(pnacl_bin_dir, 'pnacl-clang'),
os.path.join(pnacl_bin_dir, 'pnacl-clang++'),
os.path.join(pnacl_bin_dir, 'pnacl-ar'),
os.path.join(pnacl_bin_dir, 'pnacl-ranlib')),
})
return compiler[host]
def GSDJoin(*args):
return '_'.join([pynacl.gsd_storage.LegalizeName(arg) for arg in args])
# name of a build target, including build flavor (debug/release)
def FlavoredName(component_name, host, options):
joined_name = GSDJoin(component_name, host)
if HostIsDebug(options):
joined_name= joined_name + '_debug'
return joined_name
def HostArchToolFlags(host, extra_cflags, opts):
"""Return the appropriate CFLAGS, CXXFLAGS, and LDFLAGS based on host
and opts. Does not attempt to determine flags that are attached
to CC and CXX directly.
"""
extra_cc_flags = list(extra_cflags)
result = { 'LDFLAGS' : [],
'CFLAGS' : [],
'CXXFLAGS' : []}
if TripleIsWindows(host):
result['LDFLAGS'] += ['-L%(abs_libdl)s', '-ldl']
result['CFLAGS'] += ['-isystem','%(abs_libdl)s']
result['CXXFLAGS'] += ['-isystem', '%(abs_libdl)s']
else:
if TripleIsLinux(host) and not TripleIsX8664(host):
# Chrome clang defaults to 64-bit builds, even when run on 32-bit Linux.
extra_cc_flags += ['-m32']
elif TripleIsMac(host):
# This is required for building with recent libc++ against OSX 10.6
extra_cc_flags += ['-U__STRICT_ANSI__']
if opts.gcc or host == 'le32-nacl':
result['CFLAGS'] += extra_cc_flags
result['CXXFLAGS'] += extra_cc_flags
else:
result['CFLAGS'] += extra_cc_flags
result['LDFLAGS'] += ['-L%(' + FlavoredName('abs_libcxx',
host, opts) + ')s/lib']
result['CXXFLAGS'] += ([
'-stdlib=libc++',
'-I%(' + FlavoredName('abs_libcxx', host, opts) + ')s/include/c++/v1'] +
extra_cc_flags)
return result
def ConfigureHostArchFlags(host, extra_cflags, options, extra_configure=None):
""" Return flags passed to LLVM and binutils configure for compilers and
compile flags. """
configure_args = []
extra_cc_args = []
configure_args += options.extra_configure_args
if extra_configure is not None:
configure_args += extra_configure
if options.extra_cc_args is not None:
extra_cc_args += [options.extra_cc_args]
native = pynacl.platform.PlatformTriple()
is_cross = host != native
if TripleIsLinux(host) and not TripleIsX8664(host):
assert TripleIsLinux(native)
# NaCl/Chrome buildbots run 32 bit userspace on a 64 bit kernel. configure
# guesses that the host is 64-bit even though we want a 32-bit build. But
# it's still "native enough", so force --build rather than --host.
# PlatformTriple() returns 32-bit, so this does not appear as a cross build
# here.
configure_args.append('--build=' + host)
elif is_cross:
configure_args.append('--host=' + host)
extra_cxx_args = list(extra_cc_args)
if not options.gcc:
cc, cxx, ar, ranlib = CompilersForHost(host)
if ProgramPath('ccache'):
# Set CCACHE_CPP2 envvar, to avoid an error due to a strange
# ccache/clang++ interaction. Specifically, errors about
# "argument unused during compilation".
os.environ['CCACHE_CPP2'] = 'yes'
cc_list = ['ccache', cc]
cxx_list = ['ccache', cxx]
extra_cc_args += ['-Qunused-arguments']
extra_cxx_args += ['-Qunused-arguments']
else:
cc_list = [cc]
cxx_list = [cxx]
configure_args.append('CC=' + ' '.join(cc_list + extra_cc_args))
configure_args.append('CXX=' + ' '.join(cxx_list + extra_cxx_args))
configure_args.append('AR=' + ar)
configure_args.append('RANLIB=' + ranlib)
tool_flags = HostArchToolFlags(host, extra_cflags, options)
configure_args.extend(
['CFLAGS=' + ' '.join(tool_flags['CFLAGS']),
'CXXFLAGS=' + ' '.join(tool_flags['CXXFLAGS']),
'LDFLAGS=' + ' '.join(tool_flags['LDFLAGS']),
])
if TripleIsWindows(host):
# The i18n support brings in runtime dependencies on MinGW DLLs
# that we don't want to have to distribute alongside our binaries.
# So just disable it, and compiler messages will always be in US English.
configure_args.append('--disable-nls')
if is_cross:
# LLVM's linux->mingw cross build needs this
configure_args.append('CC_FOR_BUILD=gcc')
return configure_args
def LibCxxHostArchFlags(host):
cc, cxx, _, _ = CompilersForHost(host)
cmake_flags = []
cmake_flags.extend(['-DCMAKE_C_COMPILER='+cc, '-DCMAKE_CXX_COMPILER='+cxx])
if TripleIsLinux(host) and not TripleIsX8664(host):
# Chrome clang defaults to 64-bit builds, even when run on 32-bit Linux
cmake_flags.extend(['-DCMAKE_C_FLAGS=-m32',
'-DCMAKE_CXX_FLAGS=-m32'])
return cmake_flags
def CmakeHostArchFlags(host, options):
""" Set flags passed to LLVM cmake for compilers and compile flags. """
cmake_flags = []
cc, cxx, _, _ = CompilersForHost(host)
cmake_flags.extend(['-DCMAKE_C_COMPILER='+cc, '-DCMAKE_CXX_COMPILER='+cxx])
if ProgramPath('ccache'):
cmake_flags.extend(['-DSYSTEM_HAS_CCACHE=ON'])
# There seems to be a bug in chrome clang where it exposes the msan interface
# (even when compiling without msan) but then does not link with an
# msan-enabled compiler_rt, leaving references to __msan_allocated_memory
# undefined.
cmake_flags.append('-DHAVE_SANITIZER_MSAN_INTERFACE_H=FALSE')
tool_flags = HostArchToolFlags(host, [], options)
cmake_flags.extend(['-DCMAKE_C_FLAGS=' + ' '.join(tool_flags['CFLAGS'])])
cmake_flags.extend(['-DCMAKE_CXX_FLAGS=' + ' '.join(tool_flags['CXXFLAGS'])])
for linker_type in ['EXE', 'SHARED', 'MODULE']:
cmake_flags.extend([('-DCMAKE_%s_LINKER_FLAGS=' % linker_type) +
' '.join(tool_flags['LDFLAGS'])])
return cmake_flags
def ConfigureBinutilsCommon():
return ['--with-pkgversion=' + PACKAGE_NAME,
'--with-bugurl=' + BUG_URL,
'--without-zlib',
'--prefix=',
'--disable-silent-rules',
'--enable-deterministic-archives',
]
def LLVMConfigureAssertionsFlags(options):
if options.enable_llvm_assertions:
return []
else:
return ['--disable-debug', '--disable-assertions']
def MakeCommand(host):
make_command = ['make']
if not pynacl.platform.IsWindows() or pynacl.platform.IsCygWin():
# The make that ships with msys sometimes hangs when run with -j.
# The ming32-make that comes with the compiler itself reportedly doesn't
# have this problem, but it has issues with pathnames with LLVM's build.
make_command.append('-j%(cores)s')
if TripleIsWindows(host):
# There appears to be nothing we can pass at top-level configure time
# that will prevent the configure scripts from finding MinGW's libiconv
# and using it. We have to force this variable into the environment
# of the sub-configure runs, which are run via make.
make_command.append('HAVE_LIBICONV=no')
return make_command
def CopyWindowsHostLibs(host):
if not TripleIsWindows(host) and not TripleIsCygWin(host):
return []
if TripleIsCygWin(host):
lib_path = '/bin'
libs = ('cyggcc_s-1.dll', 'cygiconv-2.dll', 'cygwin1.dll',
'cygintl-8.dll', 'cygstdc++-6.dll', 'cygz.dll')
elif pynacl.platform.IsWindows():
lib_path = os.path.join(MINGW_PATH, 'bin')
# The native minGW compiler uses winpthread, but the Ubuntu cross compiler
# does not.
libs = ('libgcc_s_sjlj-1.dll', 'libstdc++-6.dll', 'libwinpthread-1.dll')
else:
lib_path = os.path.join(CROSS_MINGW_LIBPATH)
libs = ('libgcc_s_sjlj-1.dll', 'libstdc++-6.dll')
return [command.Copy(
os.path.join(lib_path, lib),
os.path.join('%(output)s', 'bin', lib))
for lib in libs]
def GetGitSyncCmdsCallback(revisions):
"""Return a callback which returns the git sync commands for a component.
This allows all the revision information to be processed here while giving
other modules like pnacl_targetlibs.py the ability to define their own
source targets with minimal boilerplate.
"""
def GetGitSyncCmds(component):
git_url = GIT_BASE_URL + GIT_REPOS[component]
git_push_url = GIT_PUSH_URL + GIT_REPOS[component]
return (command.SyncGitRepoCmds(git_url, '%(output)s', revisions[component],
git_cache='%(git_cache_dir)s',
push_url=git_push_url,
known_mirrors=KNOWN_MIRRORS,
push_mirrors=PUSH_MIRRORS) +
[command.Runnable(lambda opts: opts.IsBot(),
pnacl_commands.CmdCheckoutGitBundleForTrybot,
component, '%(output)s')])
return GetGitSyncCmds
def HostToolsSources(GetGitSyncCmds):
sources = {
'libcxx_src': {
'type': 'source',
'output_dirname': 'libcxx',
'commands': GetGitSyncCmds('libcxx'),
},
'libcxxabi_src': {
'type': 'source',
'output_dirname': 'libcxxabi',
'commands': GetGitSyncCmds('libcxxabi'),
},
'binutils_pnacl_src': {
'type': 'source',
'output_dirname': 'binutils',
'commands': GetGitSyncCmds('binutils'),
},
# For some reason, the llvm build using --with-clang-srcdir chokes if the
# clang source directory is named something other than 'clang', so don't
# change output_dirname for clang.
'clang_src': {
'type': 'source',
'output_dirname': 'clang',
'commands': GetGitSyncCmds('clang'),
},
'llvm_src': {
'type': 'source',
'output_dirname': 'llvm',
'commands': GetGitSyncCmds('llvm'),
},
'subzero_src': {
'type': 'source',
'output_dirname': 'subzero',
'commands': GetGitSyncCmds('subzero'),
},
'binutils_x86_src': {
'type': 'source',
'output_dirname': 'binutils-x86',
'commands': GetGitSyncCmds('binutils-x86'),
},
}
return sources
def TestsuiteSources(GetGitSyncCmds):
sources = {
'llvm_testsuite_src': {
'type': 'source',
'output_dirname': 'llvm-test-suite',
'commands': GetGitSyncCmds('llvm-test-suite'),
},
}
return sources
def CopyHostLibcxxForLLVMBuild(host, dest, options):
"""Copy libc++ to the working directory for build tools."""
if options.gcc:
return []
if TripleIsLinux(host):
libname = 'libc++.so.1'
elif TripleIsMac(host):
libname = 'libc++.1.dylib'
else:
return []
return [command.Mkdir(dest, parents=True),
command.Copy('%(' +
FlavoredName('abs_libcxx', host, options) +')s/lib/' +
libname, os.path.join(dest, libname))]
def CreateSymLinksToDirectToNaClTools(host):
if host == 'le32-nacl':
return []
return (
[command.Command(['ln', '-f',
command.path.join('%(output)s', 'bin','clang'),
command.path.join('%(output)s', 'bin',
arch + '-nacl-clang')])
for arch in DIRECT_TO_NACL_ARCHES] +
[command.Command(['ln', '-f',
command.path.join('%(output)s', 'bin','clang'),
command.path.join('%(output)s', 'bin',
arch + '-nacl-clang++')])
for arch in DIRECT_TO_NACL_ARCHES])
def HostLibs(host, options):
def H(component_name):
# Return a package name for a component name with a host triple.
return FlavoredName(component_name, host, options)
libs = {}
if TripleIsWindows(host):
if pynacl.platform.IsWindows():
ar = 'ar'
else:
ar = 'i686-w64-mingw32-ar'
libs.update({
'libdl': {
'type': 'build',
'inputs' : { 'src' : os.path.join(NACL_DIR, '..', 'third_party',
'dlfcn-win32') },
'commands': [
command.CopyTree('%(src)s', 'src'),
command.Command(['i686-w64-mingw32-gcc',
'-o', 'dlfcn.o', '-c',
os.path.join('src', 'dlfcn.c'),
'-Wall', '-O3', '-fomit-frame-pointer']),
command.Command([ar, 'cru',
'libdl.a', 'dlfcn.o']),
command.Copy('libdl.a',
os.path.join('%(output)s', 'libdl.a')),
command.Copy(os.path.join('src', 'dlfcn.h'),
os.path.join('%(output)s', 'dlfcn.h')),
],
},
})
elif not options.gcc:
# Libc++ is only tested with the clang build
libs.update({
H('libcxx'): {
'dependencies': ['libcxx_src', 'libcxxabi_src'],
'type': 'build',
'commands': [
command.SkipForIncrementalCommand([
'cmake', '-G', 'Unix Makefiles'] +
LibCxxHostArchFlags(host) +
['-DLIBCXX_CXX_ABI=libcxxabi',
'-DLIBCXX_LIBCXXABI_INCLUDE_PATHS=' + command.path.join(
'%(abs_libcxxabi_src)s', 'include'),
'-DLIBCXX_ENABLE_SHARED=ON',
'-DCMAKE_INSTALL_PREFIX=',
'-DCMAKE_INSTALL_NAME_DIR=@executable_path/../lib',
'%(libcxx_src)s']),
command.Command(MakeCommand(host) + ['VERBOSE=1']),
command.Command(MAKE_DESTDIR_CMD + ['VERBOSE=1', 'install']),
],
},
})
return libs
def HostTools(host, options):
def H(component_name):
# Return a package name for a component name with a host triple.
return FlavoredName(component_name, host, options)
# Return the file name with the appropriate suffix for an executable file.
def Exe(file):
if TripleIsWindows(host):
return file + '.exe'
else:
return file
# TODO(jfb): gold's build currently generates the following error on Windows:
# too many arguments for format.
binutils_do_werror = not TripleIsWindows(host)
extra_gold_deps = []
if host == 'le32-nacl':
# TODO(bradnelson): Fix warnings so this can go away.
binutils_do_werror = False
extra_gold_deps = [H('llvm')]
# Binutils still has some warnings when building with clang
if not options.gcc:
warning_flags = ['-Wno-extended-offsetof', '-Wno-absolute-value',
'-Wno-unused-function', '-Wno-unused-const-variable',
'-Wno-unneeded-internal-declaration',
'-Wno-unused-private-field', '-Wno-format-security']
else:
warning_flags = ['-Wno-unused-function', '-Wno-unused-value']
# The binutils git checkout includes all the directories in the
# upstream binutils-gdb.git repository, but some of these
# directories are not included in a binutils release tarball. The
# top-level Makefile will try to build whichever of the whole set
# exist, but we don't want these extra directories built. So we
# stub them out by creating dummy <subdir>/Makefile files; having
# these exist before the configure-<subdir> target in the
# top-level Makefile runs prevents it from doing anything.
binutils_dummy_dirs = ['gdb', 'libdecnumber', 'readline', 'sim']
def DummyDirCommands(dirs):
dummy_makefile = """\
.DEFAULT:;@echo Ignoring $@
"""
commands = []
for dir in dirs:
commands.append(command.Mkdir(dir))
commands.append(command.WriteData(
dummy_makefile, command.path.join(dir, 'Makefile')))
return commands
tools = {
# The binutils_pnacl package is used both for bitcode linking (gold) and
# for its conventional use with arm-nacl-clang.
H('binutils_pnacl'): {
'dependencies': ['binutils_pnacl_src'] + extra_gold_deps,
'type': 'build',
'inputs' : { 'macros': os.path.join(NACL_DIR,
'pnacl', 'support', 'clang_direct', 'nacl-arm-macros.s')},
'commands': [
command.SkipForIncrementalCommand([
'sh',
'%(binutils_pnacl_src)s/configure'] +
ConfigureBinutilsCommon() +
ConfigureHostArchFlags(
host, warning_flags, options,
options.binutils_pnacl_extra_configure) +
[
'--enable-gold=yes',
'--enable-plugins',
'--enable-shared=no',
'--enable-targets=arm-nacl,i686-nacl,x86_64-nacl,mipsel-nacl',
'--enable-werror=' + ('yes' if binutils_do_werror else 'no'),
'--program-prefix=le32-nacl-',
'--target=arm-nacl',
'--with-sysroot=/le32-nacl',
'--without-gas'
])] + DummyDirCommands(binutils_dummy_dirs) + [
command.Command(MakeCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip'])] +
[command.RemoveDirectory(os.path.join('%(output)s', dir))
for dir in ('lib', 'lib32')] +
# Since it has dual use, just create links for both sets of names
# (i.e. le32-nacl-foo and arm-nacl-foo)
# TODO(dschuff): Use the redirector scripts here like binutils_x86
[command.Command([
'ln', '-f',
command.path.join('%(output)s', 'bin', 'le32-nacl-' + tool),
command.path.join('%(output)s', 'bin', 'arm-nacl-' + tool)])
for tool in BINUTILS_PROGS] +
# Gold is the default linker for PNaCl, but BFD ld is the default
# for nacl-clang, so re-link the version that arm-nacl-clang will
# use.
[command.Command([
'ln', '-f',
command.path.join('%(output)s', 'arm-nacl', 'bin', 'ld.bfd'),
command.path.join('%(output)s', 'arm-nacl', 'bin', 'ld')]),
command.Copy(
'%(macros)s',
os.path.join(
'%(output)s', 'arm-nacl', 'lib', 'nacl-arm-macros.s'))]
},
H('driver'): {
'type': 'build',
'output_subdir': 'bin',
'inputs': { 'src': PNACL_DRIVER_DIR },
'commands': [
command.Runnable(
None,
pnacl_commands.InstallDriverScripts,
'%(src)s', '%(output)s',
host_windows=TripleIsWindows(host) or TripleIsCygWin(host),
host_64bit=TripleIsX8664(host))
],
},
}
asan_flags = []
if options.sanitize:
asan_flags.append('-DLLVM_USE_SANITIZER=%s' % options.sanitize.capitalize())
# TODO(jfb) Windows currently uses MinGW's GCC 4.8.1 which generates warnings
# on upstream LLVM code. Turn on -Werror once these are fixed.
# The same applies for the default GCC on current Ubuntu.
llvm_do_werror = not (TripleIsWindows(host) or options.gcc)
llvm_cmake = {
H('llvm'): {
'dependencies': ['clang_src', 'llvm_src', 'binutils_pnacl_src',
'subzero_src'],
'inputs': {'test_xfails': os.path.join(NACL_DIR, 'pnacl', 'scripts')},
'type': 'build',
'commands': [
command.SkipForIncrementalCommand([
'cmake', '-G', 'Ninja'] +
CmakeHostArchFlags(host, options) + asan_flags +
[
'-DBUILD_SHARED_LIBS=ON',
'-DCMAKE_BUILD_TYPE=' + ('Debug' if HostIsDebug(options)
else 'Release'),
'-DCMAKE_INSTALL_PREFIX=%(output)s',
'-DCMAKE_INSTALL_RPATH=$ORIGIN/../lib',
'-DLLVM_APPEND_VC_REV=ON',
'-DLLVM_BINUTILS_INCDIR=%(abs_binutils_pnacl_src)s/include',
'-DLLVM_BUILD_TESTS=ON',
'-DLLVM_ENABLE_ASSERTIONS=ON',
'-DLLVM_ENABLE_LIBCXX=OFF',
              '-DLLVM_ENABLE_WERROR=' + ('ON' if llvm_do_werror else 'OFF'),
'-DLLVM_ENABLE_ZLIB=OFF',
'-DLLVM_EXTERNAL_CLANG_SOURCE_DIR=%(clang_src)s',
'-DLLVM_EXTERNAL_SUBZERO_SOURCE_DIR=%(subzero_src)s',
'-DLLVM_INSTALL_UTILS=ON',
'-DLLVM_TARGETS_TO_BUILD=X86;ARM;Mips;JSBackend',
'-DSUBZERO_TARGETS_TO_BUILD=X8632;ARM32',
'%(llvm_src)s'],
# Older CMake ignore CMAKE_*_LINKER_FLAGS during config step.
# https://public.kitware.com/Bug/view.php?id=14066
# The workaround is to set LDFLAGS in the environment.
# TODO(jvoung): remove the ability to override env vars
# from "command" once the CMake fix propagates and we can
# stop using this env var hack.
env={'LDFLAGS' : ' '.join(
HostArchToolFlags(host, [], options)['LDFLAGS'])})] +
CopyHostLibcxxForLLVMBuild(host, 'lib', options) +
[command.Command(['ninja', '-v']),
command.Command(['ninja', 'install'])] +
CreateSymLinksToDirectToNaClTools(host)
},
}
cleanup_static_libs = []
shared = []
if host != 'le32-nacl':
shared = ['--enable-shared']
cleanup_static_libs = [
command.Remove(*[os.path.join('%(output)s', 'lib', f) for f
in '*.a', '*Hello.*', 'BugpointPasses.*']),
]
llvm_autoconf = {
H('llvm'): {
'dependencies': ['clang_src', 'llvm_src', 'binutils_pnacl_src',
'subzero_src'],
'inputs': {'test_xfails': os.path.join(NACL_DIR, 'pnacl', 'scripts')},
'type': 'build',
'commands': [
command.SkipForIncrementalCommand([
'sh',
'%(llvm_src)s/configure'] +
ConfigureHostArchFlags(host, [], options) +
LLVMConfigureAssertionsFlags(options) +
[
'--disable-bindings', # ocaml is currently the only binding.
'--disable-jit',
'--disable-terminfo',
'--disable-zlib',
'--enable-optimized=' + ('no' if HostIsDebug(options)
else 'yes'),
'--enable-debug=' + ('yes' if HostIsDebug(options)
else 'no'),
'--enable-targets=x86,arm,mips,js',
'--enable-subzero-targets=X8632,ARM32',
'--enable-werror=' + ('yes' if llvm_do_werror else 'no'),
# Backtraces require TLS, which is missing on OSX 10.6
'--enable-backtraces=' + ('no' if TripleIsMac(host)
else 'yes'),
'--prefix=/',
'--program-prefix=',
'--with-binutils-include=%(abs_binutils_pnacl_src)s/include',
'--with-clang-srcdir=%(abs_clang_src)s',
'ac_cv_have_decl_strerror_s=no',
] + shared)] +
CopyHostLibcxxForLLVMBuild(
host,
os.path.join(('Debug+Asserts' if HostIsDebug(options)
else 'Release+Asserts'), 'lib'),
options) +
[command.Command(MakeCommand(host) + [
'VERBOSE=1',
'PNACL_BROWSER_TRANSLATOR=0',
'SUBZERO_SRC_ROOT=%(abs_subzero_src)s',
'all']),
command.Command(MAKE_DESTDIR_CMD + [
'SUBZERO_SRC_ROOT=%(abs_subzero_src)s',
'install'])] +
cleanup_static_libs + [
command.Remove(*[os.path.join('%(output)s', 'bin', f) for f in
Exe('clang-format'), Exe('clang-check'),
Exe('c-index-test'), Exe('clang-tblgen'),
Exe('llvm-tblgen')])] +
CreateSymLinksToDirectToNaClTools(host) +
CopyWindowsHostLibs(host),
},
}
if options.cmake:
tools.update(llvm_cmake)
else:
tools.update(llvm_autoconf)
if TripleIsWindows(host):
tools[H('binutils_pnacl')]['dependencies'].append('libdl')
tools[H('llvm')]['dependencies'].append('libdl')
elif not options.gcc and host != 'le32-nacl':
tools[H('binutils_pnacl')]['dependencies'].append(H('libcxx'))
tools[H('llvm')]['dependencies'].append(H('libcxx'))
return tools
def TargetLibCompiler(host, options):
def H(component_name):
return FlavoredName(component_name, host, options)
compiler = {
# Because target_lib_compiler is not a memoized target, its name doesn't
# need to have the host appended to it (it can be different on different
# hosts), which means that target library build rules don't have to care
# what host they run on; they can just depend on 'target_lib_compiler'
'target_lib_compiler': {
'type': 'work',
'output_subdir': 'target_lib_compiler',
'dependencies': [ H('binutils_pnacl'), H('llvm'), H('binutils_x86') ],
'inputs': { 'driver': PNACL_DRIVER_DIR },
'commands': [
command.CopyRecursive('%(' + t + ')s', '%(output)s')
for t in [H('llvm'), H('binutils_pnacl'), H('binutils_x86')]] + [
command.Runnable(
None, pnacl_commands.InstallDriverScripts,
'%(driver)s', os.path.join('%(output)s', 'bin'),
host_windows=TripleIsWindows(host) or TripleIsCygWin(host),
host_64bit=TripleIsX8664(host))
]
},
}
if TripleIsWindows(host) or not options.gcc:
host_lib = 'libdl' if TripleIsWindows(host) else H('libcxx')
compiler['target_lib_compiler']['dependencies'].append(host_lib)
compiler['target_lib_compiler']['commands'].append(
command.CopyRecursive('%(' + host_lib + ')s', '%(output)s'))
return compiler
def Metadata(revisions, is_canonical):
data = {
'metadata': {
'type': 'build' if is_canonical else 'build_noncanonical',
'inputs': { 'readme': os.path.join(NACL_DIR, 'pnacl', 'README'),
'COMPONENT_REVISIONS': GIT_DEPS_FILE,
'driver': PNACL_DRIVER_DIR },
'commands': [
command.Copy('%(readme)s', os.path.join('%(output)s', 'README')),
command.WriteData(str(FEATURE_VERSION),
os.path.join('%(output)s', 'FEATURE_VERSION')),
command.Runnable(None, pnacl_commands.WriteREVFile,
os.path.join('%(output)s', 'REV'),
GIT_BASE_URL,
GIT_REPOS,
revisions),
],
}
}
return data
def HostToolsDirectToNacl(host, options):
def H(component_name):
return FlavoredName(component_name, host, options)
tools = {}
if TripleIsWindows(host):
redirector_table = ''
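    # Each iteration of the loop below appends one wide-string table row of
    # the form shown here (tool name and args are purely illustrative, not
    # real TOOL_X64_I686_REDIRECTS entries):
    #   {L"/bin/i686-nacl-foo.exe", L"/bin/x86_64-nacl-foo.exe", L"--example-arg"},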
for tool, args in TOOL_X64_I686_REDIRECTS:
redirector_table += ' {L"/bin/i686-nacl-%s.exe",' % tool + \
' L"/bin/x86_64-nacl-%s.exe",' % tool + \
' L"%s"},\n' % args
cc, cxx, _, _ = CompilersForHost(host)
tools.update({
'redirector': {
'type': 'build',
'inputs': { 'source_directory': REDIRECTOR_WIN32_SRC },
'commands': [
command.WriteData(redirector_table,
'redirector_table_pnacl.txt'),
command.Command([cc, '-O3', '-std=c99', '-I.', '-o',
os.path.join('%(output)s', 'redirector.exe'),
'-I', os.path.dirname(NACL_DIR),
'-DREDIRECT_DATA="redirector_table_pnacl.txt"',
os.path.join(REDIRECTOR_WIN32_SRC,
'redirector.c')]),
],
},
})
redirect_deps = ['redirector']
redirect_inputs = {}
redirect_cmds = [
command.Command([
'ln', '-f',
command.path.join('%(redirector)s', 'redirector.exe'),
command.path.join('%(output)s', 'bin', 'i686-nacl-%s.exe' % tool)])
for tool, args in TOOL_X64_I686_REDIRECTS]
else:
redirect_deps = []
redirect_inputs = { 'redirector_script': REDIRECTOR_SCRIPT }
redirect_cmds = [
command.Command([
'%(abs_redirector_script)s',
command.path.join('%(output)s', 'bin', 'i686-nacl-' + tool),
'x86_64-nacl-' + tool,
args])
for tool, args in TOOL_X64_I686_REDIRECTS]
tools.update({
H('binutils_x86'): {
'type': 'build',
'dependencies': ['binutils_x86_src'] + redirect_deps,
'inputs': redirect_inputs,
'commands': [
command.SkipForIncrementalCommand(
['sh', '%(binutils_x86_src)s/configure'] +
ConfigureBinutilsCommon() +
['--target=x86_64-nacl',
'--enable-gold',
'--enable-targets=x86_64-nacl,i686-nacl',
'--disable-werror']),
command.Command(MakeCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip'])] +
# Remove the share dir from this binutils build and leave the one
# from the newer version used for bitcode linking. Always remove
# the lib dirs, which have unneeded host libs.
[command.RemoveDirectory(os.path.join('%(output)s', dir))
for dir in ('lib', 'lib32', 'lib64', 'share')] +
# Create the set of directories for target libs and includes, for
# experimentation before we actually build them.
# Libc includes (libs dir is created by binutils)
# TODO(dschuff): remove these when they are populated by target
# library packages.
[command.Mkdir(command.path.join(
'%(output)s', 'x86_64-nacl', 'include'), parents=True),
command.Mkdir(command.path.join(
'%(output)s', 'x86_64-nacl', 'lib32')),
command.Command(['ln', '-s', command.path.join('..','lib32'),
command.path.join(
'%(output)s', 'x86_64-nacl', 'lib', '32')]),
command.Command(['ln', '-s', 'lib',
command.path.join(
'%(output)s', 'x86_64-nacl', 'lib64')])] +
# Create links for i686-flavored names of the tools. For now we
# don't use the redirector scripts that pass different arguments
# because the compiler driver doesn't need them.
[command.Command([
'ln', '-f',
command.path.join('%(output)s', 'bin', 'x86_64-nacl-' + tool),
command.path.join('%(output)s', 'bin', 'i686-nacl-' + tool)])
for tool in ['addr2line', 'ar', 'nm', 'objcopy', 'objdump',
'ranlib', 'readelf', 'size', 'strings', 'strip']] +
redirect_cmds
}
})
return tools
def ParseComponentRevisionsFile(filename):
''' Parse a simple-format deps file, with fields of the form:
key=value
Keys should match the keys in GIT_REPOS above, which match the previous
directory names used by gclient (with the exception that '_' in the file is
replaced by '-' in the returned key name).
Values are the git hashes for each repo.
Empty lines or lines beginning with '#' are ignored.
This function returns a dictionary mapping the keys found in the file to their
values.
'''
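  # Illustrative (hypothetical) file contents accepted by this parser:
  #   # comment lines and blank lines are skipped
  #   llvm=0123456789abcdef0123456789abcdef01234567
  #   binutils_x86=fedcba9876543210fedcba9876543210fedcba98
  # which would be returned as
  #   {'llvm': '0123...4567', 'binutils-x86': 'fedc...ba98'}
  # (note the '_' -> '-' substitution in the keys).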
with open(filename) as f:
deps = {}
for line in f:
stripped = line.strip()
if stripped.startswith('#') or len(stripped) == 0:
continue
tokens = stripped.split('=')
if len(tokens) != 2:
raise Exception('Malformed component revisions file: ' + filename)
deps[tokens[0].replace('_', '-')] = tokens[1]
return deps
def InstallMinGWHostCompiler():
"""Install the MinGW host compiler used to build the host tools on Windows.
We could use an ordinary source rule for this, but that would require hashing
hundreds of MB of toolchain files on every build. Instead, check for the
presence of the specially-named file <version>.installed in the install
directory. If it is absent, check for the presence of the zip file
<version>.zip. If it is absent, attempt to download it from Google Storage.
Then extract the zip file and create the install file.
"""
if not os.path.isfile(os.path.join(MINGW_PATH, MINGW_VERSION + '.installed')):
downloader = pynacl.gsd_storage.GSDStorage([], ['nativeclient-mingw'])
zipfilename = MINGW_VERSION + '.zip'
zipfilepath = os.path.join(NACL_DIR, zipfilename)
# If the zip file is not present, try to download it from Google Storage.
# If that fails, bail out.
if (not os.path.isfile(zipfilepath) and
not downloader.GetSecureFile(zipfilename, zipfilepath)):
print >>sys.stderr, 'Failed to install MinGW tools:'
print >>sys.stderr, 'could not find or download', zipfilename
sys.exit(1)
logging.info('Extracting %s' % zipfilename)
zf = zipfile.ZipFile(zipfilepath)
if os.path.exists(MINGW_PATH):
shutil.rmtree(MINGW_PATH)
zf.extractall(NACL_DIR)
with open(os.path.join(MINGW_PATH, MINGW_VERSION + '.installed'), 'w') as _:
pass
os.environ['MINGW'] = MINGW_PATH
def GetUploadPackageTargets():
"""Package Targets describes all the archived package targets.
  This build can be spread across many build bots, but eventually all the
  pieces are combined. This package target dictionary describes the final
output of the entire build.
For the pnacl toolchain build we want 2 versions of the toolchain:
1. pnacl_newlib_raw - The toolchain without core_sdk headers/libraries.
2. pnacl_newlib - The toolchain with all the core_sdk headers/libraries.
"""
package_targets = {}
common_raw_packages = ['metadata']
common_complete_packages = []
# Target translator libraries
for arch in TRANSLATOR_ARCHES:
legal_arch = pynacl.gsd_storage.LegalizeName(arch)
common_raw_packages.append('libs_support_translator_%s' % legal_arch)
    if 'nonsfi' not in arch:
common_raw_packages.append('libgcc_eh_%s' % legal_arch)
# Target libraries
for bias in BITCODE_BIASES:
legal_bias = pynacl.gsd_storage.LegalizeName(bias)
common_raw_packages.append('newlib_%s' % legal_bias)
common_raw_packages.append('libcxx_%s' % legal_bias)
common_raw_packages.append('libs_support_%s' % legal_bias)
common_raw_packages.append('compiler_rt_bc_%s' % legal_bias)
# Portable core sdk libs. For now, no biased libs.
common_complete_packages.append('core_sdk_libs_le32')
# Direct-to-nacl target libraries
for arch in DIRECT_TO_NACL_ARCHES:
common_raw_packages.append('newlib_%s' % arch)
common_raw_packages.append('libcxx_%s' % arch)
common_raw_packages.append('libs_support_%s' % arch)
common_complete_packages.append('core_sdk_libs_%s' % arch)
# Host components
host_packages = {}
for os_name, arch in (('win', 'x86-32'),
('mac', 'x86-64'),
# These components are all supposed to be the same regardless of which bot is
# running, however the 32-bit linux bot is special because it builds and tests
# packages which are never uploaded. Because the package extraction is done by
# package_version, we still need to output the 32-bit version of the host
# packages on that bot.
('linux', pynacl.platform.GetArch3264())):
triple = pynacl.platform.PlatformTriple(os_name, arch)
legal_triple = pynacl.gsd_storage.LegalizeName(triple)
host_packages.setdefault(os_name, []).extend(
['binutils_pnacl_%s' % legal_triple,
'binutils_x86_%s' % legal_triple,
'llvm_%s' % legal_triple,
'driver_%s' % legal_triple])
if os_name != 'win':
host_packages[os_name].append('libcxx_%s' % legal_triple)
# Unsandboxed target IRT libraries
for os_name in ('linux', 'mac'):
legal_triple = pynacl.gsd_storage.LegalizeName('x86-32-' + os_name)
host_packages[os_name].append('unsandboxed_runtime_%s' % legal_triple)
for os_name, os_packages in host_packages.iteritems():
package_target = '%s_x86' % pynacl.platform.GetOS(os_name)
package_targets[package_target] = {}
raw_packages = os_packages + common_raw_packages
package_targets[package_target]['pnacl_newlib_raw'] = raw_packages
complete_packages = raw_packages + common_complete_packages
package_targets[package_target]['pnacl_newlib'] = complete_packages
package_targets['linux_x86']['pnacl_translator'] = ['sandboxed_translators']
return package_targets
if __name__ == '__main__':
  # This sets the logging level for the gclient-alike repo sync. It will be
  # overridden by the package builder based on the command-line flags.
logging.getLogger().setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--disable-llvm-assertions', action='store_false',
dest='enable_llvm_assertions', default=True)
parser.add_argument('--cmake', action='store_true', default=False,
help="Use LLVM's cmake ninja build instead of autoconf")
parser.add_argument('--gcc', action='store_true', default=False,
help="Use the default compiler 'cc' instead of clang")
parser.add_argument('--sanitize', choices=['address', 'thread', 'memory',
'undefined'],
help="Use a sanitizer with LLVM's clang cmake build")
parser.add_argument('--testsuite-sync', action='store_true', default=False,
help=('Sync the sources for the LLVM testsuite. '
                            'Only useful if --sync is also enabled'))
parser.add_argument('--build-sbtc', action='store_true', default=False,
help='Build the sandboxed translators')
parser.add_argument('--pnacl-in-pnacl', action='store_true', default=False,
help='Build with a PNaCl toolchain')
parser.add_argument('--extra-cc-args', default=None,
help='Extra arguments to pass to cc/cxx')
parser.add_argument('--extra-configure-arg', dest='extra_configure_args',
default=[], action='append',
                      help='Extra arguments to pass to host configure')
parser.add_argument('--binutils-pnacl-extra-configure',
default=[], action='append',
help='Extra binutils-pnacl arguments '
                           'to pass to host configure')
parser.add_argument('--host-flavor', choices=['debug', 'release'],
dest='host_flavor',
default='release',
help='Flavor of the build of the host binaries.')
args, leftover_args = parser.parse_known_args()
if '-h' in leftover_args or '--help' in leftover_args:
print 'The following arguments are specific to toolchain_build_pnacl.py:'
parser.print_help()
print 'The rest of the arguments are generic, in toolchain_main.py'
if args.sanitize and not args.cmake:
print 'Use of sanitizers requires a cmake build'
sys.exit(1)
if args.gcc and args.cmake:
print 'gcc build is not supported with cmake'
sys.exit(1)
packages = {}
upload_packages = {}
rev = ParseComponentRevisionsFile(GIT_DEPS_FILE)
upload_packages = GetUploadPackageTargets()
if pynacl.platform.IsWindows():
InstallMinGWHostCompiler()
packages.update(HostToolsSources(GetGitSyncCmdsCallback(rev)))
if args.testsuite_sync:
packages.update(TestsuiteSources(GetGitSyncCmdsCallback(rev)))
if args.pnacl_in_pnacl:
hosts = ['le32-nacl']
else:
hosts = [pynacl.platform.PlatformTriple()]
if pynacl.platform.IsLinux() and BUILD_CROSS_MINGW:
hosts.append(pynacl.platform.PlatformTriple('win', 'x86-32'))
for host in hosts:
packages.update(HostTools(host, args))
if not args.pnacl_in_pnacl:
packages.update(HostLibs(host, args))
packages.update(HostToolsDirectToNacl(host, args))
if not args.pnacl_in_pnacl:
packages.update(TargetLibCompiler(pynacl.platform.PlatformTriple(), args))
# Don't build the target libs on Windows because of pathname issues.
# Only the linux64 bot is canonical (i.e. it will upload its packages).
# The other bots will use a 'work' target instead of a 'build' target for
# the target libs, so they will not be memoized, but can be used for tests.
# TODO(dschuff): Even better would be if we could memoize non-canonical
# build targets without doing things like mangling their names (and for e.g.
# scons tests, skip running them if their dependencies haven't changed, like
# build targets)
is_canonical = pynacl.platform.IsLinux64()
if ((pynacl.platform.IsLinux() or pynacl.platform.IsMac())
and not args.pnacl_in_pnacl):
packages.update(pnacl_targetlibs.TargetLibsSrc(
GetGitSyncCmdsCallback(rev)))
for bias in BITCODE_BIASES:
packages.update(pnacl_targetlibs.TargetLibs(bias, is_canonical))
for arch in DIRECT_TO_NACL_ARCHES:
packages.update(pnacl_targetlibs.TargetLibs(arch, is_canonical))
packages.update(pnacl_targetlibs.SDKLibs(arch, is_canonical))
for arch in TRANSLATOR_ARCHES:
packages.update(pnacl_targetlibs.TranslatorLibs(arch, is_canonical))
packages.update(Metadata(rev, is_canonical))
packages.update(pnacl_targetlibs.SDKCompiler(
['le32'] + DIRECT_TO_NACL_ARCHES))
packages.update(pnacl_targetlibs.SDKLibs('le32', is_canonical))
unsandboxed_runtime_canonical = is_canonical or pynacl.platform.IsMac()
packages.update(pnacl_targetlibs.UnsandboxedRuntime(
'x86-32-%s' % pynacl.platform.GetOS(), unsandboxed_runtime_canonical))
if args.build_sbtc and not args.pnacl_in_pnacl:
packages.update(pnacl_sandboxed_translator.SandboxedTranslators(
SANDBOXED_TRANSLATOR_ARCHES))
tb = toolchain_main.PackageBuilder(packages,
upload_packages,
leftover_args)
sys.exit(tb.Main())
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing.
Defines input_fn of RetinaNet for TF Estimator. The input_fn includes training
data for category classification, bounding box regression, and number of
positive examples to normalize the loss during training.
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
import tensorflow as tf
import anchors
from object_detection import preprocessor
from object_detection import tf_example_decoder
MAX_NUM_INSTANCES = 100
class InputProcessor(object):
"""Base class of Input processor."""
def __init__(self, image, output_size):
"""Initializes a new `InputProcessor`.
Args:
image: The input image before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
self._image = image
self._output_size = output_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
# The x and y translation offset to crop scaled image to the output size.
self._crop_offset_y = tf.constant(0)
self._crop_offset_x = tf.constant(0)
def normalize_image(self):
"""Normalize the image to zero mean and unit variance."""
# The image normalization is identical to Cloud TPU ResNet.
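    # The offset/scale constants below are the standard ImageNet per-channel
    # RGB mean and standard deviation, expressed in the [0, 1] pixel range.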
self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
scale = tf.constant([0.229, 0.224, 0.225])
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
def set_training_random_scale_factors(self, scale_min, scale_max):
"""Set the parameters for multiscale training."""
# Select a random scale factor.
random_scale_factor = tf.random_uniform([], scale_min, scale_max)
scaled_size = tf.to_int32(random_scale_factor * self._output_size)
# Recompute the accurate scale_factor using rounded scaled image size.
height = tf.shape(self._image)[0]
width = tf.shape(self._image)[1]
max_image_size = tf.to_float(tf.maximum(height, width))
image_scale = tf.to_float(scaled_size) / max_image_size
# Select non-zero random offset (x, y) if scaled image is larger than
# self._output_size.
scaled_height = tf.to_int32(tf.to_float(height) * image_scale)
scaled_width = tf.to_int32(tf.to_float(width) * image_scale)
offset_y = tf.to_float(scaled_height - self._output_size)
offset_x = tf.to_float(scaled_width - self._output_size)
offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1)
offset_y = tf.to_int32(offset_y)
offset_x = tf.to_int32(offset_x)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
self._crop_offset_x = offset_x
self._crop_offset_y = offset_y
def set_scale_factors_to_output_size(self):
"""Set the parameters to resize input image to self._output_size."""
# Compute the scale_factor using rounded scaled image size.
height = tf.shape(self._image)[0]
width = tf.shape(self._image)[1]
max_image_size = tf.to_float(tf.maximum(height, width))
image_scale = tf.to_float(self._output_size) / max_image_size
scaled_height = tf.to_int32(tf.to_float(height) * image_scale)
scaled_width = tf.to_int32(tf.to_float(width) * image_scale)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
scaled_image = tf.image.resize_images(
self._image, [self._scaled_height, self._scaled_width], method=method)
scaled_image = scaled_image[
self._crop_offset_y:self._crop_offset_y + self._output_size,
self._crop_offset_x:self._crop_offset_x + self._output_size, :]
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, self._output_size, self._output_size)
return output_image
class DetectionInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, boxes=None, classes=None):
InputProcessor.__init__(self, image, output_size)
self._boxes = boxes
self._classes = classes
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
boxes = tf.where(tf.less(boxes, 0), tf.zeros_like(boxes), boxes)
boxes = tf.where(tf.greater(boxes, self._output_size - 1),
(self._output_size - 1) * tf.ones_like(boxes), boxes)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
boxes = preprocessor.box_list_scale(
boxlist, self._scaled_height, self._scaled_width).get()
# Adjust box coordinates based on the offset.
box_offset = tf.stack([self._crop_offset_y, self._crop_offset_x,
self._crop_offset_y, self._crop_offset_x,])
boxes -= tf.to_float(tf.reshape(box_offset, [1, 4]))
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are all zeros.
indices = tf.where(tf.not_equal(tf.reduce_sum(boxes, axis=1), 0))
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
return boxes, classes
@property
def image_scale(self):
# Return image scale from original image to scaled image.
return self._image_scale
@property
def image_scale_to_original(self):
# Return image scale from scaled image to original image.
return 1.0 / self._image_scale
@property
def offset_x(self):
return self._crop_offset_x
@property
def offset_y(self):
return self._crop_offset_y
class SegmentationInputProcessor(InputProcessor):
"""Input processor for semantic segmentation."""
def __init__(self, image, output_size, label):
InputProcessor.__init__(self, image, output_size)
self._label = label
def random_horizontal_flip(self):
"""Randomly flip input image and segmentation label."""
self._label = tf.expand_dims(self._label, 0)
self._image, self._label = preprocessor.random_horizontal_flip(
self._image, masks=self._label)
self._label = self._label[0, :, :]
def resize_and_crop_label(self, padding_label,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Resize label and crop it to the self._output dimension."""
scaled_label = tf.image.resize_images(
self._label, [self._scaled_height, self._scaled_width], method=method)
scaled_label = scaled_label[
self._crop_offset_y:self._crop_offset_y + self._output_size,
self._crop_offset_x:self._crop_offset_x + self._output_size]
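    # tf.image.pad_to_bounding_box pads with zeros, so the label is shifted by
    # padding_label before padding and shifted back afterwards; the net effect
    # is that the padded region ends up equal to padding_label.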
scaled_label -= padding_label
scaled_label = tf.image.pad_to_bounding_box(
scaled_label, 0, 0, self._output_size, self._output_size)
scaled_label += padding_label
return scaled_label
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
    The padded tensor with output_shape [max_num_instances, dimension].
"""
max_num_instances = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
assert_length = tf.Assert(
tf.less_equal(num_instances, max_num_instances), [num_instances])
with tf.control_dependencies([assert_length]):
pad_length = max_num_instances - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0)
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
class InputReader(object):
"""Input reader for dataset."""
def __init__(self, file_pattern, is_training):
self._file_pattern = file_pattern
self._is_training = is_training
self._max_num_instances = MAX_NUM_INSTANCES
def __call__(self, params):
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder()
def _dataset_parser(value):
"""Parse data to a fixed dimension input image and learning targets.
Args:
        value: A dictionary containing an image and groundtruth annotations.
Returns:
        image: Image tensor that is preprocessed to have normalized values and
fixed dimension [image_size, image_size, 3]
cls_targets_dict: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: Number of positive anchors in the image.
        source_id: Source image id. Defaults to -1 if the source id is empty
in the groundtruth annotation.
        image_scale: Scale of the processed image to the original image.
boxes: Groundtruth bounding box annotations. The box is represented in
          [y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_num_instances, 4].
is_crowds: Groundtruth annotations to indicate if an annotation
          represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_num_instances].
        areas: Groundtruth area annotations. The tensor is padded with -1
to the fixed dimension [self._max_num_instances].
        classes: Groundtruth class annotations. The tensor is padded with -1
to the fixed dimension [self._max_num_instances].
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
source_id = data['source_id']
image = data['image']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
areas = data['groundtruth_area']
is_crowds = data['groundtruth_is_crowd']
if params['skip_crowd_during_training'] and self._is_training:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
input_processor = DetectionInputProcessor(
image, params['image_size'], boxes, classes)
input_processor.normalize_image()
if self._is_training and params['input_rand_hflip']:
input_processor.random_horizontal_flip()
if self._is_training:
input_processor.set_training_random_scale_factors(
params['train_scale_min'], params['train_scale_max'])
else:
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
# Assign anchors.
(cls_targets, box_targets,
num_positives) = anchor_labeler.label_anchors(boxes, classes)
source_id = tf.where(tf.equal(source_id, tf.constant('')), '-1',
source_id)
source_id = tf.string_to_number(source_id)
# Pad groundtruth data for evaluation.
image_scale = input_processor.image_scale_to_original
boxes *= image_scale
is_crowds = tf.cast(is_crowds, dtype=tf.float32)
boxes = pad_to_fixed_size(boxes, -1, [self._max_num_instances, 4])
is_crowds = pad_to_fixed_size(is_crowds, 0,
[self._max_num_instances, 1])
areas = pad_to_fixed_size(areas, -1, [self._max_num_instances, 1])
classes = pad_to_fixed_size(classes, -1, [self._max_num_instances, 1])
if params['use_bfloat16']:
image = tf.cast(image, dtype=tf.bfloat16)
return (image, cls_targets, box_targets, num_positives, source_id,
image_scale, boxes, is_crowds, areas, classes)
batch_size = params['batch_size']
dataset = tf.data.Dataset.list_files(
self._file_pattern, shuffle=self._is_training)
if self._is_training:
dataset = dataset.repeat()
# Prefetch data from files.
def _prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
_prefetch_dataset, cycle_length=32, sloppy=self._is_training))
if self._is_training:
dataset = dataset.shuffle(64)
# Parse the fetched records to input tensors for model function.
dataset = dataset.map(_dataset_parser, num_parallel_calls=64)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
def _process_example(images, cls_targets, box_targets, num_positives,
source_ids, image_scales, boxes, is_crowds, areas,
classes):
"""Processes one batch of data."""
labels = {}
      # Compute the mean number of positive anchors across the batch.
num_positives_batch = tf.reduce_mean(num_positives)
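      # Broadcast the scalar batch mean to a [batch_size, 1] tensor so every
      # example in the batch carries the same normalization value for the loss.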
labels['mean_num_positives'] = tf.reshape(
tf.tile(tf.expand_dims(num_positives_batch, 0), [
batch_size,
]), [batch_size, 1])
for level in range(params['min_level'], params['max_level'] + 1):
labels['cls_targets_%d' % level] = cls_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
# Concatenate groundtruth annotations to a tensor.
groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2)
labels['source_ids'] = source_ids
labels['groundtruth_data'] = groundtruth_data
labels['image_scales'] = image_scales
return images, labels
dataset = dataset.map(_process_example)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
class SegmentationInputReader(object):
"""Input reader for dataset."""
def __init__(self, file_pattern, is_training):
self._file_pattern = file_pattern
self._is_training = is_training
def __call__(self, params):
example_decoder = tf_example_decoder.TfExampleSegmentationDecoder()
def _dataset_parser(value):
"""Parse data to a fixed dimension input image and learning targets.
Args:
        value: A dictionary containing an image and groundtruth annotations.
Returns:
A list of the following elements in order:
        image: Image tensor that is preprocessed to have normalized values and
fixed dimension [image_size, image_size, 3]
        label: Label tensor with the same spatial dimensions as the image.
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
image = data['image']
label = data['labels_class']
label = tf.to_int32(label)
input_processor = SegmentationInputProcessor(image,
params['image_size'],
label)
# The image normalization is identical to Cloud TPU ResNet.
input_processor.normalize_image()
if self._is_training and params['input_rand_hflip']:
input_processor.random_horizontal_flip()
if self._is_training:
input_processor.set_training_random_scale_factors(
params['train_scale_min'], params['train_scale_max'])
image = input_processor.resize_and_crop_image()
# Set padding to background (class=0) during training.
if self._is_training:
label = input_processor.resize_and_crop_label(0)
else:
label = input_processor.resize_and_crop_label(params['ignore_label'])
if params['use_bfloat16']:
image = tf.cast(image, dtype=tf.bfloat16)
return image, label
batch_size = params['batch_size']
dataset = tf.data.Dataset.list_files(
self._file_pattern, shuffle=self._is_training)
if self._is_training:
dataset = dataset.repeat()
def _prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
_prefetch_dataset, cycle_length=32, sloppy=self._is_training))
if self._is_training:
dataset = dataset.shuffle(64)
dataset = dataset.map(_dataset_parser, num_parallel_calls=64)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants as p_cons
from neutron_lib.plugins import directory
from oslo_utils import uuidutils
import testtools
from neutron.services.l3_router.service_providers import driver_controller
from neutron.services import provider_configuration
from neutron.tests import base
from neutron.tests.unit import testlib_api
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
class TestDriverController(testlib_api.SqlTestCase):
def setUp(self):
super(TestDriverController, self).setUp()
self.setup_coreplugin(DB_PLUGIN_KLASS)
self.fake_l3 = mock.Mock()
self.dc = driver_controller.DriverController(self.fake_l3)
self.fake_l3.l3_driver_controller = self.dc
self.ctx = context.get_admin_context()
def _return_provider_for_flavor(self, provider):
self.dc._flavor_plugin_ref = mock.Mock()
self.dc._flavor_plugin_ref.get_flavor.return_value = {'id': 'abc'}
provider = {'provider': provider}
self.dc._flavor_plugin_ref.get_flavor_next_provider.return_value = [
provider]
def test_uses_scheduler(self):
self._return_provider_for_flavor('dvrha')
router_db = mock.Mock()
flavor_id = uuidutils.generate_uuid()
router_id = uuidutils.generate_uuid()
router = dict(id=router_id, flavor_id=flavor_id)
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, router, router_db)
self.assertTrue(self.dc.uses_scheduler(self.ctx, router_id))
self.dc.drivers['dvrha'].use_integrated_agent_scheduler = False
self.assertFalse(self.dc.uses_scheduler(self.ctx, router_id))
def test_driver_owns_router(self):
self._return_provider_for_flavor('dvrha')
router_db = mock.Mock()
flavor_id = uuidutils.generate_uuid()
r1 = uuidutils.generate_uuid()
r2 = uuidutils.generate_uuid()
router = dict(id=r1, flavor_id=flavor_id)
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, router, router_db)
self.assertTrue(self.dc.drivers['dvrha'].owns_router(self.ctx, r1))
self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r1))
self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r2))
self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, None))
@mock.patch('neutron_lib.callbacks.registry.notify')
def test__set_router_provider_flavor_specified(self, mock_cb):
self._return_provider_for_flavor('dvrha')
router_db = mock.Mock()
flavor_id = uuidutils.generate_uuid()
router_id = uuidutils.generate_uuid()
router = dict(id=router_id, flavor_id=flavor_id)
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, router, router_db)
mock_cb.assert_called_with('router_controller',
events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
context=self.ctx, router=mock.ANY, router_db=mock.ANY,
old_driver=mock.ANY, new_driver=mock.ANY)
self.assertEqual(flavor_id, router_db.flavor_id)
self.assertEqual(self.dc.drivers['dvrha'],
self.dc.get_provider_for_router(self.ctx,
router_id))
def test__update_router_provider_invalid(self):
test_dc = driver_controller.DriverController(self.fake_l3)
with mock.patch.object(registry, "publish") as mock_cb:
with mock.patch.object(test_dc, "get_provider_for_router"):
with mock.patch.object(
driver_controller,
"_ensure_driver_supports_request") as _ensure:
_ensure.side_effect = lib_exc.InvalidInput(
error_message='message')
self.assertRaises(
lib_exc.InvalidInput,
test_dc._update_router_provider,
None, None, None,
payload=events.DBEventPayload(
None, request_body={'name': 'testname'},
states=({'flavor_id': 'old_fid'},)))
mock_cb.assert_not_called()
def test__update_router_provider_with_flags(self):
test_dc = driver_controller.DriverController(self.fake_l3)
with mock.patch.object(registry, "publish"):
with mock.patch.object(test_dc, "get_provider_for_router"):
with mock.patch.object(
driver_controller,
"_ensure_driver_supports_request") as _ensure:
_ensure.side_effect = lib_exc.InvalidInput(
error_message='message')
with mock.patch(
"neutron.services.l3_router.service_providers."
"driver_controller.LOG.debug") as mock_log:
self.assertRaises(
lib_exc.InvalidInput,
test_dc._update_router_provider,
None, None, None,
payload=events.DBEventPayload(
None, request_body={'name': 'testname',
'distributed': False},
states=({'flavor_id': None,
'distributed': True, 'ha': False},)))
# To validate that the 'ha' attribute of the router
# stays unchanged from the previous state while
# updating 'distributed' from True to False.
mock_log.assert_any_call(
"Get a provider driver handle based on the ha "
"flag: %(ha_flag)s and distributed flag: "
"%(distributed_flag)s",
{'ha_flag': False, 'distributed_flag': False})
@mock.patch('neutron_lib.callbacks.registry.notify')
def test__set_router_provider_attr_lookups(self, mock_cb):
# ensure correct drivers are looked up based on attrs
router_id1 = uuidutils.generate_uuid()
router_id2 = uuidutils.generate_uuid()
router_id3 = uuidutils.generate_uuid()
router_id4 = uuidutils.generate_uuid()
router_id5 = uuidutils.generate_uuid()
router_id6 = uuidutils.generate_uuid()
router_id7 = uuidutils.generate_uuid()
router_id8 = uuidutils.generate_uuid()
router_id9 = uuidutils.generate_uuid()
cases = [
('dvrha', dict(id=router_id1, distributed=True, ha=True)),
('dvr', dict(id=router_id2, distributed=True, ha=False)),
('ha', dict(id=router_id3, distributed=False, ha=True)),
('single_node', dict(id=router_id4, distributed=False,
ha=False)),
('ha', dict(id=router_id5, ha=True,
distributed=constants.ATTR_NOT_SPECIFIED)),
('dvr', dict(id=router_id6, distributed=True,
ha=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id7, ha=False,
distributed=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id8, distributed=False,
ha=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id9,
distributed=constants.ATTR_NOT_SPECIFIED,
ha=constants.ATTR_NOT_SPECIFIED)),
]
for driver, body in cases:
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, body, mock.Mock())
mock_cb.assert_called_with('router_controller',
events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
context=self.ctx, router=mock.ANY, router_db=mock.ANY,
old_driver=mock.ANY, new_driver=mock.ANY)
self.assertEqual(self.dc.drivers[driver],
self.dc.get_provider_for_router(self.ctx,
body['id']),
'Expecting %s for body %s' % (driver, body))
@mock.patch('neutron_lib.callbacks.registry.notify')
def test__clear_router_provider(self, mock_cb):
# ensure correct drivers are looked up based on attrs
router_id1 = uuidutils.generate_uuid()
body = dict(id=router_id1, distributed=True, ha=True)
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, body, mock.Mock())
mock_cb.assert_called_with('router_controller',
events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
context=self.ctx, router=mock.ANY, router_db=mock.ANY,
old_driver=mock.ANY, new_driver=mock.ANY)
self.assertEqual(self.dc.drivers['dvrha'],
self.dc.get_provider_for_router(self.ctx,
body['id']))
self.dc._clear_router_provider('router', 'PRECOMMIT_DELETE', self,
self.ctx, body['id'])
mock_cb.assert_called_with('router_controller',
events.PRECOMMIT_DELETE_ASSOCIATIONS, mock.ANY,
context=self.ctx, router_id=mock.ANY, old_driver=mock.ANY,
new_driver=mock.ANY)
with testtools.ExpectedException(ValueError):
# if association was cleared, get_router will be called
self.fake_l3.get_router.side_effect = ValueError
self.dc.get_provider_for_router(self.ctx, body['id'])
mock_cb.assert_called_with('router_controller',
events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY, context=self.ctx,
router_id=body['id'], router=mock.ANY, old_driver=mock.ANY,
new_driver=mock.ANY)
def test__flavor_plugin(self):
directory.add_plugin(p_cons.FLAVORS, mock.Mock())
_dc = driver_controller.DriverController(self.fake_l3)
self.assertEqual(
directory.get_plugin(p_cons.FLAVORS), _dc._flavor_plugin)
class Test_LegacyPlusProviderConfiguration(base.BaseTestCase):
@mock.patch.object(provider_configuration.ProviderConfiguration,
"add_provider")
def test__update_router_provider_invalid(self, mock_method):
mock_method.side_effect = lib_exc.Invalid(message='message')
driver_controller._LegacyPlusProviderConfiguration()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.tools.cli.handler.kubeflow_handler."""
import datetime
import os
import sys
from unittest import mock
import kfp
import tensorflow as tf
from tfx.dsl.components.base import base_driver
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.tools.cli.handler import kubeflow_dag_runner_patcher
from tfx.tools.cli.handler import kubeflow_handler
from tfx.utils import test_case_utils
class _MockRunResponse:
def __init__(self, pipeline_name, run_id, status, created_at):
self.pipeline_spec = mock.MagicMock()
self.pipeline_spec.pipeline_name = pipeline_name
self.id = run_id
self.status = status
self.created_at = created_at
class KubeflowHandlerTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
# Flags for handler.
self.engine = 'kubeflow'
self.chicago_taxi_pipeline_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'testdata')
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
self.enter_context(
test_case_utils.override_env_var('KFP_E2E_BASE_CONTAINER_IMAGE',
'dummy-image'))
self.enter_context(
test_case_utils.override_env_var('KFP_E2E_BUCKET_NAME', 'dummy-bucket'))
self.enter_context(
test_case_utils.override_env_var('KFP_E2E_TEST_DATA_ROOT',
'dummy-root'))
self.pipeline_path = os.path.join(self.chicago_taxi_pipeline_dir,
'test_pipeline_kubeflow_1.py')
self.pipeline_name = 'chicago_taxi_pipeline_kubeflow'
# Kubeflow client params.
self.endpoint = 'dummyEndpoint'
self.namespace = 'kubeflow'
self.iap_client_id = 'dummyID'
self.runtime_parameter = {'a': '1', 'b': '2'}
default_flags = {
labels.ENGINE_FLAG: self.engine,
labels.ENDPOINT: self.endpoint,
labels.IAP_CLIENT_ID: self.iap_client_id,
labels.NAMESPACE: self.namespace,
}
self.flags_with_name = {
**default_flags,
labels.PIPELINE_NAME: self.pipeline_name,
}
self.flags_with_runtime_param = {
**default_flags,
labels.PIPELINE_NAME: self.pipeline_name,
labels.RUNTIME_PARAMETER: self.runtime_parameter,
}
self.flags_with_dsl_path = {
**default_flags,
labels.PIPELINE_DSL_PATH: self.pipeline_path,
}
# Pipeline args for mocking subprocess.
self.pipeline_args = {'pipeline_name': 'chicago_taxi_pipeline_kubeflow'}
self.pipeline_id = 'the_pipeline_id'
self.experiment_id = 'the_experiment_id'
self.pipeline_version_id = 'the_pipeline_version_id'
mock_client_cls = self.enter_context(
mock.patch.object(kfp, 'Client', autospec=True))
self.mock_client = mock_client_cls.return_value
# Required to access generated apis.
self.mock_client._experiment_api = mock.MagicMock()
self.mock_client.get_pipeline_id.return_value = self.pipeline_id
self.mock_client.get_experiment.return_value.id = self.experiment_id
versions = [mock.MagicMock()]
versions[0].id = self.pipeline_version_id
self.mock_client.list_pipeline_versions.return_value.versions = versions
def testCreatePipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_dsl_path)
self.mock_client.get_pipeline_id.return_value = None
self.mock_client.upload_pipeline.return_value.id = 'new_pipeline_id'
handler.create_pipeline()
self.mock_client.upload_pipeline.assert_called_once_with(
pipeline_package_path=mock.ANY,
pipeline_name=self.pipeline_name)
self.mock_client.create_experiment.assert_called_once_with(
self.pipeline_name)
self.mock_client.upload_pipeline_version.assert_not_called()
def testCreatePipelineExistentPipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_dsl_path)
# 'the_pipeline_id' will be returned.
with self.assertRaises(SystemExit) as err:
handler.create_pipeline()
self.assertIn(
f'Pipeline "{self.pipeline_args[labels.PIPELINE_NAME]}" already exists.',
str(err.exception))
self.mock_client.upload_pipeline.assert_not_called()
def testUpdatePipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_dsl_path)
# Update test_pipeline and run update_pipeline
handler.update_pipeline()
self.mock_client.upload_pipeline.assert_not_called()
self.mock_client.create_experiment.assert_not_called()
self.mock_client.upload_pipeline_version.assert_called_once_with(
pipeline_package_path=mock.ANY,
pipeline_version_name=mock.ANY,
pipeline_id=self.pipeline_id)
def testUpdatePipelineNoPipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_dsl_path)
self.mock_client.get_pipeline_id.return_value = None
with self.assertRaises(SystemExit) as err:
handler.update_pipeline()
self.assertIn(f'Cannot find pipeline "{self.pipeline_name}".',
str(err.exception))
self.mock_client.upload_pipeline.assert_not_called()
self.mock_client.upload_pipeline_version.assert_not_called()
def testCompilePipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_dsl_path)
with self.captureWritesToStream(sys.stdout) as captured:
handler.compile_pipeline()
self.assertIn('Pipeline compiled successfully', captured.contents())
self.assertIn('Pipeline package path', captured.contents())
def testDeletePipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_name)
handler.delete_pipeline()
self.mock_client.delete_pipeline.assert_called_once_with(self.pipeline_id)
self.mock_client._experiment_api.delete_experiment.assert_called_once_with(
self.experiment_id)
def testDeletePipelineNonExistentPipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_name)
self.mock_client.get_pipeline_id.return_value = None
with self.assertRaises(SystemExit) as err:
handler.delete_pipeline()
self.assertIn(f'Cannot find pipeline "{self.pipeline_name}".',
str(err.exception))
self.mock_client.delete_pipeline.assert_not_called()
self.mock_client._experiment_api.delete_experiment.assert_not_called()
@mock.patch.object(
kubeflow_handler.KubeflowHandler, 'execute_dsl', autospec=True)
def testGetSchema(self, mock_execute_dsl):
temp_pipeline_root = os.path.join(self.tmp_dir, 'pipeline_root')
handler = kubeflow_handler.KubeflowHandler(
{labels.ENGINE_FLAG: self.engine})
assert isinstance(handler, kubeflow_handler.KubeflowHandler)
mock_execute_dsl.return_value = {
kubeflow_dag_runner_patcher.KubeflowDagRunnerPatcher.PIPELINE_NAME:
self.pipeline_name,
kubeflow_dag_runner_patcher.KubeflowDagRunnerPatcher.PIPELINE_ROOT:
temp_pipeline_root
}
# No pipeline root
with self.assertRaises(SystemExit) as err:
handler.get_schema()
self.assertEqual(
str(err.exception),
'Create a run before inferring schema. If pipeline is already running, then wait for it to successfully finish.'
)
# No SchemaGen output.
fileio.makedirs(temp_pipeline_root)
with self.assertRaises(SystemExit) as err:
handler.get_schema()
self.assertEqual(
str(err.exception),
'Either SchemaGen component does not exist or pipeline is still running. If pipeline is running, then wait for it to successfully finish.'
)
# Successful pipeline run.
# Create fake schema in pipeline root.
component_output_dir = os.path.join(temp_pipeline_root, 'SchemaGen')
schema_path = base_driver._generate_output_uri( # pylint: disable=protected-access
component_output_dir, 'schema', 3)
fileio.makedirs(schema_path)
with open(os.path.join(schema_path, 'schema.pbtxt'), 'w') as f:
f.write('SCHEMA')
with self.captureWritesToStream(sys.stdout) as captured:
handler.get_schema()
curr_dir_path = os.path.join(os.getcwd(), 'schema.pbtxt')
self.assertIn('Path to schema: {}'.format(curr_dir_path),
captured.contents())
self.assertIn(
'*********SCHEMA FOR {}**********'.format(
self.pipeline_name.upper()), captured.contents())
self.assertTrue(fileio.exists(curr_dir_path))
def testCreateRun(self):
self.mock_client.run_pipeline.return_value = _MockRunResponse(
self.pipeline_name, '1', 'Success', datetime.datetime.now())
handler = kubeflow_handler.KubeflowHandler(self.flags_with_runtime_param)
with self.captureWritesToStream(sys.stdout) as captured:
handler.create_run()
self.assertIn('Run created for pipeline: ', captured.contents())
self.mock_client.run_pipeline.assert_called_once_with(
experiment_id=self.experiment_id,
job_name=self.pipeline_name,
params={
'a': '1',
'b': '2'
},
version_id=self.pipeline_version_id)
def testCreateRunNoPipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_name)
self.mock_client.get_pipeline_id.return_value = None
with self.assertRaises(SystemExit) as err:
handler.create_run()
self.assertIn(f'Cannot find pipeline "{self.pipeline_name}".',
str(err.exception))
self.mock_client.run_pipeline.assert_not_called()
def testListRuns(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_name)
self.mock_client.list_runs.return_value.runs = [
_MockRunResponse(self.pipeline_name, '1', 'Success',
datetime.datetime.now()),
_MockRunResponse(self.pipeline_name, '2', 'Failed',
datetime.datetime.now()),
]
with self.captureWritesToStream(sys.stdout) as captured:
handler.list_runs()
self.mock_client.list_runs.assert_called_once_with(
experiment_id=self.experiment_id)
self.assertIn('pipeline_name', captured.contents())
def testListRunsNoPipeline(self):
handler = kubeflow_handler.KubeflowHandler(self.flags_with_name)
self.mock_client.get_pipeline_id.return_value = None
with self.assertRaises(SystemExit) as err:
handler.list_runs()
self.assertIn(f'Cannot find pipeline "{self.pipeline_name}".',
str(err.exception))
if __name__ == '__main__':
tf.test.main()
|
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import time
from multiprocessing.dummy import Pool
import mock
import pytest
from elasticapm.conf import constants
from elasticapm.metrics.base_metrics import Counter, Gauge, MetricsRegistry, MetricsSet, NoopMetric, Timer
from tests.utils import assert_any_record_contains
class DummyMetricSet(MetricsSet):
def before_collect(self):
self.gauge("a.b.c.d").val = 0
self.gauge("a").val = 0
self.gauge("b").val = 0
self.gauge("c").val = 0
@pytest.mark.parametrize("elasticapm_client", [{"metrics_interval": "30s"}], indirect=True)
def test_metrics_registry(elasticapm_client):
registry = MetricsRegistry(elasticapm_client)
registry.register("tests.metrics.base_tests.DummyMetricSet")
registry.collect()
assert len(elasticapm_client.events[constants.METRICSET])
@pytest.mark.parametrize(
"elasticapm_client",
[{"metrics_sets": "tests.metrics.base_tests.DummyMetricSet", "disable_metrics": "a.*,*c"}],
indirect=True,
)
def test_disable_metrics(elasticapm_client):
elasticapm_client._metrics.collect()
metrics = elasticapm_client.events[constants.METRICSET][0]
assert "a" in metrics["samples"]
assert "b" in metrics["samples"]
assert "a.b.c.d" not in metrics["samples"]
assert "c" not in metrics["samples"]
def test_metrics_counter(elasticapm_client):
metricset = MetricsSet(MetricsRegistry(elasticapm_client))
metricset.counter("x").inc()
data = next(metricset.collect())
assert data["samples"]["x"]["value"] == 1
metricset.counter("x").inc(10)
data = next(metricset.collect())
assert data["samples"]["x"]["value"] == 11
metricset.counter("x").dec(10)
data = next(metricset.collect())
assert data["samples"]["x"]["value"] == 1
metricset.counter("x").dec()
data = next(metricset.collect())
assert data["samples"]["x"]["value"] == 0
def test_metrics_histogram(elasticapm_client):
metricset = MetricsSet(MetricsRegistry(elasticapm_client))
hist = metricset.histogram("x", buckets=[1, 10, 100])
assert len(hist.buckets) == 4
hist.update(0.3)
hist.update(1)
hist.update(5)
hist.update(20)
hist.update(100)
hist.update(1000)
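    # With buckets=[1, 10, 100] there are 4 ranges. As the assertions below
    # show, the six samples land as [0.3, 1 | 5 | 20, 100 | 1000], giving
    # counts [2, 1, 2, 1]; the reported "values" are the bucket midpoints,
    # with the open-ended top bucket reported as 100.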
data = list(metricset.collect())
assert len(data) == 1
d = data[0]
assert d["samples"]["x"]["counts"] == [2, 1, 2, 1]
assert d["samples"]["x"]["values"] == [0.5, 5.5, 55.0, 100]
def test_metrics_labels(elasticapm_client):
metricset = MetricsSet(MetricsRegistry(elasticapm_client))
metricset.counter("x", mylabel="a").inc()
metricset.counter("y", mylabel="a").inc()
metricset.counter("x", mylabel="b").inc().inc()
metricset.counter("x", mylabel="b", myotherlabel="c").inc()
metricset.counter("x", mylabel="a").dec()
data = list(metricset.collect())
asserts = 0
for d in data:
if d["tags"] == {"mylabel": "a"}:
assert d["samples"]["x"]["value"] == 0
assert d["samples"]["y"]["value"] == 1
asserts += 1
elif d["tags"] == {"mylabel": "b"}:
assert d["samples"]["x"]["value"] == 2
asserts += 1
elif d["tags"] == {"mylabel": "b", "myotherlabel": "c"}:
assert d["samples"]["x"]["value"] == 1
asserts += 1
assert asserts == 3
def test_metrics_multithreaded(elasticapm_client):
metricset = MetricsSet(MetricsRegistry(elasticapm_client))
pool = Pool(5)
def target():
for i in range(500):
metricset.counter("x").inc(i + 1)
time.sleep(0.0000001)
[pool.apply_async(target, ()) for i in range(10)]
pool.close()
pool.join()
expected = 10 * ((500 * 501) / 2)
assert metricset.counter("x").val == expected
@pytest.mark.parametrize("sending_elasticapm_client", [{"metrics_interval": "30s"}], indirect=True)
def test_metrics_flushed_on_shutdown(sending_elasticapm_client):
# this is ugly, we need an API for this at some point...
metricset = MetricsSet(sending_elasticapm_client._metrics)
sending_elasticapm_client._metrics._metricsets["foo"] = metricset
metricset.counter("x").inc()
sending_elasticapm_client.close()
assert sending_elasticapm_client.httpserver.payloads
for item in sending_elasticapm_client.httpserver.payloads[0]:
try:
assert item["metricset"]["samples"]["x"]["value"] == 1
break
except KeyError:
pass
else:
assert False, "no item found with matching dict path metricset.samples.x.value"
@mock.patch("elasticapm.metrics.base_metrics.DISTINCT_LABEL_LIMIT", 3)
def test_metric_limit(caplog, elasticapm_client):
m = MetricsSet(MetricsRegistry(elasticapm_client))
with caplog.at_level(logging.WARNING, logger="elasticapm.metrics"):
for i in range(2):
counter = m.counter("counter", some_label=i)
gauge = m.gauge("gauge", some_label=i)
timer = m.timer("timer", some_label=i)
if i == 0:
assert isinstance(timer, Timer)
assert isinstance(gauge, Gauge)
assert isinstance(counter, Counter)
else:
assert isinstance(timer, NoopMetric)
assert isinstance(gauge, NoopMetric)
assert isinstance(counter, NoopMetric)
assert_any_record_contains(caplog.records, "The limit of 3 metricsets has been reached", "elasticapm.metrics")
def test_metrics_not_collected_if_zero_and_reset(elasticapm_client):
m = MetricsSet(MetricsRegistry(elasticapm_client))
counter = m.counter("counter", reset_on_collect=False)
resetting_counter = m.counter("resetting_counter", reset_on_collect=True)
gauge = m.gauge("gauge", reset_on_collect=False)
resetting_gauge = m.gauge("resetting_gauge", reset_on_collect=True)
timer = m.timer("timer", reset_on_collect=False, unit="us")
resetting_timer = m.timer("resetting_timer", reset_on_collect=True, unit="us")
counter.inc(), resetting_counter.inc()
gauge.val = 5
resetting_gauge.val = 5
timer.update(1, 1)
resetting_timer.update(1, 1)
data = list(m.collect())
more_data = list(m.collect())
assert set(data[0]["samples"].keys()) == {
"counter",
"resetting_counter",
"gauge",
"resetting_gauge",
"timer.count",
"timer.sum.us",
"resetting_timer.count",
"resetting_timer.sum.us",
}
assert set(more_data[0]["samples"].keys()) == {"counter", "gauge", "timer.count", "timer.sum.us"}
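# Hypothetical illustrative sketch (not part of the original test module): a
# minimal custom MetricsSet registered by dotted path, mirroring how
# test_metrics_registry registers DummyMetricSet above. The gauge name and
# value are placeholders.
class ExampleQueueDepthMetricSet(MetricsSet):
    def before_collect(self):
        # A real metric set would read this value from the application.
        self.gauge("example.queue.depth").val = 42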
|
|
# -*- coding: utf-8 -*-
"""Contains helper functions for generating correctly
formatted hgrid list/folders.
"""
import logging
import datetime
import hurry.filesize
from framework import sentry
from framework.auth.decorators import Auth
from website import settings
from website.util import paths
from website.util import sanitize
from website.settings import DISK_SAVING_MODE
logger = logging.getLogger(__name__)
FOLDER = 'folder'
FILE = 'file'
KIND = 'kind'
# TODO: Validate the JSON schema, esp. for addons
DEFAULT_PERMISSIONS = {
'view': True,
'edit': False,
}
def format_filesize(size):
return hurry.filesize.size(size, system=hurry.filesize.alternative)
def default_urls(node_api, short_name):
return {
'fetch': u'{node_api}{addonshort}/hgrid/'.format(node_api=node_api, addonshort=short_name),
'upload': u'{node_api}{addonshort}/'.format(node_api=node_api, addonshort=short_name),
}
def to_hgrid(node, auth, **data):
"""Converts a node into a rubeus grid format
:param Node node: the node to be parsed
:param Auth auth: the user authorization object
:returns: rubeus-formatted dict
"""
return NodeFileCollector(node, auth, **data).to_hgrid()
def build_addon_root(node_settings, name, permissions=None,
urls=None, extra=None, buttons=None, user=None,
private_key=None, **kwargs):
"""Builds the root or "dummy" folder for an addon.
:param addonNodeSettingsBase node_settings: Addon settings
:param String name: Additional information for the folder title
        e.g. Repo name for GitHub or bucket name for S3
:param dict or Auth permissions: Dictionary of permissions for the addon's content or Auth for use in node.can_X methods
:param dict urls: Hgrid related urls
    :param String extra: Html to be appended to the addon folder name
        e.g. Branch switcher for GitHub
:param list of dicts buttons: List of buttons to appear in HGrid row. Each
dict must have 'text', a string that will appear on the button, and
        'action', the name of a function in HGrid.Actions
:param bool private_key: Used to check if information should be stripped from anonymous links
:param dict kwargs: Any additional information to add to the root folder
:return dict: Hgrid formatted dictionary for the addon root folder
"""
from website.util import check_private_key_for_anonymized_link
permissions = permissions or DEFAULT_PERMISSIONS
if name and not check_private_key_for_anonymized_link(private_key):
name = u'{0}: {1}'.format(node_settings.config.full_name, name)
else:
name = node_settings.config.full_name
if hasattr(node_settings.config, 'urls') and node_settings.config.urls:
urls = node_settings.config.urls
if urls is None:
urls = default_urls(node_settings.owner.api_url, node_settings.config.short_name)
forbid_edit = DISK_SAVING_MODE if node_settings.config.short_name == 'osfstorage' else False
if isinstance(permissions, Auth):
auth = permissions
permissions = {
'view': node_settings.owner.can_view(auth),
'edit': (node_settings.owner.can_edit(auth)
and not node_settings.owner.is_registration
and not forbid_edit),
}
max_size = node_settings.config.max_file_size
if user and 'high_upload_limit' in user.system_tags:
max_size = node_settings.config.high_max_file_size
ret = {
'provider': node_settings.config.short_name,
'addonFullname': node_settings.config.full_name,
'name': name,
'iconUrl': node_settings.config.icon_url,
KIND: FOLDER,
'extra': extra,
'buttons': buttons,
'isAddonRoot': True,
'permissions': permissions,
'accept': {
'maxSize': max_size,
'acceptedFiles': node_settings.config.accept_extensions,
},
'urls': urls,
'isPointer': False,
'nodeId': node_settings.owner._id,
'nodeUrl': node_settings.owner.url,
'nodeApiUrl': node_settings.owner.api_url,
}
ret.update(kwargs)
return ret
def build_addon_button(text, action, title=''):
"""Builds am action button to be rendered in HGrid
:param str text: A string or html to appear on the button itself
:param str action: The name of the HGrid action for the button to call.
The callback for the HGrid action must be defined as a member of HGrid.Actions
:return dict: Hgrid formatted dictionary for custom buttons
"""
button = {
'text': text,
'action': action,
}
if title:
button['attributes'] = 'title="{title}" data-toggle="tooltip" data-placement="right" '.format(title=title)
return button
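# Hypothetical usage sketch (not part of the original module): build a single
# HGrid row button. The action name 'downloadZip' is only an illustrative
# placeholder; real action names must be defined as members of HGrid.Actions.
def _example_download_button():
    return build_addon_button(
        text='<i class="fa fa-download"></i>',  # markup rendered on the button
        action='downloadZip',                   # assumed HGrid action name
        title='Download as zip',                # becomes a tooltip attribute
    )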
def sort_by_name(hgrid_data):
return_value = hgrid_data
if hgrid_data is not None:
return_value = sorted(hgrid_data, key=lambda item: item['name'].lower())
return return_value
class NodeFileCollector(object):
"""A utility class for creating rubeus formatted node data"""
def __init__(self, node, auth, **kwargs):
self.node = node
self.auth = auth
self.extra = kwargs
self.can_view = node.can_view(auth)
self.can_edit = node.can_edit(auth) and not node.is_registration
def to_hgrid(self):
"""Return the Rubeus.JS representation of the node's file data, including
addons and components
"""
root = self._serialize_node(self.node)
return [root]
def _collect_components(self, node, visited):
rv = []
if not node.can_view(self.auth):
return rv
for child in node.nodes:
if child.is_deleted:
continue
elif not child.can_view(self.auth):
if child.primary:
for desc in child.find_readable_descendants(self.auth):
visited.append(desc.resolve()._id)
rv.append(self._serialize_node(desc, visited=visited))
elif child.resolve()._id not in visited:
visited.append(child.resolve()._id)
rv.append(self._serialize_node(child, visited=visited))
return rv
def _get_node_name(self, node):
"""Input node object, return the project name to be display.
"""
can_view = node.can_view(auth=self.auth)
if can_view:
node_name = sanitize.unescape_entities(node.title)
elif node.is_registration:
node_name = u'Private Registration'
elif node.is_fork:
node_name = u'Private Fork'
elif not node.primary:
node_name = u'Private Link'
else:
node_name = u'Private Component'
return node_name
def _serialize_node(self, node, visited=None):
"""Returns the rubeus representation of a node folder.
"""
visited = visited or []
visited.append(node.resolve()._id)
can_view = node.can_view(auth=self.auth)
if can_view:
children = self._collect_addons(node) + self._collect_components(node, visited)
else:
children = []
return {
# TODO: Remove safe_unescape_html when mako html safe comes in
'name': self._get_node_name(node),
'category': node.category,
'kind': FOLDER,
'permissions': {
'edit': node.can_edit(self.auth) and not node.is_registration,
'view': can_view,
},
'urls': {
'upload': None,
'fetch': None,
},
'children': children,
'isPointer': not node.primary,
'isSmartFolder': False,
'nodeType': node.project_or_component,
'nodeID': node.resolve()._id,
}
def _collect_addons(self, node):
rv = []
for addon in node.get_addons():
if addon.config.has_hgrid_files:
# WARNING: get_hgrid_data can return None if the addon is added but has no credentials.
try:
temp = addon.config.get_hgrid_data(addon, self.auth, **self.extra)
except Exception as e:
logger.warn(
getattr(
e,
'data',
'Unexpected error when fetching file contents for {0}.'.format(addon.config.full_name)
)
)
sentry.log_exception()
rv.append({
KIND: FOLDER,
'unavailable': True,
'iconUrl': addon.config.icon_url,
'provider': addon.config.short_name,
'addonFullname': addon.config.full_name,
'permissions': {'view': False, 'edit': False},
'name': '{} is currently unavailable'.format(addon.config.full_name),
})
continue
rv.extend(sort_by_name(temp) or [])
return rv
# TODO: these might belong in addons module
def collect_addon_assets(node):
"""Return a dictionary containing lists of JS and CSS assets for a node's
addons.
:rtype: {'tree_js': <list of JS scripts>, 'tree_css': <list of CSS files>}
"""
return {
'tree_js': list(collect_addon_js(node)),
'tree_css': list(collect_addon_css(node)),
}
# TODO: Abstract static collectors
def collect_addon_js(node, visited=None, filename='files.js', config_entry='files'):
"""Collect JavaScript includes for all add-ons implementing HGrid views.
:return list: List of JavaScript include paths
"""
js = []
for addon_config in settings.ADDONS_AVAILABLE_DICT.values():
# JS modules configured in each addon's __init__ file
js.extend(addon_config.include_js.get(config_entry, []))
# Webpack bundle
js_path = paths.resolve_addon_path(addon_config, filename)
if js_path:
js.append(js_path)
return js
def collect_addon_css(node, visited=None):
"""Collect CSS includes for all addons-ons implementing Hgrid views.
:return: List of CSS include paths
:rtype: list
"""
css = []
for addon_config in settings.ADDONS_AVAILABLE_DICT.values():
# CSS modules configured in each addon's __init__ file
css.extend(addon_config.include_css.get('files', []))
return css
def delta_date(d):
diff = d - datetime.datetime.utcnow()
s = diff.total_seconds()
return s
|
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""VM-related helper functions/classes."""
import logging
import os
import shutil
import time
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import remote_access
class VMError(Exception):
"""A base exception for VM errors."""
class VMCreationError(VMError):
"""Raised when failed to create a VM image."""
def VMIsUpdatable(path):
"""Check if the existing VM image is updatable.
Args:
path: Path to the VM image.
Returns:
True if VM is updatable; False otherwise.
"""
table = cros_build_lib.GetImageDiskPartitionInfo(path, unit='MB')
# Assume if size of the two root partitions match, the image
# is updatable.
return table['ROOT-B'].size == table['ROOT-A'].size
def CreateVMImage(image=None, board=None, updatable=True, dest_dir=None):
"""Returns the path of the image built to run in a VM.
By default, the returned VM is a test image that can run full update
testing on it. If there exists a VM image with the matching
|updatable| setting, this method returns the path to the existing
image. If |dest_dir| is set, it will copy/create the VM image to the
|dest_dir|.
Args:
image: Path to the (non-VM) image. Defaults to None to use the latest
image for the board.
board: Board that the image was built with. If None, attempts to use the
configured default board.
updatable: Create a VM image that supports AU.
    dest_dir: If set, create/copy the VM image to |dest_dir|; otherwise,
use the folder where |image| resides.
"""
if not image and not board:
raise VMCreationError(
'Cannot create VM when both image and board are None.')
image_dir = os.path.dirname(image)
src_path = dest_path = os.path.join(image_dir, constants.VM_IMAGE_BIN)
if dest_dir:
dest_path = os.path.join(dest_dir, constants.VM_IMAGE_BIN)
exists = False
# Do not create a new VM image if a matching image already exists.
exists = os.path.exists(src_path) and (
not updatable or VMIsUpdatable(src_path))
if exists and dest_dir:
# Copy the existing VM image to dest_dir.
shutil.copyfile(src_path, dest_path)
if not exists:
# No existing VM image that we can reuse. Create a new VM image.
logging.info('Creating %s', dest_path)
cmd = ['./image_to_vm.sh', '--test_image']
if image:
cmd.append('--from=%s' % cros_build_lib.ToChrootPath(image_dir))
if updatable:
cmd.extend(['--disk_layout', '2gb-rootfs-updatable'])
if board:
cmd.extend(['--board', board])
# image_to_vm.sh only runs in chroot, but dest_dir may not be
# reachable from chroot. In that case, we copy it to a temporary
    # directory in chroot, and then move it to dest_dir.
tempdir = None
if dest_dir:
# Create a temporary directory in chroot to store the VM
# image. This is to avoid the case where dest_dir is not
# reachable within chroot.
tempdir = cros_build_lib.RunCommand(
['mktemp', '-d'],
capture_output=True,
enter_chroot=True).output.strip()
cmd.append('--to=%s' % tempdir)
msg = 'Failed to create the VM image'
try:
cros_build_lib.RunCommand(cmd, enter_chroot=True,
cwd=constants.SOURCE_ROOT)
except cros_build_lib.RunCommandError as e:
logging.error('%s: %s', msg, e)
if tempdir:
osutils.RmDir(
cros_build_lib.FromChrootPath(tempdir), ignore_missing=True)
raise VMCreationError(msg)
if dest_dir:
# Move VM from tempdir to dest_dir.
shutil.move(
cros_build_lib.FromChrootPath(
os.path.join(tempdir, constants.VM_IMAGE_BIN)), dest_path)
osutils.RmDir(cros_build_lib.FromChrootPath(tempdir), ignore_missing=True)
if not os.path.exists(dest_path):
raise VMCreationError(msg)
return dest_path
class VMStartupError(VMError):
"""Raised when failed to start a VM instance."""
class VMStopError(VMError):
"""Raised when failed to stop a VM instance."""
class VMInstance(object):
"""This is a wrapper of a VM instance."""
MAX_LAUNCH_ATTEMPTS = 5
TIME_BETWEEN_LAUNCH_ATTEMPTS = 30
# VM needs a longer timeout.
SSH_CONNECT_TIMEOUT = 120
def __init__(self, image_path, port=None, tempdir=None,
debug_level=logging.DEBUG):
"""Initializes VMWrapper with a VM image path.
Args:
image_path: Path to the VM image.
port: SSH port of the VM.
tempdir: Temporary working directory.
debug_level: Debug level for logging.
"""
self.image_path = image_path
self.tempdir = tempdir
self._tempdir_obj = None
if not self.tempdir:
self._tempdir_obj = osutils.TempDir(prefix='vm_wrapper', sudo_rm=True)
self.tempdir = self._tempdir_obj.tempdir
self.kvm_pid_path = os.path.join(self.tempdir, 'kvm.pid')
self.port = (remote_access.GetUnusedPort() if port is None
else remote_access.NormalizePort(port))
self.debug_level = debug_level
self.ssh_settings = remote_access.CompileSSHConnectSettings(
ConnectTimeout=self.SSH_CONNECT_TIMEOUT)
self.agent = remote_access.RemoteAccess(
remote_access.LOCALHOST, self.tempdir, self.port,
debug_level=self.debug_level, interactive=False)
def _Start(self):
"""Run the command to start VM."""
cmd = [os.path.join(constants.CROSUTILS_DIR, 'bin', 'cros_start_vm'),
'--ssh_port', str(self.port),
'--image_path', self.image_path,
'--no_graphics',
'--kvm_pid', self.kvm_pid_path]
try:
self._RunCommand(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
msg = 'VM failed to start'
logging.warning('%s: %s', msg, e)
raise VMStartupError(msg)
def Connect(self):
"""Returns True if we can connect to VM via SSH."""
try:
self.agent.RemoteSh(['true'], connect_settings=self.ssh_settings)
except Exception:
return False
return True
def Stop(self, ignore_error=False):
"""Stops a running VM.
Args:
ignore_error: If set True, do not raise an exception on error.
"""
cmd = [os.path.join(constants.CROSUTILS_DIR, 'bin', 'cros_stop_vm'),
'--kvm_pid', self.kvm_pid_path]
result = self._RunCommand(cmd, capture_output=True, error_code_ok=True)
if result.returncode:
msg = 'Failed to stop VM'
if ignore_error:
logging.warning('%s: %s', msg, result.error)
else:
logging.error('%s: %s', msg, result.error)
raise VMStopError(msg)
def Start(self):
"""Start VM and wait until we can ssh into it.
This command is more robust than just naively starting the VM as it will
try to start the VM multiple times if the VM fails to start up. This is
inspired by retry_until_ssh in crosutils/lib/cros_vm_lib.sh.
"""
for _ in range(self.MAX_LAUNCH_ATTEMPTS):
try:
self._Start()
except VMStartupError:
logging.warning('VM failed to start.')
continue
if self.Connect():
# VM is started up successfully if we can connect to it.
break
logging.warning('Cannot connect to VM...')
self.Stop(ignore_error=True)
time.sleep(self.TIME_BETWEEN_LAUNCH_ATTEMPTS)
else:
raise VMStartupError('Max attempts (%d) to start VM exceeded.'
% self.MAX_LAUNCH_ATTEMPTS)
logging.info('VM started at port %d', self.port)
def _RunCommand(self, *args, **kwargs):
"""Runs a commmand on the host machine."""
kwargs.setdefault('debug_level', self.debug_level)
return cros_build_lib.RunCommand(*args, **kwargs)
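# Hypothetical usage sketch (not part of the original module): boot a VM from
# an already-built image, check SSH connectivity, and always shut it down.
# |image_path| is assumed to point at an existing VM image.
def _ExampleBootVM(image_path):
  """Starts a VM for |image_path|, returns whether SSH works, then stops it."""
  vm = VMInstance(image_path)
  vm.Start()
  try:
    return vm.Connect()
  finally:
    vm.Stop(ignore_error=True)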
|
|
from __future__ import absolute_import, division, print_function
from itertools import cycle
from operator import itemgetter, add
from toolz import unique, groupby, accumulate, pluck
import bokeh.plotting as bp
from bokeh.io import _state
from bokeh.palettes import brewer
from bokeh.models import HoverTool, LinearAxis, Range1d
from ..utils import funcname
from ..core import istask
from ..compatibility import apply
def unquote(expr):
if istask(expr):
if expr[0] in (tuple, list, set):
return expr[0](map(unquote, expr[1]))
elif expr[0] == dict and expr[1][0] == list:
return dict(map(unquote, expr[1][1]))
return expr
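# Hypothetical illustrative sketch (not part of the original module): unquote
# turns quoted collection tasks back into the literal containers they stand for.
def _example_unquote():
    assert unquote((list, [1, 2, 3])) == [1, 2, 3]
    assert unquote((dict, (list, [(list, ['a', 1])]))) == {'a': 1}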
def pprint_task(task, keys, label_size=60):
"""Return a nicely formatted string for a task.
Parameters
----------
task:
Value within dask graph to render as text
keys: iterable
List of keys within dask graph
label_size: int (optional)
Maximum size of output label, defaults to 60
Examples
--------
>>> from operator import add, mul
>>> dsk = {'a': 1,
... 'b': 2,
... 'c': (add, 'a', 'b'),
... 'd': (add, (mul, 'a', 'b'), 'c'),
... 'e': (sum, ['a', 'b', 5]),
... 'f': (add,),
... 'g': []}
>>> pprint_task(dsk['c'], dsk)
'add(_, _)'
>>> pprint_task(dsk['d'], dsk)
'add(mul(_, _), _)'
>>> pprint_task(dsk['e'], dsk)
'sum([_, _, *])'
>>> pprint_task(dsk['f'], dsk)
'add()'
>>> pprint_task(dsk['g'], dsk)
'[]'
"""
if istask(task):
func = task[0]
if func is apply:
head = funcname(task[1])
tail = ')'
args = unquote(task[2]) if len(task) > 2 else ()
kwargs = unquote(task[3]) if len(task) > 3 else {}
else:
if hasattr(func, 'funcs'):
head = '('.join(funcname(f) for f in func.funcs)
tail = ')'*len(func.funcs)
else:
head = funcname(task[0])
tail = ')'
args = task[1:]
kwargs = {}
if args or kwargs:
label_size2 = int((label_size - len(head) - len(tail)) //
(len(args) + len(kwargs)))
pprint = lambda t: pprint_task(t, keys, label_size2)
if args:
if label_size2 > 5:
args = ', '.join(pprint(t) for t in args)
else:
args = '...'
else:
args = ''
if kwargs:
if label_size2 > 5:
kwargs = ', ' + ', '.join('{0}={1}'.format(k, pprint(v))
for k, v in sorted(kwargs.items()))
else:
kwargs = ', ...'
else:
kwargs = ''
return '{0}({1}{2}{3}'.format(head, args, kwargs, tail)
elif isinstance(task, list):
if not task:
return '[]'
elif len(task) > 3:
result = pprint_task(task[:3], keys, label_size)
return result[:-1] + ', ...]'
else:
label_size2 = int((label_size - 2 - 2*len(task)) // len(task))
args = ', '.join(pprint_task(t, keys, label_size2) for t in task)
return '[{0}]'.format(args)
else:
try:
if task in keys:
return '_'
else:
return '*'
except TypeError:
return '*'
def get_colors(palette, funcs):
"""Get a dict mapping funcs to colors from palette.
Parameters
----------
palette : string
Name of the palette. Must be a key in bokeh.palettes.brewer
funcs : iterable
Iterable of function names
"""
unique_funcs = list(sorted(unique(funcs)))
n_funcs = len(unique_funcs)
palette_lookup = brewer[palette]
keys = list(palette_lookup.keys())
low, high = min(keys), max(keys)
if n_funcs > high:
colors = cycle(palette_lookup[high])
elif n_funcs < low:
colors = palette_lookup[low]
else:
colors = palette_lookup[n_funcs]
color_lookup = dict(zip(unique_funcs, colors))
return [color_lookup[n] for n in funcs]
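# Hypothetical usage sketch (not part of the original module): map a handful of
# task names onto the 'GnBu' brewer palette; repeated names share a color and
# the result has one entry per input function name.
def _example_get_colors():
    return get_colors('GnBu', ['add', 'mul', 'add', 'sum'])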
def visualize(profilers, file_path=None, show=True, save=True, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
If multiple profilers are passed in, the plots are stacked vertically.
Parameters
----------
profilers : profiler or list
Profiler or list of profilers.
file_path : string, optional
Name of the plot output file.
show : boolean, optional
If True (default), the plot is opened in a browser.
save : boolean, optional
If True (default), the plot is saved to disk.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
if not _state._notebook:
file_path = file_path or "profile.html"
bp.output_file(file_path)
if not isinstance(profilers, list):
profilers = [profilers]
figs = [prof._plot(**kwargs) for prof in profilers]
# Stack the plots
if len(figs) == 1:
p = figs[0]
else:
top = figs[0]
for f in figs[1:]:
f.x_range = top.x_range
f.title = None
f.min_border_top = 20
        for f in figs[:-1]:
f.xaxis.axis_label = None
f.min_border_bottom = 20
for f in figs:
f.min_border_left = 75
f.min_border_right = 75
p = bp.gridplot([[f] for f in figs])
if show:
bp.show(p)
if file_path and save:
bp.save(p)
return p
def plot_tasks(results, dsk, palette='GnBu', label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of Profiler.results
dsk : dict
The dask graph being profiled.
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
keys, tasks, starts, ends, ids = zip(*results)
id_group = groupby(itemgetter(4), results)
timings = dict((k, [i.end_time - i.start_time for i in v]) for (k, v) in
id_group.items())
id_lk = dict((t[0], n) for (n, t) in enumerate(sorted(timings.items(),
key=itemgetter(1), reverse=True)))
left = min(starts)
right = max(ends)
p = bp.figure(y_range=[str(i) for i in range(len(id_lk))],
x_range=[0, right - left], **defaults)
data = {}
data['width'] = width = [e - s for (s, e) in zip(starts, ends)]
data['x'] = [w/2 + s - left for (w, s) in zip(width, starts)]
data['y'] = [id_lk[i] + 1 for i in ids]
data['function'] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
data['color'] = get_colors(palette, funcs)
data['key'] = [str(i) for i in keys]
source = bp.ColumnDataSource(data=data)
p.rect(source=source, x='x', y='y', height=1, width='width',
color='color', line_color='gray')
else:
p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10],
**defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Worker ID"
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Key:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
</div>
"""
hover.point_policy = 'follow_mouse'
return p
def plot_resources(results, palette='GnBu', **kwargs):
"""Plot resource usage in a bokeh plot.
Parameters
----------
results : sequence
Output of ResourceProfiler.results
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by plot_resources.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
t, mem, cpu = zip(*results)
left, right = min(t), max(t)
t = [i - left for i in t]
p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults)
else:
t = mem = cpu = []
p = bp.figure(y_range=(0, 100), x_range=(0, 10), **defaults)
colors = brewer[palette][6]
p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU')
p.yaxis.axis_label = "% CPU"
p.extra_y_ranges = {'memory': Range1d(start=0, end=(max(mem) if mem else 100))}
p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4,
legend='Memory')
p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'),
'right')
p.xaxis.axis_label = "Time (s)"
return p
def plot_cache(results, dsk, start_time, metric_name, palette='GnBu',
label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of CacheProfiler.results
dsk : dict
The dask graph being profiled.
start_time : float
Start time of the profile.
metric_name : string
Metric used to measure cache size
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,wheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
if results:
starts, ends = list(zip(*results))[3:]
tics = list(sorted(unique(starts + ends)))
groups = groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
data = {}
for k, vals in groups.items():
cnts = dict.fromkeys(tics, 0)
for v in vals:
cnts[v.cache_time] += v.metric
cnts[v.free_time] -= v.metric
data[k] = list(accumulate(add, pluck(1, sorted(cnts.items()))))
tics = [i - start_time for i in tics]
p = bp.figure(x_range=[0, max(tics)], **defaults)
for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
p.line('x', 'y', line_color=color, line_width=3,
source=bp.ColumnDataSource({'x': tics, 'y': val,
'label': [key for i in val]}))
else:
p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Cache Size ({0})".format(metric_name)
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
</div>
"""
return p
|
|
# profile.py
from translationstring import TranslationString as _
from error import WeasylError
import macro as m
import define as d
import pytz
import orm
import shout
import welcome
from libweasyl.html import strip_html
from libweasyl.models import tables
from libweasyl import ratings
from libweasyl import staff
from weasyl.cache import region
from weasyl.configuration_builder import create_configuration, BoolOption, ConfigOption
from weasyl import media
class ExchangeType:
def __init__(self, name_singular, name_plural):
self.name_singular = name_singular
self.name_plural = name_plural
EXCHANGE_TYPE_TRADE = ExchangeType("trade", "trades")
EXCHANGE_TYPE_REQUEST = ExchangeType("request", "requests")
EXCHANGE_TYPE_COMMISSION = ExchangeType("commission", "commissions")
class ExchangeSetting:
def __init__(self, code, text):
self.code = code
self.text = text
def format(self, request_type):
return _(self.text.format(type=request_type))
EXCHANGE_SETTING_ACCEPTING = ExchangeSetting("o", "I am currently accepting {type.name_plural}")
EXCHANGE_SETTING_SOMETIMES = ExchangeSetting("s", "I may sometimes accept {type.name_plural}")
EXCHANGE_SETTING_FULL_QUEUE = ExchangeSetting("f", "My {type.name_singular} queue is currently filled")
EXCHANGE_SETTING_NOT_ACCEPTING = ExchangeSetting("c", "I am not accepting {type.name_plural} right now")
EXCHANGE_SETTING_NOT_APPLICABLE = ExchangeSetting("e", "This is not applicable to me")
ALL_EXCHANGE_SETTINGS = [EXCHANGE_SETTING_ACCEPTING, EXCHANGE_SETTING_SOMETIMES,
EXCHANGE_SETTING_FULL_QUEUE, EXCHANGE_SETTING_NOT_ACCEPTING,
EXCHANGE_SETTING_NOT_APPLICABLE]
EXCHANGE_SETTING_CODE_MAP = {setting.code: setting for setting in ALL_EXCHANGE_SETTINGS}
ALLOWABLE_EXCHANGE_CODES = {
EXCHANGE_TYPE_TRADE: 'osce',
EXCHANGE_TYPE_REQUEST: 'osce',
EXCHANGE_TYPE_COMMISSION: 'osfce',
}
Config = create_configuration([
BoolOption("twelvehour", "2"),
ConfigOption("rating", dict(zip(ratings.ALL_RATINGS, ["", "m", "a", "p"]))),
BoolOption("tagging", "k"),
BoolOption("edittagging", "r"),
BoolOption("hideprofile", "h"),
BoolOption("hidestats", "i"),
BoolOption("hidefavorites", "v"),
BoolOption("hidefavbar", "u"),
ConfigOption("shouts", {"anyone": "", "friends_only": "x", "staff_only": "w"}),
ConfigOption("notes", {"anyone": "", "friends_only": "z", "staff_only": "y"}),
BoolOption("filter", "l"),
BoolOption("follow_s", "s"),
BoolOption("follow_c", "c"),
BoolOption("follow_f", "f"),
BoolOption("follow_t", "t"),
BoolOption("follow_j", "j"),
])
def get_exchange_setting(exchange_type, code):
if code not in ALLOWABLE_EXCHANGE_CODES[exchange_type]:
return EXCHANGE_SETTING_NOT_ACCEPTING
return EXCHANGE_SETTING_CODE_MAP[code]
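# Hypothetical usage sketch (not part of the original module): translate the
# single-character commission code stored on a profile into its display text.
def _example_commission_text(code='o'):
    setting = get_exchange_setting(EXCHANGE_TYPE_COMMISSION, code)
    # With the default code this yields "I am currently accepting commissions".
    return setting.format(EXCHANGE_TYPE_COMMISSION)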
def resolve(userid, otherid, othername, myself=True):
"""
Attempts to determine the userid of a specified user; resolves using otherid,
othername, and userid (if myself is True), in that order. If no userid can be
resolved, returns 0 instead.
"""
result = None
if otherid:
result = d.execute("SELECT userid FROM login WHERE userid = %i", [d.get_int(otherid)], ["element"])
if result:
return result
elif othername:
result = d.execute("SELECT userid FROM login WHERE login_name = '%s'", [d.get_sysname(othername)], ["element"])
if result:
return result
result = d.execute("SELECT userid FROM useralias WHERE alias_name = '%s'", [d.get_sysname(othername)], ["element"])
if result:
return result
elif userid and myself:
return userid
return 0
@region.cache_on_arguments()
@d.record_timing
def resolve_by_login(login):
return resolve(None, None, login, False)
def select_profile(userid, avatar=False, banner=False, propic=False, images=False, commish=True, viewer=None):
query = d.execute("""
SELECT pr.username, pr.full_name, pr.catchphrase, pr.unixtime, pr.profile_text,
pr.settings, pr.stream_url, pr.config, pr.stream_text, lo.settings, us.end_time
FROM profile pr
INNER JOIN login lo USING (userid)
LEFT JOIN user_streams us USING (userid)
WHERE userid = %i
""", [userid], ["single"])
if not query:
raise WeasylError('RecordMissing')
streaming_status = "stopped"
if query[6]: # profile.stream_url
if query[10] > d.get_time(): # user_streams.end_time
streaming_status = "started"
elif 'l' in query[5]:
streaming_status = "later"
return {
"userid": userid,
"user_media": media.get_user_media(userid),
"username": query[0],
"full_name": query[1],
"catchphrase": query[2],
"unixtime": query[3],
"profile_text": query[4],
"settings": query[5],
"stream_url": query[6],
"stream_text": query[8],
"config": query[7],
"show_favorites_bar": "u" not in query[7] and "v" not in query[7],
"show_favorites_tab": userid == viewer or "v" not in query[7],
"commish_slots": 0,
"banned": "b" in query[9],
"suspended": "s" in query[9],
"streaming_status": streaming_status,
}
def twitter_card(userid):
username, full_name, catchphrase, profile_text, config, twitter = d.execute(
"SELECT pr.username, pr.full_name, pr.catchphrase, pr.profile_text, pr.config, ul.link_value "
"FROM profile pr "
"LEFT JOIN user_links ul ON pr.userid = ul.userid AND ul.link_type = 'twitter' "
"WHERE pr.userid = %i",
[userid],
["single"])
ret = {
'card': 'summary',
'url': d.absolutify_url('/~%s' % (username,)),
'title': '%s on Weasyl' % (full_name,),
}
if catchphrase:
description = '"%s"' % (catchphrase,)
elif profile_text:
description = strip_html(profile_text)
else:
description = "[%s has an empty profile, but is eggcelent!]" % (full_name,)
ret['description'] = d.summarize(description)
media_items = media.get_user_media(userid)
ret['image:src'] = d.absolutify_url(media_items['avatar'][0]['display_url'])
if twitter:
ret['creator'] = '@%s' % (twitter.lstrip('@'),)
return ret
def select_myself(userid):
if not userid:
return
query = d.execute("SELECT username, config FROM profile WHERE userid = %i", [userid], ["single"])
return {
"userid": userid,
"username": query[0],
"is_mod": userid in staff.MODS,
"user_media": media.get_user_media(userid),
}
def get_user_age(userid):
assert userid
return d.convert_age(d.execute("SELECT birthday FROM userinfo WHERE userid = %i", [userid], ["element"]))
def get_user_ratings(userid):
return ratings.get_ratings_for_age(get_user_age(userid))
def check_user_rating_allowed(userid, rating):
# TODO(kailys): ensure usages always pass a Rating
minimum_age = rating.minimum_age if isinstance(rating, ratings.Rating) else ratings.CODE_MAP[rating].minimum_age
if get_user_age(userid) < minimum_age:
raise WeasylError("ratingInvalid")
def select_userinfo(userid, config=None):
if config is None:
[query] = d.engine.execute("""
SELECT pr.config, ui.birthday, ui.gender, ui.country
FROM profile pr
INNER JOIN userinfo ui USING (userid)
WHERE pr.userid = %(userid)s
""", userid=userid)
else:
[query] = d.engine.execute("""
SELECT %(config)s, birthday, gender, country
FROM userinfo
WHERE userid = %(userid)s
""", userid=userid, config=config)
user_link_rows = d.engine.execute("""
SELECT link_type, ARRAY_AGG(link_value)
FROM user_links
WHERE userid = %(userid)s
GROUP BY link_type
""", userid=userid)
user_links = {r[0]: r[1] for r in user_link_rows}
show_age = "b" in query[0] or d.get_userid() in staff.MODS
return {
"birthday": query[1],
"age": d.convert_age(query[1]) if show_age else None,
"show_age": "b" in query[0],
"gender": query[2],
"country": query[3],
"user_links": user_links,
}
def select_report_stats(userid):
query_byreason = d.engine.execute("""
SELECT count(r.reportid), r.closure_reason FROM report r
JOIN reportcomment c ON r.reportid = c.reportid
WHERE c.userid = %(userid)s AND r.closed_at IS NOT null
GROUP BY r.closure_reason
UNION
SELECT count(r.reportid), r.closure_reason FROM report r
JOIN reportcomment c ON r.reportid = c.reportid
WHERE c.userid = %(userid)s AND r.closed_at IS null
GROUP BY r.closure_reason
""", userid=userid)
# create a dict of {'closure_reason' : 'count'}
# closure_reason will be None if report was not yet closed.
return {row[1].replace("-", " ").title() if row[1] is not None else "Open":
row[0] for row in query_byreason}
def select_relation(userid, otherid):
if not userid or userid == otherid:
return {
"follow": False,
"friend": False,
"ignore": False,
"friendreq": False,
"follower": False,
"is_self": userid == otherid,
}
query = d.engine.execute("""
SELECT
(SELECT EXISTS (SELECT 0 FROM watchuser WHERE (userid, otherid) = (%(user)s, %(other)s)) AS follow),
(SELECT EXISTS (SELECT 0 FROM frienduser WHERE userid IN (%(user)s, %(other)s) AND otherid IN (%(user)s, %(other)s) AND settings !~ 'p') AS friend),
(SELECT EXISTS (SELECT 0 FROM ignoreuser WHERE (userid, otherid) = (%(user)s, %(other)s)) AS ignore),
(SELECT EXISTS (SELECT 0 FROM frienduser WHERE (userid, otherid) = (%(user)s, %(other)s) AND settings ~ 'p') AS friendreq),
(SELECT EXISTS (SELECT 0 FROM watchuser WHERE (userid, otherid) = (%(other)s, %(user)s)) AS follower)
""", user=userid, other=otherid).first()
return dict(
query,
is_self=False)
@region.cache_on_arguments(expiration_time=600)
@d.record_timing
def _select_statistics(userid):
query = d.execute("""
SELECT
(SELECT page_views FROM profile WHERE userid = %i),
0,
(SELECT COUNT(*) FROM favorite WHERE userid = %i),
(SELECT
(SELECT COUNT(*) FROM favorite fa JOIN submission su ON fa.targetid = su.submitid
WHERE su.userid = %i AND fa.type = 's') +
(SELECT COUNT(*) FROM favorite fa JOIN character ch ON fa.targetid = ch.charid
WHERE ch.userid = %i AND fa.type = 'f') +
(SELECT COUNT(*) FROM favorite fa JOIN journal jo ON fa.targetid = jo.journalid
WHERE jo.userid = %i AND fa.type = 'j')),
(SELECT COUNT(*) FROM watchuser WHERE otherid = %i),
(SELECT COUNT(*) FROM watchuser WHERE userid = %i),
(SELECT COUNT(*) FROM submission WHERE userid = %i AND settings !~ 'h'),
(SELECT COUNT(*) FROM journal WHERE userid = %i AND settings !~ 'h'),
(SELECT COUNT(*) FROM comments WHERE target_user = %i AND settings !~ 'h' AND settings ~ 's')
""", [userid, userid, userid, userid, userid, userid, userid, userid, userid, userid], options="single")
return {
"page_views": query[0],
"submit_views": query[1],
"faves_sent": query[2],
"faves_received": query[3],
"followed": query[4],
"following": query[5],
"submissions": query[6],
"journals": query[7],
"staff_notes": query[8],
}
def select_statistics(userid):
if "i" in d.get_config(userid) and d.get_userid() not in staff.MODS:
return
return _select_statistics(userid)
def select_streaming(userid, rating, limit, following=True, order_by=None):
statement = [
"SELECT userid, pr.username, pr.stream_url, pr.config, pr.stream_text, start_time "
"FROM profile pr "
"JOIN user_streams USING (userid) "
"WHERE end_time > %i" % (d.get_time(),)
]
if userid:
statement.append(m.MACRO_IGNOREUSER % (userid, "pr"))
if following:
pass # todo
if order_by:
statement.append(" ORDER BY %s LIMIT %i" % (order_by, limit))
else:
statement.append(" ORDER BY RANDOM() LIMIT %i" % limit)
ret = [{
"userid": i[0],
"username": i[1],
"stream_url": i[2],
"stream_text": i[4],
"stream_time": i[5],
} for i in d.execute("".join(statement)) if i[2]]
media.populate_with_user_media(ret)
return ret
def select_avatars(userids):
if not userids:
return {}
results = d.execute(
"SELECT userid, username, config FROM profile pr WHERE userid IN %s" % (d.sql_number_list(userids),))
results = [
{
"username": username,
"userid": userid,
}
for userid, username, config in results]
media.populate_with_user_media(results)
return {d['userid']: d for d in results}
def edit_profile(userid, profile,
set_trade=EXCHANGE_SETTING_NOT_ACCEPTING,
set_request=EXCHANGE_SETTING_NOT_ACCEPTING,
set_commission=EXCHANGE_SETTING_NOT_ACCEPTING,
profile_display=''):
# Assign settings
settings = "".join([set_commission.code, set_trade.code, set_request.code])
if profile_display not in ('O', 'A'):
profile_display = ''
d.execute(
"UPDATE profile "
"SET (full_name, catchphrase, profile_text, settings) = ('%s', '%s', '%s', '%s'), "
"config = REGEXP_REPLACE(config, '[OA]', '') || '%s'"
"WHERE userid = %i",
[profile.full_name, profile.catchphrase, profile.profile_text, settings, profile_display, userid])
d._get_config.invalidate(userid)
STREAMING_ACTION_MAP = {
'': 'not streaming',
'later': 'streaming later',
'start': 'now streaming',
'still': 'still streaming',
}
def edit_streaming_settings(my_userid, userid, profile, set_stream=None, stream_length=0):
if set_stream == 'start':
try:
stream_length = int(stream_length)
        except (ValueError, TypeError):
raise WeasylError("streamDurationOutOfRange")
if stream_length < 1 or stream_length > 360:
raise WeasylError("streamDurationOutOfRange")
if set_stream == 'start' and not profile.stream_url:
raise WeasylError("streamLocationNotSet")
# unless we're specifically still streaming, clear the user_streams record
if set_stream != 'still':
d.execute("DELETE FROM user_streams WHERE userid = %i", [userid])
settings_flag = ''
stream_status = None
# if we're starting to stream, update user_streams to reflect that
if set_stream == 'start':
now = d.get_time()
stream_end = now + stream_length * 60 # stream_length is minutes; we need seconds
d.execute("INSERT INTO user_streams VALUES (%i, %i, %i)", [userid, now, stream_end])
stream_status = 'n'
# if we're going to stream later, update profile.settings to reflect that
elif set_stream == 'later':
settings_flag = stream_status = 'l'
# if stream_status is None, any rows in `welcome` will get cleared. but, if
# the user is still streaming, that shouldn't happen. otherwise, `welcome`
# will get updated with the current stream state.
if set_stream != 'still':
welcome.stream_insert(userid, stream_status)
d.execute(
"UPDATE profile "
"SET (stream_text, stream_url, settings) = ('%s', '%s', REGEXP_REPLACE(settings, '[nli]', '') || '%s') "
"WHERE userid = %i",
[profile.stream_text, profile.stream_url, settings_flag, userid])
if my_userid != userid:
from weasyl import moderation
note_body = (
'- Stream url: %s\n'
'- Stream description: %s\n'
'- Stream status: %s' % (profile.stream_url, profile.stream_text, STREAMING_ACTION_MAP[set_stream]))
moderation.note_about(my_userid, userid, 'Streaming settings updated:', note_body)
# form
# show_age
# gender
# country
# (...)
def edit_userinfo(userid, form):
social_rows = []
for site_name, site_value in zip(form.site_names, form.site_values):
if not site_name or not site_value:
continue
row = {
'userid': userid,
'link_type': site_name,
'link_value': site_value,
}
social_rows.append(row)
d.engine.execute("""
UPDATE userinfo
SET gender = %(gender)s, country = %(country)s
WHERE userid = %(userid)s
""", userid=userid, gender=form.gender.strip(), country=form.country.strip())
d.engine.execute("""
DELETE FROM user_links
WHERE userid = %(userid)s
""", userid=userid)
if social_rows:
d.engine.execute(d.meta.tables['user_links'].insert().values(social_rows))
if form.show_age:
d.engine.execute("""
UPDATE profile
SET config = config || 'b'
WHERE userid = %(userid)s
AND config !~ 'b'
""", userid=userid)
else:
d.engine.execute("""
UPDATE profile
SET config = REPLACE(config, 'b', '')
WHERE userid = %(userid)s
""", userid=userid)
d._get_config.invalidate(userid)
def edit_email_password(userid, username, password, newemail, newemailcheck,
newpassword, newpasscheck):
import login
# Check that credentials are correct
logid, logerror = login.authenticate_bcrypt(username, password, session=False)
if userid != logid or logerror is not None:
raise WeasylError("loginInvalid")
if newemail:
if newemail != newemailcheck:
raise WeasylError("emailMismatch")
elif login.email_exists(newemail):
raise WeasylError("emailExists")
if newpassword:
if newpassword != newpasscheck:
raise WeasylError("passwordMismatch")
elif not login.password_secure(newpassword):
raise WeasylError("passwordInsecure")
if newemail:
d.execute("UPDATE login SET email = '%s' WHERE userid = %i", [newemail, userid])
if newpassword:
d.execute("UPDATE authbcrypt SET hashsum = '%s' WHERE userid = %i", [login.passhash(newpassword), userid])
def edit_preferences(userid, timezone=None,
preferences=None, jsonb_settings=None):
"""
Apply changes to stored preferences for a given user.
:param userid: The userid to apply changes to
:param timezone: (optional) new Timezone to set for user
:param preferences: (optional) old-style char preferences, overwrites all previous settings
:param jsonb_settings: (optional) JSON preferences, overwrites all previous settings
:return: None
"""
config = d.get_config(userid)
tooyoung = False
if preferences is not None:
tooyoung |= get_user_age(userid) < preferences.rating.minimum_age
if jsonb_settings is not None:
sfwrating = jsonb_settings.max_sfw_rating
sfwrating = ratings.CODE_MAP.get(sfwrating, ratings.GENERAL)
tooyoung |= get_user_age(userid) < sfwrating.minimum_age
if tooyoung:
raise WeasylError("birthdayInsufficient")
if timezone is not None and timezone not in pytz.all_timezones:
raise WeasylError('invalidTimezone')
db = d.connect()
updates = {}
if preferences is not None:
# update legacy preferences
# clear out the option codes that are being replaced.
for i in Config.all_option_codes:
config = config.replace(i, "")
config_str = config + preferences.to_code()
updates['config'] = config_str
d._get_config.invalidate(userid)
if jsonb_settings is not None:
# update jsonb preferences
updates['jsonb_settings'] = jsonb_settings.get_raw()
d._get_profile_settings.invalidate(userid)
d.engine.execute(
tables.profile.update().where(tables.profile.c.userid == userid),
updates
)
# update TZ
if timezone is not None:
tz = db.query(orm.UserTimezone).get(userid)
if tz is None:
tz = orm.UserTimezone(userid=userid)
db.add(tz)
tz.timezone = timezone
db.flush()
tz.cache()
else:
db.flush()
def select_manage(userid):
query = d.execute("""
SELECT
lo.userid, lo.last_login, lo.email, pr.unixtime, pr.username, pr.full_name, pr.catchphrase, ui.birthday,
ui.gender, ui.country, pr.config
FROM login lo
INNER JOIN profile pr USING (userid)
INNER JOIN userinfo ui USING (userid)
WHERE lo.userid = %i
""", [userid], ["single"])
if not query:
raise WeasylError("Unexpected")
return {
"userid": query[0],
"last_login": query[1],
"email": query[2],
"unixtime": query[3],
"username": query[4],
"full_name": query[5],
"catchphrase": query[6],
"birthday": query[7],
"gender": query[8],
"country": query[9],
"config": query[10],
"staff_notes": shout.count(userid, staffnotes=True),
}
def do_manage(my_userid, userid, username=None, full_name=None, catchphrase=None,
birthday=None, gender=None, country=None):
updates = []
# Username
if username is not None:
if not d.get_sysname(username):
raise WeasylError("usernameInvalid")
elif d.execute("SELECT EXISTS (SELECT 0 FROM login WHERE login_name = '%s')",
[d.get_sysname(username)], ["bool"]):
raise WeasylError("usernameExists")
elif d.execute("SELECT EXISTS (SELECT 0 FROM useralias WHERE alias_name = '%s')",
[d.get_sysname(username)], ["bool"]):
raise WeasylError("usernameExists")
elif d.execute("SELECT EXISTS (SELECT 0 FROM logincreate WHERE login_name = '%s')",
[d.get_sysname(username)], ["bool"]):
raise WeasylError("usernameExists")
d.execute("UPDATE login SET login_name = '%s' WHERE userid = %i",
[d.get_sysname(username), userid])
d._get_display_name.invalidate(userid)
d.execute("UPDATE profile SET username = '%s' WHERE userid = %i",
[username, userid])
updates.append('- Username: %s' % (username,))
# Full name
if full_name is not None:
d.execute("UPDATE profile SET full_name = '%s' WHERE userid = %i",
[full_name, userid])
updates.append('- Full name: %s' % (full_name,))
# Catchphrase
if catchphrase is not None:
d.execute("UPDATE profile SET catchphrase = '%s' WHERE userid = %i",
[catchphrase, userid])
updates.append('- Catchphrase: %s' % (catchphrase,))
# Birthday
if birthday is not None and d.convert_inputdate(birthday):
unixtime = d.convert_inputdate(birthday)
age = d.convert_age(unixtime)
d.execute("UPDATE userinfo SET birthday = %i WHERE userid = %i", [unixtime, userid])
if age < ratings.MODERATE.minimum_age:
max_rating = ratings.GENERAL.code
rating_flag = ""
elif age < ratings.EXPLICIT.minimum_age:
max_rating = ratings.MODERATE.code
rating_flag = "m"
else:
max_rating = ratings.EXPLICIT.code
if d.get_rating(userid) > max_rating:
d.execute(
"""
UPDATE profile
SET config = REGEXP_REPLACE(config, '[map]', '', 'g') || '%s'
WHERE userid = %i
""",
[rating_flag, userid]
)
d._get_config.invalidate(userid)
updates.append('- Birthday: %s' % (birthday,))
# Gender
if gender is not None:
d.execute("UPDATE userinfo SET gender = '%s' WHERE userid = %i",
[gender, userid])
updates.append('- Gender: %s' % (gender,))
# Location
if country is not None:
d.execute("UPDATE userinfo SET country = '%s' WHERE userid = %i",
[country, userid])
updates.append('- Country: %s' % (country,))
if updates:
from weasyl import moderation
moderation.note_about(my_userid, userid, 'The following fields were changed:', '\n'.join(updates))
def force_resetbirthday(userid, birthday):
if not birthday:
raise WeasylError("birthdayInvalid")
elif birthday > d.get_time():
raise WeasylError("birthdayInvalid")
d.execute("UPDATE userinfo SET birthday = %i WHERE userid = %i", [birthday, userid])
d.execute("UPDATE login SET settings = REPLACE(settings, 'i', '') WHERE userid = %i", [userid])
d.get_login_settings.invalidate(userid)
class ProfileSettings(object):
"""
This class standardizes access to jsonb profile settings,
to ensure consistent use of naming conventions
and defaults. This class will intentionally throw
exceptions if you try to access a setting that has
not been properly defined!
"""
class Setting:
def __init__(self, default, typecast):
self.default = default
self.typecast = typecast
def _valid_rating(rating):
rating = int(rating)
return rating if rating in ratings.CODE_MAP else ratings.GENERAL.code
_raw_settings = {}
_settings = {
"allow_collection_requests": Setting(True, bool),
"allow_collection_notifs": Setting(True, bool),
"disable_custom_thumbs": Setting(False, bool),
"max_sfw_rating": Setting(ratings.GENERAL.code, _valid_rating),
}
def __init__(self, json):
self._raw_settings = json
def __getattr__(self, name):
setting_config = self._settings[name]
return self._raw_settings.get(name, setting_config.default)
def __setattr__(self, name, value):
if name.startswith("_"):
super(ProfileSettings, self).__setattr__(name, value)
else:
setting_config = self._settings[name]
if setting_config.typecast is not None:
value = setting_config.typecast(value)
self._raw_settings[name] = value
def get_raw(self):
return self._raw_settings
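# Hypothetical usage sketch (not part of the original module): wrap a raw jsonb
# dict and read/write the defined settings. Unset names fall back to their
# declared defaults; undefined names raise KeyError by design.
def _example_profile_settings():
    settings = ProfileSettings({"allow_collection_requests": False})
    assert settings.allow_collection_requests is False
    assert settings.allow_collection_notifs is True  # default applies
    settings.max_sfw_rating = ratings.GENERAL.code   # typecast via _valid_rating
    return settings.get_raw()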
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from itertools import product
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
from test_framework.wallet_util import bytes_to_wif
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1", "-addresstype=bech32", "-changetype=bech32"], #TODO: Remove address type restrictions once taproot has psbt extensions
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# TODO: Re-enable this test with segwit v1
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
# Topology of test network is linear, so this one call is enough
self.disconnect_nodes(0, 1)
# Create watchonly on online_node
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
self.generate(mining_node, nblocks=1)
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
self.generate(mining_node, 1)
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
"""Assert that the given PSBT has a change output with the given type."""
# The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
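    # Hypothetical helper sketch (not part of the original test): decodepsbt is
    # stateless, so node 0 can report the fee of any PSBT, mirroring the fee
    # check performed against walletcreatefundedpsbt later in run_test.
    def get_psbt_fee(self, psbt):
        return self.nodes[0].decodepsbt(psbt)["fee"]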
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# If inputs are specified, do not automatically add more:
utxo1 = self.nodes[0].listunspent()[0]
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
# Inputs argument can be null
self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
        # Node 0 should not be able to sign the transaction while the wallet is locked
self.nodes[0].encryptwallet("password")
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].walletprocesspsbt, psbtx)
# Node 0 should be able to process without signing though
unsigned_tx = self.nodes[0].walletprocesspsbt(psbtx, False)
assert_equal(unsigned_tx['complete'], False)
self.nodes[0].walletpassphrase(passphrase="password", timeout=1000000)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=False)['psbt']
finalized_tx = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=True)['psbt']
assert signed_tx != finalized_tx
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Manually selected inputs can be locked:
assert_equal(len(self.nodes[0].listlockunspent()), 0)
utxo1 = self.nodes[0].listunspent()[0]
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
assert_equal(len(self.nodes[0].listlockunspent()), 1)
# Locks are ignored for manually selected inputs
self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
# Setup watchonly wallets
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
# Create all the addresses
p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
if not self.options.descriptors:
wmulti.importaddress(p2sh)
wmulti.importaddress(p2wsh)
wmulti.importaddress(p2sh_p2wsh)
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.generate(self.nodes[0], 6)
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['address'] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2pkh:
p2pkh_pos = out['n']
inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
outputs = [{self.nodes[1].getnewaddress(): 29.99}]
# spend single key from node 1
created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
# Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
# Test fee rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
self.log.info("- raises RPC error with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("- raises RPC error with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
# previously this was silently capped at -maxtxfee
for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
self.log.info("Test various PSBT operations")
# partially sign multisig things with node 1
psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# Unload wmulti, we don't need it anymore
wmulti.unloadwallet()
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.generate(self.nodes[0], 6)[0]
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.generate(self.nodes[0], 6)
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
# Make sure that specifying a change address the wallet does not have P2SH inner-script access to still results in success
# when attempting BnB coin selection
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Make sure the wallet's change type is respected by default
small_output = {self.nodes[0].getnewaddress():0.1}
psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_native, "witness_v0_keyhash")
psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_legacy, "pubkeyhash")
# Make sure the change type of the wallet can also be overwritten
psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
self.assert_change_type(psbtx_np2wkh, "scripthash")
# Make sure the change type cannot be specified if a change address is given
invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# Make sure unsafe inputs are included if specified
self.nodes[2].createwallet(wallet_name="unsafe")
wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
self.sync_mempools()
assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'], True, "ALL")['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
# TODO: Re-enable this for segwit v1
# self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
def test_psbt_input_keys(psbt_input, keys):
"""Check that the psbt input has only the expected keys."""
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.generate(self.nodes[0], 6)
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.generate(self.nodes[0], 6)[0]
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check the fee, estimated size, and estimated feerate
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
# Test that we can fund psbts with external inputs specified
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
# Make a weird but signable script. sh(pkh()) descriptor accomplishes this
desc = descsum_create("sh(pkh({}))".format(privkey))
if self.options.descriptors:
res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}])
else:
res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}])
assert res[0]["success"]
addr = self.nodes[0].deriveaddresses(desc)[0]
addr_info = self.nodes[0].getaddressinfo(addr)
self.nodes[0].sendtoaddress(addr, 10)
self.generate(self.nodes[0], 6)
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 10 + ext_utxo['amount']}, 0, {'add_inputs': True})
# But funding should work when the solving data is provided
psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
self.nodes[0].finalizepsbt(signed['psbt'])
psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data":{"descriptors": [desc]}})
signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
self.nodes[0].finalizepsbt(signed['psbt'])
if __name__ == '__main__':
PSBTTest().main()
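# A minimal sketch (not part of the original test) of the end-to-end PSBT flow
# exercised above, assuming `node` is any test-framework node with a funded
# wallet and `address` is an arbitrary destination. It strings together the
# creator/updater, signer, and finalizer/extractor roles via the wallet RPCs.
def psbt_round_trip_sketch(node, address, amount=1):
    # Creator/Updater: let the wallet select inputs and attach UTXO data.
    funded = node.walletcreatefundedpsbt([], [{address: amount}])
    # Signer: sign every input the wallet has keys for.
    processed = node.walletprocesspsbt(funded["psbt"])
    assert processed["complete"]
    # Finalizer/Extractor: produce network-serialized hex and broadcast it.
    final = node.finalizepsbt(processed["psbt"])
    return node.sendrawtransaction(final["hex"])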
|
|
# -*- coding:utf-8 -*-
'''
Functions for building the whole packed version of highlight.js out of
pre-packed modules.
'''
import os
import shutil
import re
import argparse
import subprocess
import json
import codecs
from functools import partial
REPLACES = {
'case_insensitive': 'cI',
'lexemes': 'l',
'contains': 'c',
'keywords': 'k',
'subLanguage': 'sL',
'className': 'cN',
'begin': 'b',
'beginKeywords': 'bK',
'end': 'e',
'endsWithParent': 'eW',
'illegal': 'i',
'excludeBegin': 'eB',
'excludeEnd': 'eE',
'returnBegin': 'rB',
'returnEnd': 'rE',
'relevance': 'r',
'variants': 'v',
'IDENT_RE': 'IR',
'UNDERSCORE_IDENT_RE': 'UIR',
'NUMBER_RE': 'NR',
'C_NUMBER_RE': 'CNR',
'BINARY_NUMBER_RE': 'BNR',
'RE_STARTERS_RE': 'RSR',
'BACKSLASH_ESCAPE': 'BE',
'APOS_STRING_MODE': 'ASM',
'QUOTE_STRING_MODE': 'QSM',
'PHRASAL_WORDS_MODE': 'PWM',
'C_LINE_COMMENT_MODE': 'CLCM',
'C_BLOCK_COMMENT_MODE': 'CBCM',
'HASH_COMMENT_MODE': 'HCM',
'NUMBER_MODE': 'NM',
'C_NUMBER_MODE': 'CNM',
'BINARY_NUMBER_MODE': 'BNM',
'CSS_NUMBER_MODE': 'CSSNM',
'REGEXP_MODE': 'RM',
'TITLE_MODE': 'TM',
'UNDERSCORE_TITLE_MODE': 'UTM',
'beginRe': 'bR',
'endRe': 'eR',
'illegalRe': 'iR',
'lexemesRe': 'lR',
'terminators': 't',
'terminator_end': 'tE',
}
CATEGORIES = {
'common': [
'apache', 'nginx',
'java', 'cs', 'cpp', 'objectivec',
'ini', 'diff', 'bash', 'makefile',
'sql', 'php', 'ruby', 'python', 'perl',
'css', 'xml', 'javascript', 'coffeescript', 'http', 'json',
'markdown',
],
}
def lang_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
# This is used instead of plain `open` everywhere as there are apparently
# "some systems" that don't use utf-8 as the default system encoding.
# We should probably drop it in the better, brighter future.
def utf8_open(filename, mode='r'):
return codecs.open(filename, mode, 'utf-8')
def mapnonstrings(source, func):
result = []
pos = 0
quotes = re.compile('[\'"/]')
while pos < len(source):
match = quotes.search(source, pos)
end = match.start() if match else len(source)
result.append(func(source[pos:end]))
pos = end
if match:
terminator = re.compile(r'[%s\\]' % match.group(0))
start = pos
pos += 1
while True:
match = terminator.search(source, pos)
if not match:
raise ValueError('Unmatched quote')
if match.group(0) == '\\':
pos = match.start() + 2
else:
pos = match.start() + 1
result.append(source[start:pos])
break
return ''.join(result)
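# Illustrative example (not part of the original script): mapnonstrings()
# applies the callback only to the pieces of source outside string/regex
# literals, so quoted occurrences are left untouched:
#
#   mapnonstrings("contains: ['contains']", partial(re.sub, r'\bcontains\b', 'c'))
#   # -> "c: ['contains']"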
def compress_content(tools_path, content, filetype='js'):
if filetype == 'js':
for s, r in REPLACES.items():
content = mapnonstrings(content, partial(re.sub, r'\b%s\b' % s, r))
content = re.sub(r'(block|parentNode)\.cN', r'\1.className', content)
try:
args = ['java', '-jar', os.path.join(tools_path, 'yuicompressor.jar'), '--type', filetype]
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except FileNotFoundError as e:
raise RuntimeError('Couldn\'t find "%s" which is required for compression to work. You can skip compression with the `-n` option.' % args[0]) from e
p.stdin.write(content.encode('utf-8'))
p.stdin.close()
content = p.stdout.read().decode('utf-8')
p.stdout.close()
return content
def parse_header(filename):
'''
Parses a possible language description header from a file. If a header is found, returns it
as a dict; otherwise returns None.
'''
content = utf8_open(filename).read(1024)
match = re.search(r'^\s*/\*(.*?)\*/', content, re.S)
if not match:
return
headers = match.group(1).split('\n')
headers = dict(h.strip().split(': ') for h in headers if ': ' in h)
return headers if 'Language' in headers else None
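# For reference, a header that parse_header() accepts looks like the block
# below at the top of a language file (field values here are illustrative);
# it would be returned as {'Language': 'Python', 'Requires': 'markdown.js'}:
#
#   /*
#   Language: Python
#   Requires: markdown.js
#   */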
def language_filenames(src_path, languages):
'''
Resolves dependencies and returns the list of actual language filenames
'''
lang_path = os.path.join(src_path, 'languages')
filenames = [f for f in os.listdir(lang_path) if f.endswith('.js')]
headers = [parse_header(os.path.join(lang_path, f)) for f in filenames]
infos = [(h, f) for h, f in zip(headers, filenames) if h]
# Filtering infos based on list of languages and categories
if languages:
categories = {l for l in languages if l.startswith(':')}
languages = set(languages) - categories
categories = {c.strip(':') for c in categories}
cat_languages = {l for c, ls in CATEGORIES.items() if c in categories for l in ls}
languages |= cat_languages
infos = [
(i, f) for i, f in infos
if lang_name(f) in languages
]
def append(filename):
if filename not in filenames:
filenames.append(filename)
filenames = []
for info, filename in infos:
if 'Requires' in info:
requires = [r.strip() for r in info['Requires'].split(',')]
for r in requires:
append(r)
append(filename)
return [os.path.join(lang_path, f) for f in filenames]
def strip_read(filename):
s = utf8_open(filename).read()
pattern = re.compile(r'^\s*(/\*(.*?)\*/)?\s*', re.DOTALL)
s = pattern.sub('', s)
return s.strip()
def wrap_language(filename, content, compressed):
'''
Wraps a language file's content for the browser build. The "compressed" parameter
selects which wrapping code to use:
- If compressed is False, the function expects uncompressed source files and
wraps them while maintaining the readability of the source.
- If compressed is True, the function expects source files that have already been
compressed individually and wraps them with minimal extra code, effectively
emulating what yuicompressor does.
'''
name = lang_name(filename)
if compressed:
content = content.rstrip(';')
wrap = 'hljs.registerLanguage("%s",%s);'
else:
wrap = '\nhljs.registerLanguage(\'%s\', %s);\n'
return wrap % (name, content)
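# Illustrative output of wrap_language() for a file "python.js" whose content
# is `function(hljs) {...}` (content shown abbreviated):
#
#   compressed=False -> "\nhljs.registerLanguage('python', function(hljs) {...});\n"
#   compressed=True  -> 'hljs.registerLanguage("python",function(hljs) {...});'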
def glue_files(hljs_filename, language_filenames, compressed):
'''
Glues files together for the browser build.
'''
if compressed:
hljs = 'var hljs=new %s();' % strip_read(hljs_filename).rstrip(';')
file_func = lambda f: utf8_open(f).read()
else:
hljs = 'var hljs = new %s();\n' % strip_read(hljs_filename)
file_func = strip_read
return ''.join([hljs] + [wrap_language(f, file_func(f), compressed) for f in language_filenames])
def copy_docs(root, build_path):
build_docs_path = os.path.join(build_path, 'docs')
os.makedirs(build_docs_path)
docs_path = os.path.join(root, 'docs')
filenames = os.listdir(docs_path)
for filename in filenames:
if '.rst' in filename:
shutil.copyfile(
os.path.join(docs_path, filename),
os.path.join(build_docs_path, filename)
)
def build_browser(root, build_path, filenames, options, is_amd=False, need_copy_docs=True):
src_path = os.path.join(root, 'src')
tools_path = os.path.join(root, 'tools')
print('Building %d files:\n%s' % (len(filenames), '\n'.join(filenames)))
content = glue_files(os.path.join(src_path, 'highlight.js'), filenames, False)
if is_amd:
content = 'define(function() {\n%s\nreturn hljs;\n});' % content # AMD wrap
print('Uncompressed size:', len(content.encode('utf-8')))
if options.compress:
print('Compressing...')
content = compress_content(tools_path, content)
print('Compressed size:', len(content.encode('utf-8')))
utf8_open(os.path.join(build_path, 'highlight.pack.js'), 'w').write(content)
if need_copy_docs:
print('Copying docs...')
copy_docs(root, build_path)
def build_amd(root, build_path, filenames, options):
build_browser(root, build_path, filenames, options, True, False)
def build_node(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
os.makedirs(os.path.join(build_path, 'lib', 'languages'))
print('Building %d files:' % len(filenames))
for filename in filenames:
print(filename)
content = 'module.exports = %s;' % strip_read(filename)
utf8_open(os.path.join(build_path, 'lib', 'languages', os.path.basename(filename)), 'w').write(content)
filename = os.path.join(src_path, 'highlight.js')
print(filename)
core = 'var Highlight = %s;' % strip_read(filename)
core += '\nmodule.exports = Highlight;'
utf8_open(os.path.join(build_path, 'lib', 'highlight.js'), 'w').write(core)
print('Registering languages with the library...')
hljs = "var Highlight = require('./highlight');\nvar hljs = new Highlight();"
filenames = map(os.path.basename, filenames)
for filename in filenames:
hljs += '\nhljs.registerLanguage(\'%s\', require(\'./languages/%s\'));' % (lang_name(filename), filename)
hljs += '\nmodule.exports = hljs;'
utf8_open(os.path.join(build_path, 'lib', 'index.js'), 'w').write(hljs)
if options.compress:
print('Notice: not compressing files for "node" target.')
print('Copying styles...')
build_style_path = os.path.join(build_path, 'styles')
src_style_path = os.path.join(src_path, 'styles')
os.mkdir(build_style_path)
styles = [os.path.join(src_style_path, f) for f in os.listdir(src_style_path) if f.endswith('.css')]
for style in styles:
print(style)
shutil.copy(style, build_style_path)
print('Copying over Metafiles...')
filenames = ['LICENSE', 'README.md']
for filename in filenames:
source = os.path.join(root, filename)
dest = os.path.join(build_path, filename)
shutil.copyfile(source, dest)
print('Adding package.json...')
package = json.load(utf8_open(os.path.join(root, 'package.json')))
authors = utf8_open(os.path.join(root, 'AUTHORS.en.txt'))
matches = (re.match('^- (?P<name>.*) <(?P<email>.*)>$', a) for a in authors)
package['contributors'] = [m.groupdict() for m in matches if m]
content = json.dumps(package, indent=2, ensure_ascii=False)
utf8_open(os.path.join(build_path, 'package.json'), 'w').write(content)
print('Copying docs...')
copy_docs(root, build_path)
def build_cdn(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
tools_path = os.path.join(root, 'tools')
if not options.compress:
print('Notice: forcing compression for "cdn" target')
options.compress = True
build_browser(root, build_path, filenames, options, False, False)
os.rename(os.path.join(build_path, 'highlight.pack.js'), os.path.join(build_path, 'highlight.min.js'))
print('Compressing all languages...')
lang_path = os.path.join(build_path, 'languages')
os.mkdir(lang_path)
all_filenames = language_filenames(src_path, [])
for filename in all_filenames:
print(filename)
content = compress_content(tools_path, strip_read(filename))
content = wrap_language(filename, content, True)
utf8_open(os.path.join(lang_path, '%s.min.js' % lang_name(filename)), 'w').write(content)
print('Compressing styles...')
build_style_path = os.path.join(build_path, 'styles')
src_style_path = os.path.join(src_path, 'styles')
os.mkdir(build_style_path)
styles = [lang_name(f) for f in os.listdir(src_style_path) if f.endswith('.css')]
for style in styles:
filename = os.path.join(src_style_path, '%s.css' % style)
print(filename)
content = compress_content(tools_path, utf8_open(filename).read(), 'css')
utf8_open(os.path.join(build_style_path, '%s.min.css' % style), 'w').write(content)
def build(buildfunc, root, args):
build_path = os.path.join(root, 'build')
if os.path.exists(build_path):
shutil.rmtree(build_path)
os.mkdir(build_path)
filenames = language_filenames(os.path.join(root, 'src'), args.languages)
buildfunc(root, build_path, filenames, args)
print('Done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build highlight.js for various targets')
parser.add_argument(
'languages', nargs='*',
help = 'language (the name of a language file without the .js extension) or :category (currently the only available category is ":common")',
)
parser.add_argument(
'-n', '--no-compress',
dest = 'compress', action = 'store_false', default = True,
help = 'Don\'t compress source files. Compression only works for the "browser" target.',
)
parser.add_argument(
'-t', '--target', dest = 'target',
choices = ['browser', 'node', 'cdn', 'amd'], default = 'browser',
help = 'Target format, default is "browser"',
)
args = parser.parse_args()
buildfunc = locals()['build_%s' % args.target]
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
build(buildfunc, root, args)
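# Typical invocations, assuming this script lives one directory below the
# repository root (e.g. tools/build.py) as implied by the `root` computation above:
#
#   python tools/build.py                 # compressed browser build of all languages
#   python tools/build.py :common         # browser build restricted to the "common" category
#   python tools/build.py -n xml css      # uncompressed browser build of xml and css only
#   python tools/build.py -t cdn          # minified hljs plus per-language and style files
#
# The result is written to the "build" directory under the repository root.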
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP wrapper for apitools.
This library wraps the underlying http library we use, which is
currently httplib2.
"""
import collections
import contextlib
import httplib
import logging
import socket
import time
import urlparse
import httplib2
from gslib.third_party.storage_apitools import exceptions
from gslib.third_party.storage_apitools import util
__all__ = [
'GetHttp',
'MakeRequest',
]
# 308 and 429 don't have names in httplib.
RESUME_INCOMPLETE = 308
TOO_MANY_REQUESTS = 429
_REDIRECT_STATUS_CODES = (
httplib.MOVED_PERMANENTLY,
httplib.FOUND,
httplib.SEE_OTHER,
httplib.TEMPORARY_REDIRECT,
RESUME_INCOMPLETE,
)
# http: An httplib2.Http instance.
# http_request: A http_wrapper.Request.
# exc: Exception being raised.
# num_retries: Number of retries consumed; used for exponential backoff.
ExceptionRetryArgs = collections.namedtuple('ExceptionRetryArgs',
['http', 'http_request', 'exc',
'num_retries'])
@contextlib.contextmanager
def _Httplib2Debuglevel(http_request, level, http=None):
"""Temporarily change the value of httplib2.debuglevel if needed.
If http_request has a `loggable_body` distinct from `body`, then we
need to prevent httplib2 from logging the full body. This sets
httplib2.debuglevel for the duration of the `with` block; however,
that alone won't change the value of existing HTTP connections. If
an httplib2.Http object is provided, we'll also change the level on
any cached connections attached to it.
"""
if http_request.loggable_body is None:
yield
return
old_level = httplib2.debuglevel
http_levels = {}
httplib2.debuglevel = level
if http is not None:
for connection_key, connection in http.connections.iteritems():
# httplib2 stores two kinds of values in this dict, connection
# classes and instances. Since the connection types are all
# old-style classes, we can't easily distinguish by connection
# type -- so instead we use the key pattern.
if ':' not in connection_key:
continue
http_levels[connection_key] = connection.debuglevel
connection.set_debuglevel(level)
yield
httplib2.debuglevel = old_level
if http is not None:
for connection_key, old_level in http_levels.iteritems():
if connection_key in http.connections:
http.connections[connection_key].set_debuglevel(old_level)
class Request(object):
"""Class encapsulating the data for an HTTP request."""
def __init__(self, url='', http_method='GET', headers=None, body=''):
self.url = url
self.http_method = http_method
self.headers = headers or {}
self.__body = None
self.__loggable_body = None
self.body = body
@property
def loggable_body(self):
return self.__loggable_body
@loggable_body.setter
def loggable_body(self, value):
if self.body is None:
raise exceptions.RequestError(
'Cannot set loggable body on request with no body')
self.__loggable_body = value
@property
def body(self):
return self.__body
@body.setter
def body(self, value):
"""Sets the request body; handles logging and length measurement."""
self.__body = value
if value is not None:
# Avoid calling len(), whose result cannot exceed 4GiB (sys.maxint) on 32-bit Python.
body_length = getattr(self.__body, 'length', None) or len(self.__body)
self.headers['content-length'] = str(body_length)
else:
self.headers.pop('content-length', None)
# This line ensures we don't try to print large requests.
if not isinstance(value, basestring):
self.loggable_body = '<media body>'
# Note: currently the order of fields here is important, since we want
# to be able to pass in the result from httplib2.request.
class Response(collections.namedtuple(
'HttpResponse', ['info', 'content', 'request_url'])):
"""Class encapsulating data for an HTTP response."""
__slots__ = ()
def __len__(self):
return self.length
@property
def length(self):
"""Return the length of this response.
We expose this as an attribute since using len() directly can fail
for responses larger than sys.maxint.
Returns:
Response length (as int or long)
"""
def ProcessContentRange(content_range):
_, _, range_spec = content_range.partition(' ')
byte_range, _, _ = range_spec.partition('/')
start, _, end = byte_range.partition('-')
return int(end) - int(start) + 1
if '-content-encoding' in self.info and 'content-range' in self.info:
# httplib2 rewrites content-length in the case of a compressed
# transfer; we can't trust the content-length header in that
# case, but we *can* trust content-range, if it's present.
return ProcessContentRange(self.info['content-range'])
elif 'content-length' in self.info:
return int(self.info.get('content-length'))
elif 'content-range' in self.info:
return ProcessContentRange(self.info['content-range'])
return len(self.content)
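# Worked example for ProcessContentRange above: a 'content-range' header of
# "bytes 0-99/1234" yields int('99') - int('0') + 1 == 100 bytes.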
@property
def status_code(self):
return int(self.info['status'])
@property
def retry_after(self):
if 'retry-after' in self.info:
return int(self.info['retry-after'])
@property
def is_redirect(self):
return (self.status_code in _REDIRECT_STATUS_CODES and
'location' in self.info)
def CheckResponse(response):
if response is None:
# Caller shouldn't call us if the response is None, but handle anyway.
raise exceptions.RequestError('Request did not return a response.')
elif (response.status_code >= 500 or
response.status_code == TOO_MANY_REQUESTS):
raise exceptions.BadStatusCodeError.FromResponse(response)
elif response.status_code == httplib.UNAUTHORIZED:
# Sometimes we get a 401 after a connection break.
# TODO: this shouldn't be a retryable exception, but for now we retry.
raise exceptions.BadStatusCodeError.FromResponse(response)
elif response.retry_after:
raise exceptions.RetryAfterError.FromResponse(response)
def RebuildHttpConnections(http):
"""Rebuilds all http connections in the httplib2.Http instance.
httplib2 overloads the map in http.connections to contain two different
types of values:
{ scheme string: connection class } and
{ scheme + authority string : actual http connection }
Here we remove all of the entries for actual connections so that on the
next request httplib2 will rebuild them from the connection types.
Args:
http: An httplib2.Http instance.
"""
if getattr(http, 'connections', None):
for conn_key in http.connections.keys():
if ':' in conn_key:
del http.connections[conn_key]
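# For reference, http.connections typically looks something like the following
# (key and class names are illustrative): plain scheme keys map to connection
# classes, while "scheme:authority" keys map to live connection instances, and
# only the latter are removed here.
#
#   {'https': HTTPSConnectionWithTimeout,
#    'https:www.googleapis.com': <HTTPSConnectionWithTimeout instance>}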
def RethrowExceptionHandler(*unused_args):
raise
def HandleExceptionsAndRebuildHttpConnections(retry_args):
"""Exception handler for http failures.
This catches known failures and rebuilds the underlying HTTP connections.
Args:
retry_args: An ExceptionRetryArgs tuple.
"""
# If the server indicates how long to wait, use that value. Otherwise,
# calculate the wait time on our own.
retry_after = None
# Transport failures
if isinstance(retry_args.exc, httplib.BadStatusLine):
logging.debug('Caught BadStatusLine from httplib, retrying: %s',
retry_args.exc)
elif isinstance(retry_args.exc, socket.error):
logging.debug('Caught socket error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, socket.gaierror):
logging.debug('Caught socket address error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):
logging.debug('Caught server not found error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, ValueError):
# oauth2_client tries to JSON-decode the response, which can result
# in a ValueError if the response was invalid. Until that is fixed in
# oauth2_client, need to handle it here.
logging.debug('Response content was invalid (%s), retrying',
retry_args.exc)
elif isinstance(retry_args.exc, exceptions.RequestError):
logging.debug('Request returned no response, retrying')
# API-level failures
elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):
logging.debug('Response returned status %s, retrying',
retry_args.exc.status_code)
elif isinstance(retry_args.exc, exceptions.RetryAfterError):
logging.debug('Response returned a retry-after header, retrying')
retry_after = retry_args.exc.retry_after
else:
raise
RebuildHttpConnections(retry_args.http)
logging.debug('Retrying request to url %s after exception %s',
retry_args.http_request.url, retry_args.exc)
time.sleep(retry_after or util.CalculateWaitForRetry(retry_args.num_retries))
def MakeRequest(http, http_request, retries=7, redirections=5,
retry_func=HandleExceptionsAndRebuildHttpConnections,
check_response_func=CheckResponse):
"""Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on 5XX replies.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Arguments are
(Httplib2.Http, Request, Exception, int num_retries).
check_response_func: Function to validate the HTTP response; called with the
Response object as its only argument.
Returns:
A Response object.
"""
retry = 0
while True:
try:
return _MakeRequestNoRetry(http, http_request, redirections=redirections,
check_response_func=check_response_func)
# retry_func will consume the exception types it handles and re-raise those it does not.
# pylint: disable=broad-except
except Exception as e:
retry += 1
if retry >= retries:
raise
else:
retry_func(ExceptionRetryArgs(http, http_request, e, retry))
def _MakeRequestNoRetry(http, http_request, redirections=5,
check_response_func=CheckResponse):
"""Send http_request via the given http.
This wrapper exists to handle translation between the plain httplib2
request/response types and the Request and Response types above.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
redirections: (int, default 5) Number of redirects to follow.
check_response_func: Function to validate the HTTP response; called with the
Response object as its only argument.
Returns:
Response object.
Raises:
RequestError if no response could be parsed.
"""
connection_type = None
if getattr(http, 'connections', None):
url_scheme = urlparse.urlsplit(http_request.url).scheme
if url_scheme and url_scheme in http.connections:
connection_type = http.connections[url_scheme]
# Custom printing only at debuglevel 4
new_debuglevel = 4 if httplib2.debuglevel == 4 else 0
with _Httplib2Debuglevel(http_request, new_debuglevel, http=http):
info, content = http.request(
str(http_request.url), method=str(http_request.http_method),
body=http_request.body, headers=http_request.headers,
redirections=redirections, connection_type=connection_type)
if info is None:
raise exceptions.RequestError()
response = Response(info, content, http_request.url)
check_response_func(response)
return response
def GetHttp():
return httplib2.Http()
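# A minimal usage sketch (not part of the original module): build a Request,
# send it with the default retry/response-checking behaviour, and read the
# Response fields defined above. Authentication headers, if any, are assumed
# to be supplied by the caller.
def _example_get(url, headers=None):
    http = GetHttp()
    request = Request(url=url, http_method='GET', headers=headers or {})
    response = MakeRequest(http, request)
    return response.status_code, response.content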
|
|
#-------------
# Some sections of the code below have been copied from
# MongoEngine.
#
# https://github.com/MongoEngine/mongoengine
#
# Copyright (c) 2009 See AUTHORS
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#-------------
import datetime
import hmac
import logging
import random
import re
import string
import time
import uuid
from hashlib import sha1
from mongoengine import Document, EmbeddedDocument
from mongoengine import StringField, DateTimeField, ListField
from mongoengine import BooleanField, ObjectIdField, EmailField
from mongoengine import EmbeddedDocumentField, IntField
from mongoengine import DictField, DynamicEmbeddedDocument
from mongoengine.django.utils import datetime_now
from mongoengine.django.auth import SiteProfileNotAvailable
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.hashers import check_password, make_password
from django.contrib.auth.models import _user_has_perm, _user_get_all_permissions
from django.contrib.auth.models import _user_has_module_perms
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from crits.config.config import CRITsConfig
from crits.core.crits_mongoengine import CritsDocument, CritsSchemaDocument
from crits.core.crits_mongoengine import CritsDocumentFormatter, UnsupportedAttrs
from crits.core.user_migrate import migrate_user
logger = logging.getLogger(__name__)
class EmbeddedSubscription(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Subscription
"""
_id = ObjectIdField(required=True, db_field="id")
date = DateTimeField(default=datetime.datetime.now)
class EmbeddedSourceSubscription(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Source Subscription
"""
date = DateTimeField(default=datetime.datetime.now)
name = StringField(required=True)
class EmbeddedFavorites(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Favorites
"""
Actor = ListField(StringField())
Backdoor = ListField(StringField())
Campaign = ListField(StringField())
Certificate = ListField(StringField())
Domain = ListField(StringField())
Email = ListField(StringField())
Event = ListField(StringField())
Exploit = ListField(StringField())
IP = ListField(StringField())
Indicator = ListField(StringField())
PCAP = ListField(StringField())
RawData = ListField(StringField())
Sample = ListField(StringField())
Screenshot = ListField(StringField())
Target = ListField(StringField())
class EmbeddedSubscriptions(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Subscriptions
"""
Actor = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Backdoor = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Campaign = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Certificate = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Domain = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Email = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Event = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Exploit = ListField(EmbeddedDocumentField(EmbeddedSubscription))
IP = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Indicator = ListField(EmbeddedDocumentField(EmbeddedSubscription))
PCAP = ListField(EmbeddedDocumentField(EmbeddedSubscription))
RawData = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Sample = ListField(EmbeddedDocumentField(EmbeddedSubscription))
Source = ListField(EmbeddedDocumentField(EmbeddedSourceSubscription))
Target = ListField(EmbeddedDocumentField(EmbeddedSubscription))
class PreferencesField(DynamicEmbeddedDocument):
"""
Embedded User Preferences
"""
notify = DictField(required=True, default=
{"email": False}
)
plugins = DictField(required=False, default={})
ui = DictField(required=True, default=
{"theme": "default",
"table_page_size": 25
}
)
nav = DictField(required=True, default={"nav_menu": "default",
"text_color": "#FFF",
"background_color": "#464646",
"hover_text_color": "#39F",
"hover_background_color": "#6F6F6F"})
toast_notifications = DictField(required=True, default={"enabled": True,
"acknowledgement_type": "sticky",
"initial_notifications_display": "show",
"newer_notifications_location": "top"})
class EmbeddedPasswordReset(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Password Reset
"""
reset_code = StringField(required=True, default="")
attempts = IntField(default=0)
date = DateTimeField(default=datetime.datetime.now)
class EmbeddedLoginAttempt(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Login Attempt
"""
success = BooleanField(required=True)
user_agent = StringField(required=True)
remote_addr = StringField(required=True)
accept_language = StringField(required=True)
date = DateTimeField(default=datetime.datetime.now)
class EmbeddedAPIKey(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded API Key
"""
name = StringField(required=True)
api_key = StringField(required=True)
date = DateTimeField(default=datetime.datetime.now)
default = BooleanField(default=False)
class CRITsUser(CritsDocument, CritsSchemaDocument, Document):
"""
CRITs User object
"""
meta = {
"collection": settings.COL_USERS,
'indexes': [
{'fields': ['username'],
'unique': True,
'sparse': True,
},
],
"crits_type": 'User',
"latest_schema_version": 3,
"schema_doc": {
'username': 'The username of this analyst',
'organization': 'The name of the organization this user is from',
'role': 'The role this user has been granted from a CRITs Admin',
'sources': ('List [] of source names this user has been granted'
' access to view data from'),
'subscriptions': {
'Campaign': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Domain': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Email': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Target': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Event': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'IP': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Indicator': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'PCAP': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Sample': [
{
'date': 'ISODate subscribed',
'id': 'ObjectId of the object subscribed to'
}
],
'Source': [
{
'date': 'ISODate subscribed',
'name': 'Name of the source subscribed to'
}
],
},
'favorites': {
'Actor': [],
'Backdoor': [],
'Campaign': [],
'Domain': [],
'Email': [],
'Target': [],
'Event': [],
'Exploit': [],
'IP': [],
'Indicator': [],
'PCAP': [],
'Sample': [],
}
},
}
username = StringField(max_length=30, required=True,
verbose_name=_('username'),
help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = StringField(max_length=30,
verbose_name=_('first name'))
last_name = StringField(max_length=30,
verbose_name=_('last name'))
email = EmailField(verbose_name=_('e-mail address'))
password = StringField(max_length=128,
verbose_name=_('password'),
help_text=_("Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = BooleanField(default=False,
verbose_name=_('staff status'),
help_text=_("Designates whether the user can log into this admin site."))
is_active = BooleanField(default=True,
verbose_name=_('active'),
help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = BooleanField(default=False,
verbose_name=_('superuser status'),
help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = DateTimeField(default=datetime_now,
verbose_name=_('last login'))
date_joined = DateTimeField(default=datetime_now,
verbose_name=_('date joined'))
invalid_login_attempts = IntField(default=0)
login_attempts = ListField(EmbeddedDocumentField(EmbeddedLoginAttempt))
organization = StringField(default=settings.COMPANY_NAME)
password_reset = EmbeddedDocumentField(EmbeddedPasswordReset, default=EmbeddedPasswordReset())
role = StringField(default="Analyst")
sources = ListField(StringField())
subscriptions = EmbeddedDocumentField(EmbeddedSubscriptions, default=EmbeddedSubscriptions())
favorites = EmbeddedDocumentField(EmbeddedFavorites, default=EmbeddedFavorites())
prefs = EmbeddedDocumentField(PreferencesField, default=PreferencesField())
totp = BooleanField(default=False)
secret = StringField(verbose_name=_('TOTP Secret'), default="")
api_keys = ListField(EmbeddedDocumentField(EmbeddedAPIKey))
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
defaultDashboard = ObjectIdField(required=False, default=None)
def migrate(self):
"""
Migrate to latest schema version.
"""
migrate_user(self)
def __setattr__(self, name, value):
"""
Overrides our core __setattr__ because we have to allow for extra
authentication attributes that would normally get moved to
unsupported_attrs.
"""
if (not self._dynamic and hasattr(self, 'unsupported_attrs')
and name not in self._fields and not name.startswith('_')
and not name.startswith('$') and '.' not in name
and name not in ('backend',)):
if not self.unsupported_attrs:
self.unsupported_attrs = UnsupportedAttrs()
self.unsupported_attrs.__setattr__(name, value)
else:
super(CritsDocument, self).__setattr__(name, value)
@property
def pk(self):
"""
Return the ObjectId as the primary key.
"""
return self.id
def __str__(self):
"""
This is so request.user returns the username like Django expects,
not the whole object.
"""
return self.username or ''
# the rest of this taken from the MongoEngine User class.
def __unicode__(self):
"""
This is so request.user returns the username like Django expects,
not the whole object.
"""
return self.username
def get_full_name(self):
"""
Returns the users first and last names, separated by a space.
"""
full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
return full_name.strip()
def is_anonymous(self):
"""
We do not allow anonymous users.
"""
return False
def is_authenticated(self):
"""
If we know about the user from the request, it means they've
authenticated.
"""
return True
def mark_active(self, analyst=None):
"""
Mark the user as active.
"""
self.is_active = True
self.save(username=analyst)
return self
def mark_inactive(self, analyst=None):
"""
Deactivate the user.
"""
self.is_active = False
self.save(username=analyst)
return self
def is_password_complex(self, password):
"""
Based on the CRITsConfig, is the password provided complex enough to be
used?
:param password: The password to check for complexity.
:type password: str
:returns: True, False
"""
crits_config = CRITsConfig.objects().first()
if crits_config:
pw_regex = crits_config.password_complexity_regex
else:
pw_regex = settings.PASSWORD_COMPLEXITY_REGEX
complex_regex = re.compile(pw_regex)
if complex_regex.match(password):
return True
return False
def set_password(self, raw_password, analyst=None):
"""
Sets the user's password - always use this rather than directly
assigning to :attr:`~mongoengine.django.auth.User.password` as the
password is hashed before storage.
:param raw_password: The password to hash and store.
:type raw_password: str
:returns: self, False
"""
if self.is_password_complex(raw_password):
self.password = make_password(raw_password)
self.save(username=analyst)
return self
else:
return False
def set_reset_code(self, analyst):
"""
Sets a reset code on the account for password reset validation.
:returns: str
"""
e = EmbeddedPasswordReset()
char_set = string.ascii_uppercase + string.digits
e.reset_code = ''.join(random.sample(char_set*6,6))
e.date = datetime.datetime.now()
self.password_reset = e
self.save(username=analyst)
return e.reset_code
def reset_password(self, rcode, new_p, new_p_c, analyst):
"""
Reset the user's password. Validate the reset code, ensure the two
passwords are identical, and then set.
:param rcode: Reset Code to validate.
:type rcode: str
:param new_p: New password.
:type new_p: str
:param new_p_c: New password confirmation.
:type new_p_c: str
:param analyst: The user.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
if self.validate_reset_code(rcode, analyst)['success']:
if new_p == new_p_c:
self.password_reset.reset_code = ""
if self.set_password(new_p):
return {'success': True, 'message': 'Password reset.'}
else:
crits_config = CRITsConfig.objects().first()
if crits_config:
pw_desc = crits_config.password_complexity_desc
else:
pw_desc = settings.PASSWORD_COMPLEXITY_DESC
message = 'Password not complex enough: %s' % pw_desc
return {'success': False, 'message': message}
else:
return {'success': False, 'message': 'Passwords do not match.'}
else:
self.password_reset.reset_code = ""
self.save(username=analyst)
return {'success': False, 'message': 'Reset Code Expired.'}
def validate_reset_code(self, reset_code, analyst):
"""
Validate the reset code. Also ensure that the reset code hasn't expired
already since it is a limited-time use reset.
:param reset_code: The reset code.
:type reset_code: str
:param analyst: The user.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
my_reset = self.password_reset.reset_code
if len(reset_code) == 6 and len(my_reset) == 6 and my_reset == reset_code:
date = datetime.datetime.now()
diff = date - self.password_reset.date
window = divmod(diff.days * 86400 + diff.seconds, 60)
if window[0] < 5:
self.password_reset.attempts = 0
self.save(username=analyst)
return {'success': True, 'message': 'Reset Code Validated.'}
else:
self.password_reset.attempts += 1
self.password_reset.reset_code = ""
self.save(username=analyst)
return {'success': False, 'message': 'Reset Code Expired.'}
self.password_reset.attempts += 1
if self.password_reset.attempts > 2:
self.password_reset.date = self.password_reset.date + datetime.timedelta(minutes=-5)
self.save(username=analyst)
return {'success': False, 'message': 'Reset Code Expired.'}
self.save(username=analyst)
return {'success': False, 'message': 'Reset Code Invalid.'}
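# Illustrative password-reset flow (not part of the model; names and the
# analyst value are hypothetical) tying the three methods above together:
#
#     code = user.set_reset_code(analyst='alice')          # sent out of band
#     result = user.reset_password(code, new_p, new_p_c, analyst='alice')
#     if not result['success']:
#         ...  # e.g. 'Passwords do not match.' or 'Reset Code Expired.'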
def check_password(self, raw_password):
"""
Checks the user's password against a provided password - always use
this rather than directly comparing to
:attr:`~mongoengine.django.auth.User.password` as the password is
hashed before storage.
"""
return check_password(raw_password, self.password)
def create_api_key(self, name, analyst, default=False):
"""
Generate an API key for the user. It will require a name as we allow for
unlimited API keys and users need a way to reference them.
:param name: The name for the API key.
:type name: str
:param analyst: The user.
:type analyst: str
:param default: Use as default API key.
:type default: boolean
:returns: dict with keys "success" (boolean) and "message" (str).
"""
if not name:
return {'success': False, 'message': 'Need a name'}
new_uuid = uuid.uuid4()
key = hmac.new(new_uuid.bytes, digestmod=sha1).hexdigest()
ea = EmbeddedAPIKey(name=name, api_key=key, default=default)
if len(self.api_keys) < 1:
ea.default = True
self.api_keys.append(ea)
self.save(username=analyst)
return {'success': True, 'message': {'name': name,
'key': key,
'date': str(ea.date)}}
def default_api_key(self, name, analyst):
"""
Make an API key the default key for a user. The default key is used for
situations where the user is not or cannot be asked which API key to
use.
:param name: The name of the API key.
:type name: str
:param analyst: The user.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
c = 0
for key in self.api_keys:
if key.name == name:
self.api_keys[c].default = True
else:
self.api_keys[c].default = False
c += 1
self.save(username=analyst)
return {'success': True}
def revoke_api_key(self, name, analyst):
"""
Revoke an API key so it can no longer be used.
:param name: The name of the API key.
:type name: str
:param analyst: The user.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
keys = self.api_keys
keyslen = len(keys)
self.api_keys = [k for k in keys if k.name != name]
if keyslen > len(self.api_keys):
self.save(username=analyst)
return {'success': True}
else:
return {'success': False, 'message': 'Key not found.'}
def get_api_key(self, name):
"""
Get the API key.
:param name: The name of the API key.
:type name: str
:returns: str, None
"""
for key in self.api_keys:
if key.name == name:
return key.api_key
return None
def validate_api_key(self, key):
"""
Validate that the API key exists for this user.
:param key: The API key.
:type key: str
:returns: True, False
"""
for keys in self.api_keys:
if keys.api_key == key:
return True
return False
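# Illustrative API-key lifecycle (not part of the model; the key name and
# analyst value are hypothetical), using the helpers defined above:
#
#     result = user.create_api_key('automation', analyst='alice')
#     key = user.get_api_key('automation')        # the generated hex digest
#     assert user.validate_api_key(key)
#     user.default_api_key('automation', analyst='alice')
#     user.revoke_api_key('automation', analyst='alice')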
@classmethod
def create_user(cls, username, password, email=None, analyst=None):
"""
Create (and save) a new user with the given username, password and
email address.
"""
now = datetime_now()
# Normalize the address by lowercasing the domain part of the email
# address.
if email is not None:
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = cls(username=username, email=email, date_joined=now)
user.create_api_key("default", analyst, default=True)
if password and user.set_password(password):
user.save(username=analyst)
return user
# fetch the saved config; the bare class attribute is just a field descriptor
elif getattr(CRITsConfig.objects().first(), 'remote_user', False):
user.save(username="CRITS_REMOTE_USER")
return user
else:
return None
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"""
Sends an e-mail to this User.
"""
from django.core.mail import send_mail
if not from_email:
crits_config = CRITsConfig.objects().first()
if crits_config:
from_email = crits_config.crits_email
send_mail(subject, message, from_email, [self.email])
def get_username(self):
return self.username
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
def get_preference(self, section, setting, default=None):
"""
Get a user preference setting out of the deep dynamic dictionary
'section' is the preferences 'section' e.g. 'ui'
'setting' is the dot separated preference setting e.g. 'foo.bar.enabled'
:param section: A specific section of preferences you want.
:type section: str
:param setting: The setting you want to get.
:type setting: str
:returns: None, str, dict
"""
if not section in self.prefs:
return default
# Split the preference option into subtrees on '.'
otree = setting.split(".")
param = otree.pop()
opt = self.prefs[section]
if len(otree):
for subsect in otree:
if subsect in opt:
opt = opt[subsect]
else:
return default
if not param in opt:
return default
return opt[param]
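# Illustrative lookups (values assume the defaults in PreferencesField above;
# any nested dict stored under a section resolves the same way):
#
#     user.get_preference('ui', 'theme')                     -> 'default'
#     user.get_preference('toast_notifications', 'enabled')  -> True
#     user.get_preference('ui', 'foo.bar.enabled', False)    -> False (missing)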
def update_from_ldap(self, analyst, config=None, passw=''):
"""
Set First Name, Last Name, and Email from LDAP if we can get the data.
"""
info = self.info_from_ldap(config, passw)
if info['result'] == "OK":
self.first_name = info['first_name']
self.last_name = info['last_name']
self.email = info['email']
self.save(username=analyst)
def info_from_ldap(self, config=None, password=''):
"""
Get information about this user from LDAP.
"""
import ldap, ldapurl
resp = {"result": "ERROR"}
if not config:
config = CRITsConfig.objects().first()
# Make sure we have the required settings, else return failure
if not config.ldap_server or not config.ldap_userdn:
return resp
ldap_server = config.ldap_server.split(':')
scheme = "ldap"
if config.ldap_tls:
scheme = "ldaps"
url = ldapurl.LDAPUrl('%s://%s' % (scheme, ldap_server[0]))
if len(ldap_server) == 2:
l = ldap.initialize('%s:%s' % (url.unparse(),
ldap_server[1]))
else:
l = ldap.initialize(url.unparse())
l.protocol_version = 3
l.set_option(ldap.OPT_REFERRALS, 0)
l.set_option(ldap.OPT_TIMEOUT, 10)
# attribute used to build the LDAP search filter below (defaults to 'cn=')
cn = "cn="
if config.ldap_usercn:
cn = config.ldap_usercn
# setup auth for custom cn's
if len(config.ldap_usercn) > 0:
un = "%s%s,%s" % (config.ldap_usercn,
self.username,
config.ldap_userdn)
elif "@" in config.ldap_userdn:
un = "%s%s" % (self.username, config.ldap_userdn)
else:
un = self.username
try:
# Try auth bind first
l.simple_bind_s(un, password)
logger.info("Bound to LDAP for: %s" % self.username)
except Exception as e:
logger.error("Error binding to LDAP for: %s" % self.username)
logger.error("ERR: %s" % e)
try:
uatr = None
uatr = l.search_s(config.ldap_userdn,
ldap.SCOPE_SUBTREE,
"(%s%s)" % (cn, self.username)
)[0][1]
resp['first_name'] = uatr['givenName'][0]
resp['last_name'] = uatr['sn'][0]
resp['email'] = uatr['mail'][0]
resp['result'] = "OK"
logger.info("Retrieved LDAP info for: %s" % self.username)
except Exception as e:
logger.error("Error retrieving LDAP info for: %s" % self.username)
logger.error("ERR: %s" % e)
l.unbind()
return resp
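# Depending on configuration, the bind DN built above takes one of three
# forms (values are illustrative):
#     ldap_usercn set          -> "uid=jdoe,ou=people,dc=example,dc=com"
#     ldap_userdn contains '@' -> "jdoe@example.com"
#     otherwise                -> "jdoe"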
def getDashboards(self):
from crits.dashboards.handlers import getDashboardsForUser
return getDashboardsForUser(self)
# stolen from MongoEngine and modified to use the CRITsUser class.
class CRITsAuthBackend(object):
"""
Authenticate using MongoEngine and crits.core.user.CRITsUser.
"""
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None, user_agent=None,
remote_addr=None, accept_language=None,
totp_enabled='Disabled'):
"""
Perform the authentication of the user.
:param username: The user to authenticate.
:type username: str
:param password: The password provided to authenticate with.
:type password: str
:param user_agent: The user-agent in the request.
:type user_agent: str
:param remote_addr: The hostname/ip in the request.
:type remote_addr: str
:param accept_language: The Accept Language in the request.
:type accept_language: str
:param totp_enabled: If TOTP is enabled and should be checked as well.
:type totp_enabled: str
:returns: :class:`crits.core.user.CRITsUser`, None
"""
# Need both a username and a password for logins, so check them both
if not all([username, password]):
return None
e = EmbeddedLoginAttempt()
e.user_agent = user_agent
e.remote_addr = remote_addr
e.accept_language = accept_language
fusername = username
if '\\' in username:
username = username.split("\\")[1]
user = CRITsUser.objects(username=username).first()
if user:
# If the user needs TOTP and it is not disabled system-wide, and
# the user has exceeded the login threshold for this time period
# don't go any further. Track the invalid login and return.
if (((user.totp and totp_enabled == 'Optional') or
totp_enabled == 'Required') and
self._exceeded_login_threshold(user)):
e.success = False
self.track_login_attempt(user, e)
user.reload()
return None
config = CRITsConfig.objects().first()
if not config:
return None
if config.ldap_auth:
import ldap, ldapurl
try:
# If you are using Oracle's server that's based on
# Netscape's code, and your users can't login after
# password expiration warning kicks in, you need:
# python-ldap 2.4.15 installed and
# import ldap.controls.pwdpolicy to fix it
#
import ldap.controls.pwdpolicy
except ImportError:
logger.info("ldap.controls.pwdpolicy not present.")
try:
# split off the port if one was given
ldap_server = config.ldap_server.split(':')
scheme = "ldap"
if config.ldap_tls:
scheme = "ldaps"
url = ldapurl.LDAPUrl('%s://%s' % (scheme, ldap_server[0]))
if len(ldap_server) == 2:
l = ldap.initialize('%s:%s' % (url.unparse(),
ldap_server[1]))
else:
l = ldap.initialize(url.unparse())
l.protocol_version = 3
l.set_option(ldap.OPT_REFERRALS, 0)
l.set_option(ldap.OPT_TIMEOUT, 10)
# setup auth for custom cn's
if len(config.ldap_usercn) > 0:
un = "%s%s,%s" % (config.ldap_usercn,
fusername,
config.ldap_userdn)
elif "@" in config.ldap_userdn:
un = "%s%s" % (fusername, config.ldap_userdn)
else:
un = fusername
logger.info("Logging in user: %s" % un)
l.simple_bind_s(un, password)
user = self._successful_settings(user, e, totp_enabled)
if config.ldap_update_on_login:
user.update_from_ldap("Auto LDAP update", config, password)
l.unbind()
return user
except ldap.INVALID_CREDENTIALS:
l.unbind()
logger.info("Invalid LDAP credentials for: %s" % un)
except Exception as err:
logger.info("LDAP Auth error: %s" % err)
# If LDAP auth fails, attempt normal CRITs auth.
# This allows local admin accounts to still be used when LDAP
# authentication is enabled.
if password and user.check_password(password):
self._successful_settings(user, e, totp_enabled)
if config.ldap_update_on_login:
user.update_from_ldap("Auto LDAP update", config)
return user
else:
e.success = False
user.invalid_login_attempts += 1
if user.is_active and user.invalid_login_attempts > settings.INVALID_LOGIN_ATTEMPTS:
user.is_active = False
logger.info("Account disabled due to too many invalid login attempts: %s" %
user.username)
if config.crits_email_end_tag:
subject = "CRITs Account Lockout" + config.crits_email_subject_tag
else:
subject = config.crits_email_subject_tag + "CRITs Account Lockout"
body = """
You are receiving this email because your CRITs account has been locked out due to
too many invalid login attempts. If you did not perform this action,
someone may be attempting to access your account.
Please contact a site administrator to resolve.
"""
try:
user.email_user(subject, body)
except Exception as err:
logger.warning("Error sending email: %s" % str(err))
self.track_login_attempt(user, e)
user.reload()
return None
def track_login_attempt(self, user, login_attempt):
"""
Track this login attempt.
"""
# only track the last 50 login attempts
if len(user.login_attempts) > 49:
user.login_attempts = user.login_attempts[-49:]
user.login_attempts.append(login_attempt)
user.save()
def get_user(self, user_id):
"""
Get a user with the specified user_id.
"""
return CRITsUser.objects.with_id(user_id)
def _exceeded_login_threshold(self, user, interval=10):
"""
Throttle login attempts for this user so they can't be locked out by a
brute-force attempt. Requires that the user wait `interval` seconds
(10 by default) before another attempt is accepted.
"""
# If the user was just created, they may not have an attempt logged
if not user.login_attempts:
return False
# If last login attempt was success, don't bother checking.
if user.login_attempts[-1].success:
return False
ct = time.time()
try:
lt = time.mktime(user.login_attempts[-1]['date'].timetuple())
except Exception:
lt = 0
if ct - lt < interval:
logger.info("Multiple login attempts detected exceeding "
"threshold of %s seconds for user %s" % (interval, user.username))
return True
return False
def _successful_settings(self, user, e, totp_enabled):
"""
Adjust the user document and the request after a successful login.
"""
# If login requires TOTP, don't log this as a success yet
if ((user.totp and totp_enabled == 'Optional') or
totp_enabled == 'Required'):
return user
e.success = True
# only track the last 50 login attempts
if len(user.login_attempts) > 49:
user.login_attempts = user.login_attempts[-49:]
user.login_attempts.append(e)
user.save()
backend = auth.get_backends()[0]
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
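# A minimal sketch (an assumption, not shipped configuration) of how this
# backend is typically wired up and exercised; the settings path and keyword
# values below are illustrative:
#
#     # settings.py
#     AUTHENTICATION_BACKENDS = ('crits.core.user.CRITsAuthBackend',)
#
#     # in a login view
#     user = CRITsAuthBackend().authenticate(
#         username=form.cleaned_data['username'],
#         password=form.cleaned_data['password'],
#         user_agent=request.META.get('HTTP_USER_AGENT', ''),
#         remote_addr=request.META.get('REMOTE_ADDR', ''),
#         accept_language=request.META.get('HTTP_ACCEPT_LANGUAGE', ''),
#         totp_enabled='Disabled')
#     if user is None:
#         ...  # bad credentials, throttled, or account locked out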
class CRITsRemoteUserBackend(CRITsAuthBackend):
"""
Handle CRITs users when dealing with REMOTE_USER
"""
def authenticate(self, username, password=None, user_agent=None,
remote_addr=None, accept_language=None,
totp_enabled='Disabled'):
"""
Perform the authentication of the user.
:param username: The user to authenticate.
:type username: str
:param password: The password provided to authenticate with.
:type password: str
:param user_agent: The user-agent in the request.
:type user_agent: str
:param remote_addr: The hostname/ip in the request.
:type remote_addr: str
:param accept_language: The Accept Language in the request.
:type accept_language: str
:param totp_enabled: If TOTP is enabled and should be checked as well.
:type totp_enabled: str
:returns: :class:`crits.core.user.CRITsUser`, None
"""
e = EmbeddedLoginAttempt()
e.user_agent = user_agent
e.remote_addr = remote_addr
e.accept_language = accept_language
if not username:
logger.warn("No username passed to CRITsRemoteUserBackend (auth)")
return None
config = CRITsConfig.objects().first()
user = None
username = self.clean_username(username)
user = CRITsUser.objects(username=username).first()
if user and user.is_active:
if self._exceeded_login_threshold(user):
return None
# Log in user
self._successful_settings(user, e, totp_enabled)
if config.ldap_update_on_login:
user.update_from_ldap("Auto LDAP update", config)
return user
elif not user and config.create_unknown_user:
# Create the user
user = CRITsUser.create_user(username=username, password=None)
user.sources.append(config.company_name)
# Attempt to update info from LDAP
user.update_from_ldap("Auto LDAP update", config)
user = self._successful_settings(user, e, totp_enabled)
return user
else:
logger.warn("Unknown user and not creating accounts.")
return None
def clean_username(self, username):
"""
Clean the username.
"""
return username
def configure_user(self, user):
"""
Return the user.
"""
return user
|
|
# coding: utf-8
# In[1]:
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as dts
import pandas as pd
from fredpy import series, window_equalize, quickplot
import datetime, dateutil.parser, urllib, runProcs
import requests
# get_ipython().magic('matplotlib inline')
# In[2]:
# 1. Import the most recent inflation forecast data from the Philadelphia Fed, Survey of Professional Forecasters
url = "https://www.philadelphiafed.org/-/media/research-and-data/real-time-center/survey-of-professional-forecasters/historical-data/inflation.xls?la=en"
r = requests.get(url,verify=False)
with open("inflationForecasts.xls", "wb") as code:
code.write(r.content)
# dls = "http://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters/historical-data/inflation.xls"
# urllib.urlretrieve(dls, "inflationForecasts.xls")
# In[3]:
# 2. Download and manage data from FRED
gdpDeflatorQ=series('GDPDEF')
gdpDeflatorA=series('A191RD3A086NBEA')
gdpDeflatorQ.apc(method='forward')
gdpDeflatorA.apc(method='forward')
gdpDeflatorQ.window(['07-01-1970','01-01-2200'])
gdpDeflatorA.window(['07-01-1970','01-01-2200'])
interestQ = series('GS1')
interestA = series('GS1')
interestQ.monthtoquarter(method='average')
interestA.monthtoannual(method='average')
interestQ.window(['07-01-1970','01-01-2200'])
interestA.window(['07-01-1970','01-01-2200'])
# In[4]:
# 3. Create forecast series as FRED objects
# 3.1 import the inflation forecasts from Excel file and fill in missing value for 1974:Q3
inflationForecasts = pd.read_excel('inflationForecasts.xls')
inflationForecasts['INFPGDP1YR']=inflationForecasts['INFPGDP1YR'].interpolate()
# 3.2 initialize some FRED objects
gdpDeflatorForecastQ=series('GDPDEF')
gdpDeflatorForecastA=series('GDPDEF')
# 3.3 Associate forecasts with dates. The date should coincide with the start of the period for which the forecast applies.
dates = []
for i,ind in enumerate(inflationForecasts.index):
year =int(inflationForecasts.iloc[i]['YEAR'])
quart=int(inflationForecasts.iloc[i]['QUARTER'])
if quart == 1:
month = '04'
elif quart == 2:
month = '07'
elif quart == 3:
month = '10'
else:
month = '01'
year=year+1
date = month+'-01-'+str(year)
dates.append(date)
dateNumbers = [dateutil.parser.parse(s) for s in dates]
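# For example (illustrative): a forecast recorded in 1981 Q2 applies to the
# quarter that starts next, so it is stamped '07-01-1981'; a 1981 Q4 forecast
# rolls into the new year and is stamped '01-01-1982'.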
# 3.4 Create the FRED objects
gdpDeflatorForecastQ.data = inflationForecasts['INFPGDP1YR'].values
gdpDeflatorForecastQ.dates = dates
gdpDeflatorForecastQ.datenumbers = dateNumbers
gdpDeflatorForecastA.data = inflationForecasts['INFPGDP1YR'].values.tolist()
gdpDeflatorForecastA.dates = dates
gdpDeflatorForecastA.datenumbers = dateNumbers
gdpDeflatorForecastA.quartertoannual(method='average')
# In[5]:
# 3.5 Create data frames with forecast inflation, actual inflation, and the 1-year bond rate
window_equalize([gdpDeflatorQ,gdpDeflatorForecastQ,interestQ])
window_equalize([gdpDeflatorA,gdpDeflatorForecastA,interestA])
inflationForecastQDf=pd.DataFrame({'1-year inflation forecast':gdpDeflatorForecastQ.data,'1-year actual inflation':gdpDeflatorQ.data,'1-year nominal interest rate':interestQ.data},index = interestQ.dates)
inflationForecastADf=pd.DataFrame({'1-year inflation forecast':gdpDeflatorForecastA.data,'1-year actual inflation':gdpDeflatorA.data,'1-year nominal interest rate':interestA.data},index = interestA.dates)
# In[6]:
# 3.6 Save data to csv
inflationForecastQDf.to_csv('inflationForecastsQ.csv',index=True,index_label='date')
inflationForecastADf.to_csv('inflationForecastsA.csv',index=True,index_label='date')
# In[7]:
# 4. Plot some things
# 4.1 actual inflation, expected inflation, 1-year interest rate: quarterly
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot_date(gdpDeflatorQ.datenumbers,gdpDeflatorQ.data,'b-',lw=3)
ax.plot_date(gdpDeflatorForecastQ.datenumbers,gdpDeflatorForecastQ.data,'r--',lw=3)
ax.plot_date(interestQ.datenumbers,interestQ.data,'m-.',lw=3)
ax.set_title('Quarterly')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['actual $\pi$','forecast $\pi$','interest'],loc='upper right')
# interestQ.recessions()
plt.grid()
# In[8]:
# 4.2 actual inflation, expected inflation, 1-year interest rate: annual
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot_date(gdpDeflatorA.datenumbers,gdpDeflatorA.data,'b-o',lw=3)
ax.plot_date(gdpDeflatorForecastA.datenumbers,gdpDeflatorForecastA.data,'r--o',lw=3)
ax.plot_date(interestA.datenumbers,interestA.data,'m-.o',lw=3)
ax.set_title('Annual')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['actual $\pi$','forecast $\pi$','interest'],loc='upper right')
# interestA.recessions()
plt.grid()
# In[9]:
# 5. Real interest rates
# 5.1 Construct real interest rate series: ex ante and ex post
realExAnteA = np.array(interestA.data) - np.array(gdpDeflatorForecastA.data)
realExPostA = np.array(interestA.data) - np.array(gdpDeflatorA.data)
realExAnteQ = np.array(interestQ.data) - np.array(gdpDeflatorForecastQ.data)
realExPostQ = np.array(interestQ.data) - np.array(gdpDeflatorQ.data)
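# The arrays above follow the approximate Fisher relation (all rates in percent):
#     ex ante:  r_t = i_t - E_t[pi_(t+1)]   (nominal rate minus forecast inflation)
#     ex post:  r_t = i_t - pi_(t+1)        (nominal rate minus realized inflation)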
# In[10]:
# 5.2 ex ante and ex post real interest rates: annual
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot_date(interestA.datenumbers,realExAnteA,'b-o',lw=3)
ax.plot_date(interestA.datenumbers,realExPostA,'r--o',lw=3)
ax.set_title('Annual real interest rate')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['ex ante','ex post'],loc='upper right')
# interestA.recessions()
plt.grid()
# In[11]:
# 5.3 ex ante and ex post real interest rates: quarterly
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot_date(interestQ.datenumbers,realExAnteQ,'b-',lw=3)
ax.plot_date(interestQ.datenumbers,realExPostQ,'r--',lw=3)
ax.set_title('Quarterly real interest rate')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['ex ante','ex post'],loc='upper right')
# interestQ.recessions()
plt.grid()
# In[12]:
# 6. Consumption Euler equation
# 6.1 create the consumption series
cons=series('PCECA')
defl=series('A191RD3A086NBEA')
window_equalize([cons,defl])
cons.pc(method='backward')
window_equalize([interestA,cons])
# In[13]:
# 6.2 Predicted real interest rate: sigma = 1
sigma = 1
beta = .98
gc=np.mean(cons.data)
rPredA = sigma*np.array(cons.data - np.mean(cons.data)) - 100*np.log(beta)
print(gc)
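# Sketch of the algebra behind rPredA (assuming CRRA utility with risk aversion
# sigma and discount factor beta): the log-linearized consumption Euler equation
#     r_t ~= sigma * E_t[ d log C_(t+1) ] - log(beta)
# is approximated here with realized consumption growth in percent, demeaned so
# that only deviations of growth from its sample average move the predicted
# rate, and with -log(beta) scaled by 100 to stay in percentage units.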
# In[14]:
# 6.3 Plot the predicted real interest rate
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot_date(interestA.datenumbers,realExAnteA,'b-',lw=3)
ax.plot_date(interestA.datenumbers,rPredA,'r--',lw=3)
ax.set_title('Annual ex ante real interest rate')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['actual','predicted'],loc='upper right')
# interestA.recessions()
plt.grid()
# In[15]:
np.corrcoef(cons.data, realExAnteA)
# In[16]:
# 7. Export the notebook to .py
runProcs.exportNb('consumptionEuler')
|
|
import random
from django.contrib.auth.models import AnonymousUser, Group, User
from django.db import connection
from django.test import RequestFactory
from django.test.utils import override_settings
import mock
import waffle
from test_app import views
from waffle.middleware import WaffleMiddleware
from waffle.models import Flag, Sample, Switch
from waffle.tests.base import TestCase
def get(**kw):
request = RequestFactory().get('/foo', data=kw)
request.user = AnonymousUser()
return request
def process_request(request, view):
response = view(request)
return WaffleMiddleware().process_response(request, response)
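# The tests below lean on waffle's cookie persistence: for percentage-based
# flags evaluated during a request, WaffleMiddleware.process_response writes a
# 'dwf_<flagname>' cookie so later requests keep the same on/off decision.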
class WaffleTests(TestCase):
def test_persist_active_flag(self):
Flag.objects.create(name='myflag', percent='0.1')
request = get()
# Flag stays on.
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('True', response.cookies['dwf_myflag'].value)
def test_persist_inactive_flag(self):
Flag.objects.create(name='myflag', percent='99.9')
request = get()
# Flag stays off.
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('False', response.cookies['dwf_myflag'].value)
def test_no_set_unused_flag(self):
"""An unused flag shouldn't have its cookie reset."""
request = get()
request.COOKIES['dwf_unused'] = 'True'
response = process_request(request, views.flag_in_view)
assert 'dwf_unused' not in response.cookies
def test_superuser(self):
"""Test the superuser switch."""
Flag.objects.create(name='myflag', superusers=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
superuser = User(username='foo', is_superuser=True)
request.user = superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_superuser = User(username='bar', is_superuser=False)
non_superuser.save()
request.user = non_superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_staff(self):
"""Test the staff switch."""
Flag.objects.create(name='myflag', staff=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
staff = User(username='foo', is_staff=True)
request.user = staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_staff = User(username='foo', is_staff=False)
non_staff.save()
request.user = non_staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_languages(self):
Flag.objects.create(name='myflag', languages='en,fr')
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
request.LANGUAGE_CODE = 'en'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
request.LANGUAGE_CODE = 'de'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_user(self):
"""Test the per-user switch."""
user = User.objects.create(username='foo')
flag = Flag.objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User.objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_group(self):
"""Test the per-group switch."""
group = Group.objects.create(name='foo')
user = User.objects.create(username='bar')
user.groups.add(group)
flag = Flag.objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_authenticated(self):
"""Test the authenticated/anonymous switch."""
Flag.objects.create(name='myflag', authenticated=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_on(self):
"""Test the 'everyone' switch on."""
Flag.objects.create(name='myflag', everyone=True)
request = get()
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_off(self):
"""Test the 'everyone' switch off."""
Flag.objects.create(name='myflag', everyone=False,
authenticated=True)
request = get()
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_percent(self):
"""If you have no cookie, you get a cookie!"""
Flag.objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view)
assert 'dwf_myflag' in response.cookies
@mock.patch.object(random, 'uniform')
def test_reroll(self, uniform):
"""Even without a cookie, calling flag_is_active twice should return
the same value."""
Flag.objects.create(name='myflag', percent='50.0')
# Make sure we're not really random.
request = get() # Create a clean request.
assert not hasattr(request, 'waffles')
uniform.return_value = '10' # < 50. Flag is True.
assert waffle.flag_is_active(request, 'myflag')
assert hasattr(request, 'waffles') # We should record this flag.
assert 'myflag' in request.waffles
assert request.waffles['myflag'][0]
uniform.return_value = '70' # > 50. Normally, Flag would be False.
assert waffle.flag_is_active(request, 'myflag')
assert request.waffles['myflag'][0]
def test_undefined(self):
"""Undefined flags are always false."""
request = get()
assert not waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_undefined_default(self):
"""WAFFLE_FLAG_DEFAULT controls undefined flags."""
request = get()
assert waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_OVERRIDE=True)
def test_override(self):
request = get(foo='1')
Flag.objects.create(name='foo') # Off for everyone.
assert waffle.flag_is_active(request, 'foo')
def test_testing_flag(self):
Flag.objects.create(name='foo', testing=True)
request = get(dwft_foo='1')
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# GET param should override cookie
request = get(dwft_foo='0')
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_testing_disabled_flag(self):
Flag.objects.create(name='foo')
request = get(dwft_foo='1')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
request = get(dwft_foo='0')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
def test_set_then_unset_testing_flag(self):
Flag.objects.create(name='myflag', testing=True)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=0')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
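# For reference, a minimal sketch (an assumption, not the actual test_app code)
# of the kind of view these tests exercise; views.flag_in_view is assumed to
# look roughly like this:
#
#     from django.http import HttpResponse
#     import waffle
#
#     def flag_in_view(request):
#         if waffle.flag_is_active(request, 'myflag'):
#             return HttpResponse('on')
#         return HttpResponse('off')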
class SwitchTests(TestCase):
def test_switch_active(self):
switch = Switch.objects.create(name='myswitch', active=True)
assert waffle.switch_is_active(get(), switch.name)
def test_switch_inactive(self):
switch = Switch.objects.create(name='myswitch', active=False)
assert not waffle.switch_is_active(get(), switch.name)
def test_switch_active_from_cache(self):
"""Do not make two queries for an existing active switch."""
switch = Switch.objects.create(name='myswitch', active=True)
# Get the value once so that it will be put into the cache
assert waffle.switch_is_active(get(), switch.name)
queries = len(connection.queries)
assert waffle.switch_is_active(get(), switch.name)
self.assertEqual(queries, len(connection.queries), 'We should only make one query.')
def test_switch_inactive_from_cache(self):
"""Do not make two queries for an existing inactive switch."""
switch = Switch.objects.create(name='myswitch', active=False)
# Get the value once so that it will be put into the cache
assert not waffle.switch_is_active(get(), switch.name)
queries = len(connection.queries)
assert not waffle.switch_is_active(get(), switch.name)
self.assertEqual(queries, len(connection.queries), 'We should only make one query.')
def test_undefined(self):
assert not waffle.switch_is_active(get(), 'foo')
@override_settings(WAFFLE_SWITCH_DEFAULT=True)
def test_undefined_default(self):
assert waffle.switch_is_active(get(), 'foo')
class SampleTests(TestCase):
def test_sample_100(self):
sample = Sample.objects.create(name='sample', percent='100.0')
assert waffle.sample_is_active(get(), sample.name)
def test_sample_0(self):
sample = Sample.objects.create(name='sample', percent='0.0')
assert not waffle.sample_is_active(get(), sample.name)
def test_undefined(self):
assert not waffle.sample_is_active(get(), 'foo')
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_undefined_default(self):
assert waffle.sample_is_active(get(), 'foo')
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of nova-cells RPC API (for talking to the nova-cells service
within a cell).
This is different from communication between child and parent nova-cells
services. That communication is handled by the cells driver via the
messaging module.
"""
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import proxy as rpc_proxy
CONF = cfg.CONF
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPI(rpc_proxy.RpcProxy):
'''Cells client-side RPC API
API version history:
1.0 - Initial version.
1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
1.2 - Adds service_get_all(), service_get_by_compute_host(),
and proxy_rpc_to_compute_manager()
1.3 - Adds task_log_get_all()
1.4 - Adds compute_node_get(), compute_node_get_all(), and
compute_node_stats()
1.5 - Adds actions_get(), action_get_by_request_id(), and
action_events_get()
1.6 - Adds consoleauth_delete_tokens() and validate_console_port()
1.7 - Adds service_update()
and bdm_create(), bdm_update(), and bdm_destroy()
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(CellsAPI, self).__init__(topic=CONF.cells.topic,
default_version=self.BASE_RPC_API_VERSION)
def cast_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a cast to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
self.cast(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=False))
def call_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a call to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
return self.call(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=True))
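# Illustrative usage (the cell name, compute API method, and arguments are
# hypothetical):
#
#     cells_rpcapi = CellsAPI()
#     # fire-and-forget
#     cells_rpcapi.cast_compute_api_method(ctxt, 'region!child', 'pause', instance_uuid)
#     # blocking call that returns the remote method's result
#     info = cells_rpcapi.call_compute_api_method(ctxt, 'region!child', 'get', instance_uuid)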
def schedule_run_instance(self, ctxt, **kwargs):
"""Schedule a new instance for creation."""
self.cast(ctxt, self.make_msg('schedule_run_instance',
host_sched_kwargs=kwargs))
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
if not CONF.cells.enable:
return
# Make sure we have a dict, not a SQLAlchemy model
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance_p))
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy instance at API level."""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_destroy_at_top',
instance=instance_p))
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""Delete instance everywhere. delete_type may be 'soft'
or 'hard'. This is generally only used to resolve races
when the API cell doesn't know which cell an instance belongs to.
"""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_delete_everywhere',
instance=instance_p,
delete_type=delete_type))
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top."""
if not CONF.cells.enable:
return
instance_fault_p = jsonutils.to_primitive(instance_fault)
self.cast(ctxt, self.make_msg('instance_fault_create_at_top',
instance_fault=instance_fault_p))
def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None):
"""Broadcast upwards that bw_usage was updated."""
if not CONF.cells.enable:
return
bw_update_info = {'uuid': uuid,
'mac': mac,
'start_period': start_period,
'bw_in': bw_in,
'bw_out': bw_out,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'last_refreshed': last_refreshed}
self.cast(ctxt, self.make_msg('bw_usage_update_at_top',
bw_update_info=bw_update_info))
def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
"""Broadcast up that an instance's info_cache has changed."""
if not CONF.cells.enable:
return
iicache = jsonutils.to_primitive(instance_info_cache)
instance = {'uuid': iicache['instance_uuid'],
'info_cache': iicache}
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance))
def get_cell_info_for_neighbors(self, ctxt):
"""Get information about our neighbor cells from the manager."""
if not CONF.cells.enable:
return []
return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'),
version='1.1')
def sync_instances(self, ctxt, project_id=None, updated_since=None,
deleted=False):
"""Ask all cells to sync instance data."""
if not CONF.cells.enable:
return
return self.cast(ctxt, self.make_msg('sync_instances',
project_id=project_id,
updated_since=updated_since,
deleted=deleted),
version='1.1')
def service_get_all(self, ctxt, filters=None):
"""Ask all cells for their list of services."""
return self.call(ctxt,
self.make_msg('service_get_all',
filters=filters),
version='1.2')
def service_get_by_compute_host(self, ctxt, host_name):
"""Get the service entry for a host in a particular cell. The
cell name should be encoded within the host_name.
"""
return self.call(ctxt, self.make_msg('service_get_by_compute_host',
host_name=host_name),
version='1.2')
def service_update(self, ctxt, host_name, binary, params_to_update):
"""
Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running on
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
return self.call(ctxt, self.make_msg(
'service_update', host_name=host_name,
binary=binary, params_to_update=params_to_update),
version='1.7')
def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
timeout=None):
"""Proxy RPC to a compute manager. The host in the topic
should be encoded with the target cell name.
"""
return self.call(ctxt, self.make_msg('proxy_rpc_to_manager',
topic=topic,
rpc_message=rpc_message,
call=call,
timeout=timeout),
timeout=timeout,
version='1.2')
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get the task logs from the DB in child cells."""
return self.call(ctxt, self.make_msg('task_log_get_all',
task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state),
version='1.3')
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
return self.call(ctxt, self.make_msg('compute_node_get',
compute_id=compute_id),
version='1.4')
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells, optionally
filtering by hypervisor host.
"""
return self.call(ctxt,
self.make_msg('compute_node_get_all',
hypervisor_match=hypervisor_match),
version='1.4')
def compute_node_stats(self, ctxt):
"""Return compute node stats from all cells."""
return self.call(ctxt, self.make_msg('compute_node_stats'),
version='1.4')
def actions_get(self, ctxt, instance):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('actions_get',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid']),
version='1.5')
def action_get_by_request_id(self, ctxt, instance, request_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_get_by_request_id',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid'],
request_id=request_id),
version='1.5')
def action_events_get(self, ctxt, instance, action_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_events_get',
cell_name=instance['cell_name'],
action_id=action_id),
version='1.5')
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.cast(ctxt, self.make_msg('consoleauth_delete_tokens',
instance_uuid=instance_uuid),
version='1.6')
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
return self.call(ctxt,
self.make_msg('validate_console_port',
instance_uuid=instance_uuid,
console_port=console_port,
console_type=console_type),
version='1.6')
def bdm_create(self, ctxt, bdm):
"""Broadcast upwards that a BDM was created for an instance."""
self.cast(ctxt, self.make_msg('bdm_create', bdm=bdm), version='1.7')
def bdm_update(self, ctxt, bdm, create=False):
"""Broadcast upwards that a BDM was updated for an instance.
'create' should be True if the BDM should be created if it's not
found when going to update.
"""
self.cast(ctxt, self.make_msg('bdm_update', bdm=bdm, create=create),
version='1.7')
def bdm_destroy(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""Broadcast upwards that a BDM was destroyed. One of device_name
or volume_id should be specified.
"""
self.cast(ctxt, self.make_msg('bdm_destroy',
instance_uuid=instance_uuid,
device_name=device_name,
volume_id=volume_id),
version='1.7')
|
|
# coding: utf-8
import re
from mapography.model import CallTree, Segment, Module
class ParserError(Exception):
pass
def extract_segments(maptext):
"""
Extract the segments section from the map file content
:param maptext: map file content string
:return: segments section as a string
"""
segments_header = """ --------
Segments
--------
"""
start = maptext.find(segments_header)
if start < 0:
raise ParserError("Cannot find segments")
end = maptext.find("\n\n\n", start)
if end < 0:
raise ParserError("Cannot find segments")
return maptext[start + len(segments_header):end + 1]
def parse_segments(segments_string, strict=True):
"""
Parse the segments and returns a list of dictionaries of the elements
:param segments_string: segments as printed in the map file
:param strict: if True the function raises a ParseError exception when
incoherent data is found
:return: list of dictionaries for each element with the following keys:
- name: of the segment
- start: address of the segment as integer
- end: address of the segment as integer
- length: of the segment
"""
# dict(name, start, end, length)
segments_dicts = []
for line in segments_string.split('\n'):
if line.strip():
items = line.split()
items_d = {items[2*n]: items[2*n+1] for n in range(len(items)//2)}
seg = {
'name': items_d['segment'],
'start': int(items_d['start'], 16),
'end': int(items_d['end'], 16),
'length': int(items_d['length']),
}
if strict and seg['length'] != seg['end'] - seg['start']:
raise ParserError("Segment '{}': length given doesn't match "
"with start and end".format(seg['name']))
segments_dicts.append(seg)
return segments_dicts
def make_segments(segments_dict):
return [Segment(seg_dict['name'], seg_dict['start'], seg_dict['end'])
for seg_dict in segments_dict]
def get_segments(maptext):
"""
Map file content string -> list of Segment objects
Shortcut for make_segments(parse_segments(extract_segments(maptext)))
:param maptext: map file content string
:return: list of Segment objects
"""
return make_segments(parse_segments(extract_segments(maptext)))
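# Minimal usage sketch of the segment pipeline above. The sample line layout
# (alternating keyword/value pairs) is an assumption matching what
# parse_segments expects, not an excerpt from a real map file:
#
#     >>> parse_segments("start 00008000 end 00008020 length 32 segment .text\n")
#     [{'name': '.text', 'start': 32768, 'end': 32800, 'length': 32}]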
def extract_modules(maptext):
"""
Extract the modules from map file content
:param maptext: map file content string
:return: modules extract string
"""
header = """-------
Modules
-------
"""
start = maptext.find(header)
if start < 0:
raise ParserError("Cannot find modules")
end = maptext.find("\n\n\n", start)
if end < 0:
raise ParserError("Cannot find modules")
return maptext[start+len(header):end+1]
def parse_modules(modules_string):
blocs = [[line.strip() for line in bloc.splitlines() if line.strip()]
for bloc in modules_string.split('\n\n')]
modules = []
for bloc in blocs:
module = {'name': bloc[0][:bloc[0].rfind(':')], 'sections': []}
for line in bloc[1:]:
items = line.split()
items_d = {items[2*n]: items[2*n+1] for n in range(len(items)//2)}
module['sections'].append(items_d)
modules.append(module)
return modules
def make_modules(modules_dicts):
modules = []
for module_dict in modules_dicts:
segments = [Segment(s['section'], s['start'], s['end'])
for s in module_dict['sections']]
modules.append(Module(module_dict['name'], segments))
return modules
def get_modules(maptext):
return make_modules(parse_modules(extract_modules(maptext)))
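# Sketch of the module bloc layout assumed by parse_modules (illustrative,
# not taken from a real map file): blocs are blank-line separated, the first
# line carries the module name up to the last ':', and each following line is
# a run of keyword/value pairs ('section', 'start' and 'end' are the keys
# make_modules picks up):
#
#     main.o:
#     section .text start 00008000 end 00008020
#
# parse_modules on that bloc yields
#     [{'name': 'main.o',
#       'sections': [{'section': '.text', 'start': '00008000', 'end': '00008020'}]}]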
def extract_call_tree(maptext):
"""
Extract the call tree from map file content
:param maptext: map file content string
:return: call tree string
"""
call_tree_header = """ ---------
Call tree
---------
"""
start = maptext.find(call_tree_header)
if start < 0:
raise ParserError("Cannot find call tree")
end = maptext.find("\n\n\n\n", start)
if end < 0:
raise ParserError("Cannot find call tree")
return maptext[start+len(call_tree_header):end+1]
CALL_TREE_REGEX = re.compile(r"""
(?P<index>\d+)
(?P<level>(?:[ >+|])+)
\(?(?P<func_name>[^: )]+)\)?
(?:
(?:[:\s]*\((?P<size>\d+)\))
|
(?:\s+[->]+\s+(?P<ref>\d+))
|
.*(?P<ellipsis>[.]{3})
)
""", flags=re.VERBOSE)
def parse_call_tree(call_tree_string):
"""
Parse the call tree and returns a list of dictionaries of the elements
:param call_tree_string: call tree as printed in the map file
:return: list of dictionaries for each element with the following keys:
- index: index of the element as printed
- func_name: name of the function
- level: level of indentation denoting the call hierarchy, root is 0
- size and ref: only one is defined, the other is None. When defined,
size is the stack size of the function, ref is the index at which the
size is given
"""
# dict(index, func_name, level, size, ref, ellipsis)
call_tree_dicts = []
# For each match, get and normalize its dict, and store in list
for match in re.finditer(CALL_TREE_REGEX, call_tree_string):
element = match.groupdict()
# Normalize values to int
for key in ('index', 'size', 'ref'):
try:
element[key] = int(element[key])
except (ValueError, TypeError):
pass
level = element['level']
element['level'] = level.count('|') + level.count('+')
element['ellipsis'] = bool(element['ellipsis'])
call_tree_dicts.append(element)
    # Check that the 'index' attribute matches the position in the list.
    # For now this just raises an exception at the first mismatch; consider
    # adding an attempt to repair the list if that ever proves necessary.
    # The first index is 1, so we start by pushing a None into position 0.
call_tree_dicts.insert(0, None)
for position, element in enumerate(call_tree_dicts):
if element is not None and element['index'] != position:
raise ParserError("Index {} doesn't match position {}".format(
element['index'], position))
return call_tree_dicts
def make_call_tree(elements):
call_tree = CallTree()
for element in elements:
if element is not None and element['size'] is not None:
call_tree.add_function(element['func_name'], element['size'])
call_stack = []
for element in elements:
if element is not None:
call_stack_delta = 1 + element['level'] - len(call_stack)
if element['level'] == 0:
call_stack = [element]
call_tree.connect(element['func_name'], None)
else:
call_tree.connect(element['func_name'],
call_stack[call_stack_delta-2]['func_name'])
for pop_num in range(1 - call_stack_delta):
call_stack.pop()
call_stack.append(element)
return call_tree
def get_call_tree(maptext):
"""
Map file content string -> CallTree object
Shortcut for make_call_tree(parse_call_tree(extract_call_tree(maptext)))
:param maptext: map file content string
:return: CallTree object
"""
return make_call_tree(parse_call_tree(extract_call_tree(maptext)))
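# Sketch of the call-tree line shape matched by CALL_TREE_REGEX (assumed
# layout, not taken from a real map file): each line starts with an index,
# then indentation markers (' ', '>', '+', '|') whose '+'/'|' count gives the
# call depth, then the function name followed by either its stack size in
# parentheses or a '-> n' reference to the entry that carries the size:
#
#     >>> tree = parse_call_tree("1  main (10)\n2 +> helper (4)\n")
#     >>> tree[1]['func_name'], tree[1]['level'], tree[1]['size']
#     ('main', 0, 10)
#     >>> tree[2]['func_name'], tree[2]['level'], tree[2]['size']
#     ('helper', 1, 4)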
def extract_symbols(maptext):
"""
Extract the symbols section from map file content
:param maptext: map file content string
:return: symbol section of the map file as string
"""
symbols_header = """ -------
Symbols
-------
"""
start = maptext.find(symbols_header)
if start < 0:
raise ParserError("Cannot find 'Symbols' section")
return maptext[start + len(symbols_header):]
SYMBOL_REGEX = re.compile(r""" (?P<name>\w+)
\s+
(?P<address>[0-9a-fA-F]+)
\s+
defined\ in\
(?P<module_defined>.+?)
\s*
(?:section\
(?P<section>.+?)(?:\ \((?P<section2>.+?)\).*?(?P<init>initialized)?)?
)?
\n\s*
(?:
(?:used\ in\ (?P<module_used>.+(?:\n\s+.+)*\n)+?)
|
(?:(?P<not_used>\*\*\*\ not\ used\ \*\*\*)\n)
)?""", flags=re.VERBOSE)
def parse_symbols(symbols_string):
"""
Parse the symbols section and returns a list of dictionaries of the elements
:param symbols_string: symbols as printed in the map file
:return:
"""
symbols_dicts = []
# For each match, get and normalize its dict, and store in list
for match in re.finditer(SYMBOL_REGEX, symbols_string):
element = match.groupdict()
# TODO: finish
print(element)
symbols_dicts.append(element)
return symbols_dicts
|
|
"""Spectral biclustering algorithms."""
# Authors : Kemal Eren
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, _deprecate_positional_args
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
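# In matrix form the scaling above is D_r^{-1/2} X D_c^{-1/2}, where D_r and
# D_c are diagonal matrices of the row and column sums of the nonnegative
# input. A dense-only sketch of the same computation with plain numpy
# (illustrative, without the NaN guard for all-zero rows or columns):
#
#     X = np.array([[1., 3.], [2., 4.]])
#     r = 1.0 / np.sqrt(X.sum(axis=1))      # row scaling factors
#     c = 1.0 / np.sqrt(X.sum(axis=0))      # column scaling factors
#     an = r[:, np.newaxis] * X * c         # same as the dense branch above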
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs='deprecated', random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
y : Ignored
"""
if self.n_jobs != 'deprecated':
warnings.warn("'n_jobs' was deprecated in version 0.23 and will be"
" removed in 1.0 (renaming of 0.25).", FutureWarning)
X = self._validate_data(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : int, default=3
The number of biclusters to find.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'} or ndarray of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
1.0 (renaming of 0.25).
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like of shape (n_cols,)
The bicluster label of each column.
Examples
--------
>>> from sklearn.cluster import SpectralCoclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_ #doctest: +SKIP
array([0, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_ #doctest: +SKIP
array([0, 0], dtype=int32)
>>> clustering
SpectralCoclustering(n_clusters=2, random_state=0)
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
@_deprecate_positional_args
def __init__(self, n_clusters=3, *, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs='deprecated', random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c
for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c
for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
The number of row and column clusters in the checkerboard
structure.
method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'.
.. warning::
if `method='log'`, the data must not be sparse.
n_components : int, default=6
Number of singular vectors to check.
n_best : int, default=3
Number of best singular vectors to which to project the data
for clustering.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
:func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'} or ndarray of (n_clusters, n_features), \
default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
1.0 (renaming of 0.25).
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
Row partition labels.
column_labels_ : array-like of shape (n_cols,)
Column partition labels.
Examples
--------
>>> from sklearn.cluster import SpectralBiclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_
array([0, 1], dtype=int32)
>>> clustering
SpectralBiclustering(n_clusters=2, random_state=0)
References
----------
* Kluger, Yuval, et al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
@_deprecate_positional_args
def __init__(self, n_clusters=3, *, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs='deprecated', random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super()._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError) as e:
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)") from e
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
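# A standalone, self-contained sketch (not used by the estimators above) of
# the piecewise-constant approximation idea behind _fit_best_piecewise: each
# vector is clustered with k-means, every entry is replaced by its cluster
# centroid, and the vectors closest to their piecewise version are the ones
# kept. The sample values are made up for illustration.
def _piecewise_constant_sketch():
    import numpy as np
    from sklearn.cluster import KMeans

    def piecewise(vector, n_clusters, random_state=0):
        # Cluster the 1-d vector and replace each entry by its centroid.
        km = KMeans(n_clusters=n_clusters, n_init=10,
                    random_state=random_state)
        labels = km.fit_predict(vector.reshape(-1, 1))
        return km.cluster_centers_[labels].ravel()

    v = np.array([0.1, 0.11, 0.09, 0.9, 0.92, 0.88])
    # A nearly piecewise-constant vector leaves only a small residual
    # (~0.03 here), which is the criterion _fit_best_piecewise ranks by.
    return np.linalg.norm(v - piecewise(v, n_clusters=2))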
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from cinder.brick import exception
from cinder.brick.local_dev import lvm as brick
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import test
from cinder.volume import configuration as conf
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
return configuration
class BrickLvmTestCase(test.TestCase):
def setUp(self):
self._mox = mox.Mox()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.volume_group_name = 'fake-vg'
super(BrickLvmTestCase, self).setUp()
# Stub processutils.execute for static methods
self.stubs.Set(processutils, 'execute',
self.fake_execute)
self.vg = brick.LVM(self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
def fake_pretend_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.03.00 (2012-03-06)\n", "")
def fake_old_lvm_version(obj, *cmd, **kwargs):
# Does not support thin prov or snap activation
return (" LVM version: 2.02.65(2) (2012-03-06)\n", "")
def fake_customised_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_execute(obj, *cmd, **kwargs):
cmd_string = ', '.join(cmd)
data = "\n"
if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif 'env, LC_ALL=C, vgs, --version' in cmd_string:
data = " LVM version: 2.02.95(2) (2012-03-06)\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
return (data, "")
data += " fake-vg-2:10.00:10.00:0:"\
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
else:
data = ' owi-a-'
elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string:
data = " fake-vg:/dev/sda:10.00:1.00\n"
data += " fake-vg:/dev/sdb:10.00:1.00\n"
data += " fake-vg:/dev/sdc:10.00:8.99\n"
data += " fake-vg-2:/dev/sdd:10.00:9.99\n"
elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
pass
elif 'lvcreate, --name, ' in cmd_string:
pass
else:
raise AssertionError('unexpected command called: %s' % cmd_string)
return (data, "")
def test_create_lv_snapshot(self):
self.assertEqual(self.vg.create_lv_snapshot('snapshot-1', 'fake-1'),
None)
self._mox.StubOutWithMock(self.vg, 'get_volume')
self.vg.get_volume('fake-non-existent').AndReturn(None)
self._mox.ReplayAll()
try:
self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent')
except exception.VolumeDeviceNotFound as e:
self.assertEqual(e.kwargs['device'], 'fake-non-existent')
else:
self.fail("Exception not raised")
def test_vg_exists(self):
self.assertEqual(self.vg._vg_exists(), True)
def test_get_vg_uuid(self):
self.assertEqual(self.vg._get_vg_uuid()[0],
'kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1')
def test_get_all_volumes(self):
out = self.vg.get_volumes()
self.assertEqual(out[0]['name'], 'fake-1')
self.assertEqual(out[0]['size'], '1.00g')
self.assertEqual(out[0]['vg'], 'fake-vg')
def test_get_volume(self):
self.assertEqual(self.vg.get_volume('fake-1')['name'], 'fake-1')
def test_get_all_physical_volumes(self):
# Filtered VG version
pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg')
self.assertEqual(len(pvs), 3)
# Non-Filtered, all VG's
pvs = self.vg.get_all_physical_volumes('sudo')
self.assertEqual(len(pvs), 4)
def test_get_physical_volumes(self):
pvs = self.vg.get_physical_volumes()
self.assertEqual(len(pvs), 3)
def test_get_volume_groups(self):
self.assertEqual(len(self.vg.get_all_volume_groups('sudo')), 3)
self.assertEqual(len(self.vg.get_all_volume_groups('sudo',
'fake-vg')), 1)
def test_thin_support(self):
# lvm.supports_thin_provisioning() is a static method and doesn't
# use the self._executor fake we pass in on init
# so we need to stub processutils.execute appropriately
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils,
'execute',
self.fake_customised_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
def test_snapshot_lv_activate_support(self):
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
def test_lvchange_ignskipact_support_yes(self):
"""Tests if lvchange -K is available via a lvm2 version check."""
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
def test_thin_pool_creation(self):
# The size of fake-vg volume group is 10g, so the calculated thin
# pool size should be 9.5g (95% of 10g).
self.assertEqual("9.5g", self.vg.create_thin_pool())
# Passing a size parameter should result in a thin pool of that exact
# size.
for size in ("1g", "1.2g", "1.75g"):
self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
self.assertEqual(float("7.92"),
self.vg._get_thin_pool_free_space("fake-vg",
"fake-vg-pool"))
def test_volume_create_after_thin_creation(self):
"""Test self.vg.vg_thin_pool is set to pool_name
See bug #1220286 for more info.
"""
vg_name = "vg-name"
pool_name = vg_name + "-pool"
pool_path = "%s/%s" % (vg_name, pool_name)
def executor(obj, *cmd, **kwargs):
self.assertEqual(pool_path, cmd[-1])
self.vg._executor = executor
self.vg.create_thin_pool(pool_name, "1G")
self.vg.create_volume("test", "1G", lv_type='thin')
self.assertEqual(self.vg.vg_thin_pool, pool_name)
def test_lv_has_snapshot(self):
self.assertTrue(self.vg.lv_has_snapshot('fake-vg'))
self.assertFalse(self.vg.lv_has_snapshot('test-volumes'))
def test_activate_lv(self):
self._mox.StubOutWithMock(self.vg, '_execute')
self.vg._supports_lvchange_ignoreskipactivation = True
self.vg._execute('lvchange', '-a', 'y', '--yes', '-K',
'fake-vg/my-lv',
root_helper='sudo', run_as_root=True)
self._mox.ReplayAll()
self.vg.activate_lv('my-lv')
self._mox.VerifyAll()
def test_get_mirrored_available_capacity(self):
self.assertEqual(self.vg.vg_mirror_free_space(1), 2.0)
|
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))
from phantom_mask import db
from config import settings
from datetime import datetime, timedelta
import uuid
from contextlib import contextmanager
import json
from lib import usps, phantom_on_the_capitol, select_solver
import random
from dateutil import parser
import pytz
from lib.int_ext import ordinal
from services import determine_district_service, address_inferrence_service, geolocation_service
from lib.dict_ext import sanitize_keys
from sqlalchemy import or_, and_, not_
from flask import url_for, flash
import jellyfish
from flask.ext.login import UserMixin
import flask_login
from flask_admin.contrib.sqla import ModelView
from jinja2 import Markup
from flask import jsonify, redirect, request
from sqlalchemy import func
from helpers import app_router_path
from sqlalchemy_utils.functions import get_class_by_table
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import event
from flask_admin import expose
from util import render_without_request
from helpers import render_template_wctx, append_get_params
from flask_wtf import Form
@contextmanager
def get_db_session():
try:
yield db.session
finally:
db.session.remove()
def set_attributes(model, attrs, commit=False):
for k, v in attrs:
try:
setattr(model, k, v)
except:
continue
if commit:
db.session.commit()
return model
def db_del_and_commit(model, commit=True):
try:
db.session.delete(model)
if commit:
db.session.commit()
return True
except:
db.session.rollback()
return None
def db_add_and_commit(model, commit=True):
try:
db.session.add(model)
if commit:
db.session.commit()
return model
except:
db.session.rollback()
return None
def db_first_or_create(cls, commit=True, **kwargs):
model = cls.query.filter_by(**kwargs).first()
if model is None:
model = cls(**kwargs)
db_add_and_commit(model, commit)
return model
def to_json(inst, cls):
"""
Jsonify the SQLAlchemy query result.
@param inst: instance to jsonify
@type inst:
@param cls: class of the instance to jsonify
@type cls:
@return: json string representing the instance
@rtype: string
"""
convert = dict()
# add your conversions for things like datetimes
# and whatnot that aren't serializable.
d = dict()
for c in cls.__table__.columns:
v = getattr(inst, c.name)
if c.type in convert.keys() and v is not None:
try:
d[c.name] = convert[c.type](v)
except:
d[c.name] = "Error: Failed to covert using ", str(convert[c.type])
elif v is None:
d[c.name] = str()
else:
d[c.name] = v
return json.dumps(d)
def uid_creator(cls, *args):
"""
Creates a potential 64 character string uid, checks for collisions in input class,
and returns a uid.
@param cls: string of class name inherited from db.Model
@type cls: string
@param args: one positional argument for the name of the uid attribute on the class
@type args: string
@return: function that will generate the uid
@rtype: function
"""
def create_uid():
while True:
potential_token = uuid.uuid4().hex + uuid.uuid4().hex
if getattr(sys.modules[__name__], cls).query.filter_by(**{args[0]: potential_token}).count() == 0:
return potential_token
return create_uid
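# Quick, self-contained illustration (a hypothetical helper, not used
# elsewhere) of the token shape produced above: two uuid4 hex digests
# concatenate to a 64-character lowercase hex string.
def _example_token_shape():
    import uuid
    token = uuid.uuid4().hex + uuid.uuid4().hex
    return len(token) == 64  # always True; uuid4().hex is 32 hex characters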
class MyModelView(ModelView):
form_base_class = Form
def _handle_view(self, name, **kwargs):
if name != 'login' and not self.is_accessible():
return redirect(url_for('admin.login', next=request.url))
def is_accessible(self):
return flask_login.current_user.is_authenticated()
class MyBaseModel(db.Model):
__abstract__ = True
def __repr__(self):
return str([(col.name, getattr(self,col.name)) for col in self.__table__.columns])
@property
def json(self):
return to_json(self, self.__class__)
class BaseAnalytics(object):
def __init__(self, model):
self.model = model
self.today = datetime.today()
self.today_start = self.today.replace(hour=0, minute=0, second=0, microsecond=0)
def total_count(self):
return self.model.query.count()
def new_today(self):
return self.new_last_n_days(0)
def new_last_n_days(self, n_days):
return self.model.query.filter(self.model.created_at > (self.today_start - timedelta(days=n_days))).count()
def new_in_range(self, start_days, end_days):
return self.model.query.filter(and_(self.model.created_at > (self.today_start - timedelta(days=start_days)),
self.model.created_at < (self.today_start - timedelta(days=end_days)))).count()
def growth_rate(self, n_days):
last_n = float(self.new_last_n_days(n_days))
prev_last_n = float(self.new_in_range(n_days*2, n_days))
return (last_n - prev_last_n) / last_n
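# Worked example of growth_rate as implemented above (values assumed): with
# 30 new rows in the last 7 days and 20 in the 7 days before that, the rate
# is (30 - 20) / 30 ~= 0.33. Note the denominator is the most recent period's
# count, so a period with zero new rows would raise ZeroDivisionError.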
class Legislator(MyBaseModel):
"""
Thin model for storing data on current representatives.
"""
class ModelView(MyModelView):
column_searchable_list = ['bioguide_id', 'chamber', 'state', 'title',
'first_name', 'last_name', 'oc_email', 'contact_form']
bioguide_id = db.Column(db.String(7), primary_key=True, info={'official': True})
chamber = db.Column(db.String(20),info={'official': True})
state = db.Column(db.String(2),info={'official': True})
district = db.Column(db.Integer, nullable=True, info={'official': True})
title = db.Column(db.String(3), info={'official': True})
first_name = db.Column(db.String(256), info={'official': True})
last_name = db.Column(db.String(256), info={'official': True})
contact_form = db.Column(db.String(1024), info={'official': True})
oc_email = db.Column(db.String(256), info={'official': True})
contactable = db.Column(db.Boolean, default=True)
messages = db.relationship('MessageLegislator', backref='legislator', lazy='dynamic')
FIPS_CODES = {
"AK": "02",
"AL": "01",
"AR": "05",
"AS": "60",
"AZ": "04",
"CA": "06",
"CO": "08",
"CT": "09",
"DC": "11",
"DE": "10",
"FL": "12",
"GA": "13",
"GU": "66",
"HI": "15",
"IA": "19",
"ID": "16",
"IL": "17",
"IN": "18",
"KS": "20",
"KY": "21",
"LA": "22",
"MA": "25",
"MD": "24",
"ME": "23",
"MI": "26",
"MN": "27",
"MO": "29",
"MS": "28",
"MT": "30",
"NC": "37",
"ND": "38",
"NE": "31",
"NH": "33",
"NJ": "34",
"NM": "35",
"NV": "32",
"NY": "36",
"OH": "39",
"OK": "40",
"OR": "41",
"PA": "42",
"PR": "72",
"RI": "44",
"SC": "45",
"SD": "46",
"TN": "47",
"TX": "48",
"UT": "49",
"VA": "51",
"VI": "78",
"VT": "50",
"WA": "53",
"WI": "55",
"WV": "54",
"WY": "56"
}
@staticmethod
def find_by_recip_email(recip_email):
return Legislator.query.filter(
func.lower(Legislator.oc_email) == func.lower(Legislator.doctor_oc_email(recip_email))).first()
@staticmethod
def doctor_oc_email(email):
return email.replace(settings.EMAIL_DOMAIN, "opencongress.org")
@staticmethod
def humanized_district(state, district):
return (ordinal(int(district)) if int(district) > 0 else 'At-Large') + ' Congressional district of ' + usps.CODE_TO_STATE.get(state)
@staticmethod
def humanized_state(state):
return usps.CODE_TO_STATE.get(state)
@staticmethod
def get_district_geojson_url(state, district):
try:
fips = Legislator.FIPS_CODES.get(state)
return "http://realtime.influenceexplorer.com/geojson/cd113_geojson/%s%0*d.geojson" % (fips, 2, int(district))
except:
return None
@classmethod
def members_for_state_and_district(cls, state, district, contactable=None):
or_seg = or_(Legislator.district.is_(None), Legislator.district == district)
and_seg = [Legislator.state == state, or_seg]
if contactable is not None:
query = and_(Legislator.contactable.is_(contactable), *and_seg)
else:
query = and_(*and_seg)
return Legislator.query.filter(query).all()
@classmethod
def congress_api_columns(cls):
return [col.name for col in cls.__table__.columns if 'official' in col.info and col.info['official']]
@classmethod
def get_leg_buckets_from_emails(self, permitted_legs, emails):
legs = {label: [] for label in ['contactable','non_existent','uncontactable','does_not_represent']}
inbound_emails = [email_addr for email_addr in emails]
# user sent to catchall address
if settings.CATCH_ALL_MYREPS in inbound_emails:
legs['contactable'] = permitted_legs
inbound_emails.remove(settings.CATCH_ALL_MYREPS)
elif Legislator.doctor_oc_email(settings.CATCH_ALL_MYREPS) in inbound_emails:
legs['contactable'] = permitted_legs
inbound_emails.remove(Legislator.doctor_oc_email(settings.CATCH_ALL_MYREPS))
# maximize error messages for users for individual addresses
for recip_email in inbound_emails:
# IMPORTANT! OC_EMAIL is legacy from @opencongress.org. The new addresses are @emailcongress.us.
leg = Legislator.find_by_recip_email(recip_email)
if leg is None:
legs['non_existent'].append(recip_email) # TODO refer user to index page?
elif not leg.contactable:
legs['uncontactable'].append(leg)
elif leg not in permitted_legs:
legs['does_not_represent'].append(leg)
elif leg not in legs['contactable']:
legs['contactable'].append(leg)
else:
continue
return legs
def full_title(self):
return {
'Com': 'Commissioner',
'Del': 'Delegate',
'Rep': 'Representative',
'Sen': 'Senator'
}.get(self.title, 'Representative')
def full_name(self):
return self.first_name + " " + self.last_name
def title_and_last_name(self):
return self.title + " " + self.last_name
def title_and_full_name(self):
return self.title + " " + self.full_name()
def full_title_and_full_name(self):
return self.full_title() + " " + self.full_name()
def image_url(self, size='small'):
dimensions = {
'small': '225x275',
'large': '450x550'
}
return "https://raw.githubusercontent.com/unitedstates/images/gh-pages/congress/" + \
dimensions.get(size, dimensions.values()[0]) + "/" + self.bioguide_id + '.jpg'
@property
def email(self):
return self.oc_email.replace('opencongress.org', settings.EMAIL_DOMAIN)
class AdminUser(MyBaseModel, UserMixin):
class ModelView(MyModelView):
column_searchable_list = ['username']
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(256), unique=True, nullable=False)
pw_hash = db.Column(db.String(66), unique=False, nullable=False)
# UserMixin for flask.ext.login
active = db.Column(db.Boolean, default=True)
anonymous = db.Column(db.Boolean, default=False)
def is_active(self):
return self.active
def is_anonymous(self):
return self.anonymous
def set_password(self, password):
self.pw_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pw_hash, password)
def get_id(self):
return unicode(self.id)
class Token(MyBaseModel):
token = db.Column(db.String(64), unique=True, primary_key=True)
item_id = db.Column (db.Integer)
item_table = db.Column(db.String(16))
@classmethod
def convert_token(cls, token):
msg, umi, user = None, None, None
token = cls.query.filter_by(token=token).first()
if token is not None:
item = token.item
if type(item) is User:
user = item
umi = user.default_info
elif type(item) is Message:
msg = item
umi = msg.user_message_info
user = umi.user
return msg, umi, user
@classmethod
def uid_creator(cls, model=None, param='token'):
"""
Creates a potential 64 character string uid, checks for collisions in input class,
and returns a uid.
@return: 64 character alphanumeric string
@rtype: string
"""
model = model if model is not None else cls
while True:
potential_token = uuid.uuid4().hex + uuid.uuid4().hex
if model.query.filter_by(**{param: potential_token}).count() == 0:
return potential_token
@property
def item(self):
return get_class_by_table(db.Model, db.Model.metadata.tables[self.item_table]).query.filter_by(id=self.item_id).first()
@item.setter
def item(self, item):
self.item_id = item.id
self.item_table = item.__table__.name
def __init__(self, item, **kwargs):
super(Token, self).__init__(**kwargs)
self.token = self.uid_creator()
self.item = item
def reset(self, token=None):
self.token = token if token is not None else self.uid_creator()
db.session.commit()
return self.token
def link(self):
"""
Gets the url for this token.
@return: URL for confirmation email
@rtype: string
"""
return app_router_path('update_user_address', token=self.token)
class HasTokenMixin(db.Model):
__abstract__ = True
__mapper_args__ = {
'batch' : False
}
@property
def token(self):
return Token.query.filter_by(item_id=self.id, item_table=self.__table__.name).first()
@token.setter
def token(self, token):
self.token = token
def verification_link(self):
return self.token.link()
class User(MyBaseModel, HasTokenMixin):
class ModelView(MyModelView):
column_searchable_list = ['email']
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(256), unique=True)
created_at = db.Column(db.DateTime, default=datetime.now)
user_infos = db.relationship('UserMessageInfo', backref='user')
role = db.Column(db.Integer, default=0)
ROLES = {
0: 'user',
1: 'special',
2: 'admin'
}
@classmethod
def global_captcha(cls):
return Message.query.filter((datetime.now() - timedelta(hours=settings.USER_MESSAGE_LIMIT_HOURS)
< Message.created_at)).count() > settings.GLOBAL_HOURLY_CAPTCHA_THRESHOLD
@classmethod
def get_or_create(cls, email):
user = db_first_or_create(User, email=email.lower())
umi = db_first_or_create(UserMessageInfo, user_id=user.id, default=True)
return user, umi
def __html__(self):
return render_without_request('snippets/user.html', user=self)
@property
def default_info(self):
return UserMessageInfo.query.filter_by(user_id=self.id, default=True).first()
def get_role(self):
return self.ROLES.get(self.role, 'unknown')
def is_admin(self):
return self.ROLES.get(self.role) == 'admin'
def is_special(self):
return self.ROLES.get(self.role) == 'special'
def can_skip_rate_limit(self):
return self.is_admin() or self.is_special()
def get_rate_limit_status(self):
"""
Determines whether a user should be rate limited or blocked.
@return status of rate limit [block, captcha, g_captcha, free]
@rtype string
"""
if self.can_skip_rate_limit():
return 'free'
if User.global_captcha():
return 'g_captcha'
count = self.messages().filter((datetime.now() - timedelta(hours=settings.USER_MESSAGE_LIMIT_HOURS) < Message.created_at)).count()
if count > settings.USER_MESSAGE_LIMIT_BLOCK_COUNT:
return 'block'
elif count > settings.USER_MESSAGE_LIMIT_CAPTCHA_COUNT:
return 'captcha'
else:
return 'free'
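# Sketch of the thresholding above with plain numbers (the real limits live
# in settings.USER_MESSAGE_LIMIT_*; the values here are illustrative only):
#
#     count > USER_MESSAGE_LIMIT_BLOCK_COUNT    -> 'block'
#     count > USER_MESSAGE_LIMIT_CAPTCHA_COUNT  -> 'captcha'
#     otherwise                                 -> 'free'
#
# e.g. with the captcha limit at 5 and the block limit at 10, a user with 7
# recent messages gets 'captcha' and one with 12 gets 'block'.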
def messages(self, **filters):
return Message.query.filter_by(**filters).join(UserMessageInfo).join(User).filter_by(email=self.email)
def last_message(self):
return self.messages()[-1]
def new_address_change_link(self):
self.token.reset()
def address_change_link(self):
return self.token.link()
class Analytics(BaseAnalytics):
def __init__(self):
super(User.Analytics, self).__init__(User)
def users_with_verified_districts(self):
return UserMessageInfo.query.join(User).filter(
and_(UserMessageInfo.default.is_(True), not_(UserMessageInfo.district.is_(None)))).count()
class UserMessageInfo(MyBaseModel):
class ModelView(MyModelView):
column_searchable_list = ['first_name', 'last_name', 'street_address', 'street_address2', 'city', 'state',
'zip5', 'zip4', 'phone_number']
# meta data
id = db.Column(db.Integer, primary_key=True)
default = db.Column(db.Boolean, default=False)
created_at = db.Column(db.DateTime, default=datetime.now)
# input by user
prefix = db.Column(db.String(32), default='', info={'user_input': True})
first_name = db.Column(db.String(256), default='', info={'user_input': True})
last_name = db.Column(db.String(256), default='', info={'user_input': True})
street_address = db.Column(db.String(1000), default='', info={'user_input': True})
street_address2 = db.Column(db.String(1000), default='', info={'user_input': True})
city = db.Column(db.String(256), default='', info={'user_input': True})
state = db.Column(db.String(2), default='', info={'user_input': True})
zip5 = db.Column(db.String(5), default='', info={'user_input': True})
zip4 = db.Column(db.String(4), default='', info={'user_input': True})
phone_number = db.Column(db.String(20), default='', info={'user_input': True})
accept_tos = db.Column(db.DateTime, default=None)
# set by methods based on input address information above
latitude = db.Column(db.String(256))
longitude = db.Column(db.String(256))
district = db.Column(db.Integer, default=None)
# relations
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
messages = db.relationship('Message', backref='user_message_info')
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.comparable_attributes() == other.comparable_attributes()
return False
def comparable_attributes(self):
return {key: value for key, value in self.__dict__.items() if key in self.user_input_columns()}
@classmethod
def first_or_create(cls, user_id, created_at=datetime.now, **kwargs):
user = User.query.filter_by(id=user_id).first()
if user is not None:
sanitize_keys(kwargs, cls.user_input_columns())
umi = UserMessageInfo.query.filter_by(**kwargs).first()
if umi is not None: return umi
else:
created_at = parser.parse(created_at) if type(created_at) is str else created_at().replace(tzinfo=pytz.timezone('US/Eastern'))
umi = UserMessageInfo(user_id=user.id, created_at=created_at, **kwargs)
db_add_and_commit(umi)
return umi
@classmethod
def user_input_columns(cls):
return [col.name for col in cls.__table__.columns if 'user_input' in col.info and col.info['user_input']]
def humanized_district(self):
return Legislator.humanized_district(self.state, self.district)
def humanized_state(self):
return Legislator.humanized_state(self.state)
def confirm_accept_tos(self):
self.accept_tos = datetime.now()
db.session.commit()
def should_update_address_info(self):
"""
Determines if user needs to update their address information.
@return: True if they need to update, False otherwise
@rtype: boolean
"""
return self.accept_tos is None or (datetime.now() - self.accept_tos).days >= settings.ADDRESS_DAYS_VALID
def mailing_address(self):
return self.street_address + ' ' + self.street_address2 + ', '\
+ self.city + ', ' + self.state + ' ' + self.zip5 + '-' + self.zip4
def complete_information(self):
if self.district is None:
self.determine_district(force=True)
if not self.zip4:
self.zip4_lookup(force=True)
def zip4_lookup(self, force=False):
if force or not self.zip4:
try:
self.zip4 = address_inferrence_service.zip4_lookup(self.street_address,
self.city,
self.state,
self.zip5
)
db.session.commit()
except:
db.session.rollback()
def geolocate_address(self, force=False):
"""
Retrieves the latitude and longitude of the address information.
@return: latitude, longitude tuple
@rtype: (string, string)
"""
if force or (self.latitude is None and self.longitude is None):
try:
self.latitude, self.longitude = geolocation_service.geolocate(street_address=self.street_address,
city=self.city,
state=self.state,
zip5=self.zip5)
db.session.commit()
return self.latitude, self.longitude
except:
return None, None
def get_district_geojson_url(self):
return Legislator.get_district_geojson_url(self.state, self.district)
def determine_district(self, force=False):
"""
Determines the district of information associated with this address.
@param force: whether to force an update of district
@type force: boolean
@return: district of address information
@rtype: int
"""
if not force and self.district is not None:
return self.district
data = determine_district_service.determine_district(zip5=self.zip5)
if data is None:
self.geolocate_address()
data = determine_district_service.determine_district(latitude=self.latitude, longitude=self.longitude)
try:
self.district = data.get('district')
self.state = data.get('state')
db.session.commit()
return self.district
except:
print "Unable to determine district for " + self.mailing_address()
return None
@property
def members_of_congress(self):
if self.district is None:
self.determine_district()
return Legislator.query.filter(
and_(Legislator.contactable.is_(True), Legislator.state == self.state,
or_(Legislator.district.is_(None), Legislator.district == self.district))).all()
@property
def json(self):
return to_json(self, self.__class__)
class Topic(MyBaseModel):
class ModelView(MyModelView):
column_searchable_list = ['name']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(512), unique=True)
wikipedia_parent = db.Column(db.Integer, db.ForeignKey('topic.id'))
msg_legs = db.relationship('MessageLegislator', backref='topic')
@staticmethod
def topic_for_message(choices, message):
return select_solver.choose(message.msgbody, [t.name.lower() for t in Topic.topics_from_choices(choices)])
@staticmethod
def topics_from_choices(choices):
return list(set([top if top.wikipedia_parent is None else Topic.query.filter_by(id=top.wikipedia_parent).first()
for top in Topic.query.filter(or_(*[(Topic.name == c.lower()) for c in choices])).all()]))
@classmethod
def populate_topics_from_phantom_forms(cls):
all_forms = phantom_on_the_capitol.retrieve_form_elements([x.bioguide_id for x in Legislator.query.all()])
all_topics = {}
for legislator, req in all_forms.iteritems():
for key, val in req.iteritems():
for step in val:
if step['value'] == '$TOPIC':
if type(step['options_hash']) is dict:
keys = step['options_hash'].keys()
else:
keys = step['options_hash']
for k in keys:
k = k.strip()
if all_topics.has_key(k):
all_topics[k] += 1
else:
all_topics[k] = 1
failed_topics = []
for topic, count in all_topics.iteritems():
result = select_solver.choose('test', [topic.lower()])
if result is None:
failed_topics.append(topic.lower())
elif result:
db_first_or_create(Topic, name=topic.lower())
all_topics = Topic.query.filter_by(wikipedia_parent=None)
for f_topic in failed_topics:
try:
lowest = (None, None)
for topic in all_topics:
print topic.name, f_topic
d = jellyfish.damerau_levenshtein_distance(unicode(str(topic.name)), unicode(str(f_topic)))
if lowest[0] is None or lowest[1] > d:
lowest = (topic, d)
print 'Adding ' + f_topic + ' with parent ' + lowest[0].name
db_first_or_create(Topic, name=f_topic, wikipedia_parent=lowest[0].id)
except:
continue
@property
def json(self):
return to_json(self, self.__class__)
class Message(MyBaseModel, HasTokenMixin):
class ModelView(MyModelView):
column_list = ('created_at', 'subject', 'user_message_info', 'to_legislators', 'status')
column_searchable_list = ['to_originally', 'subject', 'msgbody']
def scaffold_form(self):
from wtforms import SelectMultipleField
form_class = super(Message.ModelView, self).scaffold_form()
form_class.legislators = SelectMultipleField('Add Legislators', choices=[(x.bioguide_id, x.bioguide_id) for x in Legislator.query.all()])
return form_class
def update_model(self, form, model):
if super(Message.ModelView, self).update_model(form, model):
if model.set_legislators(Legislator.query.filter(Legislator.bioguide_id.in_(form.legislators.data)).all()):
return True
def validate_form(self, form):
if super(Message.ModelView, self).validate_form(form):
legs = Legislator.query.filter(Legislator.bioguide_id.in_(form.legislators.data))
if len(form.legislators.data) != legs.count():
form.legislators.errors = ['Legislators with provided bioguides do not exist.']
return False
return True
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.now)
to_originally = db.Column(db.String(8000)) # original recipients from email as json list
subject = db.Column(db.String(500))
msgbody = db.Column(db.String(8000))
user_message_info_id = db.Column(db.Integer, db.ForeignKey('user_message_info.id'))
to_legislators = db.relationship('MessageLegislator', backref='message')
# email uid from postmark
email_uid = db.Column(db.String(1000))
# None = sent, free = able to be sent, (g_)captcha = must solve captcha to send, block = can't send
status = db.Column(db.String(10), nullable=True, default='free')
def set_legislators(self, legislators):
"""
Used to set which legislators that this message will be sent to
@param legislators: single or list of legislators
@type legislators: list[models.Legislator or string]
@return: True if success, False otherwise
@rtype: boolean
"""
if type(legislators) is not list: legislators = [legislators]
try:
self.to_legislators = [MessageLegislator(message_id=self.id,
legislator_id=(leg if type(leg) is str else leg.bioguide_id))
for leg in legislators]
db.session.commit()
return True
except:
db.session.rollback()
return False
def add_legislators(self, legislators):
"""
Add a legislator that this message will be sent to
@param legislators: single or list of legislators
@type legislators: list[models.Legislator or string]
@return: True if success, False otherwise
@rtype: boolean
"""
if type(legislators) is not list: legislators = [legislators]
try:
for leg in legislators:
db_first_or_create(MessageLegislator,
message_id=self.id,
legislator_id=(leg if type(leg) is str else leg.bioguide_id))
return True
except:
return False
def get_legislators(self, as_dict=False):
"""
Retrieves the legislator models to which this message is to be sent.
@param as_dict: if True, the method will return a dictionary with bioguide_ids as keys. False = list
@type as_dict: boolean
@return: legislators for this message
@rtype: list[models.Legislator] or dict[string:models.Legislator]
"""
if as_dict:
toReturn = {}
for leg in self.to_legislators:
l = Legislator.query.filter_by(bioguide_id=leg.legislator_id).first()
toReturn[l.bioguide_id] = l
return toReturn
else:
return Legislator.query.join(MessageLegislator).filter(
MessageLegislator.id.in_([ml.id for ml in self.to_legislators])).all()
def has_legislators(self):
return self.get_legislators()
def get_send_status(self):
target_count = len(self.to_legislators)
sent_count = MessageLegislator.query.join(Message).filter(Message.id == self.id,
MessageLegislator.sent.is_(True)).count()
if sent_count == 0:
return 'unsent'
elif sent_count < target_count:
return 'sundry'
else:
return 'sent'
def associate_legislators(self, force=False):
if force or not self.to_legislators:
self.set_legislators(self.user_message_info.members_of_congress)
def free_link(self):
set_attributes(self, {'status': 'free'}.iteritems(), commit=True)
def kill_link(self):
set_attributes(self, {'status': None}.iteritems(), commit=True)
def update_status(self):
self.status = self.user_message_info.user.get_rate_limit_status()
db.session.commit()
def needs_captcha_to_send(self):
return self.status in ['captcha', 'g_captcha']
def is_free_to_send(self):
return self.status == 'free'
def is_already_sent(self):
return self.status is None
def queue_to_send(self, moc=None):
from scheduler import send_to_phantom_of_the_capitol
set_attributes(self, {'status': None}.iteritems(), commit=True)
if moc is not None: self.set_legislators(moc)
send_to_phantom_of_the_capitol.delay(msg_id=self.id, force=True)
return True
def send(self, fresh=False):
"""
Attempts to send this message using phantom of the capitol.
@return: list of MessageLegislator instances that were processed
@rtype: list[models.MessageLegislator]
"""
newly_sent = []
for msg_leg in self.to_legislators:
try:
newly_sent.append(msg_leg.send())
except:
continue
return newly_sent if fresh else self.to_legislators
def map_to_contact_congress_fields(self):
"""
Converts model data into a dictionary for phantom of the capitol.
@return: dictionary of values
@rtype: dict
"""
umi = self.user_message_info
return {
'$NAME_PREFIX': umi.prefix,
'$NAME_FIRST': umi.first_name,
'$NAME_LAST': umi.last_name,
'$NAME_FULL': umi.first_name + ' ' + umi.last_name,
'$ADDRESS_STREET': umi.street_address,
'$ADDRESS_STREET_2': umi.street_address2,
'$ADDRESS_CITY': umi.city,
'$ADDRESS_ZIP5': umi.zip5,
'$ADDRESS_ZIP4': umi.zip4,
"$ADDRESS_ZIP_PLUS_4": umi.zip5 + '-' + umi.zip4,
'$EMAIL': umi.user.email,
'$SUBJECT': self.subject,
'$MESSAGE': self.msgbody,
'$ADDRESS_STATE_POSTAL_ABBREV': umi.state,
'$PHONE': umi.phone_number,
'$ADDRESS_STATE_FULL': usps.CODE_TO_STATE.get(umi.state)
}
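# Sketch of how the dictionary above is consumed (not part of the original
# model): MessageLegislator.map_to_contact_congress() wraps it as
#     {'bio_id': '<bioguide id>', 'fields': {'$NAME_FIRST': ..., '$MESSAGE': ...}}
# before handing it to phantom_on_the_capitol.fill_out_form() in send().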
class Analytics():
def __init__(self):
super(Message.Analytics, self).__init__(Message)
class MessageLegislator(MyBaseModel):
class ModelView(MyModelView):
column_searchable_list = ['send_status']
column_list = ('id', 'sent', 'send_status', 'topic', 'legislator', 'message', 'message.user_message_info')
@expose('/sent/', methods=['GET', 'POST'])
def sent_view(self):
model = self.get_one(request.args.get('id', None))
if model is None or model.sent:
return redirect(self.get_url('.index_view'))
from scheduler import send_to_phantom_of_the_capitol
send_to_phantom_of_the_capitol.delay(msgleg_id=model.id)
flash('Message %s will attempt to be resent' % str(model.id))
return redirect(self.get_url('.index_view'))
def _sent_formatter(view, context, model, name):
if not model.sent:
ctx = {'msgleg-id': str(model.id)}
get_params = {'url': view.url, 'id': ctx['msgleg-id']}
ctx['post_url'] = append_get_params(view.url + '/' + name + '/', **get_params)
return Markup(render_template_wctx('admin/resend_button.html', context=ctx))
else:
return model.sent
column_formatters = {
'sent': _sent_formatter
}
id = db.Column(db.Integer, primary_key=True)
message_id = db.Column(db.Integer, db.ForeignKey('message.id'))
legislator_id = db.Column(db.String(7), db.ForeignKey('legislator.bioguide_id'))
topic_id = db.Column(db.Integer, db.ForeignKey('topic.id'))
send_status = db.Column(db.String(4096), default='{"status": "unsent"}') # stringified JSON
sent = db.Column(db.Boolean, nullable=True, default=None)
def is_sent(self):
return self.sent
def get_send_status(self):
"""
Retrieves the current status of the message to the legislator.
@return: [Dictionary] dictionary detailing the status of the message
"""
try:
return json.loads(self.send_status)
except:
return self.send_status
def send(self):
"""
Method that actually passes information to phantom of the capitol.
@return: self
@rtype: models.MessageLegislator
"""
if self.is_sent() is not True:
for bioguide_id, ra in phantom_on_the_capitol.retrieve_form_elements([self.legislator.bioguide_id]).iteritems():
json_dict = self.map_to_contact_congress()
for step in ra['required_actions']:
field = step.get('value')
options = step.get('options_hash')
if options is not None:
# convert first to dictionary for convenience
if type(options) is not dict:
options = {k: k for k in options}
if field == '$TOPIC':
# need lower case strings for select-solver
options = {k.lower(): v for k, v in options.items()}
try: # try to determine best topic based off content of text
choice = Topic.topic_for_message(options.keys(), self.message)
json_dict['fields'][field] = choice
self.topic = Topic.query.filter_by(name=choice).first()
except: # if failed, choose a random topic
pass
if field not in json_dict['fields'] or json_dict['fields'][field] not in options.values():
json_dict['fields'][field] = random.choice(options.values())
if field not in json_dict['fields'].keys():
print 'What the heck is ' + step.get('value') + ' in ' + bioguide_id + '?'
result = phantom_on_the_capitol.fill_out_form(json_dict)
self.sent = result['status'] == 'success'
self.send_status = json.dumps(result)
db.session.commit()
return self
def map_to_contact_congress(self, campaign_tag=False):
data = {
'bio_id': self.legislator.bioguide_id,
'fields': self.message.map_to_contact_congress_fields()
}
if campaign_tag:
data['campaign_tag'] = self.message.email_uid
return data
@property
def json(self):
return to_json(self, self.__class__)
for cls in HasTokenMixin.__subclasses__():
@event.listens_for(cls, 'after_insert')
def receive_after_insert(mapper, connection, target):
db.session.add(Token(item=target))
@event.listens_for(cls, 'after_delete')
def receive_after_delete(mapper, connection, target):
db.session.delete(target.token)
|
|
#
# Widgets.py -- wrapped HTML widgets and convenience functions
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os.path
import threading
import time
from functools import reduce
from ginga.misc import Callback, Bunch
from ginga.web.pgw import PgHelp
import ginga.icons
# path to our icons
icondir = os.path.split(ginga.icons.__file__)[0]
class WidgetError(Exception):
"""For errors thrown in this module."""
pass
# widget id counter
widget_id = 0
# widget dict
widget_dict = {}
tab_idx = 0
# reference to the created application
_app = None
default_font = 'Arial 8'
# BASE
class WidgetBase(Callback.Callbacks):
def __init__(self):
global widget_id, widget_dict
super(WidgetBase, self).__init__()
self.widget = None
self.changed = False
# external data can be attached here
self.extdata = Bunch.Bunch()
# generic attributes of widgets
self.enabled = True
self.width = 400
self.height = 800
self.bgcolor = 'gray'
self.fgcolor = 'black'
self.tooltip = ''
widget_id += 1
self.id = widget_id
widget_dict[widget_id] = self
def get_url(self):
app = self.get_app()
return "%s?id=%d" % (app.base_url, self.id)
def get_app(self):
return _app
def get_widget(self):
return self.widget
def set_tooltip(self, text):
self.tooltip = text
def set_enabled(self, tf):
self.enabled = tf
app = self.get_app()
app.do_operation('disable', id=self.id, value=not tf)
def get_size(self):
return self.width, self.height
def delete(self):
# for now...
pass
def resize(self, width, height):
self.width, self.height = width, height
def focus(self):
pass
def show(self):
pass
def hide(self):
pass
def get_font(self, font_family, point_size):
font = '%s %s' % (font_family, point_size)
return font
def cfg_expand(self, horizontal=0, vertical=0):
# this is for compatibility with Qt widgets
pass
def render(self):
text = "'%s' NOT YET IMPLEMENTED" % (str(self.__class__))
d = dict(id=self.id, text=text)
return '''<span id=%(id)s>%(text)s</span>''' % d
# BASIC WIDGETS
class TextEntry(WidgetBase):
def __init__(self, text='', editable=True):
super(TextEntry, self).__init__()
self.widget = None
self.text = text
self.editable = editable
self.font = default_font
self.length = 20 # seems to be default HTML5 size
self.enable_callback('activated')
def _cb_redirect(self, event):
self.text = event.value
self.make_callback('activated')
def get_text(self):
return self.text
def set_text(self, text):
self.text = text
def set_editable(self, tf):
self.editable = tf
def set_font(self, font):
self.font = font
def set_length(self, numchars):
# this is only supposed to set the visible length
self.length = numchars
def render(self):
# TODO: render font
d = dict(id=self.id, text=self.text, disabled='', size=self.length)
if not self.enabled:
d['disabled'] = 'disabled'
return '''<input id=%(id)s type="text" size=%(size)d name="%(id)s" %(disabled)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(text)s">''' % d
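# Illustrative sketch (not part of the original module): building a TextEntry,
# registering its 'activated' callback and emitting its HTML.  Callbacks are
# invoked with the widget as the first argument; no running Application is
# required for the calls used here.
def _example_text_entry():
    results = []
    entry = TextEntry(text='initial value')
    entry.set_length(32)
    # 'activated' fires when the browser posts a new value back to the server
    entry.add_callback('activated', lambda w: results.append(w.get_text()))
    return entry.render()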
class TextEntrySet(WidgetBase):
def __init__(self, text='', editable=True):
super(TextEntrySet, self).__init__()
self.widget = None
self.text = text
self.font = default_font
self.editable = editable
## self.entry = None
## self.btn = None
self.length = 20 # seems to be default HTML5 size
self.enable_callback('activated')
def _cb_redirect(self, event):
self.text = event.value
self.make_callback('activated')
def get_text(self):
return self.text
def set_text(self, text):
self.text = text
def set_font(self, font):
self.font = font
def set_editable(self, tf):
self.editable = tf
def set_length(self, numchars):
# this is only supposed to set the visible length
self.length = numchars
def render(self):
# TODO: render font, editable
d = dict(id=self.id, text=self.text, disabled='', size=self.length)
if not self.enabled:
    d['disabled'] = 'disabled'
return '''<span> <input id=%(id)s type="text" size=%(size)d name="%(id)s" %(disabled)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(text)s"/>
<input type="button" %(disabled)s onclick="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="Set"/> </span>''' % d
class TextArea(WidgetBase):
def __init__(self, wrap=False, editable=False):
super(TextArea, self).__init__()
self.widget = None
self.editable = editable
self.wrap = wrap
self.text = ''
self.font = default_font
def _cb_redirect(self, event):
self.text = event.value
#self.make_callback('activated')
def append_text(self, text, autoscroll=True):
if text.endswith('\n'):
text = text[:-1]
self.text = self.text + text
if not autoscroll:
return
def get_text(self):
return self.text
def clear(self):
self.text = ""
def set_text(self, text):
self.text = text
def set_limit(self, numlines):
# for compatibility with the other supported widget sets
pass
def set_editable(self, tf):
self.editable = tf
def set_font(self, font):
self.font = font
def set_wrap(self, tf):
self.wrap = tf
def render(self):
# TODO: handle wrapping, render font
d = dict(id=self.id, text=self.text, disabled='', editable='')
if not self.enabled:
d['disabled'] = 'disabled'
if not self.editable:
d['editable'] = 'readOnly'
return '''<textarea id=%(id)s name="%(id)s" style="width: 100%%;" %(disabled)s %(editable)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)">%(text)s</textarea>''' % d
class Label(WidgetBase):
def __init__(self, text='', halign='left', style='normal', menu=None):
super(Label, self).__init__()
self.text = text
self.font = default_font
self.halign = halign
self.style = style
self.fgcolor = None
self.bgcolor = None
self.menu = menu
self.widget = None
self.enable_callback('activated')
def get_text(self):
return self.text
def set_text(self, text):
self.text = text
app = self.get_app()
app.do_operation('update_label', id=self.id, value=text)
def set_font(self, font):
self.font = font
def set_color(self, fg=None, bg=None):
if fg is not None:
self.fgcolor = fg
if bg is not None:
self.bgcolor = bg
def render(self):
# TODO: render font, alignment, style, menu, clickable
style = ""
#style += ("text-align: %s; " % self.halign)
if self.fgcolor is not None:
style += ("color: %s; " % self.fgcolor)
if self.bgcolor is not None:
style += ("background-color: %s; " % self.bgcolor)
d = dict(id=self.id, text=self.text, style=style)
return '''<span id=%(id)s style="%(style)s">%(text)s</span>''' % d
class Button(WidgetBase):
def __init__(self, text=''):
super(Button, self).__init__()
self.text = text
self.widget = None
#self.widget.clicked.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, event):
self.make_callback('activated')
def render(self):
d = dict(id=self.id, text=self.text, disabled='')
if not self.enabled:
d['disabled'] = 'disabled'
return '''<input id=%(id)s type="button" %(disabled)s onclick="ginga_app.widget_handler('%(id)s', 0)" value="%(text)s">''' % d
class ComboBox(WidgetBase):
def __init__(self, editable=False, multi_choice=False):
super(ComboBox, self).__init__()
self.widget = None
self.index = 0
self.multi_choice = multi_choice
self.choices = []
self.enable_callback('activated')
def _cb_redirect(self, event):
self.index = int(event.value)
self.make_callback('activated', self.index)
def insert_alpha(self, text):
index = 0
num_choices = len(self.choices)
while True:
if index >= num_choices:
self.choices.append(text)
return
item_text = self.choices[index]
if item_text > text:
self.choices.insert(index, text)
return
index += 1
def delete_alpha(self, text):
self.choices.remove(text)
def get_alpha(self, idx):
return self.choices[idx]
def clear(self):
self.choices = []
def show_text(self, text):
index = self.choices.index(text)
self.set_index(index)
def append_text(self, text):
self.choices.append(text)
def set_index(self, index):
self.index = index
app = self.get_app()
app.do_operation('update_index', id=self.id, value=self.index)
def get_index(self):
return self.index
def render(self):
d = dict(id=self.id, disabled='')
if self.multi_choice:
d['multiple'] = 'multiple'
else:
d['multiple'] = ''
if not self.enabled:
d['disabled'] = 'disabled'
res = ['''<select id=%(id)s %(disabled)s name="%(id)s" %(multiple)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)">''' % d]
for idx, choice in enumerate(self.choices):
if idx == self.index:
selected = 'selected'
else:
selected = ''
res.append(''' <option value="%d" %s>%s</option>''' % (
idx, selected, choice))
res.append('''</select>''')
return '\n'.join(res)
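# Illustrative sketch (not part of the original module): populating a ComboBox
# and rendering it.  set_index() is avoided because it notifies the browser
# through the running Application; assigning .index directly is enough for a
# static render.
def _example_combobox():
    cbox = ComboBox()
    for name in ('red', 'green', 'blue'):
        cbox.append_text(name)
    cbox.index = 1   # pre-select 'green' without going through the Application
    cbox.add_callback('activated', lambda w, idx: None)  # idx is the new index
    return cbox.render()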
class SpinBox(WidgetBase):
def __init__(self, dtype=int):
super(SpinBox, self).__init__()
self.dtype = dtype
self.widget = None
self.value = dtype(0)
self.decimals = 0
self.minval = dtype(0)
self.maxval = dtype(0)
self.incr = dtype(0)
self.enable_callback('value-changed')
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('value-changed', self.value)
def get_value(self):
return self.dtype(self.value)
def set_value(self, val):
self.changed = True
self.value = val
def set_decimals(self, num):
self.decimals = num
def set_limits(self, minval, maxval, incr_value=1):
self.minval = minval
self.maxval = maxval
self.incr = incr_value
def render(self):
d = dict(id=self.id, value=self.value, step=self.incr,
max=self.maxval, min=self.minval, disabled='')
if not self.enabled:
d['disabled'] = 'disabled'
if self.dtype == float:
return '''<input id=%(id)s %(disabled)s type="number" onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(value)f" step="%(step)f" max="%(max)f" min="%(min)f">''' % d
else:
return '''<input id=%(id)s %(disabled)s type="number" onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(value)d" step="%(step)d" max="%(max)d" min="%(min)d">''' % d
class Slider(WidgetBase):
def __init__(self, orientation='horizontal', track=False, dtype=int):
super(Slider, self).__init__()
self.orientation = orientation
self.track = track
self.widget = None
self.dtype = dtype
self.value = dtype(0)
self.minval = dtype(0)
self.maxval = dtype(0)
self.incr = dtype(0)
self.enable_callback('value-changed')
def _cb_redirect(self, event):
self.value = self.dtype(event.value)
self.make_callback('value-changed', self.value)
def get_value(self):
return self.value
def set_value(self, val):
self.changed = True
self.value = val
def set_tracking(self, tf):
pass
def set_limits(self, minval, maxval, incr_value=1):
self.minval = minval
self.maxval = maxval
self.incr = incr_value
def render(self):
d = dict(id=self.id, value=self.value, incr=self.incr,
max=self.maxval, min=self.minval, disabled='',
orient='', style='')
if self.orientation == 'vertical':
# firefox
d['orient'] = 'orient=vertical'
d['style'] = "-webkit-appearance: slider-vertical;"
if not self.enabled:
d['disabled'] = 'disabled'
if self.dtype == float:
return '''<input id=%(id)s type="range" %(disabled)s style="%(style)s" onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(value)f" step="%(incr)f" max="%(max)f" min="%(min)f" %(orient)s>''' % d
else:
return '''<input id=%(id)s type="range" %(disabled)s style="%(style)s" onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" value="%(value)d" step="%(incr)d" max="%(max)d" min="%(min)d" %(orient)s>''' % d
class ScrollBar(WidgetBase):
def __init__(self, orientation='horizontal'):
super(ScrollBar, self).__init__()
# if orientation == 'horizontal':
# self.widget = QtGui.QScrollBar(QtCore.Qt.Horizontal)
# else:
# self.widget = QtGui.QScrollBar(QtCore.Qt.Vertical)
# self.widget.valueChanged.connect(self._cb_redirect)
self.widget = None
self.value = 0.0
self.enable_callback('activated')
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', self.value)
class CheckBox(WidgetBase):
def __init__(self, text=''):
super(CheckBox, self).__init__()
self.widget = None
self.value = False
self.text = text
self.enable_callback('activated')
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', self.value)
def set_state(self, tf):
self.value = tf
def get_state(self):
val = self.value
return val
def render(self):
d = dict(id=self.id, text=self.text, disabled='')
if not self.enabled:
d['disabled'] = 'disabled'
return '''<input id=%(id)s type="checkbox" %(disabled)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').checked)" value="%(text)s"><label for="%(id)s">%(text)s</label>''' % d
class ToggleButton(WidgetBase):
def __init__(self, text=''):
super(ToggleButton, self).__init__()
## self.widget = QtGui.QPushButton(text)
## self.widget.setCheckable(True)
## self.widget.clicked.connect(self._cb_redirect)
self.widget = None
self.value = False
self.text = text
self.enable_callback('activated')
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', self.value)
def set_state(self, tf):
self.value = tf
def get_state(self):
return self.value
def render(self):
d = dict(id=self.id, text=self.text, disabled='')
if not self.enabled:
d['disabled'] = 'disabled'
return '''<input id=%(id)s type="checkbox" %(disabled)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').checked)" value="%(text)s"><label for="%(id)s">%(text)s</label>''' % d
class RadioButton(WidgetBase):
group_cnt = 0
def __init__(self, text='', group=None):
super(RadioButton, self).__init__()
## self.widget = QtGui.QRadioButton(text)
## self.widget.toggled.connect(self._cb_redirect)
self.widget = None
self.text = text
self.value = False
self.group_name = None
if group is None:
self.group_name = "radio%d" % (RadioButton.group_cnt)
RadioButton.group_cnt += 1
self.group = [self]
else:
self.group = group.group
self.group_name = group.group_name
self.enable_callback('activated')
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', event.value)
def set_state(self, tf):
if self.value != tf:
# toggled only fires when the value is toggled
self.changed = True
self.value = tf
def get_state(self):
return self.value
def render(self):
d = dict(id=self.id, disabled='', checked='',
group=self.group_name, text=self.text)
if not self.enabled:
d['disabled'] = 'disabled'
if self.value:
d['checked'] = 'checked'
return '''<input id=%(id)s name="%(group)s" type="radio" %(disabled)s onchange="ginga_app.widget_handler('%(id)s', document.getElementById('%(id)s').value)" %(checked)s value="true">%(text)s''' % d
class Image(WidgetBase):
def __init__(self, native_image=None, style='normal', menu=None):
super(Image, self).__init__()
self.image = None
self.img_src = ''
self.menu = menu
self.widget = None
self.enable_callback('activated')
if native_image is not None:
self._set_image(native_image)
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', event.value)
def _set_image(self, native_image):
self.image = native_image
self.img_src = PgHelp.get_image_src_from_buffer(self.image)
app = self.get_app()
app.do_operation('update_imgsrc', id=self.id, value=self.img_src)
def render(self):
# TODO: callback for click
d = dict(id=self.id, src=self.img_src, tooltip=self.tooltip,
height=self.height, width=self.width)
return '''<img id=%(id)s src="%(src)s" alt="%(tooltip)s"
width="%(width)d" height="%(height)d">''' % d
class ProgressBar(Label):
def __init__(self):
self.value = 0.0
self.start_time = time.time()
super(ProgressBar, self).__init__(self._format())
def _format(self):
pct = self.value * 100.0
elapsed = time.time() - self.start_time
text = "%.2f %% %.2f sec" % (pct, elapsed)
return text
def set_value(self, pct):
self.value = pct
if pct == 0.0:
# reset start time
self.start_time = time.time()
self.set_text(self._format())
class StatusBar(Label):
def __init__(self):
super(StatusBar, self).__init__()
def set_message(self, msg_str):
# TODO: remove message in about 10 seconds
self.set_text(msg_str)
class TreeView(WidgetBase):
def __init__(self, auto_expand=False, sortable=False, selection='single',
use_alt_row_color=False, dragable=False):
super(TreeView, self).__init__()
self.auto_expand = auto_expand
self.sortable = sortable
self.selection = selection
self.dragable = dragable
self.levels = 1
self.leaf_key = None
self.leaf_idx = 0
self.columns = []
self.datakeys = []
# shadow index
self.shadow = {}
self.widget = None
for cbname in ('selected', 'activated', 'drag-start'):
self.enable_callback(cbname)
def setup_table(self, columns, levels, leaf_key):
self.clear()
# TODO
def set_tree(self, tree_dict):
self.clear()
self.add_tree(tree_dict)
def add_tree(self, tree_dict):
# TODO
pass
def _selection_cb(self):
res_dict = self.get_selected()
self.make_callback('selected', res_dict)
def _cb_redirect(self, item):
res_dict = {}
self._get_item(res_dict, item)
self.make_callback('activated', res_dict)
def get_selected(self):
res_dict = {}
return res_dict
def clear(self):
self.shadow = {}
def clear_selection(self):
pass
def _path_to_item(self, path):
s = self.shadow
for name in path[:-1]:
s = s[name].node
item = s[path[-1]].item
return item
def select_path(self, path):
item = self._path_to_item(path)
# TODO
def highlight_path(self, path, onoff, font_color='green'):
item = self._path_to_item(path)
# TODO
def scroll_to_path(self, path):
item = self._path_to_item(path)
# TODO
def sort_on_column(self, i):
pass
def set_column_width(self, i, width):
pass
def set_column_widths(self, lwidths):
for i, width in enumerate(lwidths):
if width is not None:
self.set_column_width(i, width)
def set_optimal_column_widths(self):
for i in range(len(self.columns)):
pass
class Canvas(WidgetBase):
canvas_template = '''
<canvas id="%(id)s" tabindex="%(tab_idx)d"
width="%(width)s" height="%(height)s">Your browser does not appear to
support HTML5 canvas.</canvas>
<script type="text/javascript">
ginga_initialize_canvas(document.getElementById("%(id)s"), "%(id)s",
ginga_app);
</script>
'''
def __init__(self, width=600, height=600):
super(Canvas, self).__init__()
self.widget = None
self.width = width
self.height = height
self.name = ''
self.timers = {}
def _cb_redirect(self, event):
pass
def _draw(self, shape_type, **kwargs):
shape = dict(kwargs, type=shape_type)
app = self.get_app()
app.do_operation("draw_canvas", id=self.id, shape=shape)
def clear_rect(self, x, y, width, height):
self._draw("clear", x=x, y=y, width=width, height=height)
def draw_image(self, img_buf, x, y, width=None, height=None):
img_src = PgHelp.get_image_src_from_buffer(img_buf)
self._draw("image", x=x, y=y, src=img_src, width=width, height=height)
def add_timer(self, name, cb_fn):
app = self.get_app()
timer = app.add_timer(cb_fn)
self.timers[name] = timer
def reset_timer(self, name, time_sec):
app = self.get_app()
app.reset_timer(self.timers[name], time_sec)
def render(self):
global tab_idx
# canvas needs a tabindex to be able to focus it and register
# for keyboard events
tab_idx += 1
d = dict(id=self.id, width=self.width, height=self.height,
tab_idx=tab_idx)
return Canvas.canvas_template % d
# CONTAINERS
class ContainerBase(WidgetBase):
def __init__(self):
super(ContainerBase, self).__init__()
# TODO: probably need to maintain children as list of widget ids
self.children = []
self.margins = (0, 0, 0, 0) # L, R, T, B
def add_ref(self, ref):
# TODO: should this be a weakref?
self.children.append(ref)
def remove(self, w, delete=False):
if not w in self.children:
raise KeyError("Widget is not a child of this container")
self.children.remove(w)
def remove_all(self):
for w in list(self.children):
self.remove(w)
def get_children(self):
return self.children
def render(self):
return self.render_children()
def set_margins(self, left, right, top, bottom):
self.margins = (left, right, top, bottom)
def set_border_width(self, pix):
self.margins = (pix, pix, pix, pix)
def render_children(self, ifx=' ', spacing=0, spacing_side='right'):
def _render_child(child):
return '''<span style="margin-%s: %dpx;">%s</span>''' % (
spacing_side, spacing, child.render())
return ifx.join(map(_render_child, self.children))
class Box(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Box, self).__init__()
self.orientation = orientation
self.widget = None
self.spacing = 0
def add_widget(self, child, stretch=0.0):
self.add_ref(child)
def set_spacing(self, val):
self.spacing = val
def render(self):
# TODO: handle spacing attribute
d = dict(id=self.id)
style_d = dict(left=self.margins[0], right=self.margins[1],
top=self.margins[2], bottom=self.margins[3])
if self.orientation == 'horizontal':
d['style'] = "display: flex; flex-direction: row; flex-wrap: nowrap; justify-content: flex-start; margin: %(left)dpx %(right)dpx %(top)dpx %(bottom)dpx;" % style_d
d['content'] = self.render_children(spacing=self.spacing,
spacing_side='right')
else:
d['style'] = "display: flex; flex-direction: column; flex-wrap: nowrap; justify-content: flex-start; margin: %(left)dpx %(right)dpx %(top)dpx %(bottom)dpx;" % style_d
d['content'] = self.render_children(spacing=self.spacing,
spacing_side='bottom')
return '''<div id=%(id)s style="%(style)s">%(content)s</div>''' % d
class HBox(Box):
def __init__(self):
super(HBox, self).__init__(orientation='horizontal')
class VBox(Box):
def __init__(self):
super(VBox, self).__init__(orientation='vertical')
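# Illustrative sketch (not part of the original module): composing a small form
# row from the widgets above and rendering it to HTML.
def _example_layout():
    hbox = HBox()
    hbox.set_spacing(4)
    hbox.add_widget(Label('Name:'))
    hbox.add_widget(TextEntry(text=''), stretch=1)
    hbox.add_widget(Button('OK'))
    return hbox.render()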
class Frame(ContainerBase):
def __init__(self, title=None):
super(Frame, self).__init__()
self.widget = None
self.label = title
def set_widget(self, child, stretch=1):
self.remove_all()
self.add_ref(child)
def render(self):
d = dict(id=self.id, content=self.render_children(), legend=self.label)
res = '''<fieldset id=%(id)s>''' % d
if self.label is not None:
res += '''<legend>%(legend)s</legend>''' % d
res += "%(content)s" % d
res += '''</fieldset>'''
return res
class Expander(Frame):
pass
class TabWidget(ContainerBase):
tab_script_template = '''
<script>
ginga_initialize_tab_widget(document.getElementById("%(id)s"), "%(id)s", ginga_app)
</script>
'''
def __init__(self, tabpos='top', reorderable=False, detachable=True,
group=0):
super(TabWidget, self).__init__()
self.reorderable = reorderable
self.detachable = detachable
self.group = group
self.widget = None
self.index = 0
self.set_tab_position(tabpos)
self.titles = []
self._tabs_visible = True
for name in ('page-switch', 'page-close', 'page-move', 'page-detach'):
self.enable_callback(name)
def set_tab_position(self, tabpos):
self.tabpos = tabpos
# TODO: set tab position
nb = self.widget
if tabpos == 'top':
pass
elif tabpos == 'bottom':
pass
elif tabpos == 'left':
pass
elif tabpos == 'right':
pass
def _cb_redirect(self, event):
self.index = event.value
self.make_callback('page-switch', self.index)
def add_widget(self, child, title=''):
self.add_ref(child)
self.titles.append(title)
# attach title to child
child.extdata.tab_title = title
def get_index(self):
return self.index
def set_index(self, idx):
self.index = idx
app = self.get_app()
app.do_operation('set_tab', id=self.id, value=self.index)
def index_of(self, child):
try:
return self.children.index(child)
except ValueError:
return -1
def index_to_widget(self, idx):
"""Returns child corresponding to `idx`"""
return self.children[idx]
def render(self):
d = dict(id=self.id)
style_d = dict(left=self.margins[0], right=self.margins[1],
top=self.margins[2], bottom=self.margins[3])
d['style'] = "padding: 0; margin: %(left)dpx %(right)dpx %(top)dpx %(bottom)dpx;" % style_d
res = ['''\n<div id="%(id)s" style="%(style)s">\n''' % d]
if self._tabs_visible:
# draw tabs
res.append(''' <ul>\n''')
d['cnt'] = 1
for child in self.get_children():
d['title'] = self.titles[d['cnt']-1]
res.append('''<li><a href="#%(id)s-%(cnt)d">%(title)s</a></li>\n''' % d)
d['cnt'] += 1
res.append(''' </ul>\n''')
d['cnt'] = 1
for child in self.get_children():
d['content'] = child.render()
res.append('''<div id="%(id)s-%(cnt)d" style="%(style)s"> %(content)s </div>\n''' % d)
d['cnt'] += 1
res.append('''</div>\n''')
res.append(TabWidget.tab_script_template % d)
return ''.join(res)
class StackWidget(TabWidget):
def __init__(self):
super(StackWidget, self).__init__(tabpos='top', reorderable=False,
detachable=False, group=-1)
self._tabs_visible = False
class MDIWidget(TabWidget):
def __init__(self, tabpos='top', mode='tabs'):
super(MDIWidget, self).__init__(tabpos=tabpos)
self.mode = 'tabs'
self.true_mdi = False
def get_mode(self):
return self.mode
def set_mode(self, mode):
pass
def tile_panes(self):
pass
def cascade_panes(self):
pass
def use_tabs(self, tf):
pass
class ScrollArea(ContainerBase):
def __init__(self):
super(ScrollArea, self).__init__()
self.widget = None
self.enable_callback('configure')
def set_widget(self, child):
self.add_ref(child)
def scroll_to_end(self, vertical=True, horizontal=False):
pass
def render(self):
# TODO: handle spacing attribute
d = dict(id=self.id)
child = self.get_children()[0]
d['content'] = child.render()
return '''<div id=%(id)s>%(content)s</div>''' % d
class Splitter(Box):
def get_sizes(self):
wd, ht = self.get_size()
if self.orientation == 'horizontal':
length = wd
else:
length = ht
return length // len(self.get_children())
def set_sizes(self, sizes):
pass
class GridBox(ContainerBase):
def __init__(self, rows=1, columns=1):
super(GridBox, self).__init__()
self.widget = None
self.num_rows = rows
self.num_cols = columns
self.row_spacing = 0
self.col_spacing = 0
self.tbl = {}
def resize_grid(self, rows, columns):
self.num_rows = rows
self.num_cols = columns
def set_row_spacing(self, val):
self.row_spacing = val
def set_spacing(self, val):
self.set_row_spacing(val)
self.set_column_spacing(val)
def set_column_spacing(self, val):
self.col_spacing = val
def add_widget(self, child, row, col, stretch=0):
self.add_ref(child)
self.num_rows = max(self.num_rows, row+1)
self.num_cols = max(self.num_cols, col+1)
self.tbl[(row, col)] = child
app = self.get_app()
app.do_operation('update_html', id=self.id,
value=self.render_body())
def render_body(self):
res = []
for i in range(self.num_rows):
res.append(" <tr>")
for j in range(self.num_cols):
res.append(" <td>")
key = (i, j)
if key in self.tbl:
res.append(self.tbl[key].render())
else:
res.append("")
res.append(" </td>")
res.append(" </tr>")
return '\n'.join(res)
def render(self):
d = dict(id=self.id)
res = ['''<table id=%(id)s>''' % d]
res.append(self.render_body())
res.append("</table>")
return '\n'.join(res)
class ToolbarAction(WidgetBase):
def __init__(self):
super(ToolbarAction, self).__init__()
self.widget = None
self.value = False
self.checkable = False
self.enable_callback('activated')
def _cb_redirect(self, *args):
if self.checkable:
tf = self.get_state()
self.make_callback('activated', tf)
else:
self.make_callback('activated')
def set_state(self, tf):
self.value = tf
def get_state(self):
return self.value
def render(self):
return self.widget.render()
class Toolbar(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Toolbar, self).__init__()
self.orientation = orientation
self.widget = Box(orientation=orientation)
def add_action(self, text, toggle=False, iconpath=None):
child = ToolbarAction()
child.text = text
if iconpath:
native_image = PgHelp.get_icon(iconpath, size=(24, 24),
format='png')
widget = Image(native_image=native_image)
widget.resize(24, 24)
else:
widget = Button(text)
child.checkable = toggle
child.widget = widget
self.widget.add_widget(child, stretch=0)
return child
def add_widget(self, child):
self.add_ref(child)
def add_menu(self, text, menu=None):
if menu is None:
menu = Menu()
child = self.add_action(text)
child.add_callback('activated', lambda w: menu.popup())
return menu
def add_separator(self):
#self.widget.addSeparator()
pass
def render(self):
return self.widget.render()
class MenuAction(WidgetBase):
def __init__(self, text=None):
super(MenuAction, self).__init__()
self.widget = None
self.text = text
self.is_checkable = False
self.value = False
self.enable_callback('activated')
def _cb_redirect(self, *args):
if self.is_checkable:
self.make_callback('activated', self.value)
else:
self.make_callback('activated')
class Menu(ContainerBase):
def __init__(self):
super(Menu, self).__init__()
# this ends up being a reference to the Qt menubar or toolbar
self.widget = None
def add_widget(self, child):
self.add_ref(child)
def add_name(self, name):
child = MenuAction(text=name)
self.add_widget(child)
return child
def add_separator(self):
#self.widget.addSeparator()
pass
def popup(self, widget=None):
# TODO
if widget is not None:
w = widget.get_widget()
class Menubar(HBox):
def __init__(self):
super(Menubar, self).__init__()
self.set_border_width(2)
self.set_spacing(8)
def add_name(self, name):
child = Menu()
menu_w = Label(text=name, halign='left', style='clickable',
menu=child)
self.add_widget(menu_w)
return child
class TopLevel(ContainerBase):
def __init__(self, title=""):
super(TopLevel, self).__init__()
self.title = title
self.widget = None
# these are assigned by the Application()
self.wid = None
self.url = None
self.app = None
#widget.closeEvent = lambda event: self._quit(event)
self.enable_callback('close')
def set_widget(self, child):
self.add_ref(child)
def show(self):
pass
def hide(self):
pass
def close(self):
self.make_callback('close')
def raise_(self):
pass
def lower(self):
pass
def resize(self, width, height):
#self.widget.resize(width, height)
pass
def focus(self):
pass
def move(self, x, y):
pass
def maximize(self):
pass
def unmaximize(self):
pass
def fullscreen(self):
pass
def unfullscreen(self):
pass
def iconify(self):
pass
def uniconify(self):
pass
def set_title(self, title):
self.title = title
def _cb_redirect(self, event):
pass
def render(self):
base_url = self.app.base_url
url = base_url + "?wid=%s" % (self.wid)
ws_url = base_url + "/socket?wid=%s" % (self.wid)
d = dict(title=self.title, content=self.render_children(),
wid=self.wid, url=url, ws_url=ws_url)
return '''
<!doctype html>
<html>
<head>
<title>%(title)s</title>
<style>
body {
width: 100%%;
height: 100%%;
padding: 0px;
margin: 0px;
border: 0;
/* overflow: hidden; disable scrollbars */
display: block; /* no floating content on sides */
}
</style>
<meta name="viewport"
content="width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no, target-densitydpi=device-dpi" />
</head>
<body>
<script type="text/javascript" src="/js/hammer.js"></script>
<link rel="stylesheet" href="//code.jquery.com/ui/1.11.4/themes/smoothness/jquery-ui.css">
<script src="//code.jquery.com/jquery-1.10.2.js"></script>
<script src="//code.jquery.com/ui/1.11.4/jquery-ui.js"></script>
<script type="text/javascript" src="/js/application.js"></script>
<script type="text/javascript">
var wid = "%(wid)s";
var url = "%(url)s";
var ws_url = "ws://" + window.location.host + "/app/socket?wid=%(wid)s";
var ginga_app = ginga_make_application(ws_url);
</script>
%(content)s
</body>
</html>''' % d
class Application(Callback.Callbacks):
def __init__(self, logger=None, base_url=None,
host='localhost', port=9909):
global _app, widget_dict
super(Application, self).__init__()
self.logger = logger
self.base_url = base_url
self.window_dict = {}
self.wincnt = 0
# list of web socket handlers connected to this application
self.ws_handlers = []
_app = self
widget_dict[0] = self
self._timer_lock = threading.RLock()
self._timer_cnt = 0
self._timer = {}
self.host = host
self.port = port
self.base_url = "http://%s:%d/app" % (self.host, self.port)
# Get screen size
# TODO: need to pass this from Web browser
self.screen_wd = 1600
self.screen_ht = 1200
for name in ('shutdown', ):
self.enable_callback(name)
def get_screen_size(self):
return (self.screen_wd, self.screen_ht)
def process_events(self):
pass
def process_end(self):
pass
def add_window(self, window, wid=None):
if wid is None:
wid = 'win%d' % (self.wincnt)
self.wincnt += 1
window.wid = wid
window.url = self.base_url + '?id=%s' % (wid)
window.app = self
self.window_dict[wid] = window
def get_window(self, wid):
return self.window_dict[wid]
def has_window(self, wid):
return wid in self.window_dict
def get_wids(self):
return list(self.window_dict.keys())
def make_window(self, title=None):
w = TopLevel(title=title)
self.add_window(w)
return w
def _cb_redirect(self, event):
#print("application got an event (%s)" % (str(event)))
pass
def add_ws_handler(self, handler):
with self._timer_lock:
self.ws_handlers.append(handler)
def do_operation(self, operation, **kwdargs):
with self._timer_lock:
handlers = list(self.ws_handlers)
bad_handlers = []
for handler in handlers:
try:
handler.do_operation(operation, **kwdargs)
except Exception as e:
self.logger.error("Error doing operation '%s': %s" % (
operation, str(e)))
bad_handlers.append(handler)
# remove problematic clients
if len(bad_handlers) > 0:
with self._timer_lock:
for handler in bad_handlers:
self.ws_handlers.remove(handler)
def on_timer_event(self, event):
#self.logger.debug("timer update")
funcs = []
with self._timer_lock:
for key, bnch in self._timer.items():
if (bnch.timer is not None) and \
(time.time() > bnch.timer):
bnch.timer = None
funcs.append(bnch.func)
for func in funcs:
try:
func()
except Exception as e:
pass
#self.logger.debug("update should have been called.")
def add_timer(self, func):
with self._timer_lock:
name = self._timer_cnt
self._timer_cnt += 1
timer = Bunch.Bunch(timer=None, func=func, name=name)
self._timer[name] = timer
return timer
def remove_timer(self, timer):
with self._timer_lock:
name = timer.name
del self._timer[name]
def reset_timer(self, timer, time_sec):
with self._timer_lock:
#self.logger.debug("setting timer...")
timer.timer = time.time() + time_sec
def widget_event(self, event):
if event.type == "timer":
self.on_timer_event(event)
return
# get the widget associated with this id
w_id = int(event.id)
try:
widget = widget_dict[w_id]
# make the callback for this widget (activation or value-changed)
widget._cb_redirect(event)
except KeyError:
self.logger.error("Event '%s' from unknown widget (id=%d)" % (
str(event), w_id))
def start(self, no_ioloop=False):
import tornado.web
import tornado.ioloop
from ginga.web.pgw import PgHelp, js
js_path = os.path.dirname(js.__file__)
# create and run the app
self.server = tornado.web.Application([
#(r"/js/(.*\.js)", tornado.web.StaticFileHandler,
(r"/js/(.*)", tornado.web.StaticFileHandler,
{"path": js_path}),
(r"/js/jquery/(.*)", tornado.web.StaticFileHandler,
{"path": os.path.join(js_path, 'jquery')}),
(r"/app", PgHelp.WindowHandler,
dict(name='Application', url='/app', app=self)),
(r"/app/socket", PgHelp.ApplicationHandler,
dict(name='ApplicationSocketInterface', app=self)),
],
app=self, logger=self.logger)
self.server.listen(self.port, self.host)
self.logger.info("ginga web now running at " + self.base_url)
if no_ioloop:
self.t_ioloop = None
else:
self.t_ioloop = tornado.ioloop.IOLoop.instance()
self.t_ioloop.start()
def stop(self):
# how to stop tornado server?
if not self.t_ioloop is None:
self.t_ioloop.stop()
self.ev_quit.set()
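# Illustrative sketch (not part of the original module): minimal wiring of the
# Application and TopLevel classes above.  Host/port values are arbitrary;
# start() enters the tornado IO loop unless no_ioloop=True is passed.
def _example_application(logger):
    app = Application(logger=logger, host='localhost', port=9909)
    top = app.make_window(title='Demo')
    vbox = VBox()
    vbox.add_widget(Label('Hello from ginga.web.pgw'))
    top.set_widget(vbox)
    # app.start(no_ioloop=True)   # caller drives the IO loop itself
    return app, top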
class Dialog(WidgetBase):
dialog_template = '''
<div id="%(id)s">
<script>
ginga_initialize_dialog(document.getElementById("%(id)s"), "%(id)s", "%(title)s", %(buttons)s, ginga_app)
</script>
</div>
'''
def __init__(self, title=None, flags=None, buttons=None,
callback=None):
super(Dialog, self).__init__()
self.title = title
self.buttons = buttons
self.value = None
if callback:
self.enable_callback('activated')
self.add_callback('activated', callback)
def buttons_to_js_obj(self):
d = dict(id=self.id)
s = '{'
for item in self.buttons:
d['label'], d['val'] = item
s += '''
"%(label)s": function() {
ginga_app.widget_handler("%(id)s", "%(val)s");
},
''' % d
s += '}'
return s
def _cb_redirect(self, event):
self.value = event.value
self.make_callback('activated', self.value)
def show(self):
app = self.get_app()
app.do_operation('dialog_action', id=self.id, action="open")
def close(self):
app = self.get_app()
app.do_operation('dialog_action', id=self.id, action="close")
def render(self):
d = dict(id=self.id, title=self.title, buttons=self.buttons_to_js_obj())
return self.dialog_template % d
## class SaveDialog(QtGui.QFileDialog):
## def __init__(self, title=None, selectedfilter=None):
## super(SaveDialog, self).__init__()
## self.selectedfilter = selectedfilter
## self.widget = self.getSaveFileName(self, title, '', selectedfilter)
## def get_path(self):
## if self.widget and not self.widget.endswith(self.selectedfilter[1:]):
## self.widget += self.selectedfilter[1:]
## return self.widget
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
def make_widget(title, wtype):
if wtype == 'label':
w = Label(title)
#w.widget.setAlignment(QtCore.Qt.AlignRight)
elif wtype == 'llabel':
w = Label(title)
#w.widget.setAlignment(QtCore.Qt.AlignLeft)
elif wtype == 'entry':
w = TextEntry()
#w.widget.setMaxLength(12)
elif wtype == 'entryset':
w = TextEntrySet()
#w.widget.setMaxLength(12)
elif wtype == 'combobox':
w = ComboBox()
elif wtype == 'spinbutton':
w = SpinBox(dtype=int)
elif wtype == 'spinfloat':
w = SpinBox(dtype=float)
elif wtype == 'vbox':
w = VBox()
elif wtype == 'hbox':
w = HBox()
elif wtype == 'hscale':
w = Slider(orientation='horizontal')
elif wtype == 'vscale':
w = Slider(orientation='vertical')
elif wtype == 'checkbutton':
w = CheckBox(title)
elif wtype == 'radiobutton':
w = RadioButton(title)
elif wtype == 'togglebutton':
w = ToggleButton(title)
elif wtype == 'button':
w = Button(title)
elif wtype == 'spacer':
w = Label('')
elif wtype == 'textarea':
w = TextArea(editable=True)
elif wtype == 'toolbar':
w = Toolbar()
elif wtype == 'progress':
w = ProgressBar()
elif wtype == 'menubar':
w = Menubar()
else:
raise ValueError("Bad wtype=%s" % wtype)
return w
def hadjust(w, orientation):
if orientation != 'horizontal':
return w
vbox = VBox()
vbox.add_widget(w)
vbox.add_widget(Label(''), stretch=1)
return vbox
def build_info(captions, orientation='vertical'):
numrows = len(captions)
numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
if (numcols % 2) != 0:
raise ValueError("Column spec is not an even number")
numcols = int(numcols // 2)
table = GridBox(rows=numrows, columns=numcols)
wb = Bunch.Bunch()
row = 0
for tup in captions:
col = 0
while col < numcols:
idx = col * 2
if idx < len(tup):
title, wtype = tup[idx:idx+2]
if not title.endswith(':'):
name = name_mangle(title)
else:
name = name_mangle('lbl_'+title[:-1])
w = make_widget(title, wtype)
table.add_widget(w, row, col)
wb[name] = w
col += 1
row += 1
w = hadjust(table, orientation=orientation)
return w, wb
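# Illustrative sketch (not part of the original module): the captions format
# expected by build_info() -- each tuple holds (title, widget-type) pairs, and
# the returned Bunch maps mangled titles to the created widgets.  Assumes an
# Application has already been instantiated, since GridBox.add_widget notifies
# the browser through it.
def _example_build_info():
    captions = (('Object:', 'label', 'Name', 'entry'),
                ('Exposure:', 'label', 'Sec', 'spinbutton'),
                ('Go', 'button', 'Cancel', 'button'))
    widget, bunch = build_info(captions, orientation='vertical')
    # e.g. bunch.name is the TextEntry, bunch.go the 'Go' Button
    return widget, bunch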
def wrap(native_widget):
wrapper = WidgetBase()
wrapper.widget = native_widget
return wrapper
def get_orientation(container):
if not hasattr(container, 'size'):
return 'vertical'
(wd, ht) = container.size
if wd < ht:
return 'vertical'
else:
return 'horizontal'
def get_oriented_box(container, scrolled=True, fill=False):
orientation = get_orientation(container)
if orientation == 'vertical':
box1 = VBox()
box2 = VBox()
else:
box1 = HBox()
box2 = VBox()
box2.add_widget(box1)
if not fill:
box2.add_widget(Label(''), stretch=1)
if scrolled:
sw = ScrollArea()
sw.set_widget(box2)
else:
sw = box2
return box1, sw, orientation
#END
|
|
"""Interfaces for launching and remotely controlling Web browsers."""
import os
import sys
__all__ = ["Error", "open", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, don't mess with it.
return GenericBrowser(browser)
else:
# User gave us a browser name.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is None:
return command[0]()
else:
return command[1]
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
def open(url, new=0, autoraise=1):
get().open(url, new, autoraise)
def open_new(url):
get().open(url, 1)
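# Illustrative sketch (not part of the original module): registering a custom
# command-line browser and opening a URL with it.  The 'mybrowser' command is
# hypothetical; GenericBrowser substitutes '%s' with the URL.
def _example_usage():
    register("mybrowser", None, GenericBrowser("mybrowser '%s' &"))
    open("http://www.python.org/")                       # preferred browser
    get(using="mybrowser").open_new("http://www.python.org/")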
def _synthesize(browser):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
if not os.path.exists(browser):
return [None, None]
name = os.path.basename(browser)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller)
return [None, controller]
return [None, None]
def _iscommand(cmd):
"""Return True if cmd can be found on the executable search path."""
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if os.path.isfile(exe):
return True
return False
PROCESS_CREATION_DELAY = 4
class GenericBrowser:
def __init__(self, cmd):
self.name, self.args = cmd.split(None, 1)
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=1):
assert "'" not in url
command = "%s %s" % (self.name, self.args)
os.system(command % url)
def open_new(self, url):
self.open(url)
class Netscape:
"Launcher class for Netscape browsers."
def __init__(self, name):
self.name = name
self.basename = os.path.basename(name)
def _remote(self, action, autoraise):
raise_opt = ("-noraise", "-raise")[autoraise]
cmd = "%s %s -remote '%s' >/dev/null 2>&1" % (self.name,
raise_opt,
action)
rc = os.system(cmd)
if rc:
import time
os.system("%s &" % self.name)
time.sleep(PROCESS_CREATION_DELAY)
rc = os.system(cmd)
return not rc
def open(self, url, new=0, autoraise=1):
if new:
self._remote("openURL(%s, new-window)"%url, autoraise)
else:
self._remote("openURL(%s)" % url, autoraise)
def open_new(self, url):
self.open(url, 1)
class Galeon:
"""Launcher class for Galeon browsers."""
def __init__(self, name):
self.name = name
self.basename = os.path.basename(name)
def _remote(self, action, autoraise):
raise_opt = ("--noraise", "")[autoraise]
cmd = "%s %s %s >/dev/null 2>&1" % (self.name, raise_opt, action)
rc = os.system(cmd)
if rc:
import time
os.system("%s >/dev/null 2>&1 &" % self.name)
time.sleep(PROCESS_CREATION_DELAY)
rc = os.system(cmd)
return not rc
def open(self, url, new=0, autoraise=1):
if new:
self._remote("-w '%s'" % url, autoraise)
else:
self._remote("-n '%s'" % url, autoraise)
def open_new(self, url):
self.open(url, 1)
class Konqueror:
"""Controller for the KDE File Manager (kfm, or Konqueror).
See http://developer.kde.org/documentation/other/kfmclient.html
for more information on the Konqueror remote-control interface.
"""
def __init__(self):
if _iscommand("konqueror"):
self.name = self.basename = "konqueror"
else:
self.name = self.basename = "kfm"
def _remote(self, action):
cmd = "kfmclient %s >/dev/null 2>&1" % action
rc = os.system(cmd)
if rc:
import time
if self.basename == "konqueror":
os.system(self.name + " --silent &")
else:
os.system(self.name + " -d &")
time.sleep(PROCESS_CREATION_DELAY)
rc = os.system(cmd)
return not rc
def open(self, url, new=1, autoraise=1):
# XXX Currently I know no way to prevent KFM from
# opening a new win.
assert "'" not in url
self._remote("openURL '%s'" % url)
open_new = open
class Grail:
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except socket.error:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except IOError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=1):
if new:
self._remote("LOADNEW " + url)
else:
self._remote("LOAD " + url)
def open_new(self, url):
self.open(url, 1)
class WindowsDefault:
def open(self, url, new=0, autoraise=1):
os.startfile(url)
def open_new(self, url):
self.open(url)
#
# Platform support for Unix
#
# This is the right test because all these Unix browsers require either
# a console terminal or an X display to run. Note that we cannot split
# the TERM and DISPLAY cases, because we might be running Python from inside
# an xterm.
if os.environ.get("TERM") or os.environ.get("DISPLAY"):
_tryorder = ["links", "lynx", "w3m"]
# Easy cases first -- register console browsers if we have them.
if os.environ.get("TERM"):
# The Links browser <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links '%s'"))
# The Lynx browser <http://lynx.browser.org/>
if _iscommand("lynx"):
register("lynx", None, GenericBrowser("lynx '%s'"))
# The w3m browser <http://ei5nazha.yz.yamagata-u.ac.jp/~aito/w3m/eng/>
if _iscommand("w3m"):
register("w3m", None, GenericBrowser("w3m '%s'"))
# X browsers have more in the way of options
if os.environ.get("DISPLAY"):
_tryorder = ["galeon", "skipstone",
"mozilla-firefox", "mozilla-firebird", "mozilla", "netscape",
"kfm", "grail"] + _tryorder
# First, the Netscape series
for browser in ("mozilla-firefox", "mozilla-firebird",
"mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Netscape(browser))
# Next, Mosaic -- old but still in use.
if _iscommand("mosaic"):
register("mosaic", None, GenericBrowser(
"mosaic '%s' >/dev/null &"))
# Gnome's Galeon
if _iscommand("galeon"):
register("galeon", None, Galeon("galeon"))
# Skipstone, another Gtk/Mozilla based browser
if _iscommand("skipstone"):
register("skipstone", None, GenericBrowser(
"skipstone '%s' >/dev/null &"))
# Konqueror/kfm, the KDE browser.
if _iscommand("kfm") or _iscommand("konqueror"):
register("kfm", Konqueror, Konqueror())
# Grail, the Python browser.
if _iscommand("grail"):
register("grail", Grail, None)
class InternetConfig:
def open(self, url, new=0, autoraise=1):
ic.launchurl(url)
def open_new(self, url):
self.open(url)
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
_tryorder = ["netscape", "windows-default"]
register("windows-default", WindowsDefault)
#
# Platform support for MacOS
#
try:
import ic
except ImportError:
pass
else:
# internet-config is the only supported controller on MacOS,
# so don't mess with the default!
_tryorder = ["internet-config"]
register("internet-config", InternetConfig)
#
# Platform support for OS/2
#
if sys.platform[:3] == "os2" and _iscommand("netscape.exe"):
_tryorder = ["os2netscape"]
register("os2netscape", None,
GenericBrowser("start netscape.exe %s"))
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
#
if "BROWSER" in os.environ:
# It's the user's responsibility to register handlers for any unknown
# browser referenced by this value, before calling open().
_tryorder = os.environ["BROWSER"].split(os.pathsep)
for cmd in _tryorder:
if not cmd.lower() in _browsers:
if _iscommand(cmd.lower()):
register(cmd.lower(), None, GenericBrowser(
"%s '%%s'" % cmd.lower()))
cmd = None # to make del work if _tryorder was empty
del cmd
_tryorder = filter(lambda x: x.lower() in _browsers
or x.find("%s") > -1, _tryorder)
# what to do if _tryorder is now empty?
|
|
# Copyright 2013 Red Hat, Inc.
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This is the Registry's Driver API.
This API relies on the registry RPC client (version >= 2). The functions below
work as a proxy for the database back-end configured in the registry service,
which means that everything returned by that back-end will also be returned by
this API.
This API exists for supporting deployments not willing to put database
credentials in glance-api. Those deployments can rely on this registry driver
that will talk to a remote registry service, which will then access the
database back-end.
"""
import functools
from oslo_log import log as logging
from glance import artifacts
from glance.registry.client.v2 import api
LOG = logging.getLogger(__name__)
def configure():
api.configure_registry_client()
def _get_client(func):
"""Injects a client instance to the each function
This decorator creates an instance of the Registry
client and passes it as an argument to each function
in this API.
"""
@functools.wraps(func)
def wrapper(context, *args, **kwargs):
client = api.get_registry_client(context)
return func(client, *args, **kwargs)
return wrapper
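# Sketch of how the decorator is used (not part of the original module): the
# wrapped functions below declare a registry client as their first parameter,
# while callers pass a request context instead, e.g.
#
#     @_get_client
#     def image_exists(client, image_id):          # hypothetical helper
#         return client.image_get(image_id=image_id) is not None
#
# which a caller would invoke as image_exists(context, image_id).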
@_get_client
def image_create(client, values):
"""Create an image from the values dictionary."""
return client.image_create(values=values)
@_get_client
def image_update(client, image_id, values, purge_props=False, from_state=None):
"""
Set the given properties on an image and update it.
:raises NotFound if image does not exist.
"""
return client.image_update(values=values,
image_id=image_id,
purge_props=purge_props, from_state=from_state)
@_get_client
def image_destroy(client, image_id):
"""Destroy the image or raise if it does not exist."""
return client.image_destroy(image_id=image_id)
@_get_client
def image_get(client, image_id, force_show_deleted=False):
return client.image_get(image_id=image_id,
force_show_deleted=force_show_deleted)
def is_image_visible(context, image, status=None):
"""Return True if the image is visible in this context."""
# Is admin == image visible
if context.is_admin:
return True
# No owner == image visible
if image['owner'] is None:
return True
# Image is_public == image visible
if image['is_public']:
return True
# Perform tests based on whether we have an owner
if context.owner is not None:
if context.owner == image['owner']:
return True
# Figure out if this image is shared with that tenant
members = image_member_find(context,
image_id=image['id'],
member=context.owner,
status=status)
if members:
return True
# Private image
return False
@_get_client
def image_get_all(client, filters=None, marker=None, limit=None,
sort_key=None, sort_dir=None,
member_status='accepted', is_public=None,
admin_as_user=False, return_tag=False):
"""
Get all images that match zero or more filters.
:param filters: dict of filter keys and values. If a 'properties'
key is present, it is treated as a dict of key/value
filters on the image properties attribute
:param marker: image id after which to start page
:param limit: maximum number of images to return
:param sort_key: image attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param member_status: only return shared images that have this membership
status
:param is_public: If true, return only public images. If false, return
only private and shared images.
:param admin_as_user: For backwards compatibility. If true, then return to
an admin the equivalent set of images which it would see
if it were a regular user
    :param return_tag: Indicates whether each image entry in the result
                       includes its relevant tag entries. This can improve
                       upper-layer query performance by avoiding separate calls
"""
sort_key = ['created_at'] if not sort_key else sort_key
sort_dir = ['desc'] if not sort_dir else sort_dir
return client.image_get_all(filters=filters, marker=marker, limit=limit,
sort_key=sort_key, sort_dir=sort_dir,
member_status=member_status,
is_public=is_public,
admin_as_user=admin_as_user,
return_tag=return_tag)
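# Pagination sketch (assumed typical usage, with `ctx` as a placeholder request
# context): page through all images by feeding the last id of each page back in
# as the marker.
#
#     marker = None
#     while True:
#         page = image_get_all(ctx, marker=marker, limit=100)
#         if not page:
#             break
#         marker = page[-1]['id']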
@_get_client
def image_property_create(client, values, session=None):
"""Create an ImageProperty object"""
return client.image_property_create(values=values)
@_get_client
def image_property_delete(client, prop_ref, image_ref, session=None):
"""
Used internally by _image_property_create and image_property_update
"""
return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref)
@_get_client
def image_member_create(client, values, session=None):
"""Create an ImageMember object"""
return client.image_member_create(values=values)
@_get_client
def image_member_update(client, memb_id, values):
"""Update an ImageMember object"""
return client.image_member_update(memb_id=memb_id, values=values)
@_get_client
def image_member_delete(client, memb_id, session=None):
"""Delete an ImageMember object"""
client.image_member_delete(memb_id=memb_id)
@_get_client
def image_member_find(client, image_id=None, member=None, status=None):
"""Find all members that meet the given criteria
:param image_id: identifier of image entity
:param member: tenant to which membership has been granted
"""
return client.image_member_find(image_id=image_id,
member=member,
status=status)
@_get_client
def image_member_count(client, image_id):
"""Return the number of image members for this image
:param image_id: identifier of image entity
"""
return client.image_member_count(image_id=image_id)
@_get_client
def image_tag_set_all(client, image_id, tags):
client.image_tag_set_all(image_id=image_id, tags=tags)
@_get_client
def image_tag_create(client, image_id, value, session=None):
"""Create an image tag."""
return client.image_tag_create(image_id=image_id, value=value)
@_get_client
def image_tag_delete(client, image_id, value, session=None):
"""Delete an image tag."""
client.image_tag_delete(image_id=image_id, value=value)
@_get_client
def image_tag_get_all(client, image_id, session=None):
"""Get a list of tags for a specific image."""
return client.image_tag_get_all(image_id=image_id)
@_get_client
def image_location_delete(client, image_id, location_id, status, session=None):
"""Delete an image location."""
client.image_location_delete(image_id=image_id, location_id=location_id,
status=status)
@_get_client
def image_location_update(client, image_id, location, session=None):
"""Update image location."""
client.image_location_update(image_id=image_id, location=location)
@_get_client
def user_get_storage_usage(client, owner_id, image_id=None, session=None):
return client.user_get_storage_usage(owner_id=owner_id, image_id=image_id)
@_get_client
def task_get(client, task_id, session=None, force_show_deleted=False):
"""Get a single task object
:return: task dictionary
"""
return client.task_get(task_id=task_id, session=session,
force_show_deleted=force_show_deleted)
@_get_client
def task_get_all(client, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc', admin_as_user=False):
"""Get all tasks that match zero or more filters.
:param filters: dict of filter keys and values.
:param marker: task id after which to start page
:param limit: maximum number of tasks to return
:param sort_key: task attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param admin_as_user: For backwards compatibility. If true, then return to
an admin the equivalent set of tasks which it would see
if it were a regular user
:return: tasks set
"""
return client.task_get_all(filters=filters, marker=marker, limit=limit,
sort_key=sort_key, sort_dir=sort_dir,
admin_as_user=admin_as_user)
@_get_client
def task_create(client, values, session=None):
"""Create a task object"""
return client.task_create(values=values, session=session)
@_get_client
def task_delete(client, task_id, session=None):
"""Delete a task object"""
return client.task_delete(task_id=task_id, session=session)
@_get_client
def task_update(client, task_id, values, session=None):
return client.task_update(task_id=task_id, values=values, session=session)
# Metadef
@_get_client
def metadef_namespace_get_all(
client, marker=None, limit=None, sort_key='created_at',
sort_dir=None, filters=None, session=None):
return client.metadef_namespace_get_all(
marker=marker, limit=limit,
sort_key=sort_key, sort_dir=sort_dir, filters=filters)
@_get_client
def metadef_namespace_get(client, namespace_name, session=None):
return client.metadef_namespace_get(namespace_name=namespace_name)
@_get_client
def metadef_namespace_create(client, values, session=None):
return client.metadef_namespace_create(values=values)
@_get_client
def metadef_namespace_update(
client, namespace_id, namespace_dict,
session=None):
return client.metadef_namespace_update(
namespace_id=namespace_id, namespace_dict=namespace_dict)
@_get_client
def metadef_namespace_delete(client, namespace_name, session=None):
return client.metadef_namespace_delete(
namespace_name=namespace_name)
@_get_client
def metadef_object_get_all(client, namespace_name, session=None):
return client.metadef_object_get_all(
namespace_name=namespace_name)
@_get_client
def metadef_object_get(
client,
namespace_name, object_name, session=None):
return client.metadef_object_get(
namespace_name=namespace_name, object_name=object_name)
@_get_client
def metadef_object_create(
client,
namespace_name, object_dict, session=None):
return client.metadef_object_create(
namespace_name=namespace_name, object_dict=object_dict)
@_get_client
def metadef_object_update(
client,
namespace_name, object_id,
object_dict, session=None):
return client.metadef_object_update(
namespace_name=namespace_name, object_id=object_id,
object_dict=object_dict)
@_get_client
def metadef_object_delete(
client,
namespace_name, object_name,
session=None):
return client.metadef_object_delete(
namespace_name=namespace_name, object_name=object_name)
@_get_client
def metadef_object_delete_namespace_content(
client,
namespace_name, session=None):
return client.metadef_object_delete_namespace_content(
namespace_name=namespace_name)
@_get_client
def metadef_object_count(
client,
namespace_name, session=None):
return client.metadef_object_count(
namespace_name=namespace_name)
@_get_client
def metadef_property_get_all(
client,
namespace_name, session=None):
return client.metadef_property_get_all(
namespace_name=namespace_name)
@_get_client
def metadef_property_get(
client,
namespace_name, property_name,
session=None):
return client.metadef_property_get(
namespace_name=namespace_name, property_name=property_name)
@_get_client
def metadef_property_create(
client,
namespace_name, property_dict,
session=None):
return client.metadef_property_create(
namespace_name=namespace_name, property_dict=property_dict)
@_get_client
def metadef_property_update(
client,
namespace_name, property_id,
property_dict, session=None):
return client.metadef_property_update(
namespace_name=namespace_name, property_id=property_id,
property_dict=property_dict)
@_get_client
def metadef_property_delete(
client,
namespace_name, property_name,
session=None):
return client.metadef_property_delete(
namespace_name=namespace_name, property_name=property_name)
@_get_client
def metadef_property_delete_namespace_content(
client,
namespace_name, session=None):
return client.metadef_property_delete_namespace_content(
namespace_name=namespace_name)
@_get_client
def metadef_property_count(
client,
namespace_name, session=None):
return client.metadef_property_count(
namespace_name=namespace_name)
@_get_client
def metadef_resource_type_create(client, values, session=None):
return client.metadef_resource_type_create(values=values)
@_get_client
def metadef_resource_type_get(
client,
resource_type_name, session=None):
return client.metadef_resource_type_get(
resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_get_all(client, session=None):
return client.metadef_resource_type_get_all()
@_get_client
def metadef_resource_type_delete(
client,
resource_type_name, session=None):
return client.metadef_resource_type_delete(
resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_get(
client,
namespace_name, resource_type_name,
session=None):
return client.metadef_resource_type_association_get(
namespace_name=namespace_name, resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_create(
client,
namespace_name, values, session=None):
return client.metadef_resource_type_association_create(
namespace_name=namespace_name, values=values)
@_get_client
def metadef_resource_type_association_delete(
client,
namespace_name, resource_type_name, session=None):
return client.metadef_resource_type_association_delete(
namespace_name=namespace_name, resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_get_all_by_namespace(
client,
namespace_name, session=None):
return client.metadef_resource_type_association_get_all_by_namespace(
namespace_name=namespace_name)
@_get_client
def metadef_tag_get_all(client, namespace_name, filters=None, marker=None,
limit=None, sort_key='created_at', sort_dir=None,
session=None):
return client.metadef_tag_get_all(
namespace_name=namespace_name, filters=filters, marker=marker,
limit=limit, sort_key=sort_key, sort_dir=sort_dir, session=session)
@_get_client
def metadef_tag_get(client, namespace_name, name, session=None):
return client.metadef_tag_get(
namespace_name=namespace_name, name=name)
@_get_client
def metadef_tag_create(
client, namespace_name, tag_dict, session=None):
return client.metadef_tag_create(
namespace_name=namespace_name, tag_dict=tag_dict)
@_get_client
def metadef_tag_create_tags(
client, namespace_name, tag_list, session=None):
return client.metadef_tag_create_tags(
namespace_name=namespace_name, tag_list=tag_list)
@_get_client
def metadef_tag_update(
client, namespace_name, id, tag_dict, session=None):
return client.metadef_tag_update(
namespace_name=namespace_name, id=id, tag_dict=tag_dict)
@_get_client
def metadef_tag_delete(
client, namespace_name, name, session=None):
return client.metadef_tag_delete(
namespace_name=namespace_name, name=name)
@_get_client
def metadef_tag_delete_namespace_content(
client, namespace_name, session=None):
return client.metadef_tag_delete_namespace_content(
namespace_name=namespace_name)
@_get_client
def metadef_tag_count(client, namespace_name, session=None):
return client.metadef_tag_count(namespace_name=namespace_name)
@_get_client
def artifact_create(client, values,
type_name, type_version=None, session=None):
return client.artifact_create(values=values,
type_name=type_name,
type_version=type_version)
@_get_client
def artifact_update(client, values, artifact_id,
type_name, type_version=None, session=None):
return client.artifact_update(values=values, artifact_id=artifact_id,
type_name=type_name,
type_version=type_version)
@_get_client
def artifact_delete(client, artifact_id,
type_name, type_version=None, session=None):
return client.artifact_delete(artifact_id=artifact_id,
type_name=type_name,
type_version=type_version)
@_get_client
def artifact_get(client, artifact_id,
type_name, type_version=None, session=None):
return client.artifact_get(artifact_id=artifact_id,
type_name=type_name,
type_version=type_version)
@_get_client
def artifact_get_all(client, marker=None, limit=None, sort_key=None,
sort_dir=None, filters={},
show_level=artifacts.Showlevel.NONE, session=None):
    return client.artifact_get_all(marker=marker, limit=limit,
                                   sort_key=sort_key, sort_dir=sort_dir,
                                   filters=filters, show_level=show_level)
@_get_client
def artifact_publish(client, artifact_id,
type_name, type_version=None, session=None):
return client.artifact_publish(artifact_id=artifact_id,
type_name=type_name,
type_version=type_version)
|
|
## system-config-printer
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
## Authors:
## Florian Festi <ffesti@redhat.com>
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups, pprint, os, tempfile, re, string
import locale
from . import _debugprint
from . import config
class Printer:
_flags_blacklist = ["options", "local"]
def __init__(self, name, connection, **kw):
"""
@param name: printer name
@type name: string
@param connection: CUPS connection
@type connection: CUPS.Connection object
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.name = name
self.connection = connection
self.class_members = []
have_kw = len (kw) > 0
fetch_attrs = True
if have_kw:
self.update (**kw)
if self.is_class:
fetch_attrs = True
else:
fetch_attrs = False
if fetch_attrs:
self.getAttributes ()
self._ppd = None # load on demand
def __del__ (self):
if self._ppd != None:
os.unlink(self._ppd)
def __repr__ (self):
return "<cupshelpers.Printer \"%s\">" % self.name
def _expand_flags(self):
def _ascii_lower(str):
return str.translate(string.maketrans(string.ascii_uppercase,
string.ascii_lowercase));
prefix = "CUPS_PRINTER_"
prefix_length = len(prefix)
# loop over cups constants
for name in cups.__dict__:
if name.startswith(prefix):
attr_name = \
_ascii_lower(name[prefix_length:])
if attr_name in self._flags_blacklist: continue
if attr_name == "class": attr_name = "is_class"
# set as attribute
setattr(self, attr_name,
bool(self.type & getattr(cups, name)))
def update(self, **kw):
"""
Update object from printer attributes.
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.state = kw.get('printer-state', 0)
self.enabled = self.state != cups.IPP_PRINTER_STOPPED
self.device_uri = kw.get('device-uri', "")
self.info = kw.get('printer-info', "")
self.is_shared = kw.get('printer-is-shared', None)
self.location = kw.get('printer-location', "")
self.make_and_model = kw.get('printer-make-and-model', "")
self.type = kw.get('printer-type', 0)
self.uri_supported = kw.get('printer-uri-supported', "")
if type (self.uri_supported) != list:
self.uri_supported = [self.uri_supported]
self._expand_flags()
if self.is_shared is None:
self.is_shared = not self.not_shared
del self.not_shared
self.class_members = kw.get('member-names', [])
if type (self.class_members) != list:
self.class_members = [self.class_members]
self.class_members.sort ()
self.other_attributes = kw
def getAttributes(self):
"""
Fetch further attributes for the printer.
Normally only a small set of attributes is fetched. This
method is for fetching more.
"""
attrs = self.connection.getPrinterAttributes(self.name)
self.attributes = {}
self.other_attributes = {}
self.possible_attributes = {
'landscape' : ('False', ['True', 'False']),
'page-border' : ('none', ['none', 'single', 'single-thick',
'double', 'double-thick']),
}
for key, value in attrs.iteritems():
if key.endswith("-default"):
name = key[:-len("-default")]
if name in ["job-sheets", "printer-error-policy",
"printer-op-policy", # handled below
"notify-events", # cannot be set
"document-format", # cannot be set
"notify-lease-duration"]: # cannot be set
continue
supported = attrs.get(name + "-supported", None) or \
self.possible_attributes.get(name, None) or \
""
# Convert a list into a comma-separated string, since
# it can only really have been misinterpreted as a list
# by CUPS.
if isinstance (value, list):
value = reduce (lambda x, y: x+','+y, value)
self.attributes[name] = value
if attrs.has_key(name+"-supported"):
supported = attrs[name+"-supported"]
self.possible_attributes[name] = (value, supported)
elif (not key.endswith ("-supported") and
key != 'job-sheets-default' and
key != 'printer-error-policy' and
key != 'printer-op-policy' and
not key.startswith ('requesting-user-name-')):
self.other_attributes[key] = value
self.job_sheet_start, self.job_sheet_end = attrs.get(
'job-sheets-default', ('none', 'none'))
self.job_sheets_supported = attrs.get('job-sheets-supported', ['none'])
self.error_policy = attrs.get('printer-error-policy', 'none')
self.error_policy_supported = attrs.get(
'printer-error-policy-supported', ['none'])
self.op_policy = attrs.get('printer-op-policy', "") or "default"
self.op_policy_supported = attrs.get(
'printer-op-policy-supported', ["default"])
self.default_allow = True
self.except_users = []
if attrs.has_key('requesting-user-name-allowed'):
self.except_users = attrs['requesting-user-name-allowed']
self.default_allow = False
elif attrs.has_key('requesting-user-name-denied'):
self.except_users = attrs['requesting-user-name-denied']
self.except_users_string = ', '.join(self.except_users)
self.update (**attrs)
def getServer(self):
"""
Find out which server defines this printer.
@returns: server URI or None
"""
if not self.uri_supported[0].startswith('ipp://'):
return None
uri = self.uri_supported[0][6:]
uri = uri.split('/')[0]
uri = uri.split(':')[0]
if uri == "localhost.localdomain":
uri = "localhost"
return uri
def getPPD(self):
"""
Obtain the printer's PPD.
@returns: cups.PPD object, or False for raw queues
@raise cups.IPPError: IPP error
"""
result = None
if self._ppd is None:
try:
self._ppd = self.connection.getPPD(self.name)
result = cups.PPD (self._ppd)
except cups.IPPError, (e, m):
if e == cups.IPP_NOT_FOUND:
result = False
else:
raise
if result == None and self._ppd != None:
result = cups.PPD (self._ppd)
return result
def setOption(self, name, value):
"""
Set a printer's option.
@param name: option name
@type name: string
@param value: option value
@type value: option-specific
"""
if isinstance (value, float):
radixchar = locale.nl_langinfo (locale.RADIXCHAR)
if radixchar != '.':
# Convert floats to strings, being careful with decimal points.
value = str (value).replace (radixchar, '.')
self.connection.addPrinterOptionDefault(self.name, name, value)
def unsetOption(self, name):
"""
Unset a printer's option.
@param name: option name
@type name: string
"""
self.connection.deletePrinterOptionDefault(self.name, name)
def setEnabled(self, on, reason=None):
"""
Set the printer's enabled state.
@param on: whether it will be enabled
@type on: bool
@param reason: reason for this state
@type reason: string
"""
if on:
self.connection.enablePrinter(self.name)
else:
if reason:
self.connection.disablePrinter(self.name, reason=reason)
else:
self.connection.disablePrinter(self.name)
def setAccepting(self, on, reason=None):
"""
Set the printer's accepting state.
@param on: whether it will be accepting
@type on: bool
@param reason: reason for this state
@type reason: string
"""
if on:
self.connection.acceptJobs(self.name)
else:
if reason:
self.connection.rejectJobs(self.name, reason=reason)
else:
self.connection.rejectJobs(self.name)
def setShared(self,on):
"""
Set the printer's shared state.
        @param on: whether it will be shared
@type on: bool
"""
self.connection.setPrinterShared(self.name, on)
def setErrorPolicy (self, policy):
"""
Set the printer's error policy.
@param policy: error policy
@type policy: string
"""
self.connection.setPrinterErrorPolicy(self.name, policy)
def setOperationPolicy(self, policy):
"""
Set the printer's operation policy.
@param policy: operation policy
@type policy: string
"""
self.connection.setPrinterOpPolicy(self.name, policy)
def setJobSheets(self, start, end):
"""
Set the printer's job sheets.
@param start: start sheet
@type start: string
@param end: end sheet
@type end: string
"""
self.connection.setPrinterJobSheets(self.name, start, end)
def setAccess(self, allow, except_users):
"""
Set access control list.
@param allow: whether to allow by default, otherwise deny
@type allow: bool
@param except_users: exception list
@type except_users: string list
"""
if isinstance(except_users, str):
users = except_users.split()
users = [u.split(",") for u in users]
except_users = []
for u in users:
except_users.extend(u)
except_users = [u.strip() for u in except_users]
except_users = filter(None, except_users)
if allow:
self.connection.setPrinterUsersDenied(self.name, except_users)
else:
self.connection.setPrinterUsersAllowed(self.name, except_users)
def jobsQueued(self, only_tests=False, limit=None):
"""
Find out whether jobs are queued for this printer.
@param only_tests: whether to restrict search to test pages
@type only_tests: bool
@returns: list of job IDs
"""
ret = []
try:
try:
r = ['job-id', 'job-printer-uri', 'job-name']
jobs = self.connection.getJobs (requested_attributes=r)
except TypeError:
# requested_attributes requires pycups 1.9.50
jobs = self.connection.getJobs ()
except cups.IPPError:
return ret
for id, attrs in jobs.iteritems():
try:
uri = attrs['job-printer-uri']
uri = uri[uri.rindex ('/') + 1:]
except:
continue
if uri != self.name:
continue
if (not only_tests or
(attrs.has_key ('job-name') and
attrs['job-name'] == 'Test Page')):
ret.append (id)
if limit != None and len (ret) == limit:
break
return ret
def jobsPreserved(self, limit=None):
"""
Find out whether there are preserved jobs for this printer.
@return: list of job IDs
"""
ret = []
try:
try:
r = ['job-id', 'job-printer-uri', 'job-state']
jobs = self.connection.getJobs (which_jobs='completed',
requested_attributes=r)
except TypeError:
# requested_attributes requires pycups 1.9.50
jobs = self.connection.getJobs (which_jobs='completed')
except cups.IPPError:
return ret
for id, attrs in jobs.iteritems():
try:
uri = attrs['job-printer-uri']
uri = uri[uri.rindex ('/') + 1:]
except:
continue
if uri != self.name:
continue
if (attrs.get ('job-state',
cups.IPP_JOB_PENDING) < cups.IPP_JOB_COMPLETED):
continue
ret.append (id)
if limit != None and len (ret) == limit:
break
return ret
def testsQueued(self, limit=None):
"""
Find out whether test jobs are queued for this printer.
@returns: list of job IDs
"""
return self.jobsQueued (only_tests=True, limit=limit)
def setAsDefault(self):
"""
Set this printer as the system default.
"""
self.connection.setDefault(self.name)
# Also need to check system-wide lpoptions because that's how
# previous Fedora versions set the default (bug #217395).
(tmpfd, tmpfname) = tempfile.mkstemp ()
os.remove (tmpfname)
try:
resource = "/admin/conf/lpoptions"
self.connection.getFile(resource, fd=tmpfd)
except cups.HTTPError as e:
(s,) = e.args
if s == cups.HTTP_NOT_FOUND:
return False
raise cups.HTTPError (s)
f = os.fdopen (tmpfd, 'r+')
f.seek (0)
lines = f.readlines ()
changed = False
i = 0
for line in lines:
if line.startswith ("Default "):
# This is the system-wide default.
name = line.split (' ')[1]
if name != self.name:
# Stop it from over-riding the server default.
lines[i] = "Dest " + line[8:]
changed = True
i += 1
if changed:
f.seek (0)
f.writelines (lines)
f.truncate ()
os.lseek (tmpfd, 0, os.SEEK_SET)
try:
self.connection.putFile (resource, fd=tmpfd)
except cups.HTTPError:
return False
return changed
def getPrinters(connection):
"""
Obtain a list of printers.
@param connection: CUPS connection
@type connection: CUPS.Connection object
@returns: L{Printer} list
"""
printers = connection.getPrinters()
classes = connection.getClasses()
for name, printer in printers.iteritems():
printer = Printer(name, connection, **printer)
printers[name] = printer
if classes.has_key(name):
printer.class_members = classes[name]
printer.class_members.sort()
return printers
def parseDeviceID (id):
"""
Parse an IEEE 1284 Device ID, so that it may be indexed by field name.
@param id: IEEE 1284 Device ID, without the two leading length bytes
@type id: string
@returns: dict indexed by field name
"""
id_dict = {}
pieces = id.split(";")
for piece in pieces:
if piece.find(":") == -1:
continue
name, value = piece.split(":",1)
id_dict[name.strip ()] = value.strip()
if id_dict.has_key ("MANUFACTURER"):
id_dict.setdefault("MFG", id_dict["MANUFACTURER"])
if id_dict.has_key ("MODEL"):
id_dict.setdefault("MDL", id_dict["MODEL"])
if id_dict.has_key ("COMMAND SET"):
id_dict.setdefault("CMD", id_dict["COMMAND SET"])
for name in ["MFG", "MDL", "CMD", "CLS", "DES", "SN", "S", "P", "J"]:
id_dict.setdefault(name, "")
if id_dict["CMD"] == '':
id_dict["CMD"] = []
else:
id_dict["CMD"] = id_dict["CMD"].split(',')
return id_dict
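# Illustrative example (values assumed for demonstration): a typical Device ID
# parses into a dict keyed by the standard field abbreviations, with the CMD
# field split into a list.
#
#     >>> parseDeviceID ('MFG:HEWLETT-PACKARD;MDL:DESKJET 990C;'
#     ...                'CMD:MLC,PCL,PML;CLS:PRINTER;')['CMD']
#     ['MLC', 'PCL', 'PML']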
class Device:
"""
This class represents a CUPS device.
"""
def __init__(self, uri, **kw):
"""
@param uri: device URI
@type uri: string
@param kw: device attributes
@type kw: dict
"""
self.uri = uri
self.device_class = kw.get('device-class', '')
self.info = kw.get('device-info', '')
self.make_and_model = kw.get('device-make-and-model', '')
self.id = kw.get('device-id', '')
self.location = kw.get('device-location', '')
if type (self.info) == unicode:
# Convert unicode objects to UTF-8 encoding so they can be
# compared with other UTF-8 encoded strings (bug #957444).
self.info = self.info.encode ('utf-8')
uri_pieces = uri.split(":")
self.type = uri_pieces[0]
self.is_class = len(uri_pieces)==1
#self.id = 'MFG:HEWLETT-PACKARD;MDL:DESKJET 990C;CMD:MLC,PCL,PML;CLS:PRINTER;DES:Hewlett-Packard DeskJet 990C;SN:US05N1J00XLG;S:00808880800010032C1000000C2000000;P:0800,FL,B0;J: ;'
self.id_dict = parseDeviceID (self.id)
s = uri.find("serial=")
if s != -1 and not self.id_dict.get ('SN',''):
self.id_dict['SN'] = uri[s + 7:]
def __repr__ (self):
return "<cupshelpers.Device \"%s\">" % self.uri
def __cmp__(self, other):
"""
Compare devices by order of preference.
"""
if other == None:
return -1
if self.is_class != other.is_class:
if other.is_class:
return -1
return 1
if not self.is_class and (self.type != other.type):
# "hp"/"hpfax" before "usb" before * before "parallel" before
# "serial"
if other.type == "serial":
return -1
if self.type == "serial":
return 1
if other.type == "parallel":
return -1
if self.type == "parallel":
return 1
if other.type == "hp":
return 1
if self.type == "hp":
return -1
if other.type == "hpfax":
return 1
if self.type == "hpfax":
return -1
if other.type == "dnssd":
return 1
if self.type == "dnssd":
return -1
if other.type == "socket":
return 1
if self.type == "socket":
return -1
if other.type == "lpd":
return 1
if self.type == "lpd":
return -1
if other.type == "ipps":
return 1
if self.type == "ipps":
return -1
if other.type == "ipp":
return 1
if self.type == "ipp":
return -1
if other.type == "usb":
return 1
if self.type == "usb":
return -1
if self.type == "dnssd" and other.type == "dnssd":
if other.uri.find("._pdl-datastream") != -1: # Socket
return 1
if self.uri.find("._pdl-datastream") != -1:
return -1
if other.uri.find("._printer") != -1: # LPD
return 1
if self.uri.find("._printer") != -1:
return -1
if other.uri.find("._ipp") != -1: # IPP
return 1
if self.uri.find("._ipp") != -1:
return -1
result = cmp(bool(self.id), bool(other.id))
if not result:
result = cmp(self.info, other.info)
return result
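# Sorting sketch (illustrative; assumes a CUPS connection is available): because
# Device defines __cmp__, sorting orders detected devices by backend preference,
# most preferred first (hp/hpfax, then dnssd, socket, lpd, ipps, ipp and usb,
# with parallel and serial last).
#
#     devices = getDevices (connection).values ()
#     devices.sort ()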
class _GetDevicesCall(object):
def call (self, connection, kwds):
if kwds.has_key ("reply_handler"):
self._client_reply_handler = kwds.get ("reply_handler")
kwds["reply_handler"] = self._reply_handler
return connection.getDevices (**kwds)
self._client_reply_handler = None
result = connection.getDevices (**kwds)
return self._reply_handler (connection, result)
def _reply_handler (self, connection, devices):
for uri, data in devices.iteritems():
device = Device(uri, **data)
devices[uri] = device
if device.info != '' and device.make_and_model == '':
device.make_and_model = device.info
if self._client_reply_handler:
self._client_reply_handler (connection, devices)
else:
return devices
def getDevices(connection, **kw):
"""
Obtain a list of available CUPS devices.
@param connection: CUPS connection
@type connection: cups.Connection object
@returns: a list of L{Device} objects
@raise cups.IPPError: IPP Error
"""
op = _GetDevicesCall ()
return op.call (connection, kw)
def activateNewPrinter(connection, name):
"""
Set a new printer enabled, accepting jobs, and (if necessary) the
default printer.
@param connection: CUPS connection
@type connection: cups.Connection object
@param name: printer name
@type name: string
@raise cups.IPPError: IPP error
"""
connection.enablePrinter (name)
connection.acceptJobs (name)
# Set as the default if there is not already a default printer.
if connection.getDefault () == None:
connection.setDefault (name)
def copyPPDOptions(ppd1, ppd2):
"""
Copy default options between PPDs.
@param ppd1: source PPD
@type ppd1: cups.PPD object
@param ppd2: destination PPD
@type ppd2: cups.PPD object
"""
def getPPDGroupOptions(group):
options = group.options[:]
for g in group.subgroups:
options.extend(getPPDGroupOptions(g))
return options
def iteratePPDOptions(ppd):
for group in ppd.optionGroups:
for option in getPPDGroupOptions(group):
yield option
for option in iteratePPDOptions(ppd1):
if option.keyword == "PageRegion":
continue
new_option = ppd2.findOption(option.keyword)
if new_option and option.ui==new_option.ui:
value = option.defchoice
for choice in new_option.choices:
if choice["choice"]==value:
ppd2.markOption(new_option.keyword, value)
_debugprint ("set %s = %s" % (repr (new_option.keyword),
repr (value)))
def setPPDPageSize(ppd, language):
"""
Set the PPD page size according to locale.
@param ppd: PPD
@type ppd: cups.PPD object
@param language: language, as given by the first element of
locale.setlocale
@type language: string
"""
# Just set the page size to A4 or Letter, that's all.
# Use the same method CUPS uses.
size = 'A4'
letter = [ 'C', 'POSIX', 'en', 'en_US', 'en_CA', 'fr_CA' ]
for each in letter:
if language == each:
size = 'Letter'
# Use setting in /etc/papersize if available
try:
f = open ("/etc/papersize")
for line in f:
if line.startswith("#"):
continue
if line.strip().lower().startswith("a4"):
size = 'A4'
elif line.strip().lower().startswith("letter"):
size = 'Letter'
elif line.strip() != "":
break
f.close()
except:
pass
try:
ppd.markOption ('PageSize', size)
_debugprint ("set PageSize = %s" % size)
except:
_debugprint ("Failed to set PageSize (%s not available?)" % size)
def missingExecutables(ppd):
"""
Check that all relevant executables for a PPD are installed.
@param ppd: PPD
@type ppd: cups.PPD object
@returns: string list, representing missing executables
"""
# First, a local function. How to check that something exists
# in a path:
def pathcheck (name, path="/usr/bin:/bin"):
if name == "-":
# A filter of "-" means that no filter is required,
# i.e. the device accepts the given format as-is.
return "builtin"
# Strip out foomatic '%'-style place-holders.
p = name.find ('%')
if p != -1:
name = name[:p]
if len (name) == 0:
return "true"
if name[0] == '/':
if os.access (name, os.X_OK):
_debugprint ("%s: found" % name)
return name
else:
_debugprint ("%s: NOT found" % name)
return None
if name.find ("=") != -1:
return "builtin"
if name in [ ":", ".", "[", "alias", "bind", "break", "cd",
"continue", "declare", "echo", "else", "eval",
"exec", "exit", "export", "fi", "if", "kill", "let",
"local", "popd", "printf", "pushd", "pwd", "read",
"readonly", "set", "shift", "shopt", "source",
"test", "then", "trap", "type", "ulimit", "umask",
"unalias", "unset", "wait" ]:
return "builtin"
for component in path.split (':'):
file = component.rstrip (os.path.sep) + os.path.sep + name
if os.access (file, os.X_OK):
_debugprint ("%s: found" % file)
return file
_debugprint ("%s: NOT found in %s" % (name, path))
return None
exes_to_install = []
def add_missing (exe):
# Strip out foomatic '%'-style place-holders.
p = exe.find ('%')
if p != -1:
exe = exe[:p]
exes_to_install.append (exe)
# Find a 'FoomaticRIPCommandLine' attribute.
exe = exepath = None
attr = ppd.findAttr ('FoomaticRIPCommandLine')
if attr:
# Foomatic RIP command line to check.
cmdline = attr.value.replace ('&&\n', '')
        cmdline = cmdline.replace ('&quot;', '"')
        cmdline = cmdline.replace ('&lt;', '<')
        cmdline = cmdline.replace ('&gt;', '>')
if (cmdline.find ("(") != -1 or
cmdline.find ("&") != -1):
# Don't try to handle sub-shells or unreplaced HTML entities.
cmdline = ""
# Strip out foomatic '%'-style place-holders
pipes = cmdline.split (';')
for pipe in pipes:
cmds = pipe.strip ().split ('|')
for cmd in cmds:
args = cmd.strip ().split (' ')
exe = args[0]
exepath = pathcheck (exe)
if not exepath:
add_missing (exe)
continue
# Main executable found. But if it's 'gs',
# perhaps there is an IJS server we also need
# to check.
if os.path.basename (exepath) == 'gs':
argn = len (args)
argi = 1
search = "-sIjsServer="
while argi < argn:
arg = args[argi]
if arg.startswith (search):
exe = arg[len (search):]
exepath = pathcheck (exe)
if not exepath:
add_missing (exe)
break
argi += 1
if not exepath:
# Next pipe.
break
if exepath or not exe:
# Look for '*cupsFilter' lines in the PPD and check that
# the filters are installed.
(tmpfd, tmpfname) = tempfile.mkstemp ()
os.unlink (tmpfname)
ppd.writeFd (tmpfd)
os.lseek (tmpfd, 0, os.SEEK_SET)
f = os.fdopen (tmpfd, "r")
search = "*cupsFilter:"
for line in f.readlines ():
if line.startswith (search):
line = line[len (search):].strip ().strip ('"')
try:
(mimetype, cost, exe) = line.split (' ')
except:
continue
exepath = pathcheck (exe,
config.cupsserverbindir + "/filter:"
"/usr/lib64/cups/filter")
if not exepath:
add_missing (config.cupsserverbindir + "/filter/" + exe)
return exes_to_install
def missingPackagesAndExecutables(ppd):
"""
Check that all relevant executables for a PPD are installed.
@param ppd: PPD
@type ppd: cups.PPD object
@returns: string list pair, representing missing packages and
missing executables
"""
executables = missingExecutables(ppd)
return ([], executables)
def _main():
c = cups.Connection()
#printers = getPrinters(c)
for device in getDevices(c).itervalues():
print device.uri, device.id_dict
if __name__=="__main__":
_main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations:
"""SubnetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Subnet":
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> "_models.Subnet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
subnet_parameters: "_models.Subnet",
**kwargs: Any
) -> AsyncLROPoller["_models.Subnet"]:
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2021_02_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
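    # Usage sketch (assumed calling pattern; resource names are placeholders):
    # this operation group is reached through the async management client and
    # the returned AsyncLROPoller is awaited for the final Subnet, e.g.:
    #
    #     async with NetworkManagementClient(credential, subscription_id) as client:
    #         poller = await client.subnets.begin_create_or_update(
    #             "my-rg", "my-vnet", "my-subnet",
    #             Subnet(address_prefix="10.0.0.0/24"))
    #         subnet = await poller.result()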
async def _prepare_network_policies_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
prepare_network_policies_request_parameters: "_models.PrepareNetworkPoliciesRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._prepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(prepare_network_policies_request_parameters, 'PrepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_prepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
async def begin_prepare_network_policies(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
prepare_network_policies_request_parameters: "_models.PrepareNetworkPoliciesRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Prepares a subnet by applying network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param prepare_network_policies_request_parameters: Parameters supplied to prepare subnet by
applying network intent policies.
:type prepare_network_policies_request_parameters: ~azure.mgmt.network.v2021_02_01.models.PrepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._prepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
prepare_network_policies_request_parameters=prepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_prepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
async def _unprepare_network_policies_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
unprepare_network_policies_request_parameters: "_models.UnprepareNetworkPoliciesRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._unprepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(unprepare_network_policies_request_parameters, 'UnprepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_unprepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
async def begin_unprepare_network_policies(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
unprepare_network_policies_request_parameters: "_models.UnprepareNetworkPoliciesRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Unprepares a subnet by removing network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param unprepare_network_policies_request_parameters: Parameters supplied to unprepare subnet
to remove network intent policies.
:type unprepare_network_policies_request_parameters: ~azure.mgmt.network.v2021_02_01.models.UnprepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._unprepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
unprepare_network_policies_request_parameters=unprepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unprepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SubnetListResult"]:
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
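# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the generated SDK. It
# assumes the async NetworkManagementClient exposes the operations above as
# its ``subnets`` group; the resource names and service name are placeholders.
async def _example_prepare_and_list_subnets() -> None:  # pragma: no cover
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    from azure.mgmt.network.v2021_02_01 import models

    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")
    try:
        # Start the long-running operation and wait for it (result is None).
        poller = await client.subnets.begin_prepare_network_policies(
            "my-rg", "my-vnet", "my-subnet",
            models.PrepareNetworkPoliciesRequest(service_name="Microsoft.Sql/managedInstances"),
        )
        await poller.result()
        # Page through the subnets of the virtual network.
        async for subnet in client.subnets.list("my-rg", "my-vnet"):
            print(subnet.name)
    finally:
        await client.close()
        await credential.close()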
|
|
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import numpy as np
import pickle
import getopt
import time
import sys
import os
import nltk
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from dnc import DNC
from recurrent_controller import StatelessRecurrentController
from adm_task import repeat_test
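# Evaluation helpers used below. Targets and predictions are sequences of
# integer label ids in which 0 (PAD) and 1 (EOS) are non-content tokens; each
# metric first trims labels <= 1 and then compares only the remaining ids.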
def bleu_score(target_batch, predict_batch, print_prob=0.995):
s = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand() > print_prob:
print('{} vs {}'.format(trim_target, trim_predict))
BLEUscore = nltk.translate.bleu_score.sentence_bleu([trim_target], trim_predict, weights=[0.5, 0.5])
s.append(BLEUscore)
return np.mean(s)
def set_score_pre(target_batch, predict_batch):
s = []
s2 = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand() > 1:
print('{} vs {}'.format(trim_target, trim_predict))
acc = len(set(trim_target).intersection(set(trim_predict))) / len(set(trim_target))
acc2 = 0
if len(set(trim_predict)) > 0:
acc2 = len(set(trim_target).intersection(set(trim_predict))) / len(trim_predict)
s.append(acc)
s2.append(acc2)
return np.mean(s2), np.mean(s) # prec, recall
def set_score_pre_jac(target_batch, predict_batch):
s = []
s2 = []
s3 = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand() > 1:
print('{} vs {}'.format(trim_target, trim_predict))
acc = len(set(trim_target).intersection(set(trim_predict))) / len(set(trim_target))
acc2 = 0
if len(set(trim_predict)) > 0:
acc2 = len(set(trim_target).intersection(set(trim_predict))) / len(trim_predict)
acc3 = len(set(trim_target).intersection(set(trim_predict))) / len(set(trim_target).union(set(trim_predict)))
s.append(acc)
s2.append(acc2)
s3.append(acc3)
return np.mean(s2), np.mean(s), np.mean(s3) # prec, recall, jaccard
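# Worked example for set_score_pre_jac (labels <= 1 are trimmed first):
# target [2, 3, 4] vs. prediction [2, 4, 5] gives
#   precision = |{2,4}| / 3 = 2/3, recall = |{2,4}| / 3 = 2/3,
#   jaccard   = |{2,4}| / |{2,3,4,5}| = 1/2.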
def set_score_hist(target_batch, predict_batch):
acc_label = {}
guess_label = {}
count_label = {}
for b in range(target_batch.shape[0]):
for t, t2 in zip(target_batch[b], predict_batch[b]):
# print('{} ----- {}'.format(t, t2))
trim_target = []
for tt in t:
if tt > 1:
trim_target.append(tt)
for l in trim_target:
if l not in count_label:
count_label[l] = 0
count_label[l] += 1
trim_predict = []
for tt in t2:
if tt > 1:
trim_predict.append(tt)
if np.random.rand() > 0.99:
print('{} vs {}'.format(trim_target, trim_predict))
for l in trim_predict:
if l not in guess_label:
guess_label[l] = 0
guess_label[l] += 1
correct = list(set(trim_target).intersection(set(trim_predict)))
for c in correct:
if c not in acc_label:
acc_label[c] = 0
acc_label[c] += 1
recall = []
precision = []
fscore = []
for k, v in sorted(count_label.items()):
if k in acc_label:
rec = acc_label[k] / count_label[k]
prec = acc_label[k] / guess_label[k]
recall.append(rec)
precision.append(prec)
fscore.append(2 * rec * prec / (rec + prec))
else:
recall.append(0)
precision.append(0)
fscore.append(0)
return recall, precision, fscore
import editdistance as ed
def batch_norm_edit_score(reals, preds, pprint=0.999):
avgs=0
c=0
for i,real in enumerate(reals):
avgs += norm_edit_score(reals[i],preds[i],pprint)
c+=1
return avgs/c
def norm_edit_score(real, pred, pprob=0.999):
trimpred=[]
for p in pred:
if p>1:
trimpred.append(p)
trimreal=[]
for r in real:
if r>1:
trimreal.append(r)
if np.random.rand() > pprob:
print('{} vs {}'.format(trimreal, trimpred))
    if not trimpred:  # `trimpred is []` was always False; empty prediction counts as max distance
        return 1
#print(trimreal)
return ed.eval(trimpred,trimreal)/max(len(trimpred),len(trimreal))
def norm_edit_score_raw(real, pred, pprob=0.999):
if np.random.rand() > pprob:
print('{} vs {}'.format(real, pred))
return ed.eval(pred,real)/max(len(pred),len(real))
def llprint(message):
sys.stdout.write(message)
sys.stdout.flush()
def load(path):
return pickle.load(open(path, 'rb'))
def onehot(index, size):
# print('-----')
# print(index)
vec = np.zeros(size, dtype=np.float32)
vec[int(index)] = 1.0
return vec
def convert_json_sdata(pdata, limit_out_adm):
all_in = []
all_out = []
for pat in pdata:
inp = pat[0]
rinp = []
for adm in inp:
rinp += adm
oup = pat[1]
roup = []
c = 0
for adm in oup:
roup += adm
c+=1
if c==limit_out_adm:
break
all_in.append(rinp)
all_out.append(roup)
return all_in, all_out
def load_adm_data(file_dir='./data/epw_7k', limit_out_adm=100, c2l=None, l2c=None):
import json
ptrain = json.load(open(file_dir + '/pairs-train.json'))
ptest = json.load(open(file_dir + '/pairs-test.json'))
pvalid = json.load(open(file_dir + '/pairs-valid.json'))
if c2l is None:
char2label = {'PAD': 0, 'EOS': 1}
else:
char2label = c2l
if l2c is None:
label2char = {0: 'PAD', 1: 'EOS'}
else:
label2char = l2c
ptrain_in, ptrain_out = convert_json_sdata(ptrain, limit_out_adm)
ptest_in, ptest_out = convert_json_sdata(ptest, limit_out_adm)
pvalid_in, pvalid_out = convert_json_sdata(pvalid, limit_out_adm)
ret = []
for data in [ptrain_in, ptrain_out, ptest_in, ptest_out, pvalid_in, pvalid_out]:
ndata = []
for seq in data:
seq2 = []
for c in seq:
if c not in char2label:
char2label[c] = len(char2label)
label2char[char2label[c]] = c
seq2.append(char2label[c])
ndata.append(seq2)
ret.append(ndata)
return ret, char2label, label2char
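# load_adm_data() returns ([train_in, train_out, test_in, test_out, valid_in,
# valid_out], char2label, label2char): each admission sequence is flattened
# into a list of label ids, with 0 reserved for PAD and 1 for EOS.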
def prepare_adm_sample(dig_list, proc_list, word_space_size_input, word_space_size_output, index=-1):
if index < 0:
index = int(np.random.choice(len(dig_list), 1))
# print('\n{}'.format(index))
input_seq = dig_list[index]
output_seq = proc_list[index]
decoder_point = len(input_seq) + 1
seq_len = decoder_point + len(output_seq)
input_vec = np.zeros(seq_len)
for i, token in enumerate(input_seq):
input_vec[i] = token
input_vec[len(input_seq)] = 1
    output_vec = np.zeros(seq_len)
    output_vec[len(input_seq)] = 1
weights_vec = np.zeros(seq_len, dtype=np.float32)
for i, token in enumerate(output_seq):
output_vec[decoder_point + i] = token
weights_vec[decoder_point + i] = 1
# print(input_vec)
# print(output_vec)
# print('----')
# raise False
input_vec = np.array([onehot(code, word_space_size_input) for code in input_vec])
output_vec = np.array([onehot(code, word_space_size_output) for code in output_vec])
# print('\n')
# print(output_vec)
# print(weights_vec)
return (
np.reshape(input_vec, (1, -1, word_space_size_input)),
np.reshape(output_vec, (1, -1, word_space_size_output)),
seq_len,
np.reshape(weights_vec, (1, -1, 1)), # seq dim at middle
decoder_point, index
)
def prepare_sample_batch(dig_list,proc_list,word_space_size_input,word_space_size_output, bs, lm_train=False):
if isinstance(bs, int):
indexs = np.random.choice(len(dig_list),bs,replace=False)
else:
#print('from {} to {}'.format(bs[0],bs[1]))
indexs=list(range(bs[0],bs[1]))
minlen=0
moutlne=0
for index in indexs:
minlen=max(len(dig_list[index]),minlen)
moutlne = max(len(proc_list[index]+[0]), moutlne)
# moutlne*=2
input_vecs=[]
output_vecs=[]
seq_len = minlen + 1 + moutlne
decoder_point = minlen + 1
out_list=[]
masks=[]
for index in indexs:
# print('\n{}'.format(index))
ins=dig_list[index]
ose=proc_list[index]+[0]
out_list.append(ose)
input_vec = np.zeros(seq_len)
output_vec = np.zeros(seq_len)
        mask = np.zeros(seq_len, dtype=bool)  # np.bool alias was removed in recent NumPy
for iii, token in enumerate(ins):
input_vec[minlen-len(ins)+iii] = token
if lm_train:
output_vec[minlen - len(ins) + iii+1] = token
mask[minlen - len(ins) + iii+1] = True
input_vec[minlen] = 1
for iii, token in enumerate(ose):
output_vec[decoder_point + iii] = token
mask[decoder_point + iii]=True
# print(ins)
# print(ose)
# print(input_vec)
# print(output_vec)
# print('====')
input_vec = [onehot(code, word_space_size_input) for code in input_vec]
output_vec = [onehot(code, word_space_size_output) for code in output_vec]
input_vecs.append(input_vec)
output_vecs.append(output_vec)
masks.append(mask)
# raise False
return np.asarray(input_vecs), np.asarray(output_vecs), seq_len, decoder_point, np.asarray(masks), out_list
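# Each example built by prepare_sample_batch has length minlen + 1 + moutlne:
#   positions [0, minlen)           input tokens, right-aligned (left-padded with 0)
#   position  minlen                separator token 1 marking the start of decoding
#   positions [decoder_point, end)  target tokens plus a trailing 0; mask is True here
#   (with lm_train=True the shifted input tokens are also targets and masked)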
def adm_train():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, './save/checkpoints_adm')
llprint("Loading Data ... ")
llprint("Done!\n")
data, char2label, label2char = load_adm_data()
str2tok = pstr2tok = char2label
dig_list_train = data[0]
pro_list_train = data[1]
dig_list_test = data[2]
pro_list_test = data[3]
dig_list_valid = data[4]
pro_list_valid = data[5]
print('num train {}'.format(len(dig_list_train)))
    print('num valid {}'.format(len(dig_list_valid)))
print('dim in {}'.format(len(str2tok)))
print('dim out {}'.format(len(pstr2tok)))
batch_size = 32
input_size = len(str2tok)
output_size = len(pstr2tok)
sequence_max_length = 100
words_count = 16
word_size = 128
read_heads = 1
from_checkpoint = None
iterations = 10000
start_step = 0
options, _ = getopt.getopt(sys.argv[1:], '', ['checkpoint=', 'iterations=', 'start='])
for opt in options:
if opt[0] == '--checkpoint':
from_checkpoint = opt[1]
elif opt[0] == '--iterations':
iterations = int(opt[1])
elif opt[0] == '--start':
start_step = int(opt[1])
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
use_mem=True,
decoder_mode=True,
dual_controller=False,
write_protect=False,
dual_emb=False
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
#ncomputer.restore(session, ckpts_dir, ncomputer.print_config())
llprint("Done!\n")
if from_checkpoint is not None:
llprint("Restoring Checkpoint %s ... " % from_checkpoint)
ncomputer.restore(session, ckpts_dir, from_checkpoint)
llprint("Done!\n")
last_100_losses = []
start = 0 if start_step == 0 else start_step + 1
end = start_step + iterations + 1
start_time_100 = time.time()
avg_100_time = 0.
avg_counter = 0
train_writer = tf.summary.FileWriter('./data/summary/log_adm/', session.graph)
            min_tloss = 0  # despite the name, this tracks the best validation precision seen so far
for i in range(start, end + 1):
try:
llprint("\rIteration %d/%d" % (i, end))
input_vec, output_vec, seq_len, decoder_point, masks, _ = \
prepare_sample_batch(dig_list_train, pro_list_train, input_size, output_size, bs=batch_size, lm_train=False)
                    summarize = (i % 200 == 0)
loss_value, _ = session.run([
loss,
apply_gradients
], feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask: masks
})
last_100_losses.append(loss_value)
                    if summarize:
llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
summary = tf.Summary()
summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
trscores = []
for ii in range(5):
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(dig_list_train, pro_list_train, input_size, output_size,
batch_size, lm_train=False)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask: masks
})
out = np.reshape(np.asarray(out), [-1, seq_len, len(pstr2tok)])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
#if out[b][io]==0:
# break
out_list.append(out[b][io])
bout_list.append(out_list)
trscores.append(bleu_score(np.asarray(rout_list), np.asarray(bout_list), 0.99))
tescores = []
tescores2 = []
tescores3 = []
edit_scores = []
print('-----')
big_out_list = []
losses = []
ntb = len(dig_list_valid) // batch_size + 1
for ii in range(ntb):
if ii * batch_size == len(dig_list_valid):
break
bs = [ii * batch_size, min((ii + 1) * batch_size, len(dig_list_valid))]
rs = bs[1] - bs[0]
if bs[1] >= len(dig_list_valid):
bs = [len(dig_list_valid) - batch_size, len(dig_list_valid)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(dig_list_valid, pro_list_valid, input_size, output_size, bs, lm_train=False)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask: masks
})
# print(np.argmax(target_output, axis=-1))
# print(out)
# print(np.max(out, axis=-1))
# print(weights)
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
#if out[b][io] == 0:
# break
out_list.append(out[b][io])
bout_list.append(out_list)
# print(out_list)
# print('yyyyy{}'.format(loss_v))
# print(np.asarray(bout_list)[:rs])
tescores.append(bleu_score(np.asarray(rout_list)[:rs], np.asarray(bout_list)[:rs]))
edit_scores.append(batch_norm_edit_score(np.asarray(rout_list)[:rs], np.asarray(bout_list)[:rs], 1))
pre, rec, jac = set_score_pre_jac(np.asarray(rout_list)[:rs], np.asarray(bout_list)[:rs])
tescores2.append(pre)
tescores3.append(jac)
# print(pro_list_test)
# print(big_out_list)
# rec, pre, fsc = set_score_hist(np.asarray([pro_list_test]),np.asarray([big_out_list]))
tloss = np.mean(losses)
tpre = np.mean(tescores2)
print('tr score {} vs te store {}'.format(np.mean(trscores), np.mean(tescores)))
print('test edit_scores {} prec {} jac {}'.format(np.mean(edit_scores),
np.mean(tescores2),
np.mean(tescores3)))
print('test loss {}'.format(tloss))
summary.value.add(tag='train_bleu', simple_value=np.mean(trscores))
summary.value.add(tag='test_bleu', simple_value=np.mean(tescores))
summary.value.add(tag='test_loss', simple_value=tloss)
summary.value.add(tag='test_edit', simple_value=np.mean(edit_scores))
summary.value.add(tag='test_jac', simple_value=np.mean(tescores3))
summary.value.add(tag='test_precision', simple_value=np.mean(tescores2))
train_writer.add_summary(summary, i)
train_writer.flush()
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
print("\tApprox. time to completion: %.2f hours" % (estimated_time))
start_time_100 = time.time()
last_100_losses = []
if min_tloss < tpre:
min_tloss = tpre
llprint("\nSaving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, ncomputer.print_config())
llprint("Done!\n")
except KeyboardInterrupt:
sys.exit(0)
def adm_test():
llprint("Loading Data ... ")
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, './save/checkpoints_adm')
llprint("Done!\n")
neout = 1
data, _, char2label, label2char = repeat_test.load_adm_data()
data, eid, char2label, label2char = repeat_test.load_adm_data(limit_out_adm=neout, c2l=char2label, l2c=label2char)
str2tok = pstr2tok = char2label
dig_list_train = data[0]
pro_list_train = data[1]
dig_list_test = data[2]
pro_list_test = data[3]
dig_list_valid = data[4]
pro_list_valid = data[5]
eid_in_train = eid[0]
eid_out_train = eid[1]
eid_in_test = eid[2]
eid_out_test = eid[3]
eid_in_valid = eid[4]
eid_out_valid = eid[5]
print('num test {}'.format(len(dig_list_test)))
print('dim in {}'.format(len(str2tok)))
print('dim out {}'.format(len(pstr2tok)))
input_size = len(str2tok)
output_size = len(pstr2tok)
sequence_max_length = 100
words_count = 16
word_size = 128
read_heads = 1
batch_size = 1
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
use_mem=True,
decoder_mode=True,
dual_controller=False,
write_protect=False,
dual_emb=False
)
output, mem_info = ncomputer.get_outputs()
prob = tf.nn.softmax(output, dim=-1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.slice(ncomputer.target_output, [0, ncomputer.decoder_point, 0],
[batch_size, ncomputer.sequence_length - ncomputer.decoder_point, output_size]),
logits=tf.slice(output, [0, ncomputer.decoder_point, 0],
[batch_size, ncomputer.sequence_length - ncomputer.decoder_point, output_size]), dim=-1)
)
ncomputer.restore(session, ckpts_dir, ncomputer.print_config())
tescores = []
tejac = []
edit_scores = []
big_out_list = []
losses = []
for ii in range(len(dig_list_test)):
llprint('\r{}/{}'.format(ii, len(dig_list_test)))
input_data, target_output, seq_len, weights, decoder_point, ii = \
prepare_adm_sample(dig_list_test, pro_list_test, len(str2tok), len(pstr2tok), ii)
out, loss_v, mem_view = session.run([prob, loss, mem_info], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
})
losses.append(loss_v)
prob_ = np.reshape(np.asarray(out), [-1, seq_len, len(pstr2tok)])
pind = np.argsort(prob_, axis=-1)
out_list = []
for io in range(decoder_point, out.shape[1]):
label = pind[0][io][-1]
out_list.append(label)
cur_pre_index=0
# print(out_list)
# print(pro_list_test[ii])
# print(eid_out_test[ii])
# print('{} {} {}'.format(len(out_list), len(pro_list_test[ii]), len(eid_out_test[ii])))
# raise False
for n in range(neout):
sub_out_list=[]
rout = []
# cout_list = out_list * (n+1)
for ci, c in enumerate(pro_list_test[ii]):
if eid_out_test[ii][ci] == n:
rout.append(c)
sub_out_list.append(out_list[cur_pre_index])
cur_pre_index+=1
edit_scores.append(norm_edit_score(rout, sub_out_list, pprob=1))
# at L
pre, rec, jac = set_score_pre_jac(np.asarray([rout]), np.asarray([sub_out_list]))
tejac.append(jac)
# big_out_list.append(out_list)
# tescores.append(bleu_score(np.asarray([pro_list_test[ii]]), np.asarray([out_list]), print_prob=1))
# edit_scores.append(norm_edit_score(pro_list_test[ii], out_list, pprob=1))
# # at L
# pre, rec, jac = set_score_pre_jac(np.asarray([pro_list_test[ii]]), np.asarray([out_list]))
#
# tejac.append(jac)
tloss = np.mean(losses)
print('te BLEU score {}'.format(np.mean(tescores)))
print('te edit score {}'.format(np.mean(edit_scores)))
print('te jac score {}'.format(np.mean(tejac)))
print('test loss {}'.format(tloss))
if __name__ == '__main__':
# adm_train()
adm_test()
|
|
# $Id: __init__.py 7961 2016-07-28 22:02:47Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `Customizing the Parser`_ below for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source, settings)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
Customizing the Parser
----------------------
Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet. Patches welcome!
When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``. Pass ``rfc2822=True`` to enable an
initial RFC-2822 style header block, parsed as a "field_list" element (with
"class" attribute set to "rfc2822"). Currently this is the only body-level
element which is customizable without subclassing. (Tip: subclass `Parser`
and change its "state_classes" and "initial_state" attributes to refer to new
classes. Contact the author if you need more details.)
The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition. A common extension is the addition of
further implicit hyperlinks, like "RFC 2822". This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass. See `states.Inliner.implicit_inline()` for details. Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
from docutils import frontend, nodes, Component
from docutils.transforms import universal
class Parser(docutils.parsers.Parser):
"""The reStructuredText parser."""
supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
"""Aliases this parser supports."""
settings_spec = (
'reStructuredText Parser Options',
None,
(('Recognize and link to standalone PEP references (like "PEP 258").',
['--pep-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for PEP references '
'(default "http://www.python.org/dev/peps/").',
['--pep-base-url'],
{'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/',
'validator': frontend.validate_url_trailing_slash}),
('Template for PEP file part of URL. (default "pep-%04d")',
['--pep-file-url-template'],
{'metavar': '<URL>', 'default': 'pep-%04d'}),
('Recognize and link to standalone RFC references (like "RFC 822").',
['--rfc-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for RFC references (default "http://tools.ietf.org/html/").',
['--rfc-base-url'],
{'metavar': '<URL>', 'default': 'http://tools.ietf.org/html/',
'validator': frontend.validate_url_trailing_slash}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8,
'validator': frontend.validate_nonnegative_int}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Leave spaces before footnote references.',
['--leave-footnote-reference-space'],
{'action': 'store_false', 'dest': 'trim_footnote_reference_space'}),
('Disable directives that insert the contents of external file '
'("include" & "raw"); replaced with a "warning" system message.',
['--no-file-insertion'],
{'action': 'store_false', 'default': 1,
'dest': 'file_insertion_enabled',
'validator': frontend.validate_boolean}),
('Enable directives that insert the contents of external file '
'("include" & "raw"). Enabled by default.',
['--file-insertion-enabled'],
{'action': 'store_true'}),
('Disable the "raw" directives; replaced with a "warning" '
'system message.',
['--no-raw'],
{'action': 'store_false', 'default': 1, 'dest': 'raw_enabled',
'validator': frontend.validate_boolean}),
('Enable the "raw" directive. Enabled by default.',
['--raw-enabled'],
{'action': 'store_true'}),
('Token name set for parsing code with Pygments: one of '
'"long", "short", or "none (no parsing)". Default is "long".',
['--syntax-highlight'],
{'choices': ['long', 'short', 'none'],
'default': 'long', 'metavar': '<format>'}),
('Change straight quotation marks to typographic form: '
'one of "yes", "no", "alt[ernative]" (default "no").',
['--smart-quotes'],
{'default': False, 'validator': frontend.validate_ternary}),
('Inline markup recognized at word boundaries only '
'(adjacent to punctuation or whitespace). '
'Force character-level inline markup recognition with '
'"\ " (backslash + space). Default.',
['--word-level-inline-markup'],
{'action': 'store_false', 'dest': 'character_level_inline_markup'}),
('Inline markup recognized anywhere, regardless of surrounding '
'characters. Backslash-escapes must be used to avoid unwanted '
'markup recognition. Useful for East Asian languages. '
'Experimental.',
['--character-level-inline-markup'],
{'action': 'store_true', 'default': False,
'dest': 'character_level_inline_markup'}),
))
config_section = 'restructuredtext parser'
config_section_dependencies = ('parsers',)
def __init__(self, rfc2822=False, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
else:
self.initial_state = 'Body'
self.state_classes = states.state_classes
self.inliner = inliner
def get_transforms(self):
return Component.get_transforms(self) + [
universal.SmartQuotes]
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=document.reporter.debug_flag)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=True)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
class DirectiveError(Exception):
"""
Store a message and a system message level.
To be thrown from inside directive code.
Do not instantiate directly -- use `Directive.directive_error()`
instead!
"""
def __init__(self, level, message):
"""Set error `message` and `level`"""
Exception.__init__(self)
self.level = level
self.msg = message
class Directive(object):
"""
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Arguments are normally single whitespace-separated words. The
final argument may contain whitespace and/or newlines if
`final_argument_whitespace` is True.
If the form of the arguments is more complex, specify only one
argument (either required or optional) and set
`final_argument_whitespace` to True; the client code must do any
context-sensitive parsing.
When a directive implementation is being run, the directive class
is instantiated, and the `run()` method is executed. During
instantiation, the following instance variables are set:
- ``name`` is the directive type or name (string).
- ``arguments`` is the list of positional arguments (strings).
- ``options`` is a dictionary mapping option names (strings) to
values (type depends on option conversion functions; see
`option_spec` above).
- ``content`` is a list of strings, the directive content line by line.
- ``lineno`` is the absolute line number of the first line
of the directive.
- ``content_offset`` is the line offset of the first line of the content from
the beginning of the current input. Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state which called
the directive function.
Directive functions return a list of nodes which will be inserted
into the document tree at the point where the directive was
encountered. This can be an empty list if there is nothing to
insert.
For ordinary directives, the list must contain body elements or
structural elements. Some directives are intended specifically
for substitution definitions, and must return a list of `Text`
nodes and/or inline elements (suitable for inline insertion, in
place of the substitution reference). Such directives must verify
substitution definition context, typically using code like this::
if not isinstance(state, states.SubstitutionDef):
error = state_machine.reporter.error(
'Invalid context: the "%s" directive can only be used '
'within a substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
"""
# There is a "Creating reStructuredText Directives" how-to at
# <http://docutils.sf.net/docs/howto/rst-directives.html>. If you
# update this docstring, please update the how-to as well.
required_arguments = 0
"""Number of required directive arguments."""
optional_arguments = 0
"""Number of optional arguments after the required arguments."""
final_argument_whitespace = False
"""May the final argument contain whitespace?"""
option_spec = None
"""Mapping of option names to validator functions."""
has_content = False
"""May the directive have content?"""
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
self.name = name
self.arguments = arguments
self.options = options
self.content = content
self.lineno = lineno
self.content_offset = content_offset
self.block_text = block_text
self.state = state
self.state_machine = state_machine
def run(self):
        raise NotImplementedError('Must override run() in subclass.')
# Directive errors:
def directive_error(self, level, message):
"""
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
Preferably use the `debug`, `info`, `warning`, `error`, or `severe`
wrapper methods, e.g. ``self.error(message)`` to generate an
ERROR-level directive error.
"""
return DirectiveError(level, message)
def debug(self, message):
return self.directive_error(0, message)
def info(self, message):
return self.directive_error(1, message)
def warning(self, message):
return self.directive_error(2, message)
def error(self, message):
return self.directive_error(3, message)
def severe(self, message):
return self.directive_error(4, message)
# Convenience methods:
def assert_has_content(self):
"""
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
"""
if not self.content:
raise self.error('Content block expected for the "%s" directive; '
'none found.' % self.name)
def add_name(self, node):
"""Append self.options['name'] to node['names'] if it exists.
Also normalize the name string and register it as explicit target.
"""
if 'name' in self.options:
name = nodes.fully_normalize_name(self.options.pop('name'))
if 'name' in node:
del(node['name'])
node['names'].append(name)
self.state.document.note_explicit_target(node, node)
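# Editor's sketch, not part of docutils: a minimal Directive subclass wiring
# together the attributes and helpers documented above. The directive name
# used at registration time (e.g. via
# docutils.parsers.rst.directives.register_directive) is up to the caller.
class _ExampleBoxDirective(Directive):
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    has_content = True

    def run(self):
        self.assert_has_content()
        node = nodes.container('\n'.join(self.content))
        node['classes'].append('example-box')
        # Parse the directive content as nested reStructuredText.
        self.state.nested_parse(self.content, self.content_offset, node)
        self.add_name(node)
        return [node]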
def convert_directive_function(directive_fn):
"""
Define & return a directive class generated from `directive_fn`.
`directive_fn` uses the old-style, functional interface.
"""
class FunctionalDirective(Directive):
option_spec = getattr(directive_fn, 'options', None)
has_content = getattr(directive_fn, 'content', False)
_argument_spec = getattr(directive_fn, 'arguments', (0, 0, False))
required_arguments, optional_arguments, final_argument_whitespace \
= _argument_spec
def run(self):
return directive_fn(
self.name, self.arguments, self.options, self.content,
self.lineno, self.content_offset, self.block_text,
self.state, self.state_machine)
# Return new-style directive.
return FunctionalDirective
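def _example_usage():  # pragma: no cover
    # Editor's sketch, not part of docutils: the four "Usage" steps from the
    # module docstring, end to end, on a literal input string instead of a file.
    import docutils.frontend
    import docutils.utils
    settings = docutils.frontend.OptionParser(
        components=(Parser,)).get_default_settings()
    parser = Parser()
    document = docutils.utils.new_document('<usage sketch>', settings)
    parser.parse('Title\n=====\n\nHello, *world*!\n', document)
    return document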
|
|
"""deCONZ service tests."""
from copy import deepcopy
from asynctest import Mock, patch
import pytest
import voluptuous as vol
from homeassistant.components import deconz
from .test_gateway import (
BRIDGEID,
ENTRY_CONFIG,
DECONZ_WEB_REQUEST,
setup_deconz_integration,
)
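# Minimal deCONZ REST payloads reused by the device-refresh test below; the
# ids and unique ids are arbitrary test values.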
GROUP = {
"1": {
"id": "Group 1 id",
"name": "Group 1 name",
"type": "LightGroup",
"state": {},
"action": {},
"scenes": [{"id": "1", "name": "Scene 1"}],
"lights": ["1"],
}
}
LIGHT = {
"1": {
"id": "Light 1 id",
"name": "Light 1 name",
"state": {"reachable": True},
"type": "Light",
"uniqueid": "00:00:00:00:00:00:00:01-00",
}
}
SENSOR = {
"1": {
"id": "Sensor 1 id",
"name": "Sensor 1 name",
"type": "ZHALightLevel",
"state": {"lightlevel": 30000, "dark": False},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
}
}
async def test_service_setup(hass):
"""Verify service setup works."""
assert deconz.services.DECONZ_SERVICES not in hass.data
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await deconz.services.async_setup_services(hass)
assert hass.data[deconz.services.DECONZ_SERVICES] is True
assert async_register.call_count == 2
async def test_service_setup_already_registered(hass):
"""Make sure that services are only registered once."""
hass.data[deconz.services.DECONZ_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await deconz.services.async_setup_services(hass)
async_register.assert_not_called()
async def test_service_unload(hass):
"""Verify service unload works."""
hass.data[deconz.services.DECONZ_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await deconz.services.async_unload_services(hass)
assert hass.data[deconz.services.DECONZ_SERVICES] is False
assert async_remove.call_count == 2
async def test_service_unload_not_registered(hass):
"""Make sure that services can only be unloaded once."""
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await deconz.services.async_unload_services(hass)
assert deconz.services.DECONZ_SERVICES not in hass.data
async_remove.assert_not_called()
async def test_configure_service_with_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {
deconz.services.SERVICE_FIELD: "/light/2",
deconz.CONF_BRIDGEID: BRIDGEID,
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch(
"pydeconz.DeconzSession.async_put_state", return_value=Mock(True)
) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with("/light/2", {"on": True, "attr1": 10, "attr2": 20})
async def test_configure_service_with_entity(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
gateway.deconz_ids["light.test"] = "/light/1"
data = {
deconz.services.SERVICE_ENTITY: "light.test",
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch(
"pydeconz.DeconzSession.async_put_state", return_value=Mock(True)
) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with("/light/1", {"on": True, "attr1": 10, "attr2": 20})
async def test_configure_service_with_entity_and_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
gateway.deconz_ids["light.test"] = "/light/1"
data = {
deconz.services.SERVICE_ENTITY: "light.test",
deconz.services.SERVICE_FIELD: "/state",
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch(
"pydeconz.DeconzSession.async_put_state", return_value=Mock(True)
) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with(
"/light/1/state", {"on": True, "attr1": 10, "attr2": 20}
)
async def test_configure_service_with_faulty_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {deconz.services.SERVICE_FIELD: "light/2", deconz.services.SERVICE_DATA: {}}
with pytest.raises(vol.Invalid):
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
async def test_configure_service_with_faulty_entity(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {
deconz.services.SERVICE_ENTITY: "light.nonexisting",
deconz.services.SERVICE_DATA: {},
}
with patch(
"pydeconz.DeconzSession.async_put_state", return_value=Mock(True)
) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_not_called()
async def test_service_refresh_devices(hass):
"""Test that service can refresh devices."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {deconz.CONF_BRIDGEID: BRIDGEID}
with patch(
"pydeconz.DeconzSession.async_get_state",
return_value={"groups": GROUP, "lights": LIGHT, "sensors": SENSOR},
):
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_DEVICE_REFRESH, service_data=data
)
await hass.async_block_till_done()
assert gateway.deconz_ids == {
"light.group_1_name": "/groups/1",
"light.light_1_name": "/lights/1",
"scene.group_1_name_scene_1": "/groups/1/scenes/1",
"sensor.sensor_1_name": "/sensors/1",
}
|
|
import asyncio
import inspect
import urllib.request
from operator import methodcaller
import snug
async def awaitable(obj):
"""an awaitable returning given object"""
await asyncio.sleep(0)
return obj
class MockAsyncClient:
def __init__(self, response):
self.response = response
async def send(self, req):
await asyncio.sleep(0)
self.request = req
return self.response
snug.send_async.register(MockAsyncClient, MockAsyncClient.send)
class MockClient(object):
def __init__(self, response):
self.response = response
def send(self, req):
self.request = req
return self.response
snug.send.register(MockClient, MockClient.send)
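# Both mocks register themselves with snug's singledispatch senders, so
# execute()/execute_async() route requests to them; the last request sent is
# recorded on the instance for the assertions below.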
def test__execute__():
class StringClient:
def __init__(self, mappings):
self.mappings = mappings
def send(self, req):
return self.mappings[req]
snug.send.register(StringClient, StringClient.send)
client = StringClient(
{
"foo/posts/latest": "redirect:/posts/latest/",
"foo/posts/latest/": "redirect:/posts/december/",
"foo/posts/december/": b"hello world",
}
)
class MyQuery(object):
def __iter__(self):
redirect = yield "/posts/latest"
redirect = yield redirect.split(":")[1]
response = yield redirect.split(":")[1]
return response.decode("ascii")
assert (
snug.Query.__execute__(MyQuery(), client, lambda s: "foo" + s)
== "hello world"
)
def myquery():
return (yield snug.GET("my/url"))
class TestExecute:
def test_defaults(self, mocker):
send = mocker.patch("snug.query.send", autospec=True)
assert snug.execute(myquery()) == send.return_value
client, req = send.call_args[0]
assert isinstance(client, urllib.request.OpenerDirector)
assert req == snug.GET("my/url")
def test_custom_client(self):
client = MockClient(snug.Response(204))
result = snug.execute(myquery(), client=client)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_custom_execute(self):
client = MockClient(snug.Response(204))
class MyQuery(object):
def __execute__(self, client, auth):
return client.send(snug.GET("my/url"))
result = snug.execute(MyQuery(), client=client)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_auth(self):
client = MockClient(snug.Response(204))
result = snug.execute(myquery(), auth=("user", "pw"), client=client)
assert result == snug.Response(204)
assert client.request == snug.GET(
"my/url", headers={"Authorization": "Basic dXNlcjpwdw=="}
)
def test_none_auth(self):
client = MockClient(snug.Response(204))
result = snug.execute(myquery(), auth=None, client=client)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_auth_callable(self):
client = MockClient(snug.Response(204))
auther = methodcaller("with_headers", {"X-My-Auth": "letmein"})
result = snug.execute(myquery(), auth=auther, client=client)
assert result == snug.Response(204)
assert client.request == snug.GET(
"my/url", headers={"X-My-Auth": "letmein"}
)
class TestExecuteAsync:
def test_defaults(self, loop, mocker):
send = mocker.patch(
"snug.query.send_async", return_value=awaitable(snug.Response(204))
)
future = snug.execute_async(myquery())
result = loop.run_until_complete(future)
assert result == snug.Response(204)
client, req = send.call_args[0]
assert isinstance(client, asyncio.AbstractEventLoop)
assert req == snug.GET("my/url")
def test_custom_client(self, loop):
client = MockAsyncClient(snug.Response(204))
future = snug.execute_async(myquery(), client=client)
result = loop.run_until_complete(future)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_custom_execute(self, loop):
client = MockAsyncClient(snug.Response(204))
class MyQuery:
def __execute_async__(self, client, auth):
return client.send(snug.GET("my/url"))
future = snug.execute_async(MyQuery(), client=client)
result = loop.run_until_complete(future)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_auth(self, loop):
client = MockAsyncClient(snug.Response(204))
future = snug.execute_async(
myquery(), auth=("user", "pw"), client=client
)
result = loop.run_until_complete(future)
assert result == snug.Response(204)
assert client.request == snug.GET(
"my/url", headers={"Authorization": "Basic dXNlcjpwdw=="}
)
def test_none_auth(self, loop):
client = MockAsyncClient(snug.Response(204))
future = snug.execute_async(myquery(), auth=None, client=client)
result = loop.run_until_complete(future)
assert result == snug.Response(204)
assert client.request == snug.GET("my/url")
def test_auth_callable(self, loop):
client = MockAsyncClient(snug.Response(204))
auther = methodcaller("with_headers", {"X-My-Auth": "letmein"})
future = snug.execute_async(myquery(), auth=auther, client=client)
result = loop.run_until_complete(future)
assert result == snug.Response(204)
assert client.request == snug.GET(
"my/url", headers={"X-My-Auth": "letmein"}
)
def test_executor():
executor = snug.executor(client="foo")
assert executor.keywords == {"client": "foo"}
def test_async_executor():
executor = snug.async_executor(client="foo")
assert executor.keywords == {"client": "foo"}
def test_relation():
class Foo:
@snug.related
class Bar(snug.Query):
def __iter__(self):
pass
def __init__(self, a, b):
self.a, self.b = a, b
class Qux(snug.Query):
def __iter__(self):
pass
def __init__(self, a, b):
self.a, self.b = a, b
f = Foo()
bar = f.Bar(b=4)
assert isinstance(bar, Foo.Bar)
assert bar.a is f
bar2 = Foo.Bar(f, 4)
assert isinstance(bar2, Foo.Bar)
assert bar.a is f
# staticmethod opts out
qux = f.Qux(1, 2)
assert isinstance(qux, f.Qux)
qux2 = Foo.Qux(1, 2)
assert isinstance(qux2, Foo.Qux)
def test_identity():
obj = object()
assert snug.query._identity(obj) is obj
def test__execute_async__(loop):
class StringClient:
def __init__(self, mappings):
self.mappings = mappings
def send(self, req):
return self.mappings[req]
snug.send_async.register(StringClient, StringClient.send)
client = StringClient(
{
"foo/posts/latest": awaitable("redirect:/posts/latest/"),
"foo/posts/latest/": awaitable("redirect:/posts/december/"),
"foo/posts/december/": awaitable(b"hello world"),
}
)
class MyQuery:
def __iter__(self):
redirect = yield "/posts/latest"
redirect = yield redirect.split(":")[1]
response = yield redirect.split(":")[1]
return response.decode("ascii")
future = snug.Query.__execute_async__(
MyQuery(), client, lambda s: "foo" + s
)
assert inspect.isawaitable(future)
result = loop.run_until_complete(future)
assert result == "hello world"
|
|
"""
Let's work out some basic parsing of some productions from the Python grammar.
Start from a py3 port of parts of Parson, adapted to work on tokens
from `tokenize`.
"""
import sys
import ast
import token as T
from tokenize import tokenize
# First an annoying necessary hack. Certain of the AST types (the
# 'simple' ones) do not carry source-position attributes: the
# constructors silently drop them. (If this is documented, I missed
# it. I suppose the reason is efficiency; but this position info needs
# to live *somewhere*, and the AST node is its natural home.) For all
# of these types let's define subclasses that do retain these
# attributes.
position_attributes = dict(_attributes = ('lineno', 'col_offset'))
def position_extend(class_):
return type(class_.__name__, (class_,), position_attributes)
def map_extend(names):
return [position_extend(getattr(ast, name)) for name in names.split()]
And, Or = map_extend('And Or')
Add, Sub, Mult, Div, Mod, Pow, LShift, RShift, BitOr, BitXor, BitAnd, FloorDiv = \
map_extend('Add Sub Mult Div Mod Pow LShift RShift BitOr BitXor BitAnd FloorDiv')
Invert, Not, UAdd, USub = \
map_extend('Invert Not UAdd USub')
Eq, NotEq, Lt, LtE, Gt, GtE, Is, IsNot, In, NotIn = \
map_extend('Eq NotEq Lt LtE Gt GtE Is IsNot In NotIn')
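# For example, `Add` above is a subclass of ast.Add whose instances keep the
# lineno/col_offset keywords they are constructed with (see Subst() below), so
# operator nodes carry source positions like every other node in the tree.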
# OK, back to parsing.
if __name__ == '__main__':
# XXX temporary hack during development
import parson3 as P
else:
from . import parson3 as P
def main(argv):
filename = argv[1]
if 0:
with open(filename, 'rb') as f:
tokens = list(tokenize(f.readline))
print_tokens(tokens)
demo_parse(tokens)
else:
with open(filename, 'rb') as f:
t = parse(f)
import astpp
print(astpp.dump(t, include_attributes=True))
class Name(P._Pex):
def __init__(self):
self.face = 'XXX'
def run(self, s, far, state):
i, vals = state
token = s[i]
if token.type != T.NAME or token.string in keywords:
return []
vals += (token,)
return [(_step(far, i+1), vals)]
class Tok(P._Pex):
"Matches a single lexical token of a given kind."
def __init__(self, kind, literal_string=None, keep=True):
self.kind = kind
self.expected = literal_string
self.keep = keep
self.face = 'XXX'
def run(self, s, far, state):
i, vals = state
token = s[i]
if token.type != self.kind:
return []
if self.expected is not None and token.string != self.expected:
return []
if self.keep:
vals += (token,)
return [(_step(far, i+1), vals)]
def _step(far, i):
"Update far with a new position."
far[0] = max(far[0], i)
return i
"""
file_input: (NEWLINE | stmt)* ENDMARKER
stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: expr_stmt
compound_stmt: if_stmt
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
expr_stmt: testlist_expr ('=' testlist_expr)*
testlist_expr: test
test: arith_expr
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom trailer* ('**' factor)?
atom: '(' test ')' | NAME | NUMBER | STRING+ | 'None' | 'True' | 'False'
trailer: '(' [arglist] ')'
arglist: (argument ',')* argument [',']
argument: test ['=' test]
"""
NUMBER = Tok(T.NUMBER)
STRING = Tok(T.STRING)
NAME = Name()
OP = lambda s: Tok(T.OP, s)
Punct = lambda s: Tok(T.OP, s, keep=False)
keywords = set()
def Kwd(s, keep=False):
keywords.add(s)
return Tok(T.NAME, s, keep=keep)
def Subst(string, maker):
return OP(string) >> (lambda t: lambda ctx: maker(lineno=t.start[0], col_offset=t.start[1]))
def wrapping(maker, wrapper):
return lambda t: lambda ctx: maker(wrapper(t.string),
lineno=t.start[0],
col_offset=t.start[1])
def propagating(maker):
result = lambda node_fn, *node_fns: lambda ctx: next(ast.copy_location(maker(node, *[n(ctx) for n in node_fns]), node)
for node in [node_fn(ctx)])
result.__name__ = maker.__name__
return result
def hug(*args):
return lambda ctx: [arg(ctx) for arg in args]
def make_module(*stmts):
m = ast.Module(list(stmts))
return ast.copy_location(m, stmts[0]) if stmts else m
def make_if(kwd, test, then, *rest):
# (This'd be simpler with a different form of the grammar.)
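    # For example (illustrative): "if a: x\nelif b: y\nelse: z" reaches this
    # function as (if_tok, a, [x], elif_tok, b, [y], [z]); the lone elif in
    # `rest` recurses through make_if, yielding
    # ast.If(a, [x], [ast.If(b, [y], [z])]).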
test = test(ast.Load())
if not rest: else_ = []
elif len(rest) == 1: else_ = rest[0]
else: else_ = [make_if(*rest)]
return ast.If(test, then, else_,
lineno=kwd.start[0],
col_offset=kwd.start[1])
def maybe_assignment(*expr_fns):
if len(expr_fns) == 1:
node0 = expr_fns[0](ast.Load())
stmt = ast.Expr(node0)
else:
lhses = [fn(ast.Store()) for fn in expr_fns[:-1]]
node0 = lhses[0]
stmt = ast.Assign(lhses, expr_fns[-1](ast.Load()))
return ast.copy_location(stmt, node0)
def fill_context(ctx):
return lambda f: f(ctx)
atom = P.delay(lambda:
Punct('(') + test + Punct(')')
| NUMBER >> wrapping(ast.Num, ast.literal_eval)
| STRING.plus() >> (lambda *tokens: lambda ctx: ast.Str(ast.literal_eval(' '.join(t.string for t in tokens)),
lineno=tokens[0].start[0],
col_offset=tokens[0].start[1]))
| Tok(T.NAME, 'None') >> wrapping(ast.NameConstant, lambda s: None)
| Tok(T.NAME, 'True') >> wrapping(ast.NameConstant, lambda s: True)
| Tok(T.NAME, 'False') >> wrapping(ast.NameConstant, lambda s: False)
| NAME >> (lambda t: lambda ctx: ast.Name(t.string, ctx,
lineno=t.start[0],
col_offset=t.start[1]))
)
arglist = P.delay(lambda:
(test + Punct(',')).star() + test + Punct(',').maybe())
trailer = (Punct('(') + (arglist.maybe() >> hug) + Punct(')')
+ propagating(lambda f, args: ast.Call(f, args, [], None, None)))
power = P.delay(lambda:
P.seclude(
atom + trailer.star() + (Subst('**', Pow) + factor + propagating(ast.BinOp)).maybe()))
factor = P.delay(lambda:
( (( Subst('+', UAdd)
| Subst('-', USub)
| Subst('~', Invert)) + factor) >> propagating(ast.UnaryOp))
| power)
term = P.seclude(
factor + (( Subst('*', Mult)
| Subst('/', Div)
| Subst('%', Mod)
| Subst('//', FloorDiv)) + factor + propagating(ast.BinOp)).star())
arith_expr = P.seclude(
term + (( Subst('+', Add)
| Subst('-', Sub)) + term + propagating(ast.BinOp)).star())
test = arith_expr
expr_stmt = P.seclude(
test + (Punct('=') + test).star()
+ maybe_assignment)
simple_stmt = expr_stmt + Tok(T.NEWLINE, keep=False)
stmt = P.delay(lambda: simple_stmt | compound_stmt)
suite = (
simple_stmt
| (Tok(T.NEWLINE, keep=False) + Tok(T.INDENT, keep=False) + stmt.plus() + Tok(T.DEDENT, keep=False))
) >> (lambda *stmts: list(stmts))
if_stmt = P.seclude(
Kwd('if', keep=True) + test + Punct(':') + suite
+ (Kwd('elif', keep=True) + test + Punct(':') + suite).star()
+ (Kwd('else') + Punct(':') + suite).maybe()
+ make_if
)
compound_stmt = if_stmt
file_input = (Tok(56, keep=False) # 'ENCODING' token -- yeah, no name for it
+ (Tok(T.NEWLINE, keep=False) | stmt).star()
+ Tok(T.ENDMARKER, keep=False)) >> make_module
top = file_input
def parse(f):
tokens = list(tokenize(f.readline))
# print_tokens(tokens)
far = [0]
for i, vals in top.run(tokens, far, (0, ())):
if 1:
assert i == len(tokens), "not full parse: %d of %r" % (i, tokens)
assert len(vals) == 1
return vals[0]
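# A hedged convenience sketch (not part of the original module): parse an
# in-memory snippet instead of a file on disk, e.g.
# parse_string("if True:\n    x = 1 + 2 * 3\n").
def parse_string(source):
    "Tokenize and parse `source` (str or bytes) via an in-memory buffer."
    import io
    if isinstance(source, str):
        source = source.encode('utf-8')
    return parse(io.BytesIO(source))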
def demo_parse(tokens):
far = [0]
for i, vals in top.run(tokens, far, (0, ())):
print(i, tokens[i:])
print('vals', vals)
try:
import astpp
except ImportError:
continue
for tree in vals:
print(tree)
print(astpp.dump(tree, include_attributes=True))
print('far', far[0])
def print_tokens(tokens):
for t in tokens:
# print_token(t)
skim_token(t)
def skim_token(t):
print(T.tok_name[t.type], T.tok_name[t.exact_type], t.string)
return
if T.tok_name[t.type] == T.tok_name[t.exact_type]:
print(T.tok_name[t.type], t.string)
else:
print(T.tok_name[t.type], T.tok_name[t.exact_type], t.string)
def print_token(t):
# print(t.count)
# print(t.index)
# print()
print('line', t.line)
print('start', t.start)
print('end', t.end)
print('string', t.string)
print('type', t.type, T.tok_name[t.type])
print('exact_type', t.exact_type, T.tok_name[t.exact_type])
print()
if __name__ == '__main__':
main(sys.argv)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing statistics of samples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import spectral_ops
from tensorflow.python.ops.distributions import util
__all__ = [
"auto_correlation",
"percentile",
]
# TODO(langmore) Write separate versions of this for real/complex dtype, taking
# advantage of optimized real-fft ops.
def auto_correlation(
x,
axis=-1,
max_lags=None,
center=True,
normalize=True,
name="auto_correlation"):
"""Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider
(in equation above). If `max_lags >= x.shape[axis]`, we effectively
re-set `max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
"""
# Implementation details:
# Extend length N / 2 1-D array x to length N by zero padding onto the end.
# Then, set
# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
# It is not hard to see that
# F[x]_k Conj(F[x]_k) = F[R]_k, where
# R_m := sum_n x_n Conj(x_{(n - m) mod N}).
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
# based version of estimating RXX.
# Note that this is a special case of the Wiener-Khinchin Theorem.
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# Rotate dimensions of x in order to put axis at the rightmost dim.
# FFT op requires this.
rank = util.prefer_static_rank(x)
if axis < 0:
axis = rank + axis
shift = rank - 1 - axis
# Suppose x.shape[axis] = T, so there are T "time" steps.
# ==> x_rotated.shape = B + [T],
# where B is x_rotated's batch shape.
x_rotated = util.rotate_transpose(x, shift)
if center:
x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)
# x_len = N / 2 from above explanation. The length of x along axis.
# Get a value for x_len that works in all cases.
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
    # the moment it is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = math_ops.cast(x_len, np.float64)
target_length = math_ops.pow(
np.float64(2.),
math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
pad_length = math_ops.cast(target_length - x_len_float64, np.int32)
# We should have:
# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
# = B + [T + pad_length]
x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)
dtype = x.dtype
if not dtype.is_complex:
if not dtype.is_floating:
raise TypeError("Argument x must have either float or complex dtype"
" found: {}".format(dtype))
x_rotated_pad = math_ops.complex(x_rotated_pad,
dtype.real_dtype.as_numpy_dtype(0.))
# Autocorrelation is IFFT of power-spectral density (up to some scaling).
fft_x_rotated_pad = spectral_ops.fft(x_rotated_pad)
spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
# shifted_product is R[m] from above detailed explanation.
# It is the inner product sum_n X[n] * Conj(X[n - m]).
shifted_product = spectral_ops.ifft(spectral_density)
# Cast back to real-valued if x was real to begin with.
shifted_product = math_ops.cast(shifted_product, dtype)
# Figure out if we can deduce the final static shape, and set max_lags.
# Use x_rotated as a reference, because it has the time dimension in the far
# right, and was created before we performed all sorts of crazy shape
# manipulations.
know_static_shape = True
if not x_rotated.shape.is_fully_defined():
know_static_shape = False
if max_lags is None:
max_lags = x_len - 1
else:
max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
max_lags_ = tensor_util.constant_value(max_lags)
if max_lags_ is None or not know_static_shape:
know_static_shape = False
max_lags = math_ops.minimum(x_len - 1, max_lags)
else:
max_lags = min(x_len - 1, max_lags_)
# Chop off the padding.
# We allow users to provide a huge max_lags, but cut it off here.
# shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
shifted_product_chopped = shifted_product[..., :max_lags + 1]
# If possible, set shape.
if know_static_shape:
chopped_shape = x_rotated.shape.as_list()
chopped_shape[-1] = min(x_len, max_lags + 1)
shifted_product_chopped.set_shape(chopped_shape)
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = math_ops.cast(x_len, dtype.real_dtype)
max_lags = math_ops.cast(max_lags, dtype.real_dtype)
denominator = x_len - math_ops.range(0., max_lags + 1.)
denominator = math_ops.cast(denominator, dtype)
shifted_product_rotated = shifted_product_chopped / denominator
if normalize:
shifted_product_rotated /= shifted_product_rotated[..., :1]
# Transpose dimensions back to those of x.
return util.rotate_transpose(shifted_product_rotated, -shift)
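# A hedged NumPy reference for the estimator documented above (not part of
# the public API): useful for sanity-checking `auto_correlation` on small
# inputs. `x` is a 1-D array-like; the result has length `max_lags + 1`.
def _np_auto_correlation_reference(x, max_lags):
  x = np.asarray(x)
  w = (x - x.mean()) / x.std()  # w[n] = (x[n] - mu) / s as in the docstring.
  n = len(x)
  return np.array([np.sum(w[m:] * np.conj(w[:n - m])) / (n - m)
                   for m in range(max_lags + 1)])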
# TODO(langmore) To make equivalent to numpy.percentile:
# Make work with a sequence of floats or single float for 'q'.
# Make work with "linear", "midpoint" interpolation. (linear should be default)
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
  x = [[1., 2.],
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
  x = [[1., 2.],
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
The axis that hold independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
    keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1.
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
"""
name = name or "percentile"
allowed_interpolations = {"lower", "higher", "nearest"}
if interpolation is None:
interpolation = "nearest"
else:
if interpolation not in allowed_interpolations:
raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
(allowed_interpolations, interpolation))
with ops.name_scope(name, [x, q]):
x = ops.convert_to_tensor(x, name="x")
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = math_ops.to_double(q, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0),
check_ops.assert_greater_equal(q, math_ops.to_double(0.)),
check_ops.assert_less_equal(q, math_ops.to_double(100.))
], q)
if axis is None:
y = array_ops.reshape(x, [-1])
else:
axis = ops.convert_to_tensor(axis, name="axis")
check_ops.assert_integer(axis)
axis_ndims = _get_static_ndims(
axis, expect_static=True, expect_ndims_no_more_than=1)
axis_const = tensor_util.constant_value(axis)
if axis_const is None:
raise ValueError(
"Expected argument 'axis' to be statically available. Found: %s" %
axis)
axis = axis_const
if axis_ndims == 0:
axis = [axis]
axis = [int(a) for a in axis]
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.to_double(array_ops.shape(y)[-1])
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
elif interpolation == "higher":
index = math_ops.floor((d - 1) * frac_at_q_or_above)
elif interpolation == "nearest":
index = math_ops.round((d - 1) * frac_at_q_or_above)
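    # Worked example (illustrative): for x = [1., 2., 3., 4.] and q = 30,
    # frac_at_q_or_above = 0.7 and (d - 1) * 0.7 = 2.1, so 'lower' takes
    # index ceil(2.1) = 3, 'higher' takes floor(2.1) = 2, and 'nearest'
    # takes round(2.1) = 2 of the descending-sorted copy [4., 3., 2., 1.],
    # giving 1.0, 2.0 and 2.0 respectively (matching the docstring examples).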
# If d is gigantic, then we would have d == d - 1, even in double... So
# let's use max/min to avoid out of bounds errors.
d = array_ops.shape(y)[-1]
# d - 1 will be distinct from d in int32.
index = clip_ops.clip_by_value(math_ops.to_int32(index), 0, d - 1)
    # Sort everything, not just the top 'k' entries; this lets multiple calls
    # share a single sort op under the hood (via CSE).
sorted_y = _sort_tensor(y)
# result.shape = B
result = sorted_y[..., index]
result.set_shape(y.get_shape()[:-1])
if keep_dims:
if axis is None:
# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
ones_vec = array_ops.ones(
shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
result *= array_ops.ones(ones_vec, dtype=x.dtype)
else:
result = _insert_back_keep_dims(result, axis)
return result
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions "ndims" of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that
x has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.get_shape().ndims
if ndims is None:
shape_const = tensor_util.constant_value(array_ops.shape(x))
if shape_const is not None:
      ndims = shape_const.size
if ndims is None:
if expect_static:
raise ValueError(
"Expected argument 'x' to have statically defined 'ndims'. Found: " %
x)
return
if expect_ndims is not None:
ndims_message = ("Expected argument 'x' to have ndims %s. Found tensor %s"
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
"Expected argument 'x' to have ndims >= %d. Found tensor %s" % (
expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
"Expected argument 'x' to have ndims <= %d. Found tensor %s" % (
expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return array_ops.rank(x)
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = array_ops.expand_dims(x, axis=i)
return x
def _make_static_axis_non_negative(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative.
Args:
axis: Iterable over Python integers.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If values in `axis` are too big/small to index into `ndims`.
"""
non_negative_axis = []
for d in axis:
if d >= 0:
if d >= ndims:
raise ValueError("dim %d not in the interval [0, %d]." % (d, ndims - 1))
non_negative_axis.append(d)
else:
if d < -1 * ndims:
raise ValueError(
"Negatively indexed dim %d not in the interval [-%d, -1]" % (d,
ndims))
non_negative_axis.append(ndims + d)
return non_negative_axis
def _move_dims_to_flat_end(x, axis, x_ndims):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
"""
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# front_dims = [0, 2] in example above.
front_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
x_permed = array_ops.transpose(x, perm=front_dims + list(axis))
if x.get_shape().is_fully_defined():
x_shape = x.get_shape().as_list()
# front_shape = [a, c], end_shape = [b * d]
front_shape = [x_shape[i] for i in front_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = front_shape + end_shape
else:
front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)]
end_shape = [-1]
full_shape = array_ops.concat([front_shape, end_shape], axis=0)
return array_ops.reshape(x_permed, shape=full_shape)
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = nn_ops.top_k(tensor, k=array_ops.shape(tensor)[-1])
return sorted_
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import six
from senlin.common import consts
from senlin.common import exception
from senlin.db.sqlalchemy import api as db_api
from senlin.engine import parser
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit.db import shared
def _create_action(context, action=shared.sample_action, **kwargs):
data = parser.simple_parse(action)
data['user'] = context.user
data['project'] = context.project
data['domain'] = context.domain
data.update(kwargs)
return db_api.action_create(context, data)
class DBAPIActionTest(base.SenlinTestCase):
def setUp(self):
super(DBAPIActionTest, self).setUp()
self.ctx = utils.dummy_context()
def test_action_create(self):
data = parser.simple_parse(shared.sample_action)
action = _create_action(self.ctx)
self.assertIsNotNone(action)
self.assertEqual(data['name'], action.name)
self.assertEqual(data['target'], action.target)
self.assertEqual(data['action'], action.action)
self.assertEqual(data['cause'], action.cause)
self.assertEqual(data['timeout'], action.timeout)
self.assertEqual(data['status'], action.status)
self.assertEqual(data['status_reason'], action.status_reason)
self.assertEqual(10, action.inputs['max_size'])
self.assertEqual(self.ctx.user, action.user)
self.assertEqual(self.ctx.project, action.project)
self.assertEqual(self.ctx.domain, action.domain)
self.assertIsNone(action.outputs)
def test_action_update(self):
action = _create_action(self.ctx)
values = {
'status': 'ERROR',
'status_reason': 'Cluster creation failed',
'data': {'key1': 'value1', 'key2': 'value2'}
}
db_api.action_update(self.ctx, action.id, values)
action = db_api.action_get(self.ctx, action.id)
self.assertEqual('ERROR', action.status)
self.assertEqual('Cluster creation failed', action.status_reason)
self.assertEqual({'key1': 'value1', 'key2': 'value2'}, action.data)
self.assertRaises(exception.ActionNotFound,
db_api.action_update, self.ctx, 'fake-uuid', values)
def test_action_get(self):
data = parser.simple_parse(shared.sample_action)
action = _create_action(self.ctx)
retobj = db_api.action_get(self.ctx, action.id)
self.assertIsNotNone(retobj)
self.assertEqual(data['name'], retobj.name)
self.assertEqual(data['target'], retobj.target)
self.assertEqual(data['action'], retobj.action)
self.assertEqual(data['cause'], retobj.cause)
self.assertEqual(data['timeout'], retobj.timeout)
self.assertEqual(data['status'], retobj.status)
self.assertEqual(data['status_reason'], retobj.status_reason)
self.assertEqual(10, retobj.inputs['max_size'])
self.assertIsNone(retobj.outputs)
def test_action_get_project_safe(self):
parser.simple_parse(shared.sample_action)
action = _create_action(self.ctx)
new_ctx = utils.dummy_context(project='another-project')
retobj = db_api.action_get(new_ctx, action.id, project_safe=True)
self.assertIsNone(retobj)
retobj = db_api.action_get(new_ctx, action.id, project_safe=False)
self.assertIsNotNone(retobj)
def test_action_get_with_admin_context(self):
parser.simple_parse(shared.sample_action)
action = _create_action(self.ctx)
new_ctx = utils.dummy_context(project='another-project', is_admin=True)
retobj = db_api.action_get(new_ctx, action.id, project_safe=True)
self.assertIsNotNone(retobj)
def test_action_acquire_1st_ready(self):
specs = [
{'name': 'A01', 'status': 'INIT'},
{'name': 'A02', 'status': 'READY', 'owner': 'worker1'},
{'name': 'A03', 'status': 'INIT'},
{'name': 'A04', 'status': 'READY'}
]
for spec in specs:
_create_action(self.ctx, **spec)
worker = 'worker2'
timestamp = time.time()
action = db_api.action_acquire_1st_ready(self.ctx, worker, timestamp)
self.assertEqual('A04', action.name)
self.assertEqual('worker2', action.owner)
self.assertEqual(consts.ACTION_RUNNING, action.status)
self.assertEqual(timestamp, action.start_time)
def test_action_get_all_by_owner(self):
specs = [
{'name': 'A01', 'owner': 'work1'},
{'name': 'A02', 'owner': 'work2'},
{'name': 'A03', 'owner': 'work1'},
{'name': 'A04', 'owner': 'work3'}
]
for spec in specs:
_create_action(self.ctx, **spec)
actions = db_api.action_get_all_by_owner(self.ctx, 'work1')
self.assertEqual(2, len(actions))
names = [p.name for p in actions]
for spec in ['A01', 'A03']:
self.assertIn(spec, names)
def test_action_get_all(self):
specs = [
{'name': 'A01', 'target': 'cluster_001'},
{'name': 'A02', 'target': 'node_001'},
]
for spec in specs:
_create_action(self.ctx, **spec)
actions = db_api.action_get_all(self.ctx)
self.assertEqual(2, len(actions))
names = [p.name for p in actions]
for spec in specs:
self.assertIn(spec['name'], names)
def test_action_check_status(self):
specs = [
{'name': 'A01', 'target': 'cluster_001'},
{'name': 'A02', 'target': 'node_001'},
]
id_of = {}
for spec in specs:
action = _create_action(self.ctx, **spec)
id_of[spec['name']] = action.id
db_api.dependency_add(self.ctx, id_of['A02'], id_of['A01'])
action1 = db_api.action_get(self.ctx, id_of['A01'])
self.assertEqual(consts.ACTION_WAITING, action1.status)
timestamp = time.time()
status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp)
self.assertEqual(consts.ACTION_WAITING, status)
status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp)
self.assertEqual(consts.ACTION_WAITING, status)
timestamp = time.time()
db_api.action_mark_succeeded(self.ctx, id_of['A02'], timestamp)
status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp)
self.assertEqual(consts.ACTION_READY, status)
action1 = db_api.action_get(self.ctx, id_of['A01'])
self.assertEqual('All depended actions completed.',
action1.status_reason)
self.assertEqual(timestamp, action1.end_time)
def _check_dependency_add_dependent_list(self):
specs = [
{'name': 'A01', 'target': 'cluster_001'},
{'name': 'A02', 'target': 'node_001'},
{'name': 'A03', 'target': 'node_002'},
{'name': 'A04', 'target': 'node_003'},
]
id_of = {}
for spec in specs:
action = _create_action(self.ctx, **spec)
id_of[spec['name']] = action.id
db_api.dependency_add(self.ctx,
id_of['A01'],
[id_of['A02'], id_of['A03'], id_of['A04']])
res = db_api.dependency_get_dependents(self.ctx, id_of['A01'])
self.assertEqual(3, len(res))
self.assertIn(id_of['A02'], res)
self.assertIn(id_of['A03'], res)
self.assertIn(id_of['A04'], res)
res = db_api.dependency_get_depended(self.ctx, id_of['A01'])
self.assertEqual(0, len(res))
for aid in [id_of['A02'], id_of['A03'], id_of['A04']]:
res = db_api.dependency_get_depended(self.ctx, aid)
self.assertEqual(1, len(res))
self.assertIn(id_of['A01'], res)
res = db_api.dependency_get_dependents(self.ctx, aid)
self.assertEqual(0, len(res))
action = db_api.action_get(self.ctx, aid)
self.assertEqual(action.status, consts.ACTION_WAITING)
return id_of
def _check_dependency_add_depended_list(self):
specs = [
{'name': 'A01', 'target': 'cluster_001'},
{'name': 'A02', 'target': 'node_001'},
{'name': 'A03', 'target': 'node_002'},
{'name': 'A04', 'target': 'node_003'},
]
id_of = {}
for spec in specs:
action = _create_action(self.ctx, **spec)
id_of[spec['name']] = action.id
db_api.dependency_add(self.ctx,
[id_of['A02'], id_of['A03'], id_of['A04']],
id_of['A01'])
res = db_api.dependency_get_depended(self.ctx, id_of['A01'])
self.assertEqual(3, len(res))
self.assertIn(id_of['A02'], res)
self.assertIn(id_of['A03'], res)
self.assertIn(id_of['A04'], res)
res = db_api.dependency_get_dependents(self.ctx, id_of['A01'])
self.assertEqual(0, len(res))
action = db_api.action_get(self.ctx, id_of['A01'])
self.assertEqual(action.status, consts.ACTION_WAITING)
for aid in [id_of['A02'], id_of['A03'], id_of['A04']]:
res = db_api.dependency_get_dependents(self.ctx, aid)
self.assertEqual(1, len(res))
self.assertIn(id_of['A01'], res)
res = db_api.dependency_get_depended(self.ctx, aid)
self.assertEqual(0, len(res))
return id_of
def test_dependency_add_depended_list(self):
self._check_dependency_add_depended_list()
def test_dependency_add_dependent_list(self):
self._check_dependency_add_dependent_list()
def test_action_mark_succeeded(self):
timestamp = time.time()
id_of = self._check_dependency_add_dependent_list()
db_api.action_mark_succeeded(self.ctx, id_of['A01'], timestamp)
res = db_api.dependency_get_depended(self.ctx, id_of['A01'])
self.assertEqual(0, len(res))
action = db_api.action_get(self.ctx, id_of['A01'])
self.assertEqual(consts.ACTION_SUCCEEDED, action.status)
self.assertEqual(timestamp, action.end_time)
for aid in [id_of['A02'], id_of['A03'], id_of['A04']]:
res = db_api.dependency_get_dependents(self.ctx, aid)
self.assertEqual(0, len(res))
def _prepare_action_mark_failed_cancel(self):
specs = [
{'name': 'A01', 'status': 'INIT', 'target': 'cluster_001'},
{'name': 'A02', 'status': 'INIT', 'target': 'node_001'},
{'name': 'A03', 'status': 'INIT', 'target': 'node_002'},
{'name': 'A04', 'status': 'INIT', 'target': 'node_003'},
{'name': 'A05', 'status': 'INIT', 'target': 'cluster_002'},
{'name': 'A06', 'status': 'INIT', 'target': 'cluster_003'},
{'name': 'A07', 'status': 'INIT', 'target': 'cluster_004'},
]
id_of = {}
for spec in specs:
action = _create_action(self.ctx, **spec)
id_of[spec['name']] = action.id
db_api.dependency_add(self.ctx,
[id_of['A02'], id_of['A03'], id_of['A04']],
id_of['A01'])
db_api.dependency_add(self.ctx,
id_of['A01'],
[id_of['A05'], id_of['A06'], id_of['A07']])
res = db_api.dependency_get_depended(self.ctx, id_of['A01'])
self.assertEqual(3, len(res))
self.assertIn(id_of['A02'], res)
self.assertIn(id_of['A03'], res)
self.assertIn(id_of['A04'], res)
action = db_api.action_get(self.ctx, id_of['A01'])
self.assertEqual(consts.ACTION_WAITING, action.status)
for aid in [id_of['A02'], id_of['A03'], id_of['A04']]:
res = db_api.dependency_get_dependents(self.ctx, aid)
self.assertEqual(1, len(res))
self.assertIn(id_of['A01'], res)
res = db_api.dependency_get_depended(self.ctx, aid)
self.assertEqual(0, len(res))
res = db_api.dependency_get_dependents(self.ctx, id_of['A01'])
self.assertEqual(3, len(res))
self.assertIn(id_of['A05'], res)
self.assertIn(id_of['A06'], res)
self.assertIn(id_of['A07'], res)
for aid in [id_of['A05'], id_of['A06'], id_of['A07']]:
res = db_api.dependency_get_depended(self.ctx, aid)
self.assertEqual(1, len(res))
self.assertIn(id_of['A01'], res)
res = db_api.dependency_get_dependents(self.ctx, aid)
self.assertEqual(0, len(res))
action = db_api.action_get(self.ctx, aid)
self.assertEqual(consts.ACTION_WAITING, action.status)
return id_of
def test_action_mark_failed(self):
timestamp = time.time()
id_of = self._prepare_action_mark_failed_cancel()
db_api.action_mark_failed(self.ctx, id_of['A01'], timestamp)
for aid in [id_of['A05'], id_of['A06'], id_of['A07']]:
action = db_api.action_get(self.ctx, aid)
self.assertEqual(consts.ACTION_FAILED, action.status)
self.assertEqual(timestamp, action.end_time)
result = db_api.dependency_get_dependents(self.ctx, id_of['A01'])
self.assertEqual(0, len(result))
def test_action_mark_cancelled(self):
timestamp = time.time()
id_of = self._prepare_action_mark_failed_cancel()
db_api.action_mark_cancelled(self.ctx, id_of['A01'], timestamp)
for aid in [id_of['A05'], id_of['A06'], id_of['A07']]:
action = db_api.action_get(self.ctx, aid)
self.assertEqual(consts.ACTION_CANCELLED, action.status)
self.assertEqual(timestamp, action.end_time)
result = db_api.dependency_get_dependents(self.ctx, id_of['A01'])
self.assertEqual(0, len(result))
def test_action_acquire(self):
action = _create_action(self.ctx)
db_api.action_update(self.ctx, action.id, {'status': 'READY'})
timestamp = time.time()
action = db_api.action_acquire(self.ctx, action.id, 'worker1',
timestamp)
self.assertEqual('worker1', action.owner)
self.assertEqual(consts.ACTION_RUNNING, action.status)
self.assertEqual(timestamp, action.start_time)
action = db_api.action_acquire(self.ctx, action.id, 'worker2',
timestamp)
self.assertIsNone(action)
def test_action_acquire_failed(self):
action = _create_action(self.ctx)
timestamp = time.time()
action = db_api.action_acquire(self.ctx, action.id, 'worker1',
timestamp)
self.assertIsNone(action)
def test_action_delete(self):
action = _create_action(self.ctx)
self.assertIsNotNone(action)
res = db_api.action_delete(self.ctx, action.id)
self.assertIsNone(res)
def test_action_delete_action_in_use(self):
for status in ('WAITING', 'RUNNING', 'SUSPENDED'):
action = _create_action(self.ctx, status=status)
self.assertIsNotNone(action)
ex = self.assertRaises(exception.ResourceBusyError,
db_api.action_delete,
self.ctx, action.id)
self.assertEqual('The action (%s) is busy now.' % action.id,
six.text_type(ex))
|
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs plain netperf in a few modes.
docs:
https://hewlettpackard.github.io/netperf/doc/netperf.html
manpage: http://manpages.ubuntu.com/manpages/maverick/man1/netperf.1.html
Runs TCP_RR, TCP_CRR, and TCP_STREAM benchmarks from netperf across two
machines.
"""
import collections
import csv
import json
import logging
import os
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf
import six
from six.moves import zip
flags.DEFINE_integer('netperf_max_iter', None,
'Maximum number of iterations to run during '
'confidence interval estimation. If unset, '
'a single iteration will be run.',
lower_bound=3, upper_bound=30)
flags.DEFINE_integer('netperf_test_length', 60,
'netperf test length, in seconds',
lower_bound=1)
flags.DEFINE_bool('netperf_enable_histograms', True,
'Determines whether latency histograms are '
'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist('netperf_num_streams', flag_util.IntegerList([1]),
'Number of netperf processes to run. Netperf '
'will run once for each value in the list.',
module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
'The number of contiguous numbers to sum at a time in the '
'thinktime array.')
flags.DEFINE_integer('netperf_udp_stream_send_size_in_bytes', 1024,
'Send size to use for UDP_STREAM tests (netperf -m flag)',
lower_bound=1, upper_bound=65507)
# We set the default to 128KB (131072 bytes) to override the Linux default
# of 16K so that we can achieve the "link rate".
flags.DEFINE_integer('netperf_tcp_stream_send_size_in_bytes', 131072,
'Send size to use for TCP_STREAM tests (netperf -m flag)')
flags.DEFINE_integer(
'netperf_mss', None,
'Sets the Maximum Segment Size (in bytes) for netperf TCP tests to use. '
'The effective MSS will be slightly smaller than the value specified here. '
'If you try to set an MSS higher than the current MTU, '
'the MSS will be set to the highest possible value for that MTU. '
'If you try to set the MSS lower than 88 bytes, the default MSS will be '
'used.')
ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR', 'UDP_STREAM']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
'The netperf benchmark(s) to run.')
flags.register_validator(
'netperf_benchmarks',
lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'netperf'
BENCHMARK_CONFIG = """
netperf:
description: Run TCP_RR, TCP_CRR, UDP_RR, TCP_STREAM and UDP_STREAM
vpc_peering: True
vm_groups:
vm_1:
vm_spec: *default_single_core
vm_2:
vm_spec: *default_single_core
"""
MBPS = 'Mbits/sec'
TRANSACTIONS_PER_SECOND = 'transactions_per_second'
# Specifies the keys to include in the results for OMNI tests.
# Any user of ParseNetperfOutput() (e.g. container_netperf_benchmark), must
# specify these selectors to ensure the parsing doesn't break.
OUTPUT_SELECTOR = (
'THROUGHPUT,THROUGHPUT_UNITS,P50_LATENCY,P90_LATENCY,'
'P99_LATENCY,STDDEV_LATENCY,MIN_LATENCY,MAX_LATENCY,'
'CONFIDENCE_ITERATION,THROUGHPUT_CONFID,'
'LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,'
'TRANSPORT_MSS')
# Command ports are even (id*2), data ports are odd (id*2 + 1)
PORT_START = 20000
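# e.g. (illustrative) with PORT_START = 20000, stream 0 uses command port
# 20000 and data port 20001, stream 1 uses 20002 and 20003, and so on.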
REMOTE_SCRIPTS_DIR = 'netperf_test_scripts'
REMOTE_SCRIPT = 'netperf_test.py'
PERCENTILES = [50, 90, 99]
# By default, Container-Optimized OS (COS) host firewall allows only
# outgoing connections and incoming SSH connections. To allow incoming
# connections from VMs running netperf, we need to add iptables rules
# on the VM running netserver.
_COS_RE = re.compile(r'\b(cos|gci)-')
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def PrepareNetperf(vm):
"""Installs netperf on a single vm."""
vm.Install('netperf')
def Prepare(benchmark_spec):
"""Install netperf on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vms = vms[:2]
vm_util.RunThreaded(PrepareNetperf, vms)
num_streams = max(FLAGS.netperf_num_streams)
# See comments where _COS_RE is defined.
if vms[1].image and re.search(_COS_RE, vms[1].image):
_SetupHostFirewall(benchmark_spec)
# Start the netserver processes
if vm_util.ShouldRunOnExternalIpAddress():
# Open all of the command and data ports
vms[1].AllowPort(PORT_START, PORT_START + num_streams * 2 - 1)
port_end = PORT_START + num_streams * 2 - 1
netserver_cmd = (f'for i in $(seq {PORT_START} 2 {port_end}); do '
f'{netperf.NETSERVER_PATH} -p $i & done')
vms[1].RemoteCommand(netserver_cmd)
# Copy remote test script to client
path = data.ResourcePath(os.path.join(REMOTE_SCRIPTS_DIR, REMOTE_SCRIPT))
logging.info('Uploading %s to %s', path, vms[0])
vms[0].PushFile(path, REMOTE_SCRIPT)
vms[0].RemoteCommand(f'sudo chmod 777 {REMOTE_SCRIPT}')
def _SetupHostFirewall(benchmark_spec):
"""Set up host firewall to allow incoming traffic.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
client_vm = benchmark_spec.vms[0]
server_vm = benchmark_spec.vms[1]
ip_addrs = [client_vm.internal_ip]
if vm_util.ShouldRunOnExternalIpAddress():
ip_addrs.append(client_vm.ip_address)
logging.info('setting up host firewall on %s running %s for client at %s',
server_vm.name, server_vm.image, ip_addrs)
cmd = 'sudo iptables -A INPUT -p %s -s %s -j ACCEPT'
for protocol in 'tcp', 'udp':
for ip_addr in ip_addrs:
server_vm.RemoteHostCommand(cmd % (protocol, ip_addr))
def _HistogramStatsCalculator(histogram, percentiles=PERCENTILES):
"""Computes values at percentiles in a distribution as well as stddev.
Args:
histogram: A dict mapping values to the number of samples with that value.
percentiles: An array of percentiles to calculate.
Returns:
A dict mapping stat names to their values.
"""
stats = {}
# Histogram data in list form sorted by key
by_value = sorted([(value, count) for value, count in histogram.items()],
key=lambda x: x[0])
total_count = sum(histogram.values())
cur_value_index = 0 # Current index in by_value
cur_index = 0 # Number of values we've passed so far
for p in percentiles:
index = int(float(total_count) * float(p) / 100.0)
index = min(index, total_count - 1) # Handle 100th percentile
for value, count in by_value[cur_value_index:]:
if cur_index + count > index:
stats['p%s' % str(p)] = by_value[cur_value_index][0]
break
else:
cur_index += count
cur_value_index += 1
# Compute stddev
value_sum = float(sum([value * count for value, count in histogram.items()]))
average = value_sum / float(total_count)
if total_count > 1:
total_of_squares = sum([(value - average) ** 2 * count
for value, count in histogram.items()])
stats['stddev'] = (total_of_squares / (total_count - 1)) ** 0.5
else:
stats['stddev'] = 0
return stats
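# Worked example (illustrative): _HistogramStatsCalculator({1: 3, 2: 1}, [50])
# walks the sorted (value, count) pairs until the cumulative count exceeds
# int(4 * 0.5) = 2, so stats['p50'] is 1; the sample stddev over the 4
# samples is sqrt((3 * 0.25**2 + 1 * 0.75**2) / 3) = 0.5.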
def ParseNetperfOutput(stdout, metadata, benchmark_name,
enable_latency_histograms):
"""Parses the stdout of a single netperf process.
Args:
stdout: the stdout of the netperf process
metadata: metadata for any sample.Sample objects we create
benchmark_name: the name of the netperf benchmark
enable_latency_histograms: bool indicating if latency histograms are
included in stdout
Returns:
A tuple containing (throughput_sample, latency_samples, latency_histogram)
"""
# Don't modify the metadata dict that was passed in
metadata = metadata.copy()
# Extract stats from stdout
# Sample output:
#
# "MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001
# AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf.
# : first burst 0",\n
# Throughput,Throughput Units,Throughput Confidence Width (%),
# Confidence Iterations Run,Stddev Latency Microseconds,
# 50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,
# 99th Percentile Latency Microseconds,Minimum Latency Microseconds,
# Maximum Latency Microseconds\n
# 1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n
try:
fp = six.StringIO(stdout)
# "-o" flag above specifies CSV output, but there is one extra header line:
banner = next(fp)
assert banner.startswith('MIGRATED'), stdout
r = csv.DictReader(fp)
results = next(r)
logging.info('Netperf Results: %s', results)
assert 'Throughput' in results
except (StopIteration, AssertionError):
# The output returned by netperf was unparseable - usually due to a broken
# connection or other error. Raise KnownIntermittentError to signal the
# benchmark can be retried. Do not automatically retry as an immediate
    # retry on these VMs may be adversely affected (e.g. burstable credits
# partially used)
message = 'Netperf ERROR: Failed to parse stdout. STDOUT: %s' % stdout
logging.error(message)
raise errors.Benchmarks.KnownIntermittentError(message)
  # Update the metadata with some additional info
meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
('Throughput Confidence Width (%)', 'confidence_width_percent')]
if 'TCP' in benchmark_name:
meta_keys.extend([
('Local Transport Retransmissions', 'netperf_retransmissions'),
('Remote Transport Retransmissions', 'netserver_retransmissions'),
('Transport MSS bytes', 'netperf_mss')
])
metadata.update({meta_key: results[netperf_key]
for netperf_key, meta_key in meta_keys})
# Create the throughput sample
throughput = float(results['Throughput'])
throughput_units = results['Throughput Units']
if throughput_units == '10^6bits/s':
# TCP_STREAM benchmark
unit = MBPS
metric = '%s_Throughput' % benchmark_name
elif throughput_units == 'Trans/s':
# *RR benchmarks
unit = TRANSACTIONS_PER_SECOND
metric = '%s_Transaction_Rate' % benchmark_name
else:
raise ValueError('Netperf output specifies unrecognized throughput units %s'
% throughput_units)
throughput_sample = sample.Sample(metric, throughput, unit, metadata)
latency_hist = None
latency_samples = []
if enable_latency_histograms:
# Parse the latency histogram. {latency: count} where "latency" is the
# latency in microseconds with only 2 significant figures and "count" is the
# number of response times that fell in that latency range.
latency_hist = netperf.ParseHistogram(stdout)
hist_metadata = {'histogram': json.dumps(latency_hist)}
hist_metadata.update(metadata)
latency_samples.append(sample.Sample(
'%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
if unit != MBPS:
for metric_key, metric_name in [
('50th Percentile Latency Microseconds', 'p50'),
('90th Percentile Latency Microseconds', 'p90'),
('99th Percentile Latency Microseconds', 'p99'),
('Minimum Latency Microseconds', 'min'),
('Maximum Latency Microseconds', 'max'),
('Stddev Latency Microseconds', 'stddev')]:
if metric_key in results:
latency_samples.append(
sample.Sample('%s_Latency_%s' % (benchmark_name, metric_name),
float(results[metric_key]), 'us', metadata))
return (throughput_sample, latency_samples, latency_hist)
def RunNetperf(vm, benchmark_name, server_ip, num_streams):
"""Spawns netperf on a remote VM, parses results.
Args:
vm: The VM that the netperf TCP_RR benchmark will be run upon.
benchmark_name: The netperf benchmark to run, see the documentation.
server_ip: A machine that is running netserver.
num_streams: The number of netperf client threads to run.
Returns:
A sample.Sample object with the result.
"""
enable_latency_histograms = FLAGS.netperf_enable_histograms or num_streams > 1
# Throughput benchmarks don't have latency histograms
enable_latency_histograms = (
enable_latency_histograms and
(benchmark_name not in ['TCP_STREAM', 'UDP_STREAM']))
# Flags:
# -o specifies keys to include in CSV output.
# -j keeps additional latency numbers
# -v sets the verbosity level so that netperf will print out histograms
# -I specifies the confidence % and width - here 99% confidence that the true
# value is within +/- 2.5% of the reported value
# -i specifies the maximum and minimum number of iterations.
confidence = (f'-I 99,5 -i {FLAGS.netperf_max_iter},3'
if FLAGS.netperf_max_iter else '')
verbosity = '-v2 ' if enable_latency_histograms else ''
remote_cmd_timeout = (
FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300)
metadata = {'netperf_test_length': FLAGS.netperf_test_length,
'sending_thread_count': num_streams,
'max_iter': FLAGS.netperf_max_iter or 1}
netperf_cmd = (f'{netperf.NETPERF_PATH} '
f'-p {{command_port}} '
f'-j {verbosity} '
f'-t {benchmark_name} '
f'-H {server_ip} '
f'-l {FLAGS.netperf_test_length} {confidence}'
' -- '
f'-P ,{{data_port}} '
f'-o {OUTPUT_SELECTOR}')
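  # For example (illustrative only), a TCP_RR run with default flags and
  # histograms enabled expands to roughly:
  #   netperf -p {command_port} -j -v2 -t TCP_RR -H <server_ip> -l 60 \
  #     -- -P ,{data_port} -o THROUGHPUT,THROUGHPUT_UNITS,...
  # where {command_port} and {data_port} are filled in per stream by the
  # remote script.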
if benchmark_name.upper() == 'UDP_STREAM':
send_size = FLAGS.netperf_udp_stream_send_size_in_bytes
netperf_cmd += f' -R 1 -m {send_size} -M {send_size} '
metadata['netperf_send_size_in_bytes'] = (
FLAGS.netperf_udp_stream_send_size_in_bytes)
elif benchmark_name.upper() == 'TCP_STREAM':
send_size = FLAGS.netperf_tcp_stream_send_size_in_bytes
netperf_cmd += f' -m {send_size} -M {send_size} '
metadata['netperf_send_size_in_bytes'] = (
FLAGS.netperf_tcp_stream_send_size_in_bytes)
if FLAGS.netperf_thinktime != 0:
netperf_cmd += (' -X '
f'{FLAGS.netperf_thinktime},'
f'{FLAGS.netperf_thinktime_array_size},'
f'{FLAGS.netperf_thinktime_run_length} ')
if FLAGS.netperf_mss and 'TCP' in benchmark_name.upper():
netperf_cmd += f' -G {FLAGS.netperf_mss}b'
metadata['netperf_mss_requested'] = FLAGS.netperf_mss
# Run all of the netperf processes and collect their stdout
# TODO(dlott): Analyze process start delta of netperf processes on the remote
# machine
# Give the remote script the max possible test length plus 5 minutes to
# complete
  remote_cmd_timeout = (
      FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300)
remote_cmd = (f'./{REMOTE_SCRIPT} --netperf_cmd="{netperf_cmd}" '
f'--num_streams={num_streams} --port_start={PORT_START}')
remote_stdout, _ = vm.RobustRemoteCommand(remote_cmd, should_log=True,
timeout=remote_cmd_timeout)
# Decode stdouts, stderrs, and return codes from remote command's stdout
json_out = json.loads(remote_stdout)
stdouts = json_out[0]
parsed_output = [ParseNetperfOutput(stdout, metadata, benchmark_name,
enable_latency_histograms)
for stdout in stdouts]
if len(parsed_output) == 1:
# Only 1 netperf thread
throughput_sample, latency_samples, histogram = parsed_output[0]
return [throughput_sample] + latency_samples
else:
# Multiple netperf threads
samples = []
# Unzip parsed output
# Note that latency_samples are invalid with multiple threads because stats
# are computed per-thread by netperf, so we don't use them here.
throughput_samples, _, latency_histograms = [list(t)
for t in zip(*parsed_output)]
# They should all have the same units
throughput_unit = throughput_samples[0].unit
# Extract the throughput values from the samples
throughputs = [s.value for s in throughput_samples]
# Compute some stats on the throughput values
throughput_stats = sample.PercentileCalculator(throughputs, [50, 90, 99])
throughput_stats['min'] = min(throughputs)
throughput_stats['max'] = max(throughputs)
# Calculate aggregate throughput
throughput_stats['total'] = throughput_stats['average'] * len(throughputs)
# Create samples for throughput stats
for stat, value in throughput_stats.items():
samples.append(
sample.Sample(f'{benchmark_name}_Throughput_{stat}',
float(value),
throughput_unit, metadata))
if enable_latency_histograms:
# Combine all of the latency histogram dictionaries
latency_histogram = collections.Counter()
for histogram in latency_histograms:
latency_histogram.update(histogram)
# Create a sample for the aggregate latency histogram
hist_metadata = {'histogram': json.dumps(latency_histogram)}
hist_metadata.update(metadata)
samples.append(sample.Sample(
f'{benchmark_name}_Latency_Histogram', 0, 'us', hist_metadata))
# Calculate stats on aggregate latency histogram
latency_stats = _HistogramStatsCalculator(latency_histogram, [50, 90, 99])
# Create samples for the latency stats
for stat, value in latency_stats.items():
samples.append(
sample.Sample(f'{benchmark_name}_Latency_{stat}',
float(value),
'us', metadata))
return samples
def Run(benchmark_spec):
"""Run netperf TCP_RR on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
client_vm = vms[0] # Client aka "sending vm"
server_vm = vms[1] # Server aka "receiving vm"
logging.info('netperf running on %s', client_vm)
results = []
metadata = {
'sending_zone': client_vm.zone,
'sending_machine_type': client_vm.machine_type,
'receiving_zone': server_vm.zone,
'receiving_machine_type': server_vm.machine_type
}
for num_streams in FLAGS.netperf_num_streams:
assert num_streams >= 1
for netperf_benchmark in FLAGS.netperf_benchmarks:
if vm_util.ShouldRunOnExternalIpAddress():
external_ip_results = RunNetperf(client_vm, netperf_benchmark,
server_vm.ip_address, num_streams)
for external_ip_result in external_ip_results:
external_ip_result.metadata[
'ip_type'] = vm_util.IpAddressMetadata.EXTERNAL
external_ip_result.metadata.update(metadata)
results.extend(external_ip_results)
if vm_util.ShouldRunOnInternalIpAddress(client_vm, server_vm):
internal_ip_results = RunNetperf(client_vm, netperf_benchmark,
server_vm.internal_ip, num_streams)
for internal_ip_result in internal_ip_results:
internal_ip_result.metadata.update(metadata)
internal_ip_result.metadata[
'ip_type'] = vm_util.IpAddressMetadata.INTERNAL
results.extend(internal_ip_results)
return results
def Cleanup(benchmark_spec):
"""Cleanup netperf on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vms[1].RemoteCommand('sudo killall netserver')
vms[0].RemoteCommand(f'sudo rm -rf {REMOTE_SCRIPT}')
|
|
import copy
from office365.runtime.client_value import ClientValue
from office365.runtime.odata.v3.json_light_format import JsonLightFormat
from office365.runtime.odata.odata_json_format import ODataJsonFormat
from office365.runtime.odata.query_options import QueryOptions
class ClientObject(object):
def __init__(self, context, resource_path=None, parent_collection=None, namespace=None):
"""
        Base client object which defines the named properties and relationships of an entity.
:type parent_collection: office365.runtime.client_object_collection.ClientObjectCollection or None
:type resource_path: office365.runtime.client_path.ClientPath or None
:type context: office365.runtime.client_runtime_context.ClientRuntimeContext
:type namespace: str
"""
self._properties = {}
self._properties_metadata = {}
self._entity_type_name = None
self._query_options = QueryOptions()
self._parent_collection = parent_collection
self._context = context
self._resource_path = resource_path
self._namespace = namespace
def set_metadata(self, name, group, value):
if name not in self._properties_metadata:
self._properties_metadata[name] = {}
self._properties_metadata[name][group] = value
def get_metadata(self, name, group, default_value=None):
return self._properties_metadata.get(name, {}).get(group, default_value)
def clear(self):
self._properties_metadata = {}
def execute_query(self):
self.context.execute_query()
return self
def execute_query_retry(self, max_retry=5,
timeout_secs=5,
success_callback=None,
failure_callback=None):
self.context.execute_query_retry(max_retry=max_retry,
timeout_secs=timeout_secs,
success_callback=success_callback,
failure_callback=failure_callback)
return self
def build_request(self):
return self.context.build_request(self.context.current_query)
def get(self):
self.context.load(self)
return self
def is_property_available(self, name):
"""Returns a Boolean value that indicates whether the specified property has been retrieved or set.
:param str name: A Property name
"""
if name in self.properties:
return True
return False
def expand(self, names):
"""
:type names: list[str]
"""
self.query_options.expand = names
return self
def select(self, names):
"""
:param list[str] names:
:return:
"""
self.query_options.select = names
return self
def remove_from_parent_collection(self):
if self._parent_collection is None:
return
self._parent_collection.remove_child(self)
def get_property(self, name, default_value=None):
"""
Gets property value
:param str name: property name
        :param any default_value: fallback value returned when the property has not been loaded or set
"""
if default_value is None:
normalized_name = name[0].lower() + name[1:]
default_value = getattr(self, normalized_name, None)
return self._properties.get(name, default_value)
def set_property(self, name, value, persist_changes=True):
"""Sets property value
:param str name: Property name
:param any value: Property value
:param bool persist_changes: Persist changes
"""
self._properties_metadata[name] = {}
if persist_changes:
self.set_metadata(name, "persist", True)
prop_type = self.get_property(name)
        if isinstance(prop_type, (ClientObject, ClientValue)) and value is not None:
if isinstance(value, list):
[prop_type.set_property(i, v, persist_changes) for i, v in enumerate(value)]
self._properties[name] = prop_type
elif isinstance(value, dict):
[prop_type.set_property(k, v, persist_changes) for k, v in value.items()]
self._properties[name] = prop_type
else:
self._properties[name] = value
else:
self._properties[name] = value
return self
def ensure_property(self, name, action, *args, **kwargs):
"""
        Ensures that the named property is loaded.
:type action: () -> None
:type name: str
"""
return self.ensure_properties([name], action, *args, **kwargs)
def ensure_properties(self, names, action, *args, **kwargs):
"""
        Ensures that the listed properties are loaded.
:type action: (any) -> None
:type names: str or list[str]
"""
names_to_include = [n for n in names if not self.is_property_available(n)]
if len(names_to_include) > 0:
from office365.runtime.queries.read_entity_query import ReadEntityQuery
qry = ReadEntityQuery(self, names_to_include)
self.context.add_query(qry, set_as_current=False)
self.context.after_query_execute(qry, action, *args, **kwargs)
else:
action(*args, **kwargs)
return self
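    # Typical (illustrative) usage of the deferred-loading pattern above: queue a read of "Title" and
    # run the callback once the next execute_query() completes, e.g.
    #   item.ensure_properties(["Title"], lambda: print(item.get_property("Title")))
    #   item.execute_query()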
def clone_object(self):
result = copy.deepcopy(self)
result._context = self.context
return result
@property
def entity_type_name(self):
if self._entity_type_name is None:
if self._namespace is None:
self._entity_type_name = type(self).__name__
else:
self._entity_type_name = ".".join([self._namespace, type(self).__name__])
return self._entity_type_name
@property
def resource_url(self):
"""
Returns resource url
:rtype: str or None
"""
return self.context.service_root_url() + str(self.resource_path)
@property
def context(self):
return self._context
@property
def resource_path(self):
return self._resource_path
@property
def query_options(self):
return self._query_options
@property
def properties(self):
return self._properties
@property
def parent_collection(self):
return self._parent_collection
def to_json(self, json_format=None):
"""
:type json_format: office365.runtime.odata.odata_json_format.ODataJsonFormat or None
"""
ser_prop_names = [n for n, p in self._properties_metadata.items() if p.get("persist", False) is True]
json = dict((k, self.get_property(k)) for k in self.properties if k in ser_prop_names)
for k, v in json.items():
if isinstance(v, ClientObject) or isinstance(v, ClientValue):
json[k] = v.to_json(json_format)
if json and self.entity_type_name is not None and json_format.include_control_information():
if isinstance(json_format, JsonLightFormat):
json[json_format.metadata_type_tag_name] = {'type': self.entity_type_name}
elif isinstance(json_format, ODataJsonFormat):
json[json_format.metadata_type_tag_name] = "#" + self.entity_type_name
return json
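# A minimal usage sketch (illustrative, not part of the library): it exercises only the in-memory
# property API defined above, so context=None is passed because no request is ever issued.
if __name__ == "__main__":
    obj = ClientObject(context=None)
    obj.set_property("Title", "Demo", persist_changes=True)
    obj.set_property("Internal", 42, persist_changes=False)
    print(obj.get_property("Title"))               # -> Demo
    print(obj.is_property_available("Internal"))   # -> True
    print(obj.get_metadata("Title", "persist"))    # -> True (marked for serialization by to_json)
    print(obj.get_metadata("Internal", "persist")) # -> None (excluded from to_json)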
|
|
# -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
redirect(URL(f="organisation", args="summary"))
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# @ToDo: Move this to the Template (separate deployment_setting or else a customise for non-REST controllers)
template = settings.get_template()
if template == "SandyRelief":
# Just redirect to the Facilities
redirect(URL(f="facility"))
else:
# Just redirect to the list of Organisations
redirect(URL(f="organisation", args="summary"))
# -----------------------------------------------------------------------------
def group():
""" RESTful CRUD controller """
return s3_rest_controller(rheader = s3db.org_rheader)
# -----------------------------------------------------------------------------
def group_membership():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_membership_status():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_person():
""" REST controller for options.s3json lookups """
s3.prep = lambda r: r.representation == "s3json" and r.method == "options"
return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_person_status():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def region():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sector():
""" RESTful CRUD controller """
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
"""
RESTful CRUD controller
- used by S3SiteAutocompleteWidget
which doesn't yet support filtering to just updateable sites
- used by site_contact_person()
- used by S3OptionsFilter (e.g. Asset Log)
"""
# Pre-processor
def prep(r):
if r.representation != "json" and \
r.method not in ("search_ac", "search_address_ac", "site_contact_person"):
return False
# Location Filter
s3db.gis_location_filter(r)
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
"""
Used to provide the list of Sites for an Organisation
- used in User Registration & Assets
"""
try:
org = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Org provided!")
else:
try:
org = int(org)
except:
result = current.xml.json_message(False, 400, "Invalid Org provided!")
else:
stable = s3db.org_site
if settings.get_org_branches():
# Find all branches for this Organisation
btable = s3db.org_organisation_branch
query = (btable.organisation_id == org) & \
(btable.deleted != True)
rows = db(query).select(btable.branch_id)
org_ids = [row.branch_id for row in rows] + [org]
query = (stable.organisation_id.belongs(org_ids)) & \
(stable.deleted != True)
else:
query = (stable.organisation_id == org) & \
(stable.deleted != True)
rows = db(query).select(stable.site_id,
stable.name,
orderby=stable.name)
result = rows.json()
finally:
response.headers["Content-Type"] = "application/json"
return result
# -----------------------------------------------------------------------------
def facility():
""" RESTful CRUD controller """
return s3db.org_facility_controller()
# -----------------------------------------------------------------------------
def facility_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
"""
Organisation REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
def organisation_list_represent(l):
organisation_represent = s3db.org_organisation_represent
if l:
max_length = 4
if len(l) > max_length:
return "%s, etc" % \
organisation_represent.multiple(l[:max_length])
else:
return organisation_represent.multiple(l)
else:
return NONE
# -----------------------------------------------------------------------------
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
current.xml.show_ids = True
return True
s3.prep = prep
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
""" RESTful CRUD controller """
def prep(r):
field = r.table.site_id
field.readable = field.writable = True
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
# Only groups with a group_type of 5
s3.filter = (table.group_type == 5)
table.group_type.writable = False
table.group_type.readable = False
table.name.label = T("Mailing List Name")
s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
# define the list_fields
list_fields = s3db.configure(tablename,
list_fields = ["id",
"name",
"description",
])
# Components
_rheader = s3db.pr_rheader
_tabs = [(T("Organization"), "organisation/"),
(T("Mailing List Details"), None),
]
if len(request.args) > 0:
_tabs.append((T("Members"), "group_membership"))
if "viewing" in request.vars:
tablename, record_id = request.vars.viewing.rsplit(".", 1)
if tablename == "org_organisation":
table = s3db[tablename]
_rheader = s3db.org_rheader
_tabs = []
s3db.add_components("pr_group", pr_group_membership="group_id")
rheader = lambda r: _rheader(r, tabs = _tabs)
return s3_rest_controller("pr",
"group",
rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = s3db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
label_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
label_list_button = T("List Donors"),
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3db.configure(tablename, listadd=False)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def resource():
""" RESTful CRUD controller """
def prep(r):
if r.interactive:
if r.method in ("create", "update"):
# Context from a Profile page?"
table = r.table
location_id = get_vars.get("(location)", None)
if location_id:
field = table.location_id
field.default = location_id
field.readable = field.writable = False
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = table.organisation_id
field.default = organisation_id
field.readable = field.writable = False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def resource_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def service():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests for Sites """
return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
"""
Incoming Shipments for Sites
Used from Requests rheader when looking at Transport Status
"""
# @ToDo: Create this function!
return s3db.inv_incoming()
# -----------------------------------------------------------------------------
def facility_geojson():
"""
Create GeoJSON[P] of Facilities for use by a high-traffic website
- controller just for testing
- function normally run on a schedule
"""
s3db.org_facility_geojson()
# END =========================================================================
|
|
#!/usr/bin/env python
import os
from time import time
import argparse
import logging
import logging.handlers
from eden.util import configure_logging
from eden.util import serialize_dict
from numpy.random import randint
from numpy.random import uniform
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from eden.graph import Vectorizer
from eden.util import save_output, store_matrix
from eden.converter.graph.node_link_data import node_link_data_to_eden
logger = logging.getLogger(__name__)
class ModelInitializerBase(object):
"""
    Subclass to generate your own EDeN model driver (a minimal example subclass is sketched
    after this class definition).
"""
def __init__(self):
pass
def load_data(self, args):
"""
Load data from file.
        Called by the predict, matrix and feature commands, which share the base argument parser.
The iterator returned here is handled by the pre_processor function
defined in pre_processor_init.
"""
iterator = node_link_data_to_eden(args.input_file)
return iterator
    def load_positive_data(self, args):
        """Load the positive class data.
        Called after invoking the fit command.
        The iterator returned here is handled by the pre_processor function
        defined in pre_processor_init.
        """
        iterator = node_link_data_to_eden(args.positive_input_file)
        return iterator
    def load_negative_data(self, args):
        """Load the negative class data.
        Called after invoking the fit command.
        The iterator returned here is handled by the pre_processor function
        defined in pre_processor_init.
        """
        iterator = node_link_data_to_eden(args.negative_input_file)
        return iterator
def pre_processor_init(self, args):
"""Setup conversion of data prepared by load_data functions to graphs.
Returns the function used to process the data prepared by the load_data functions and
a set of matching parameter choices.
"""
def pre_processor(graphs, **args):
return graphs
pre_processor_parameters = {}
return pre_processor, pre_processor_parameters
def vectorizer_init(self, args):
"""Setup the conversion of graphs generated by the the pre_processor function to feature vectors.
Returns the function used to calculate feature vectors from graphs prepared by the
pre_processor function and a set of matching parameter choices.
"""
vectorizer = Vectorizer()
vectorizer_parameters = {'complexity': [2, 3, 4]}
return vectorizer, vectorizer_parameters
def estimator_init(self, args):
"""Setup the estimator and set of matching parameter choices."""
estimator = SGDClassifier(average=True, class_weight='auto', shuffle=True)
estimator_parameters = {'n_iter': randint(5, 200, size=args.n_iter),
'penalty': ['l1', 'l2', 'elasticnet'],
'l1_ratio': uniform(0.1, 0.9, size=args.n_iter),
'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
'power_t': uniform(0.1, size=args.n_iter),
'alpha': [10 ** x for x in range(-8, 0)],
'eta0': [10 ** x for x in range(-4, -1)],
'learning_rate': ["invscaling", "constant", "optimal"],
'n_jobs': [-1]}
return estimator, estimator_parameters
def add_arguments(self, parser):
"""Add arguments for the main call."""
parser.add_argument('--version', action='version', version='0.1')
return parser
def add_arguments_fit(self, parser):
"""Add arguments for the fit command."""
parser.add_argument("-p", "--positive-input-file",
dest="positive_input_file",
help="Path tofile containing input for the positive class.",
required=True)
parser.add_argument("-n", "--negative-input-file",
dest="negative_input_file",
help="Path to file containing input for the negative class.",
required=True)
return parser
def add_arguments_estimate(self, parser):
"""Add arguments for the estimate command."""
return self.add_arguments_fit(parser)
def add_arguments_base(self, parser):
"""FIXME: Add arguments for what?"""
parser.add_argument("-i", "--input-file",
dest="input_file",
help="Path to file containing input.",
required=True)
return parser
def add_arguments_matrix(self, parser):
"""Add arguments for the matrix command."""
return parser
def add_arguments_predict(self, parser):
"""Add arguments for the predict command."""
return parser
def add_arguments_feature(self, parser):
"""Add arguments for the feature command."""
return parser
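# A minimal example subclass (illustrative only, not part of the driver): it reuses the default
# load_* and pre_processor hooks and only narrows the vectorizer search space.
class ExampleModelInitializer(ModelInitializerBase):
    def vectorizer_init(self, args):
        vectorizer = Vectorizer()
        vectorizer_parameters = {'complexity': [2, 3]}
        return vectorizer, vectorizer_parameters
# A driver script would then wire it up roughly like (program name is hypothetical):
#   if __name__ == "__main__":
#       main_script(model_initializer=ExampleModelInitializer(),
#                   description='Example EDeN driver',
#                   prog_name='example_driver',
#                   logger=logging.getLogger(__name__))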
def main_fit(model_initializer, args):
# init
pos_train_iterator = model_initializer.load_positive_data(args)
neg_train_iterator = model_initializer.load_negative_data(args)
pre_processor, pre_processor_parameters = model_initializer.pre_processor_init(args)
vectorizer, vectorizer_parameters = model_initializer.vectorizer_init(args)
estimator, estimator_parameters = model_initializer.estimator_init(args)
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel(pre_processor=pre_processor,
estimator=estimator,
vectorizer=vectorizer,
fit_vectorizer=args.fit_vectorizer,
n_jobs=args.n_jobs,
n_blocks=args.n_blocks,
block_size=args.block_size,
pre_processor_n_jobs=args.pre_processor_n_jobs,
pre_processor_n_blocks=args.pre_processor_n_blocks,
pre_processor_block_size=args.pre_processor_block_size,
random_state=args.random_state)
# save model
if not os.path.exists(args.output_dir_path):
os.mkdir(args.output_dir_path)
full_out_file_name = os.path.join(args.output_dir_path, args.model_file)
# hyper parameters optimization
model.optimize(pos_train_iterator, neg_train_iterator,
model_name=full_out_file_name,
n_iter=args.n_iter,
n_inner_iter_estimator=args.n_inner_iter_estimator,
pre_processor_parameters=pre_processor_parameters,
vectorizer_parameters=vectorizer_parameters,
estimator_parameters=estimator_parameters,
n_active_learning_iterations=args.n_active_learning_iterations,
size_positive=args.size_positive,
size_negative=args.size_negative,
lower_bound_threshold_positive=args.lower_bound_threshold_positive,
upper_bound_threshold_positive=args.upper_bound_threshold_positive,
lower_bound_threshold_negative=args.lower_bound_threshold_negative,
upper_bound_threshold_negative=args.upper_bound_threshold_negative,
max_total_time=args.max_total_time,
cv=args.cv,
scoring=args.scoring,
score_func=lambda u, s: u - s,
two_steps_optimization=args.two_steps_optimization)
def main_estimate(model_initializer, args):
pos_test_iterator = model_initializer.load_positive_data(args)
neg_test_iterator = model_initializer.load_negative_data(args)
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel()
model.load(args.model_file)
logger.info(model.get_parameters())
apr, rocauc = model.estimate(pos_test_iterator, neg_test_iterator,
report_cross_validation=args.cross_validation)
def main_predict(model_initializer, args):
iterator = model_initializer.load_data(args)
from itertools import tee
iterator, iterator_ = tee(iterator)
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel()
model.load(args.model_file)
logger.info(model.get_parameters())
text = []
for margin, graph_info in model.decision_function_info(iterator, key='id'):
if margin > 0:
prediction = 1
else:
prediction = -1
text.append("%d\t%s\t%s\n" % (prediction, margin, graph_info))
save_output(text=text, output_dir_path=args.output_dir_path, out_file_name='predictions.txt')
def main_matrix(model_initializer, args):
iterator = model_initializer.load_data(args)
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel()
model.load(args.model_file)
logger.info(model.get_parameters())
data_matrix = model._data_matrix(iterator)
kernel_matrix = metrics.pairwise.pairwise_kernels(data_matrix, metric='linear')
store_matrix(matrix=kernel_matrix,
output_dir_path=args.output_dir_path,
out_file_name='Gram_matrix',
output_format=args.output_format)
def main_feature(model_initializer, args):
iterator = model_initializer.load_data(args)
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel()
model.load(args.model_file)
logger.info(model.get_parameters())
data_matrix = model._data_matrix(iterator)
store_matrix(matrix=data_matrix,
output_dir_path=args.output_dir_path,
out_file_name='data_matrix',
output_format=args.output_format)
def main(model_initializer, args):
if args.which == 'fit':
main_fit(model_initializer, args)
elif args.which == 'estimate':
main_estimate(model_initializer, args)
elif args.which == 'predict':
main_predict(model_initializer, args)
elif args.which == 'matrix':
main_matrix(model_initializer, args)
elif args.which == 'feature':
main_feature(model_initializer, args)
else:
raise Exception('Unknown mode: %s' % args.which)
def argparse_setup(model_initializer, description, epilog):
class DefaultsRawDescriptionHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
# To join the behaviour of RawDescriptionHelpFormatter with that of ArgumentDefaultsHelpFormatter
pass
parser = argparse.ArgumentParser(description=description,
epilog=epilog,
formatter_class=DefaultsRawDescriptionHelpFormatter)
parser = model_initializer.add_arguments(parser)
parser.add_argument("-v", "--verbosity",
action="count",
help="Increase output verbosity")
parser.add_argument("-x", "--no-logging",
dest="no_logging",
help="If set, do not log on file.",
action="store_true")
subparsers = parser.add_subparsers(help='commands')
# fit commands
fit_parser = subparsers.add_parser('fit', help='Fit commands',
formatter_class=DefaultsRawDescriptionHelpFormatter)
fit_parser.set_defaults(which='fit')
# add domain specific arguments
fit_parser = model_initializer.add_arguments_fit(fit_parser)
fit_parser.add_argument("-o", "--output-dir",
dest="output_dir_path",
help="Path to output directory.",
default="out")
fit_parser.add_argument("-m", "--model-file",
dest="model_file",
help="Model file name. Note: it will be located in the output directory.",
default="model")
fit_parser.add_argument("-e", "--n-iter",
dest="n_iter",
type=int,
help="Number of randomly generated hyper parameter configurations \
tried during the discriminative model optimization. A value of 1 implies using \
the estimator default values.",
default=20)
fit_parser.add_argument("--n-inner-iter-estimator",
dest="n_inner_iter_estimator",
type=int,
help="Number of randomly generated hyper parameter configurations tried for \
the estimator for each parameter configuration of the pre-processor and \
vectorizer during optimization.",
default=5)
fit_parser.add_argument("--n-active-learning-iterations",
dest="n_active_learning_iterations",
type=int,
help="Number of iterations in the active learning cycle. A value of 0 means to \
avoid active learning.",
default=0)
fit_parser.add_argument("--size-positive",
dest="size_positive",
type=int,
help="Number of positive instances that have to be sampled in the active learning \
cycle. A value of -1 means to use all instances, i.e. not to use active learning \
for the positive instances.",
default=-1)
fit_parser.add_argument("--size-negative",
dest="size_negative",
type=int,
help="Number of negative instances that have to be sampled in the active learning \
cycle. A value of -1 means to use all instances, i.e. not to use active learning \
for the negative instances.",
default=-1)
fit_parser.add_argument("--lower-bound-threshold-positive",
dest="lower_bound_threshold_positive",
type=int,
help="Value of the score threshold to determine when to accept positive instances: \
positive instances with a score higher than the specified value will be accepted \
as candidates.",
default=-1)
fit_parser.add_argument("--lower-bound-threshold-negative",
dest="lower_bound_threshold_negative",
type=int,
help="Value of the score threshold to determine when to accept negative instances: \
negative instances with a score higher than the specified value will be accepted \
as candidates.",
default=-1)
fit_parser.add_argument("--upper-bound-threshold-positive",
dest="upper_bound_threshold_positive",
type=int,
help="Value of the score threshold to determine when to accept positive instances: \
positive instances with a score lower than the specified value will be accepted \
as candidates.",
default=1)
fit_parser.add_argument("--upper-bound-threshold-negative",
dest="upper_bound_threshold_negative",
type=int,
help="Value of the score threshold to determine when to accept negative instances:\
negative instances with a score lower than the specified value will be accepted\
as candidates.",
default=1)
fit_parser.add_argument("--fit-vectorizer",
dest="fit_vectorizer",
help="If set, activate the fitting procedure for the vectorizer on positive \
instances only.",
action="store_true")
fit_parser.add_argument("--max-total-time",
dest="max_total_time",
type=int,
help="Maximal number of seconds for the duration of the optimization phase. After \
that the procedure is forcefully stopped. A value of -1 means no time limit.",
default=-1)
fit_parser.add_argument("--two-steps-optimization",
dest="two_steps_optimization",
help="If set, activate a refinement procedure anfter n_iter/2 steps that samples \
only among the parameters that have previously improved the results.",
action="store_true")
fit_parser.add_argument("--scoring", choices=['accuracy', 'roc_auc', 'average_precision', 'f1',
'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples',
'log_loss', 'precision', 'recall'],
help="The scoring strategy for evaluating in cross validation the quality of \
a hyper parameter combination.",
default='roc_auc')
fit_parser.add_argument("--cv",
type=int,
help="Cross validation size.",
default=10)
fit_parser.add_argument("-B", "--nbits",
type=int,
help="Number of bits used to express the graph kernel features. A value of 20 \
corresponds to 2**20=1 million possible features.",
default=20)
parallelization_group = fit_parser.add_argument_group(
'Parallelization', 'These options define the granularity of the multicore parallelization.')
parallelization_group.add_argument("-j", "--n-jobs",
dest="n_jobs",
type=int,
help="Number of cores to use in multiprocessing.",
default=2)
parallelization_group.add_argument("-b", "--n-blocks",
dest="n_blocks",
type=int,
help="Number of blocks in which to divide the input for the \
multiprocessing elaboration.",
default=8)
parallelization_group.add_argument("-k", "-block-size",
dest="block_size",
type=int,
help="Number of instances per block for the multiprocessing \
elaboration.",
default=None)
parallelization_group.add_argument("--pre-processor-n-jobs",
dest="pre_processor_n_jobs",
type=int,
help="Number of cores to use in multiprocessing.",
default=4)
parallelization_group.add_argument("--pre-processor-n-blocks",
dest="pre_processor_n_blocks",
type=int,
help="Number of blocks in which to divide the input for the \
multiprocessing elaboration.",
default=10)
parallelization_group.add_argument("--pre-processor-block-size",
dest="pre_processor_block_size",
type=int,
help="Number of instances per block for the multiprocessing \
elaboration.",
default=None)
fit_parser.add_argument("-r", "--random-state",
dest="random_state",
type=int,
help="Random seed.",
default=1)
# estimate commands
estimate_parser = subparsers.add_parser('estimate', help='Estimate commands',
formatter_class=DefaultsRawDescriptionHelpFormatter)
estimate_parser.set_defaults(which='estimate')
estimate_parser = model_initializer.add_arguments_estimate(estimate_parser)
estimate_parser.add_argument("-m", "--model-file",
dest="model_file",
help="Path to a fit model file.",
required=True)
estimate_parser.add_argument("-o", "--output-dir",
dest="output_dir_path",
help="Path to output directory.",
default="out")
estimate_parser.add_argument("--cross-validation",
dest="cross_validation",
help="If set, report cross validated performance measures.\
The model's parameters are re-trained in each fold keeping the\
hyper-parameters of the given model.",
action="store_true")
# base parser
base_parser = argparse.ArgumentParser(add_help=False)
base_parser = model_initializer.add_arguments_base(base_parser)
base_parser.add_argument("-m", "--model-file",
dest="model_file",
help="Path to a fit model file.",
default="model")
base_parser.add_argument("-o", "--output-dir",
dest="output_dir_path",
help="Path to output directory.",
default="out")
# predict commands
predict_parser = subparsers.add_parser('predict',
help='Predict commands',
parents=[base_parser],
formatter_class=DefaultsRawDescriptionHelpFormatter)
predict_parser.set_defaults(which='predict')
predict_parser = model_initializer.add_arguments_predict(predict_parser)
# matrix commands
matrix_parser = subparsers.add_parser('matrix',
help='Matrix commands',
parents=[base_parser],
formatter_class=DefaultsRawDescriptionHelpFormatter)
matrix_parser.set_defaults(which='matrix')
matrix_parser = model_initializer.add_arguments_matrix(matrix_parser)
matrix_parser.add_argument("-t", "--output-format", choices=["text", "numpy", "MatrixMarket", "joblib"],
dest="output_format",
help="Output file format.",
default="MatrixMarket")
# feature commands
feature_parser = subparsers.add_parser('feature',
help='Feature commands',
parents=[base_parser],
formatter_class=DefaultsRawDescriptionHelpFormatter)
feature_parser.set_defaults(which='feature')
feature_parser = model_initializer.add_arguments_feature(feature_parser)
feature_parser.add_argument("-t", "--output-format", choices=["text", "numpy", "MatrixMarket", "joblib"],
dest="output_format",
help="Output file format.",
default="MatrixMarket")
return parser
def main_script(model_initializer=None, description=None, epilog=None, prog_name=None, logger=None):
parser = argparse_setup(model_initializer, description, epilog)
args = parser.parse_args()
if args.no_logging:
configure_logging(logger, verbosity=args.verbosity)
else:
configure_logging(logger, verbosity=args.verbosity, filename=prog_name + '.log')
logger.debug('-' * 80)
logger.debug('Program: %s' % prog_name)
logger.debug('Called with parameters:\n %s' % serialize_dict(args.__dict__))
start_time = time()
try:
main(model_initializer, args)
except Exception:
import datetime
curr_time = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
logger.exception("Program run failed on %s" % curr_time)
finally:
end_time = time()
logger.info('Elapsed time: %.1f sec', end_time - start_time)
|
|
from __future__ import division
import tensorflow as tf
tf.reset_default_graph()
from tensorflow.python.client import timeline #profiling
import re
import ast
import os
import sys
import time
import math
from random import sample
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from heapq import nlargest
# =======plotting settings=======
sns.set()
sns.set_style("darkgrid")
sns.set_color_codes("dark")
#db reading helper
from nn_helper import *
LOGDIR = 'logs/neuralnet2/'
# Parameters
training_epochs = 100
n_classes = 1
n_input = 62
X_train = np.array([])
Y_train = np.array([])
X_test = np.array([])
Y_test = np.array([])
no_model_error = 0.
early_stop_flag = 860
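# early_stop_flag is a percentage threshold: training stops once the current test error exceeds the
# best test error seen so far by more than this many percent (only checked after epoch 10).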
keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)
x = tf.placeholder("float", [None, n_input], name="x")
y = tf.placeholder("float", [None,1], name="y")
def multilayer_perceptron(x, layer_config, name="neuralnet"):
'''
sets up neural network in a dynamic way to test effectiveness of height and depth of network
code idea from: https://pythonprogramming.net/community/262/TensorFlow%20For%20loop%20to%20set%20weights%20and%20biases/
(original code has errors)
Args:
x: query vector of batch_size length
layer_config: config for layer sizes, e.g. [n_input, 1024, n_classes] has 1 hidden layer of size 1024
Returns:
last layer: layer of size 'n_classes' (1 in our case)
'''
layers = {}
layers_compute = {}
with tf.name_scope(name):
for i in range(1, len(layer_config)):
#========He initializer
# new_layer = {'weights': tf.Variable(tf.random_normal(shape=[layer_config[i-1], layer_config[i]], mean=0, stddev=math.sqrt(2/layer_config[i-1]))),
# 'biases': tf.Variable(tf.random_normal(shape=[layer_config[i]], mean=0, stddev=0))}
#========Xavier initializer
new_layer = {'weights': tf.get_variable(name="w"+str(i), shape=[layer_config[i-1], layer_config[i]], initializer=tf.contrib.layers.xavier_initializer()),
'biases': tf.Variable(tf.random_normal(shape=[layer_config[i]], mean=0, stddev=0))}
layers[i-1] = new_layer
with tf.name_scope("weights"):
tf.summary.histogram("w_l"+str(i)+"_summary", new_layer['weights'])
with tf.name_scope("biases"):
tf.summary.histogram("b_l"+str(i)+"_summary", new_layer['biases'])
l = tf.add(tf.matmul(x if i == 1 else layers_compute[i-2], layers[i-1]['weights']), layers[i-1]['biases'])
with tf.name_scope(name):
l = tf.nn.relu(l) if i != len(layer_config)-1 else l
l = tf.nn.dropout(l, keep_rate) if i != len(layer_config)-1 else l
layers_compute[i-1] = l
lastlayer = len(layers_compute)-1
return layers_compute[lastlayer]
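# Example (illustrative): multilayer_perceptron(x, [n_input, 512, 128, n_classes]) builds two hidden
# ReLU layers of 512 and 128 units (each followed by dropout) and a final linear layer of size n_classes.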
def run_nn_model(learning_rate, benchmark_err, log_param, optimizer, batch_size, layer_config):
    '''Builds a TensorFlow session and trains/evaluates the model; prints progress and (optionally) plots results.
Args:
learning_rate: 'float', learning rate of optimizer
benchmark_err: 'tuple', error of dataset (without any model) of training and testset
log_param: 'string', settings string for tensorboard saving - keeps logs separated
optimizer: 'string', identifies correct optimizer to load
batch_size: 'int', batch size for model
        layer_config: 'list', config for the network layer sizes - see multilayer_perceptron()
'''
begin_time = time.time()
prediction = multilayer_perceptron(x, layer_config)
# ========= profiling ========
# run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# run_metadata = tf.RunMetadata()
# ========= profiling ========
with tf.name_scope("relativeMeanError"):
perc_err_train = tf.reduce_mean(tf.divide(tf.abs(tf.subtract(y, prediction)), benchmark_err[0]))
perc_err_test = tf.reduce_mean(tf.divide(tf.abs(tf.subtract(y, prediction)), benchmark_err[1]))
tf.summary.scalar("relativeMeanError train", perc_err_train)
tf.summary.scalar("relativeMeanError test", perc_err_test)
with tf.name_scope("optimizer"):
if optimizer == 'AdagradOptimizer':
optimizer_train = tf.train.AdagradOptimizer(learning_rate).minimize(perc_err_train)
optimizer_test = tf.train.AdagradOptimizer(learning_rate).minimize(perc_err_test)
if optimizer == 'FtrlOptimizer':
optimizer_train = tf.train.FtrlOptimizer(learning_rate).minimize(perc_err_train)
optimizer_test = tf.train.FtrlOptimizer(learning_rate).minimize(perc_err_test)
if optimizer == 'AdadeltaOptimizer':
optimizer_train = tf.train.AdadeltaOptimizer(learning_rate).minimize(perc_err_train)
optimizer_test = tf.train.AdadeltaOptimizer(learning_rate).minimize(perc_err_test)
if optimizer == 'AdamOptimizer':
optimizer_train = tf.train.AdamOptimizer(learning_rate).minimize(perc_err_train)
optimizer_test = tf.train.AdamOptimizer(learning_rate).minimize(perc_err_test)
if optimizer == 'RMSPropOptimizer':
optimizer_train = tf.train.RMSPropOptimizer(learning_rate).minimize(perc_err_train)
optimizer_test = tf.train.RMSPropOptimizer(learning_rate).minimize(perc_err_test)
# merge all summaries into a single "operation" which we can execute in a session
summary_op = tf.summary.merge_all()
# Launch the graph
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(LOGDIR + log_param , graph=tf.get_default_graph())
test_err = []
train_batch_loss_y = []
train_batch_loss_x = []
last_n_results = []
results = []
e_opt = 0.
stops = []
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(num_training_samples/batch_size)
# Loop over all batches
# ========== test ==========
if epoch % 2 == 0:
c_p, p, s_ = sess.run([perc_err_test, prediction, summary_op], feed_dict={x: X_test, y: Y_test})
#calculate mean over last 5 results
if epoch > 4:
del last_n_results[-1]
last_n_results.insert(0, c_p)
results.append(c_p)
end_res = sum(last_n_results) / len(last_n_results)
#=========== early stop check =========
if epoch == 0:
e_opt = c_p
early_stop = 100 * ((c_p/e_opt)-1)
if early_stop > early_stop_flag and epoch > 10:
with open('results-final.txt', 'a+') as out_:
out_.write('structurewarm' + str(results)+ '\n')
break
if c_p < e_opt:
e_opt = c_p
stops.append(early_stop)
print ("epoch: {:.1f}".format(epoch), "last error: {:.5f}".format(c_p), "avg last 5: {:.5f}".format(end_res), "without model: {:.3f}".format(benchmark_err[1]))
print ("stop: %f " % early_stop)
test_err.append(c_p)
writer.add_summary(s_, epoch)
# ========== test ==========
# ========== training ==========
for i in range(total_batch-1):
batch_x = X_train[i*batch_size:(i+1)*batch_size]
batch_y = Y_train[i*batch_size:(i+1)*batch_size]
batch_y = np.transpose([batch_y])
# Run optimization op (backprop) and cost op (to get loss value)
_, c_p, p, s = sess.run([optimizer_train, perc_err_train, prediction, summary_op], feed_dict={x: batch_x, y: batch_y})
# ========= profiling ======== , options=run_options, run_metadata=run_metadata
# tl = timeline.Timeline(run_metadata.step_stats)
# ctf = tl.generate_chrome_trace_format()
# with open('timeline.json', 'w+') as f:
# f.write(ctf)
# ========= profiling ========
avg_cost += c_p / total_batch
if i % 150 == 0:
train_batch_loss_y.append(c_p)
train_batch_loss_x.append(epoch + i/total_batch)
label_value = batch_y
estimate = p
err = label_value-estimate
# ==========pretty training logging===========
# if epoch % 1 == 0:
# # sess.run(assignment, feed_dict={x: X_test, y: Y_test})
# writer.add_summary(s, epoch)
# print ("Epoch:", '%04d' % (epoch+1), "cost=", \
# "{:.9f}".format(avg_cost))
# print ("[*]----------------------------")
# for i in xrange(4):
# print ("label value:", label_value[i], \
# "estimated value:", estimate[i])
# print ("[*]============================")
# sys.stdout.flush()
# ========== training ==========
# if epoch % 99 == 0 and epoch != 0:
# plot_res(test_err, (train_batch_loss_x, train_batch_loss_y), benchmark_err, epoch)
# saver.save(sess, LOGDIR + os.path.join(log_param, "model.ckpt"), epoch)
# ========= profiling ========
# tl = timeline.Timeline(run_metadata.step_stats)
# ctf = tl.generate_chrome_trace_format()
# with open('timeline.json', 'w') as f:
# f.write(ctf)
# ========= profiling ========
# print ("RMSE: {:.3f}".format(cost.eval({x: X_test, y: Y_test})))
# print ("relative error with model: {:.3f}".format(perc_err.eval({x: X_test, y: Y_test})), "without model: {:.3f}".format(benchmark_err))
# plt.plot(test_err)
# plt.show()
print ("Total Time: %3.2fs" % float(time.time() - begin_time))
def plot_res(test_err, train_batch_loss, benchmark_err, epoch):
'''Plot result of model
Args:
        test_err: 'list', test errors of the model recorded at each evaluation step
train_batch_loss: 'tuple', tuple of train error of model x & y
benchmark_err: 'tuple', error of using no model
epoch: 'int', current epoch
'''
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
test_x_val = np.array(list(x * 3 for x in range(0, len(test_err))))
plt.plot(train_batch_loss[0],train_batch_loss[1], label="Training error", c=flatui[1], alpha=0.5)
plt.plot(test_x_val, np.array(test_err), label="Test error", c=flatui[0])
    plt.axhline(y=benchmark_err[1], linestyle='dashed', label="No-model error", c=flatui[2])
plt.axhline(y=0.098, linestyle='dashed', label="State of the art error", c=flatui[3])
plt.suptitle("Model error - cold queries")
plt.yscale('log', nonposy='clip')
plt.xlim([0,epoch+1])
plt.xlabel('epoch')
plt.ylabel('error')
plt.legend(loc='upper right')
plt.show()
def make_log_param_string(learning_rate, optimizer, batch_size, warm, layer_config):
return "lr_%s_opt_%s_bsize_%s_warm_%s_layers_%s" % (learning_rate, optimizer, batch_size, warm, len(layer_config))
def main():
warm = True
vector_options = {'structure': True,'ged': True, 'time': False, 'sim': False,'w2v': False}
global X_train, X_test, Y_train
global Y_test, num_training_samples, n_input
X_train, X_test, Y_train, Y_test, num_training_samples, n_input = load_data('database-iMac.log-complete', warm, vector_options)
benchmark_err = no_modell_mean_error(Y_train, Y_test)
Y_test = np.transpose([Y_test])
    print(benchmark_err)
for optimizer in ['AdamOptimizer']:
for learning_rate in [0.026740]:
for batch_size in [8]:
layer_config = [n_input, 44,221,205, n_classes]
log_param = make_log_param_string(learning_rate, optimizer, batch_size, warm, layer_config)
print ('Starting run for %s, optimizer: %s, batch_size: %s, warm: %s, num_layers: %s' % (log_param, optimizer, batch_size, warm, len(layer_config)))
run_nn_model(learning_rate, benchmark_err, log_param, optimizer, batch_size, layer_config)
if __name__ == '__main__':
main()
|
|
import env
import pytest
from olopy.deep import *
class TestPath:
    def test_constructor (self):
# it should return an array of keys
p = Path('a','b','c')
assert isinstance(p, Path)
assert p == ['a','b','c']
# it should split dot-separated keys
p = Path('a.b', 'c')
assert p == ['a','b','c']
# it should recursively process array or Path arguments
p = Path(['a'], ['b',['c','d']], Path('e','f'))
assert p == ['a','b','c','d','e','f']
# it should ignore empty keys
p = Path('a', '', 'b', '', 'c', [], '.d.e..f', Path(), None, 'g')
assert p == ['a','b','c','d','e','f','g']
def test_slice (self):
p = Path('a.b.c.d.e')
# it should return an element if an integer is passed
assert p[0] == 'a'
# it should return a Path object when slicing
s = p[1:4]
assert s == ['b','c','d']
assert type(s) == Path
def test_stringify (self):
p = Path("a.b.c")
assert str(p) == "a.b.c"
def test_equal (self):
assert Path('a','b','c') == "a.b.c"
def test_is_subpath_of (self):
assert Path("a.b.c").is_subpath_of("")
assert Path("a.b.c").is_subpath_of("a")
assert Path("a.b.c").is_subpath_of("a.b")
assert Path("a.b.c").is_subpath_of("a.b.c")
assert not Path("a.b.c").is_subpath_of("a.b.c.d")
assert not Path("a.b.c").is_subpath_of('a.x')
def test_lookup (self):
mapping = {'a': {'b': {'c':1} } }
assert Path('a.b').lookup(mapping) == mapping['a']['b']
assert Path('a.b.c').lookup(mapping) == mapping['a']['b']['c']
assert Path('a.b.x').lookup(mapping) == None
assert Path('a.b.c.x').lookup(mapping) == None
class TestChange:
def test_constructor (self):
import time as timemod
change = Change('a.b.c', 1, 2)
assert change.path == ['a','b','c']
assert change.old == 1
assert change.new == 2
assert type(change.id) == str
assert type(change.time) == int
assert (change.time - int(timemod.time()*1000)) < 10
def test_equal (self):
# same path same values
assert Change('a.b.c', 1, 2) == Change('a.b.c', 1, 2)
# different path same values
assert Change('a.b.c', 1, 2) != Change('a.b', 1, 2)
# same path same old value different new value
assert Change('a.b.c', 1, 2) != Change('a.b.c', 1, 20)
assert Change('a.b.c', 1, None) != Change('a.b.c', 1, 2)
assert Change('a.b.c', 1, 2) != Change('a.b.c', 1, None)
# same path different old value same new value
assert Change('a.b.c', 1, 2) != Change('a.b.c', 10, 2)
assert Change('a.b.c', None, 2) != Change('a.b.c', 1, 2)
assert Change('a.b.c', 1, 2) != Change('a.b.c', None, 2)
def test_SubChange (self):
        # it should return the change with a relative path
# when the changed path is under the given sub-path
change = Change(['a','b','c'], 1, 2)
sub_change = change.SubChange("a.b")
assert sub_change == Change('c', 1, 2)
assert sub_change.id == change.id
assert sub_change.time == change.time
assert change.SubChange("a") == Change('b.c', 1, 2)
assert change.SubChange("") == Change('a.b.c', 1, 2)
assert change.SubChange("a.b.c") == Change([], 1, 2)
# it should return an empty-path change with old and new
# value of the model when a parent path changes
change = Change(['a','b'], {'c':{'x':1}}, {'c':{'x':2}})
assert change.SubChange("a.b.c.x") == Change([], 1, 2)
change = Change(['a','b'], {'c':{'x':1}}, {'c':{'y':2}})
assert change.SubChange("a.b.c.x") == Change([], 1, None)
change = Change(['a','b'], {'c':{'x':1}}, 1)
assert change.SubChange("a.b.c.x") == Change([], 1, None)
change = Change(['a','b'], 1, {'c':{'x':2}})
assert change.SubChange("a.b.c.x") == Change([], None, 2)
# it should return null if the change doesn't affect the sub-path
change = Change(['a','b','c'], 1, 2)
assert change.SubChange("a.b.d") == None
change = Change(['a','b','c'], 1, 2)
assert change.SubChange("a.b.c.d") == None
def test_SuperChange (self):
change = Change('d.e.f', 1, 2)
super_change = change.SuperChange('a.b.c')
assert super_change == Change('a.b.c.d.e.f', 1, 2)
assert super_change.id == change.id
assert super_change.time == change.time
def test_apply (self):
# set-change
d = {'a':1}
change = Change('a', 1, 2)
applied_change = change.apply(d)
assert applied_change == change
assert d == {'a':2}
d = {'a':{'b':1}}
change = Change('a.b', 1, 2)
applied_change = change.apply(d)
assert applied_change == change
assert d == {'a':{'b':2}}
# insert-change
d = {'a':{'b':1}}
change = Change('a.c', None, 2)
applied_change = change.apply(d)
assert applied_change == change
assert d == {'a':{'b':1, 'c':2}}
# delete-change
d = {'a':{'b':1}}
change = Change('a.b', 1, None)
applied_change = change.apply(d)
assert applied_change == change
assert d == {'a':{}}
# no-change
d = {'a':{'b':5}}
change = Change('a.b', 5, 5)
applied_change = change.apply(d)
assert applied_change == None
assert d == {'a':{'b':5}}
# error: non-existing path
d = {'a':{'b':5}}
with pytest.raises(AssertionError):
Change('a.b.c', 1, 2).apply(d)
assert d == {'a':{'b':5}}
# error: wrong old value
d = {'a':{'b':5}}
with pytest.raises(AssertionError):
Change('a.b', 1, 2).apply(d)
assert d == {'a':{'b':5}}
d = {'a':{'b':1}}
with pytest.raises(AssertionError):
Change('a.b', None, 5).apply(d)
assert d == {'a':{'b':1}}
d = {'a':{'b':1}}
with pytest.raises(AssertionError):
Change('a.b', 2, None).apply(d)
assert d == {'a':{'b':1}}
def test_diff ():
changes = diff({'a':1, 'b':2, 'c':{'d':3}},
{'a':1, 'b':2, 'c':{'d':3}})
assert changes == []
changes = diff({'a':1, 'b':2, 'c':{'d':3}},
{'a':10, 'b':2, 'c':{'d':3}})
assert changes == [Change('a', 1, 10)]
changes = diff({'a':1, 'b':2, 'c':{'d':3}},
{'a':1, 'c':{'d':3}})
assert changes == [Change('b', 2, None)]
changes = diff({'a':1, 'b':2, 'c':{'d':3} },
{'a':1, 'b':2, 'c':{'d':3}, 'e':4})
assert changes == [Change('e', None, 4)]
changes = diff({'a':1, 'b':2, 'c':{'d':3 }},
{'a':1, 'b':2, 'c':{'d':30}})
assert changes == [Change('c.d', 3, 30)]
changes = diff({'a':1, 'b':2, 'c':{'d':3}},
{'a':1, 'b':2, 'c':{ }})
assert changes == [Change('c.d', 3, None)]
changes = diff({'a':1, 'b':2, 'c':{'d':3 }},
{'a':1, 'b':2, 'c':{'d':3, 'e':4}})
assert changes == [Change('c.e', None, 4)]
old = {'a':2, 'b':2, 'c':{'d':3}, 'e':[1,2,4]}
changes = diff({'a':1, 'b':2, 'c':{'d':3 }},
{'a':10, 'b':2, 'c':{'d':3, 'e':4}})
expected_change1 = Change('a', 1, 10)
expected_change2 = Change('c.e', None, 4)
assert changes == [expected_change1,expected_change2] or changes == [expected_change2,expected_change1]
changes = diff(10, 20)
assert changes == [Change('', 10, 20)]
with pytest.raises(TypeError):
changes = diff([1,2,3], [1,2,4])
def test_serialization ():
o = {'a':1, 'b':1}
s = serialize(o)
assert type(s) == str
o2 = deserialize(s)
assert o2 == o
o2 = clone(o)
assert o2 == o
assert o2 is not o
p = Path('a.b.c')
p2 = clone(p)
assert p2 == p
assert p2 is not p
c = Change('a.b.c', 1, {'x':10})
c2 = clone(c)
assert c2 == dict(c)
assert c2 is not c
|
|
"""Definitions for DSMR Reader sensors added to MQTT."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Final
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CURRENCY_EURO,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
POWER_KILO_WATT,
VOLUME_CUBIC_METERS,
)
from homeassistant.util import dt as dt_util
PRICE_EUR_KWH: Final = f"EUR/{ENERGY_KILO_WATT_HOUR}"
PRICE_EUR_M3: Final = f"EUR/{VOLUME_CUBIC_METERS}"
def dsmr_transform(value):
"""Transform DSMR version value to right format."""
if value.isdigit():
return float(value) / 10
return value
def tariff_transform(value):
"""Transform tariff from number to description."""
if value == "1":
return "low"
return "high"
@dataclass
class DSMRReaderSensorEntityDescription(SensorEntityDescription):
"""Sensor entity description for DSMR Reader."""
state: Callable | None = None
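    # Note (illustrative): `state` is an optional transform applied to the raw MQTT payload before it
    # becomes the sensor's value (e.g. dsmr_transform / tariff_transform above); when None the payload
    # is presumably used as-is by the consuming sensor entity.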
SENSORS: tuple[DSMRReaderSensorEntityDescription, ...] = (
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_delivered_1",
name="Low tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_returned_1",
name="Low tariff returned",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_delivered_2",
name="High tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_returned_2",
name="High tariff returned",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_currently_delivered",
name="Current power usage",
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/electricity_currently_returned",
name="Current power return",
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_delivered_l1",
name="Current power usage L1",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_delivered_l2",
name="Current power usage L2",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_delivered_l3",
name="Current power usage L3",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_returned_l1",
name="Current power return L1",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_returned_l2",
name="Current power return L2",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_currently_returned_l3",
name="Current power return L3",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_KILO_WATT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/extra_device_delivered",
name="Gas meter usage",
entity_registry_enabled_default=False,
icon="mdi:fire",
native_unit_of_measurement=VOLUME_CUBIC_METERS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_voltage_l1",
name="Current voltage L1",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_voltage_l2",
name="Current voltage L2",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_voltage_l3",
name="Current voltage L3",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_power_current_l1",
name="Phase power current L1",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.CURRENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_power_current_l2",
name="Phase power current L2",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.CURRENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/phase_power_current_l3",
name="Phase power current L3",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.CURRENT,
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/reading/timestamp",
name="Telegram timestamp",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
state=dt_util.parse_datetime,
),
DSMRReaderSensorEntityDescription(
key="dsmr/consumption/gas/delivered",
name="Gas usage",
device_class=SensorDeviceClass.GAS,
native_unit_of_measurement=VOLUME_CUBIC_METERS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/consumption/gas/currently_delivered",
name="Current gas usage",
device_class=SensorDeviceClass.GAS,
native_unit_of_measurement=VOLUME_CUBIC_METERS,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRReaderSensorEntityDescription(
key="dsmr/consumption/gas/read_at",
name="Gas meter read",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
state=dt_util.parse_datetime,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity1",
name="Low tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity2",
name="High tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity1_returned",
name="Low tariff return",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity2_returned",
name="High tariff return",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity_merged",
name="Power usage total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity_returned_merged",
name="Power return total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity1_cost",
name="Low tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity2_cost",
name="High tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/electricity_cost_merged",
name="Power total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/gas",
name="Gas usage",
icon="mdi:counter",
native_unit_of_measurement=VOLUME_CUBIC_METERS,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/gas_cost",
name="Gas cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/total_cost",
name="Total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/energy_supplier_price_electricity_delivered_1",
name="Low tariff delivered price",
icon="mdi:currency-eur",
native_unit_of_measurement=PRICE_EUR_KWH,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/energy_supplier_price_electricity_delivered_2",
name="High tariff delivered price",
icon="mdi:currency-eur",
native_unit_of_measurement=PRICE_EUR_KWH,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/energy_supplier_price_electricity_returned_1",
name="Low tariff returned price",
icon="mdi:currency-eur",
native_unit_of_measurement=PRICE_EUR_KWH,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/energy_supplier_price_electricity_returned_2",
name="High tariff returned price",
icon="mdi:currency-eur",
native_unit_of_measurement=PRICE_EUR_KWH,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/energy_supplier_price_gas",
name="Gas price",
icon="mdi:currency-eur",
native_unit_of_measurement=PRICE_EUR_M3,
),
DSMRReaderSensorEntityDescription(
key="dsmr/day-consumption/fixed_cost",
name="Current day fixed cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/dsmr_version",
name="DSMR version",
entity_registry_enabled_default=False,
icon="mdi:alert-circle",
state=dsmr_transform,
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/electricity_tariff",
name="Electricity tariff",
icon="mdi:flash",
state=tariff_transform,
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/power_failure_count",
name="Power failure count",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/long_power_failure_count",
name="Long power failure count",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_sag_count_l1",
name="Voltage sag L1",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_sag_count_l2",
name="Voltage sag L2",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_sag_count_l3",
name="Voltage sag L3",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_swell_count_l1",
name="Voltage swell L1",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_swell_count_l2",
name="Voltage swell L2",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/voltage_swell_count_l3",
name="Voltage swell L3",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/meter-stats/rejected_telegrams",
name="Rejected telegrams",
entity_registry_enabled_default=False,
icon="mdi:flash",
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity1",
name="Current month low tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity2",
name="Current month high tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity1_returned",
name="Current month low tariff returned",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity2_returned",
name="Current month high tariff returned",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity_merged",
name="Current month power usage total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity_returned_merged",
name="Current month power return total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity1_cost",
name="Current month low tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity2_cost",
name="Current month high tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/electricity_cost_merged",
name="Current month power total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/gas",
name="Current month gas usage",
icon="mdi:counter",
native_unit_of_measurement=VOLUME_CUBIC_METERS,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/gas_cost",
name="Current month gas cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/fixed_cost",
name="Current month fixed cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-month/total_cost",
name="Current month total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity1",
name="Current year low tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity2",
name="Current year high tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity1_returned",
name="Current year low tariff returned",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity2_returned",
name="Current year high tariff usage",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity_merged",
name="Current year power usage total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity_returned_merged",
name="Current year power returned total",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity1_cost",
name="Current year low tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity2_cost",
name="Current year high tariff cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/electricity_cost_merged",
name="Current year power total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/gas",
name="Current year gas usage",
icon="mdi:counter",
native_unit_of_measurement=VOLUME_CUBIC_METERS,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/gas_cost",
name="Current year gas cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/fixed_cost",
name="Current year fixed cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
DSMRReaderSensorEntityDescription(
key="dsmr/current-year/total_cost",
name="Current year total cost",
icon="mdi:currency-eur",
native_unit_of_measurement=CURRENCY_EURO,
),
)
|
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
from oslo_config import cfg
from oslo_log import log
from cloudkitty import dataframe
from cloudkitty.storage import v2 as v2_storage
from cloudkitty.storage.v2.elasticsearch import client as es_client
from cloudkitty.storage.v2.elasticsearch import exceptions
from cloudkitty.utils import tz as tzutils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
ELASTICSEARCH_STORAGE_GROUP = 'storage_elasticsearch'
elasticsearch_storage_opts = [
cfg.StrOpt(
'host',
help='Elasticsearch host, along with port and protocol. '
'Defaults to http://localhost:9200',
default='http://localhost:9200'),
cfg.StrOpt(
'index_name',
help='Elasticsearch index to use. Defaults to "cloudkitty".',
default='cloudkitty'),
cfg.BoolOpt('insecure',
help='Set to true to allow insecure HTTPS '
'connections to Elasticsearch',
default=False),
cfg.StrOpt('cafile',
help='Path of the CA certificate to trust for '
'HTTPS connections.',
default=None),
cfg.IntOpt('scroll_duration',
help="Duration (in seconds) for which the ES scroll contexts "
"should be kept alive.",
advanced=True,
default=30, min=0, max=300),
]
CONF.register_opts(elasticsearch_storage_opts, ELASTICSEARCH_STORAGE_GROUP)
CLOUDKITTY_INDEX_MAPPING = {
"dynamic_templates": [
{
"strings_as_keywords": {
"match_mapping_type": "string",
"mapping": {
"type": "keyword"
}
}
}
],
"dynamic": False,
"properties": {
"start": {"type": "date"},
"end": {"type": "date"},
"type": {"type": "keyword"},
"unit": {"type": "keyword"},
"qty": {"type": "double"},
"price": {"type": "double"},
"groupby": {"dynamic": True, "type": "object"},
"metadata": {"dynamic": True, "type": "object"}
},
}
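# --- Illustrative example (not part of the original module): the shape of a single
# rated data point as stored under CLOUDKITTY_INDEX_MAPPING. Every value below is a
# placeholder; "groupby" and "metadata" are dynamically mapped objects.
EXAMPLE_MAPPED_DOCUMENT = {
    "start": "2019-01-01T00:00:00+00:00",
    "end": "2019-01-01T01:00:00+00:00",
    "type": "compute_instance",
    "unit": "instance",
    "qty": 1.0,
    "price": 0.42,
    "groupby": {"project_id": "some-project-id"},
    "metadata": {"flavor_name": "m1.small"},
}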
class ElasticsearchStorage(v2_storage.BaseStorage):
def __init__(self, *args, **kwargs):
super(ElasticsearchStorage, self).__init__(*args, **kwargs)
LOG.warning('The Elasticsearch storage driver is experimental. '
'DO NOT USE IT IN PRODUCTION.')
verify = not CONF.storage_elasticsearch.insecure
if verify and CONF.storage_elasticsearch.cafile:
verify = CONF.storage_elasticsearch.cafile
self._conn = es_client.ElasticsearchClient(
CONF.storage_elasticsearch.host,
CONF.storage_elasticsearch.index_name,
"_doc",
verify=verify)
def init(self):
r = self._conn.get_index()
if r.status_code != 200:
raise exceptions.IndexDoesNotExist(
CONF.storage_elasticsearch.index_name)
LOG.info('Creating mapping "_doc" on index {}...'.format(
CONF.storage_elasticsearch.index_name))
self._conn.put_mapping(CLOUDKITTY_INDEX_MAPPING)
LOG.info('Mapping created.')
def push(self, dataframes, scope_id=None):
for frame in dataframes:
for type_, point in frame.iterpoints():
start, end = self._local_to_utc(frame.start, frame.end)
self._conn.add_point(point, type_, start, end)
self._conn.commit()
@staticmethod
def _local_to_utc(*args):
return [tzutils.local_to_utc(arg) for arg in args]
@staticmethod
def _doc_to_datapoint(doc):
return dataframe.DataPoint(
doc['unit'],
doc['qty'],
doc['price'],
doc['groupby'],
doc['metadata'],
)
def _build_dataframes(self, docs):
dataframes = {}
nb_points = 0
for doc in docs:
source = doc['_source']
start = tzutils.dt_from_iso(source['start'])
end = tzutils.dt_from_iso(source['end'])
key = (start, end)
if key not in dataframes.keys():
dataframes[key] = dataframe.DataFrame(start=start, end=end)
dataframes[key].add_point(
self._doc_to_datapoint(source), source['type'])
nb_points += 1
output = list(dataframes.values())
output.sort(key=lambda frame: (frame.start, frame.end))
return output
def retrieve(self, begin=None, end=None,
filters=None,
metric_types=None,
offset=0, limit=1000, paginate=True):
begin, end = self._local_to_utc(begin or tzutils.get_month_start(),
end or tzutils.get_next_month())
total, docs = self._conn.retrieve(
begin, end, filters, metric_types,
offset=offset, limit=limit, paginate=paginate)
return {
'total': total,
'dataframes': self._build_dataframes(docs),
}
def delete(self, begin=None, end=None, filters=None):
self._conn.delete_by_query(begin, end, filters)
@staticmethod
def _normalize_time(t):
if isinstance(t, datetime.datetime):
return tzutils.utc_to_local(t)
return tzutils.dt_from_iso(t)
def _doc_to_total_result(self, doc, start, end):
output = {
'begin': self._normalize_time(doc.get('start', start)),
'end': self._normalize_time(doc.get('end', end)),
'qty': doc['sum_qty']['value'],
'rate': doc['sum_price']['value'],
}
# Means we had a composite aggregation
if 'key' in doc.keys():
for key, value in doc['key'].items():
if key == 'begin' or key == 'end':
# Elasticsearch returns ts in milliseconds
value = tzutils.dt_from_ts(value // 1000)
output[key] = value
return output
def total(self, groupby=None, begin=None, end=None, metric_types=None,
filters=None, custom_fields=None, offset=0, limit=1000,
paginate=True):
begin, end = self._local_to_utc(begin or tzutils.get_month_start(),
end or tzutils.get_next_month())
total, docs = self._conn.total(begin, end, metric_types, filters,
groupby, custom_fields=custom_fields,
offset=offset, limit=limit,
paginate=paginate)
return {
'total': total,
'results': [self._doc_to_total_result(doc, begin, end)
for doc in docs],
}
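# --- Illustrative usage sketch (not part of the original driver). It assumes an
# already-configured ElasticsearchStorage instance backed by a reachable cluster,
# and only demonstrates the call shapes of retrieve() and total().
def _example_queries(storage):
    """Hypothetical helper: current-month dataframes plus totals grouped by type."""
    frames = storage.retrieve(offset=0, limit=100, paginate=True)
    totals = storage.total(groupby=['type'], offset=0, limit=100, paginate=True)
    return frames['dataframes'], totals['results']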
|
|
"""Data prep, train and evaluate DNN model."""
import datetime
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow.keras import activations, callbacks, layers, models
logging.info(tf.version.VERSION)
CSV_COLUMNS = [
"fare_amount",
"pickup_datetime",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude",
"passenger_count",
"key",
]
# inputs are all float except for pickup_datetime which is a string
STRING_COLS = ["pickup_datetime"]
LABEL_COLUMN = "fare_amount"
DEFAULTS = [[0.0], ["na"], [0.0], [0.0], [0.0], [0.0], [0.0], ["na"]]
DAYS = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
def features_and_labels(row_data):
for unwanted_col in ["key"]:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label
def load_dataset(pattern, batch_size, num_repeat):
dataset = tf.data.experimental.make_csv_dataset(
file_pattern=pattern,
batch_size=batch_size,
column_names=CSV_COLUMNS,
column_defaults=DEFAULTS,
num_epochs=num_repeat,
shuffle_buffer_size=1000000,
)
return dataset.map(features_and_labels)
def create_train_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=None)
return dataset.prefetch(1)
def create_eval_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=1)
return dataset.prefetch(1)
def parse_datetime(s):
if not isinstance(s, str):
s = s.numpy().decode("utf-8")
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff * londiff + latdiff * latdiff)
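# Illustrative check (hypothetical values, not part of the original script): for a
# 3-4-5 triangle, euclidean((0.0, 0.0, 3.0, 4.0)) evaluates to a tensor of ~5.0;
# inputs are raw lat/lon values, so the result is a distance in degrees.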
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string), ts_in
)
@tf.function
def fare_thresh(x):
return 60 * activations.relu(x)
def transform(inputs, numeric_cols, nbuckets):
# Pass-through columns
transformed = inputs.copy()
del transformed["pickup_datetime"]
feature_columns = {
colname: fc.numeric_column(colname) for colname in numeric_cols
}
    # Scaling longitude from range [-78, -70] to [0, 1]
for lon_col in ["pickup_longitude", "dropoff_longitude"]:
transformed[lon_col] = layers.Lambda(
lambda x: (x + 78) / 8.0, name=f"scale_{lon_col}"
)(inputs[lon_col])
# Scaling latitude from range [37, 45] to [0, 1]
for lat_col in ["pickup_latitude", "dropoff_latitude"]:
transformed[lat_col] = layers.Lambda(
lambda x: (x - 37) / 8.0, name=f"scale_{lat_col}"
)(inputs[lat_col])
# Adding Euclidean dist (no need to be accurate: NN will calibrate it)
transformed["euclidean"] = layers.Lambda(euclidean, name="euclidean")(
[
inputs["pickup_longitude"],
inputs["pickup_latitude"],
inputs["dropoff_longitude"],
inputs["dropoff_latitude"],
]
)
feature_columns["euclidean"] = fc.numeric_column("euclidean")
# hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
transformed["hourofday"] = layers.Lambda(
lambda x: tf.strings.to_number(
tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32
),
name="hourofday",
)(inputs["pickup_datetime"])
feature_columns["hourofday"] = fc.indicator_column(
fc.categorical_column_with_identity("hourofday", num_buckets=24)
)
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = fc.bucketized_column(
feature_columns["pickup_latitude"], latbuckets
)
b_dlat = fc.bucketized_column(
feature_columns["dropoff_latitude"], latbuckets
)
b_plon = fc.bucketized_column(
feature_columns["pickup_longitude"], lonbuckets
)
b_dlon = fc.bucketized_column(
feature_columns["dropoff_longitude"], lonbuckets
)
ploc = fc.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
dloc = fc.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = fc.crossed_column([ploc, dloc], nbuckets**4)
feature_columns["pickup_and_dropoff"] = fc.embedding_column(pd_pair, 100)
return transformed, feature_columns
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model(nbuckets, nnsize, lr, string_cols):
numeric_cols = set(CSV_COLUMNS) - {LABEL_COLUMN, "key"} - set(string_cols)
inputs = {
colname: layers.Input(name=colname, shape=(), dtype="float32")
for colname in numeric_cols
}
inputs.update(
{
colname: layers.Input(name=colname, shape=(), dtype="string")
for colname in string_cols
}
)
# transforms
transformed, feature_columns = transform(inputs, numeric_cols, nbuckets)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
x = dnn_inputs
for layer, nodes in enumerate(nnsize):
x = layers.Dense(nodes, activation="relu", name=f"h{layer}")(x)
output = layers.Dense(1, name="fare")(x)
model = models.Model(inputs, output)
lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=lr_optimizer, loss="mse", metrics=[rmse, "mse"])
return model
def train_and_evaluate(hparams):
batch_size = hparams["batch_size"]
nbuckets = hparams["nbuckets"]
lr = hparams["lr"]
nnsize = hparams["nnsize"]
eval_data_path = hparams["eval_data_path"]
num_evals = hparams["num_evals"]
num_examples_to_train_on = hparams["num_examples_to_train_on"]
output_dir = hparams["output_dir"]
train_data_path = hparams["train_data_path"]
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
savedmodel_dir = os.path.join(output_dir, "export/savedmodel")
model_export_path = os.path.join(savedmodel_dir, timestamp)
checkpoint_path = os.path.join(output_dir, "checkpoints")
tensorboard_path = os.path.join(output_dir, "tensorboard")
if tf.io.gfile.exists(output_dir):
tf.io.gfile.rmtree(output_dir)
model = build_dnn_model(nbuckets, nnsize, lr, STRING_COLS)
    model.summary(print_fn=logging.info)
trainds = create_train_dataset(train_data_path, batch_size)
evalds = create_eval_dataset(eval_data_path, batch_size)
steps_per_epoch = num_examples_to_train_on // (batch_size * num_evals)
checkpoint_cb = callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True, verbose=1
)
tensorboard_cb = callbacks.TensorBoard(tensorboard_path, histogram_freq=1)
history = model.fit(
trainds,
validation_data=evalds,
epochs=num_evals,
steps_per_epoch=max(1, steps_per_epoch),
verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
callbacks=[checkpoint_cb, tensorboard_cb],
)
# Exporting the model with default serving function.
model.save(model_export_path)
return history
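# --- Illustrative hyperparameters (not part of the original script). The keys are
# exactly those read by train_and_evaluate(); every value below is a placeholder
# for a hypothetical local run.
EXAMPLE_HPARAMS = {
    "batch_size": 32,
    "nbuckets": 10,
    "lr": 0.001,
    "nnsize": [32, 8],
    "eval_data_path": "data/taxi-valid*.csv",  # hypothetical path
    "num_evals": 5,
    "num_examples_to_train_on": 100 * 32 * 5,
    "output_dir": "trained_model",  # hypothetical path
    "train_data_path": "data/taxi-train*.csv",  # hypothetical path
}
# train_and_evaluate(EXAMPLE_HPARAMS)  # requires CSV data at the paths above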
|
|
from __future__ import absolute_import, print_function, division
from petl.test.helpers import ieq
from petl.util import expr, empty, coalesce
from petl.transform.basics import cut, cat, addfield, rowslice, head, tail, \
cutout, skipcomments, annex, addrownumbers, addcolumn, \
addfieldusingcontext, movefield, stack, addfields
def test_cut():
table = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8', True),
('D', 'xyz', 9.0),
('E', None))
cut1 = cut(table, 'foo')
expectation = (('foo',),
('A',),
('B',),
(u'B',),
('D',),
('E',))
ieq(expectation, cut1)
cut2 = cut(table, 'foo', 'baz')
expectation = (('foo', 'baz'),
('A', 2),
('B', '3.4'),
(u'B', u'7.8'),
('D', 9.0),
('E', None))
ieq(expectation, cut2)
cut3 = cut(table, 0, 2)
expectation = (('foo', 'baz'),
('A', 2),
('B', '3.4'),
(u'B', u'7.8'),
('D', 9.0),
('E', None))
ieq(expectation, cut3)
cut4 = cut(table, 'bar', 0)
expectation = (('bar', 'foo'),
(1, 'A'),
('2', 'B'),
(u'3', u'B'),
('xyz', 'D'),
(None, 'E'))
ieq(expectation, cut4)
cut5 = cut(table, ('foo', 'baz'))
expectation = (('foo', 'baz'),
('A', 2),
('B', '3.4'),
(u'B', u'7.8'),
('D', 9.0),
('E', None))
ieq(expectation, cut5)
def test_cut_empty():
table = (('foo', 'bar'),)
expect = (('bar',),)
actual = cut(table, 'bar')
ieq(expect, actual)
def test_cutout():
table = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8', True),
('D', 'xyz', 9.0),
('E', None))
cut1 = cutout(table, 'bar', 'baz')
expectation = (('foo',),
('A',),
('B',),
(u'B',),
('D',),
('E',))
ieq(expectation, cut1)
cut2 = cutout(table, 'bar')
expectation = (('foo', 'baz'),
('A', 2),
('B', '3.4'),
(u'B', u'7.8'),
('D', 9.0),
('E', None))
ieq(expectation, cut2)
cut3 = cutout(table, 1)
expectation = (('foo', 'baz'),
('A', 2),
('B', '3.4'),
(u'B', u'7.8'),
('D', 9.0),
('E', None))
ieq(expectation, cut3)
def test_cat():
table1 = (('foo', 'bar'),
(1, 'A'),
(2, 'B'))
table2 = (('bar', 'baz'),
('C', True),
('D', False))
cat1 = cat(table1, table2, missing=None)
expectation = (('foo', 'bar', 'baz'),
(1, 'A', None),
(2, 'B', None),
(None, 'C', True),
(None, 'D', False))
ieq(expectation, cat1)
# how does cat cope with uneven rows?
table3 = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8', True),
('D', 'xyz', 9.0),
('E', None))
cat3 = cat(table3, missing=None)
expectation = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8'),
('D', 'xyz', 9.0),
('E', None, None))
ieq(expectation, cat3)
# cat more than two tables?
cat4 = cat(table1, table2, table3)
expectation = (('foo', 'bar', 'baz'),
(1, 'A', None),
(2, 'B', None),
(None, 'C', True),
(None, 'D', False),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8'),
('D', 'xyz', 9.0),
('E', None, None))
ieq(expectation, cat4)
def test_cat_with_header():
table1 = (('bar', 'foo'),
('A', 1),
('B', 2))
table2 = (('bar', 'baz'),
('C', True),
('D', False))
actual = cat(table1, header=['A', 'foo', 'B', 'bar', 'C'])
expect = (('A', 'foo', 'B', 'bar', 'C'),
(None, 1, None, 'A', None),
(None, 2, None, 'B', None))
ieq(expect, actual)
ieq(expect, actual)
actual = cat(table1, table2, header=['A', 'foo', 'B', 'bar', 'C'])
expect = (('A', 'foo', 'B', 'bar', 'C'),
(None, 1, None, 'A', None),
(None, 2, None, 'B', None),
(None, None, None, 'C', None),
(None, None, None, 'D', None))
ieq(expect, actual)
ieq(expect, actual)
def test_cat_empty():
table1 = (('foo', 'bar'),
(1, 'A'),
(2, 'B'))
table2 = (('bar', 'baz'),)
expect = (('foo', 'bar', 'baz'),
(1, 'A', None),
(2, 'B', None))
actual = cat(table1, table2)
ieq(expect, actual)
def test_cat_dupfields():
table1 = (('foo', 'foo'),
(1, 'A'),
(2,),
(3, 'B', True))
# these cases are pathological, including to confirm expected behaviour,
# but user needs to rename fields to get something sensible
actual = cat(table1)
expect = (('foo', 'foo'),
(1, 1),
(2, 2),
(3, 3))
ieq(expect, actual)
table2 = (('foo', 'foo', 'bar'),
(4, 'C', True),
(5, 'D', False))
actual = cat(table1, table2)
expect = (('foo', 'foo', 'bar'),
(1, 1, None),
(2, 2, None),
(3, 3, None),
(4, 4, True),
(5, 5, False))
ieq(expect, actual)
def test_stack_dupfields():
table1 = (('foo', 'foo'),
(1, 'A'),
(2,),
(3, 'B', True))
actual = stack(table1)
expect = (('foo', 'foo'),
(1, 'A'),
(2, None),
(3, 'B'))
ieq(expect, actual)
table2 = (('foo', 'foo', 'bar'),
(4, 'C', True),
(5, 'D', False))
actual = stack(table1, table2)
expect = (('foo', 'foo'),
(1, 'A'),
(2, None),
(3, 'B'),
(4, 'C'),
(5, 'D'))
ieq(expect, actual)
def test_addfield():
table = (('foo', 'bar'),
('M', 12),
('F', 34),
('-', 56))
result = addfield(table, 'baz', 42)
expectation = (('foo', 'bar', 'baz'),
('M', 12, 42),
('F', 34, 42),
('-', 56, 42))
ieq(expectation, result)
ieq(expectation, result)
result = addfield(table, 'baz', lambda row: '%s,%s' % (row.foo, row.bar))
expectation = (('foo', 'bar', 'baz'),
('M', 12, 'M,12'),
('F', 34, 'F,34'),
('-', 56, '-,56'))
ieq(expectation, result)
ieq(expectation, result)
result = addfield(table, 'baz', lambda rec: rec['bar'] * 2)
expectation = (('foo', 'bar', 'baz'),
('M', 12, 24),
('F', 34, 68),
('-', 56, 112))
ieq(expectation, result)
ieq(expectation, result)
result = addfield(table, 'baz', expr('{bar} * 2'))
expectation = (('foo', 'bar', 'baz'),
('M', 12, 24),
('F', 34, 68),
('-', 56, 112))
ieq(expectation, result)
ieq(expectation, result)
result = addfield(table, 'baz', 42, index=0)
expectation = (('baz', 'foo', 'bar'),
(42, 'M', 12),
(42, 'F', 34),
(42, '-', 56))
ieq(expectation, result)
ieq(expectation, result)
def test_addfield_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'bar', 'baz'),)
actual = addfield(table, 'baz', 42)
ieq(expect, actual)
ieq(expect, actual)
def test_addfield_coalesce():
table = (('foo', 'bar', 'baz', 'quux'),
('M', 12, 23, 44),
('F', None, 23, 11),
('-', None, None, 42))
result = addfield(table, 'spong', coalesce('bar', 'baz', 'quux'))
expect = (('foo', 'bar', 'baz', 'quux', 'spong'),
('M', 12, 23, 44, 12),
('F', None, 23, 11, 23),
('-', None, None, 42, 42))
ieq(expect, result)
ieq(expect, result)
result = addfield(table, 'spong', coalesce(1, 2, 3))
expect = (('foo', 'bar', 'baz', 'quux', 'spong'),
('M', 12, 23, 44, 12),
('F', None, 23, 11, 23),
('-', None, None, 42, 42))
ieq(expect, result)
ieq(expect, result)
def test_addfield_uneven_rows():
table = (('foo', 'bar'),
('M',),
('F', 34),
('-', 56, 'spong'))
result = addfield(table, 'baz', 42)
expectation = (('foo', 'bar', 'baz'),
('M', None, 42),
('F', 34, 42),
('-', 56, 42))
ieq(expectation, result)
ieq(expectation, result)
def test_addfield_dupfield():
table = (('foo', 'foo'),
('M', 12),
('F', 34),
('-', 56))
result = addfield(table, 'bar', 42)
expectation = (('foo', 'foo', 'bar'),
('M', 12, 42),
('F', 34, 42),
('-', 56, 42))
ieq(expectation, result)
ieq(expectation, result)
def test_addfields():
table = (('foo', 'bar'),
('M', 12),
('F', 34),
('-', 56))
result = addfields(table, [('baz', 42),
('qux', lambda row: '%s,%s' % (row.foo, row.bar)),
('fiz', lambda rec: rec['bar'] * 2, 0)])
expectation = (('fiz', 'foo', 'bar', 'baz', 'qux'),
(24, 'M', 12, 42, 'M,12'),
(68, 'F', 34, 42, 'F,34'),
(112, '-', 56, 42, '-,56'))
ieq(expectation, result)
ieq(expectation, result)
def test_addfields_uneven_rows():
table = (('foo', 'bar'),
('M',),
('F', 34),
('-', 56, 'spong'))
result = addfields(table, [('baz', 42),
('qux', 100),
('qux', 200)])
expectation = (('foo', 'bar', 'baz', 'qux', 'qux'),
('M', None, 42, 100, 200),
('F', 34, 42, 100, 200),
('-', 56, 42, 100, 200))
ieq(expectation, result)
ieq(expectation, result)
result = addfields(table, [('baz', 42),
('qux', 100, 0),
('qux', 200, 0)])
expectation = (('qux', 'qux', 'foo', 'bar', 'baz'),
(200, 100, 'M', None, 42),
(200, 100, 'F', 34, 42),
(200, 100, '-', 56, 42))
ieq(expectation, result)
ieq(expectation, result)
def test_rowslice():
table = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
(u'B', u'3', u'7.8', True),
('D', 'xyz', 9.0),
('E', None))
result = rowslice(table, 2)
expectation = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'))
ieq(expectation, result)
result = rowslice(table, 1, 2)
expectation = (('foo', 'bar', 'baz'),
('B', '2', '3.4'))
ieq(expectation, result)
result = rowslice(table, 1, 5, 2)
expectation = (('foo', 'bar', 'baz'),
('B', '2', '3.4'),
('D', 'xyz', 9.0))
ieq(expectation, result)
def test_rowslice_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'bar'),)
actual = rowslice(table, 1, 2)
ieq(expect, actual)
def test_head():
table1 = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 5),
('d', 7),
('f', 42),
('f', 3),
('h', 90),
('k', 12),
('l', 77),
('q', 2))
table2 = head(table1, 4)
expect = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 5),
('d', 7))
ieq(expect, table2)
def test_tail():
table1 = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 5),
('d', 7),
('f', 42),
('f', 3),
('h', 90),
('k', 12),
('l', 77),
('q', 2))
table2 = tail(table1, 4)
expect = (('foo', 'bar'),
('h', 90),
('k', 12),
('l', 77),
('q', 2))
ieq(expect, table2)
def test_tail_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'bar'),)
actual = tail(table)
ieq(expect, actual)
def test_skipcomments():
table1 = (('##aaa', 'bbb', 'ccc'),
('##mmm',),
('#foo', 'bar'),
('##nnn', 1),
('a', 1),
('b', 2))
table2 = skipcomments(table1, '##')
expect2 = (('#foo', 'bar'),
('a', 1),
('b', 2))
ieq(expect2, table2)
ieq(expect2, table2) # can iterate twice?
def test_skipcomments_empty():
table1 = (('##aaa', 'bbb', 'ccc'),
('##mmm',),
('#foo', 'bar'),
('##nnn', 1))
table2 = skipcomments(table1, '##')
expect2 = (('#foo', 'bar'),)
ieq(expect2, table2)
def test_annex():
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('F', 1))
table2 = (('foo', 'baz'),
('B', 3),
('D', 10))
expect = (('foo', 'bar', 'foo', 'baz'),
('A', 9, 'B', 3),
('C', 2, 'D', 10),
('F', 1, None, None))
actual = annex(table1, table2)
ieq(expect, actual)
ieq(expect, actual)
expect21 = (('foo', 'baz', 'foo', 'bar'),
('B', 3, 'A', 9),
('D', 10, 'C', 2),
(None, None, 'F', 1))
actual21 = annex(table2, table1)
ieq(expect21, actual21)
ieq(expect21, actual21)
def test_annex_uneven_rows():
table1 = (('foo', 'bar'),
('A', 9, True),
('C', 2),
('F',))
table2 = (('foo', 'baz'),
('B', 3),
('D', 10))
expect = (('foo', 'bar', 'foo', 'baz'),
('A', 9, 'B', 3),
('C', 2, 'D', 10),
('F', None, None, None))
actual = annex(table1, table2)
ieq(expect, actual)
ieq(expect, actual)
def test_addrownumbers():
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('F', 1))
expect = (('row', 'foo', 'bar'),
(1, 'A', 9),
(2, 'C', 2),
(3, 'F', 1))
actual = addrownumbers(table1)
ieq(expect, actual)
ieq(expect, actual)
def test_addrownumbers_field_name():
table1 = (('foo', 'bar'),
('A', 9),
('C', 2))
expect = (('id', 'foo', 'bar'),
(1, 'A', 9),
(2, 'C', 2))
actual = addrownumbers(table1, field='id')
ieq(expect, actual)
ieq(expect, actual)
def test_addcolumn():
table1 = (('foo', 'bar'),
('A', 1),
('B', 2))
col = [True, False]
expect2 = (('foo', 'bar', 'baz'),
('A', 1, True),
('B', 2, False))
table2 = addcolumn(table1, 'baz', col)
ieq(expect2, table2)
ieq(expect2, table2)
# test short column
table3 = (('foo', 'bar'),
('A', 1),
('B', 2),
('C', 2))
expect4 = (('foo', 'bar', 'baz'),
('A', 1, True),
('B', 2, False),
('C', 2, None))
table4 = addcolumn(table3, 'baz', col)
ieq(expect4, table4)
# test short table
col = [True, False, False]
expect5 = (('foo', 'bar', 'baz'),
('A', 1, True),
('B', 2, False),
(None, None, False))
table5 = addcolumn(table1, 'baz', col)
ieq(expect5, table5)
def test_empty_addcolumn():
table1 = empty()
table2 = addcolumn(table1, 'foo', ['A', 'B'])
table3 = addcolumn(table2, 'bar', [1, 2])
expect = (('foo', 'bar'),
('A', 1),
('B', 2))
ieq(expect, table3)
ieq(expect, table3)
def test_addfieldusingcontext():
table1 = (('foo', 'bar'),
('A', 1),
('B', 4),
('C', 5),
('D', 9))
expect = (('foo', 'bar', 'baz', 'quux'),
('A', 1, None, 3),
('B', 4, 3, 1),
('C', 5, 1, 4),
('D', 9, 4, None))
def upstream(prv, cur, nxt):
if prv is None:
return None
else:
return cur.bar - prv.bar
def downstream(prv, cur, nxt):
if nxt is None:
return None
else:
return nxt.bar - cur.bar
table2 = addfieldusingcontext(table1, 'baz', upstream)
table3 = addfieldusingcontext(table2, 'quux', downstream)
ieq(expect, table3)
ieq(expect, table3)
def test_addfieldusingcontext_stateful():
table1 = (('foo', 'bar'),
('A', 1),
('B', 4),
('C', 5),
('D', 9))
expect = (('foo', 'bar', 'baz', 'quux'),
('A', 1, 1, 5),
('B', 4, 5, 10),
('C', 5, 10, 19),
('D', 9, 19, 19))
def upstream(prv, cur, nxt):
if prv is None:
return cur.bar
else:
return cur.bar + prv.baz
def downstream(prv, cur, nxt):
if nxt is None:
return prv.quux
elif prv is None:
return nxt.bar + cur.bar
else:
return nxt.bar + prv.quux
table2 = addfieldusingcontext(table1, 'baz', upstream)
table3 = addfieldusingcontext(table2, 'quux', downstream)
ieq(expect, table3)
ieq(expect, table3)
def test_movefield():
table1 = (('foo', 'bar', 'baz'),
(1, 'A', True),
(2, 'B', False))
expect = (('bar', 'foo', 'baz'),
('A', 1, True),
('B', 2, False))
actual = movefield(table1, 'bar', 0)
ieq(expect, actual)
ieq(expect, actual)
actual = movefield(table1, 'foo', 1)
ieq(expect, actual)
ieq(expect, actual)
|
|
import functools
import json
import logging
import re
from io import BytesIO
from urllib.parse import parse_qs
from urllib.parse import urlparse
from pytest import fixture
from pytest import mark
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPHeaders
from traitlets.config import Config
from ..github import GitHubOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
"""Return a user model"""
return {
'email': 'dinosaurs@space',
'id': 5,
'login': username,
'name': 'Hoban Washburn',
}
@fixture
def github_client(client):
setup_oauth_mock(
client,
host=['github.com', 'api.github.com'],
access_token_path='/login/oauth/access_token',
user_path='/user',
token_type='token',
)
return client
async def test_github(github_client):
authenticator = GitHubOAuthenticator()
handler = github_client.handler_for_user(user_model('wash'))
user_info = await authenticator.authenticate(handler)
name = user_info['name']
assert name == 'wash'
auth_state = user_info['auth_state']
assert 'access_token' in auth_state
assert auth_state == {
'access_token': auth_state['access_token'],
'github_user': {
'email': 'dinosaurs@space',
'id': 5,
'login': name,
'name': 'Hoban Washburn',
},
}
def make_link_header(urlinfo, page):
return {
'Link': '<{}://{}{}?page={}>;rel="next"'.format(
urlinfo.scheme, urlinfo.netloc, urlinfo.path, page
)
}
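# For example (hypothetical call): with urlinfo parsed from
# "https://api.github.com/orgs/blue/members" and page=2, this returns
# {'Link': '<https://api.github.com/orgs/blue/members?page=2>;rel="next"'}.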
async def test_allowed_org_membership(github_client):
client = github_client
authenticator = GitHubOAuthenticator()
## Mock Github API
orgs = {
'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'],
'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'],
}
org_teams = {'blue': {'alpha': ['tucker', 'caboose', 'burns']}}
member_regex = re.compile(r'/orgs/(.*)/members')
def org_members(paginate, request):
urlinfo = urlparse(request.url)
org = member_regex.match(urlinfo.path).group(1)
if org not in orgs:
return HTTPResponse(request, 404)
if not paginate:
return [user_model(m) for m in orgs[org]]
else:
page = parse_qs(urlinfo.query).get('page', ['1'])
page = int(page[0])
return org_members_paginated(
org, page, urlinfo, functools.partial(HTTPResponse, request)
)
def org_members_paginated(org, page, urlinfo, response):
if page < len(orgs[org]):
headers = make_link_header(urlinfo, page + 1)
elif page == len(orgs[org]):
headers = {}
else:
return response(400)
headers.update({'Content-Type': 'application/json'})
ret = [user_model(orgs[org][page - 1])]
return response(
200,
headers=HTTPHeaders(headers),
buffer=BytesIO(json.dumps(ret).encode('utf-8')),
)
org_membership_regex = re.compile(r'/orgs/(.*)/members/(.*)')
def org_membership(request):
urlinfo = urlparse(request.url)
urlmatch = org_membership_regex.match(urlinfo.path)
org = urlmatch.group(1)
username = urlmatch.group(2)
print('Request org = %s, username = %s' % (org, username))
if org not in orgs:
print('Org not found: org = %s' % (org))
return HTTPResponse(request, 404)
if username not in orgs[org]:
print('Member not found: org = %s, username = %s' % (org, username))
return HTTPResponse(request, 404)
return HTTPResponse(request, 204)
team_membership_regex = re.compile(r'/orgs/(.*)/teams/(.*)/members/(.*)')
def team_membership(request):
urlinfo = urlparse(request.url)
urlmatch = team_membership_regex.match(urlinfo.path)
org = urlmatch.group(1)
team = urlmatch.group(2)
username = urlmatch.group(3)
print('Request org = %s, team = %s username = %s' % (org, team, username))
if org not in orgs:
print('Org not found: org = %s' % (org))
return HTTPResponse(request, 404)
if team not in org_teams[org]:
print('Team not found in org: team = %s, org = %s' % (team, org))
return HTTPResponse(request, 404)
if username not in org_teams[org][team]:
print(
'Member not found: org = %s, team = %s, username = %s'
% (org, team, username)
)
return HTTPResponse(request, 404)
return HTTPResponse(request, 204)
## Perform tests
for paginate in (False, True):
client_hosts = client.hosts['api.github.com']
client_hosts.append((team_membership_regex, team_membership))
client_hosts.append((org_membership_regex, org_membership))
client_hosts.append((member_regex, functools.partial(org_members, paginate)))
authenticator.allowed_organizations = ['blue']
handler = client.handler_for_user(user_model('caboose'))
user = await authenticator.authenticate(handler)
assert user['name'] == 'caboose'
handler = client.handler_for_user(user_model('donut'))
user = await authenticator.authenticate(handler)
assert user is None
# reverse it, just to be safe
authenticator.allowed_organizations = ['red']
handler = client.handler_for_user(user_model('caboose'))
user = await authenticator.authenticate(handler)
assert user is None
handler = client.handler_for_user(user_model('donut'))
user = await authenticator.authenticate(handler)
assert user['name'] == 'donut'
# test team membership
authenticator.allowed_organizations = ['blue:alpha', 'red']
handler = client.handler_for_user(user_model('tucker'))
user = await authenticator.authenticate(handler)
assert user['name'] == 'tucker'
handler = client.handler_for_user(user_model('grif'))
user = await authenticator.authenticate(handler)
assert user['name'] == 'grif'
handler = client.handler_for_user(user_model('texas'))
user = await authenticator.authenticate(handler)
assert user is None
client_hosts.pop()
client_hosts.pop()
@mark.parametrize(
"org, username, expected",
[
("blue", "texas", "https://api.github.com/orgs/blue/members/texas"),
(
"blue:alpha",
"tucker",
"https://api.github.com/orgs/blue/teams/alpha/members/tucker",
),
("red", "grif", "https://api.github.com/orgs/red/members/grif"),
],
)
def test_build_check_membership_url(org, username, expected):
output = GitHubOAuthenticator()._build_check_membership_url(org, username)
assert output == expected
def test_deprecated_config(caplog):
cfg = Config()
cfg.GitHubOAuthenticator.github_organization_whitelist = ["jupy"]
cfg.Authenticator.whitelist = {"user1"}
log = logging.getLogger("testlog")
authenticator = GitHubOAuthenticator(config=cfg, log=log)
assert (
log.name,
logging.WARNING,
'GitHubOAuthenticator.github_organization_whitelist is deprecated in GitHubOAuthenticator 0.12.0, use '
'GitHubOAuthenticator.allowed_organizations instead',
) in caplog.record_tuples
assert authenticator.allowed_organizations == {"jupy"}
assert authenticator.allowed_users == {"user1"}
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Most tags tests within their corresponding resource tags, we use this
module to test some universal tagging infrastructure not directly exposed.
"""
import time
from mock import MagicMock, call
from c7n.tags import universal_retry, coalesce_copy_user_tags
from c7n.exceptions import PolicyExecutionError, PolicyValidationError
from c7n.utils import yaml_load
from .common import BaseTest
class UniversalTagTest(BaseTest):
def test_auto_tag_registration(self):
try:
self.load_policy({
'name': 'sfn-auto',
'resource': 'step-machine',
'mode': {'type': 'cloudtrail',
'events': [{'ids': 'some', 'source': 'thing', 'event': 'wicked'}]},
'actions': [{'type': 'auto-tag-user', 'tag': 'creator'}]})
except Exception as e:
self.fail('auto-tag policy failed to load %s' % e)
def test_universal_augment_resource_missing_tags(self):
session_factory = self.replay_flight_data('test_tags_universal_augment_missing_tags')
cache_cluster_id = 'arn:aws:elasticache:us-east-1:644160558196:cluster:test'
client = session_factory().client('elasticache')
tags = client.list_tags_for_resource(ResourceName=cache_cluster_id)
self.assertEqual(len(tags['TagList']), 0)
policy = self.load_policy(
{
'name': 'elasticache-no-tags',
'resource': 'cache-cluster',
'filters': [
{'CacheClusterId': 'test'}
]
},
session_factory=session_factory
)
results = policy.run()
self.assertTrue('Tags' in results[0])
def test_retry_no_error(self):
mock = MagicMock()
mock.side_effect = [{"Result": 42}]
self.assertEqual(universal_retry(mock, []), {"Result": 42})
mock.assert_called_once()
def test_retry_failure_reduced_set(self):
sleep = MagicMock()
self.patch(time, "sleep", sleep)
method = MagicMock()
method.side_effect = [
{"FailedResourcesMap": {"arn:abc": {"ErrorCode": "ThrottlingException"}}},
{"Result": 32},
]
self.assertEqual(
universal_retry(method, ["arn:abc", "arn:def"]), {"Result": 32}
)
sleep.assert_called_once()
self.assertTrue(
method.call_args_list == [
call(ResourceARNList=["arn:abc", "arn:def"]),
call(ResourceARNList=["arn:abc"]),
]
)
def test_retry_pass_error(self):
method = MagicMock()
method.side_effect = [
{"FailedResourcesMap": {"arn:abc": {"ErrorCode": "PermissionDenied"}}}
]
self.assertRaises(Exception, universal_retry, method, ["arn:abc"])
class CoalesceCopyUserTags(BaseTest):
def test_copy_bool_user_tags(self):
tags = [{'Key': 'test-key', 'Value': 'test-value'}]
resource = {
'Tags': tags
}
copy_tags = True
user_tags = []
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, tags)
copy_tags = False
user_tags = {'test-key-1': 'test-value'}
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, [{'Key': 'test-key-1', 'Value': 'test-value'}])
def test_copy_list_user_tags(self):
tags = [
{
'Key': 'test-key-1',
'Value': 'test-value'
},
{
'Key': 'test-key',
'Value': 'test-value'
}
]
resource = {
'Tags': tags
}
copy_tags = ['test-key-1']
user_tags = []
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, [{'Key': 'test-key-1', 'Value': 'test-value'}])
def test_copy_asterisk_user_tags(self):
tags = [
{
'Key': 'test-key-1',
'Value': 'test-value'
},
{
'Key': 'test-key',
'Value': 'test-value'
}
]
resource = {
'Tags': tags
}
copy_tags = ['*']
user_tags = []
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, tags)
def test_empty_resource_tags(self):
resource = {}
copy_tags = ['test-key-1']
user_tags = {'user-key': 'test-value'}
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, [{'Key': 'user-key', 'Value': 'test-value'}])
def test_copy_user_tags_conflict(self):
tags = [
{
'Key': 'test-key-1',
'Value': 'test-value'
},
{
'Key': 'test-key',
'Value': 'test-value'
}
]
resource = {
'Tags': tags
}
copy_tags = ['*']
user_tags = [{'Key': 'test-key', 'Value': 'test-value-user'}]
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(len(final_tags), 2)
self.assertTrue({'Key': 'test-key-1', 'Value': 'test-value'} in final_tags)
self.assertTrue({'Key': 'test-key', 'Value': 'test-value-user'} in final_tags)
def test_empty_response(self):
resource = {}
user_tags = {}
copy_tags = []
final_tags = coalesce_copy_user_tags(resource, copy_tags, user_tags)
self.assertEqual(final_tags, [])
class CopyRelatedResourceTag(BaseTest):
def test_copy_related_resource_tag_all(self):
session_factory = self.replay_flight_data("test_tags_copy_related_resource_tags_all")
p = self.load_policy(
{
"name": "copy-related-resource-tags-snapshots-volumes",
"resource": "ebs-snapshot",
"filters": [
{
"Tags": "empty"
}
],
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"tags": "*"
}
]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client('ec2', 'us-east-1')
snap = client.describe_snapshots(SnapshotIds=[resources[0]['SnapshotId']])['Snapshots']
vol = client.describe_volumes(VolumeIds=[resources[0]['VolumeId']])['Volumes']
self.assertEqual(snap[0]['Tags'], vol[0]['Tags'])
def test_copy_related_resource_tag_partial(self):
session_factory = self.replay_flight_data("test_tags_copy_related_resource_tag_partial")
p = self.load_policy(
{
"name": "copy-related-resource-tags-snapshots-volumes",
"resource": "ebs-snapshot",
"filters": [
{
"Tags": "empty"
}
],
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"tags": [
"tag1",
"tag3"
]
}
]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client('ec2', 'us-east-1')
snap = client.describe_snapshots(SnapshotIds=[resources[0]['SnapshotId']])['Snapshots']
vol = client.describe_volumes(VolumeIds=[resources[0]['VolumeId']])['Volumes']
vol_tags = {t['Key']: t['Value'] for t in vol[0]['Tags']}
snap_tags = {t['Key']: t['Value'] for t in snap[0]['Tags']}
self.assertFalse(vol_tags == snap_tags)
self.assertEqual(snap_tags['tag1'], vol_tags['tag1'])
self.assertEqual(snap_tags['tag3'], vol_tags['tag3'])
self.assertTrue(vol_tags['tag2'])
self.assertFalse(snap_tags.get('tag2'))
def test_copy_related_resource_tag_missing(self):
session_factory = self.replay_flight_data("test_tags_copy_related_resource_tag_missing")
p = self.load_policy(
{
"name": "copy-related-resource-tags-snapshots-volumes",
"resource": "ebs-snapshot",
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"skip_missing": False,
"tags": [
"*"
]
}
]
},
session_factory=session_factory
)
with self.assertRaises(PolicyExecutionError):
p.run()
def test_copy_related_resource_tag_validate(self):
p = self.load_policy(
{
"name": "copy-related-resource-tags-snapshots-volumes",
"resource": "ebs-snapshot",
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"skip_missing": False,
"tags": [
"*"
]
}
]
}
)
self.assertFalse(p.validate())
policy = {
"name": "copy-related-resource-tags-snapshots-volumes",
"resource": "ebs-snapshot",
"actions": [
{
"type": "copy-related-tag",
"resource": "not-a-resource",
"key": "VolumeId",
"skip_missing": False,
"tags": [
"*"
]
}
]
}
self.assertRaises(PolicyValidationError, self.load_policy, policy)
def test_copy_related_tag_empty(self):
# check the case where the related expression doesn't return
# value.
output = self.capture_logging('custodian.actions')
session_factory = self.replay_flight_data(
'test_copy_related_resource_tag_empty')
client = session_factory().client('ec2')
p = self.load_policy({
'name': 'copy-related-ec2',
'resource': 'aws.eni',
'actions': [{
'type': 'copy-related-tag',
'resource': 'ec2',
'skip_missing': True,
'key': 'Attachment.InstanceId',
'tags': '*'}]},
session_factory=session_factory)
p.run()
if self.recording:
time.sleep(3)
nics = client.describe_network_interfaces(
NetworkInterfaceIds=['eni-0e1324ba169ed7b2f'])['NetworkInterfaces']
self.assertEqual(
nics[0]['TagSet'],
[{'Key': 'Env', 'Value': 'Dev'},
{'Key': 'Origin', 'Value': 'Home'}])
self.assertEqual(
output.getvalue().strip(),
'Tagged 1 resources from related, missing-skipped 1 unchanged 0')
def test_copy_related_resource_tag_multi_ref(self):
session_factory = self.replay_flight_data('test_copy_related_resource_tag_multi_ref')
client = session_factory().client('ec2')
result = client.describe_volumes()['Volumes']
self.assertEqual(len(result), 1)
vol = result[0]
self.assertEqual(vol['Tags'], [{'Key': 'test', 'Value': 'test'}])
policy = """
name: copy-tags-from-ebs-volume-to-snapshot
resource: ebs-snapshot
filters:
- type: value
key: Tags
value: empty
actions:
- type: copy-related-tag
resource: ebs
skip_missing: True
key: VolumeId
tags: '*'
"""
p = self.load_policy(yaml_load(policy), session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 3)
if self.recording:
time.sleep(10)
all_snaps = client.describe_snapshots(OwnerIds=['self'])['Snapshots']
self.assertEqual(len(all_snaps), 3)
tagged_snaps = [e for e in all_snaps if e['VolumeId'] == vol['VolumeId']]
untagged_snaps = [e for e in all_snaps if e['VolumeId'] != vol['VolumeId']]
self.assertEqual(len(tagged_snaps), 2)
self.assertEqual(tagged_snaps[0]['Tags'], vol['Tags'])
self.assertEqual(tagged_snaps[1]['Tags'], vol['Tags'])
self.assertEqual(len(untagged_snaps), 1)
self.assertTrue('Tags' not in untagged_snaps[0].keys())
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
import memcheck_analyze
import tsan_analyze
class BaseTool(object):
"""Abstract class for running Valgrind-, PIN-based and other dynamic
error detector tools.
Always subclass this and implement ToolCommand with framework- and
tool-specific stuff.
"""
def __init__(self):
temp_parent_dir = None
self.log_parent_dir = ""
if common.IsWindows():
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
#
# TODO(bruening): if scripts die in middle and don't clean up temp
# dir, we'll accumulate files in profile dir. should remove
# really old files automatically.
profile = os.getenv("USERPROFILE")
if profile:
self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
if os.path.exists(self.log_parent_dir):
self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
temp_parent_dir = self.log_parent_dir
# Generated every time (even when overridden)
self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
self.log_dir = self.temp_dir # overridable by --keep_logs
self.option_parser_hooks = []
# TODO(glider): we may not need some of the env vars on some of the
# platforms.
self._env = {
"G_SLICE" : "always-malloc",
"NSS_DISABLE_UNLOAD" : "1",
"NSS_DISABLE_ARENA_FREE_LIST" : "1",
"GTEST_DEATH_TEST_USE_FORK": "1",
}
def ToolName(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Analyze(self, check_sanity=False):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def RegisterOptionParserHook(self, hook):
# Frameworks and tools can add their own flags to the parser.
self.option_parser_hooks.append(hook)
def CreateOptionParser(self):
# Defines Chromium-specific flags.
self._parser = optparse.OptionParser("usage: %prog [options] <program to "
"test>")
self._parser.disable_interspersed_args()
self._parser.add_option("-t", "--timeout",
dest="timeout", metavar="TIMEOUT", default=10000,
help="timeout in seconds for the run (default 10000)")
self._parser.add_option("", "--build_dir",
help="the location of the compiler output")
self._parser.add_option("", "--source_dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
self._parser.add_option("", "--gtest_filter", default="",
help="which test case to run")
self._parser.add_option("", "--gtest_repeat",
help="how many times to run each test")
self._parser.add_option("", "--gtest_print_time", action="store_true",
default=False,
help="show how long each test takes")
self._parser.add_option("", "--ignore_exit_code", action="store_true",
default=False,
help="ignore exit code of the test "
"(e.g. test failures)")
self._parser.add_option("", "--keep_logs", action="store_true",
default=False,
help="store memory tool logs in the <tool>.logs "
"directory instead of /tmp.\nThis can be "
"useful for tool developers/maintainers.\n"
"Please note that the <tool>.logs directory "
"will be clobbered on tool startup.")
# To add framework- or tool-specific flags, please add a hook using
# RegisterOptionParserHook in the corresponding subclass.
# See ValgrindTool and ThreadSanitizerBase for examples.
for hook in self.option_parser_hooks:
hook(self, self._parser)
def ParseArgv(self, args):
self.CreateOptionParser()
# self._tool_flags will store those tool flags which we don't parse
# manually in this script.
self._tool_flags = []
known_args = []
""" We assume that the first argument not starting with "-" is a program
name and all the following flags should be passed to the program.
TODO(timurrrr): customize optparse instead
"""
while len(args) > 0 and args[0][:1] == "-":
arg = args[0]
if (arg == "--"):
break
if self._parser.has_option(arg.split("=")[0]):
known_args += [arg]
else:
self._tool_flags += [arg]
args = args[1:]
if len(args) > 0:
known_args += args
self._options, self._args = self._parser.parse_args(known_args)
self._timeout = int(self._options.timeout)
self._source_dir = self._options.source_dir
if self._options.keep_logs:
# log_parent_dir has trailing slash if non-empty
self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
os.mkdir(self.log_dir)
logging.info("Logs are in " + self.log_dir)
self._ignore_exit_code = self._options.ignore_exit_code
if self._options.gtest_filter != "":
self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
if self._options.gtest_repeat:
self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_print_time:
self._args.append("--gtest_print_time")
return True
def Setup(self, args):
return self.ParseArgv(args)
def ToolCommand(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Cleanup(self):
# You may override it in the tool-specific subclass
pass
def Execute(self):
""" Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc."""
logging.info("starting execution...")
proc = self.ToolCommand()
for var in self._env:
common.PutEnvAndLog(var, self._env[var])
return common.RunSubprocess(proc, self._timeout)
def RunTestsAndAnalyze(self, check_sanity):
exec_retcode = self.Execute()
analyze_retcode = self.Analyze(check_sanity)
if analyze_retcode:
logging.error("Analyze failed.")
logging.info("Search the log for '[ERROR]' to see the error reports.")
return analyze_retcode
if exec_retcode:
if self._ignore_exit_code:
logging.info("Test execution failed, but the exit code is ignored.")
else:
logging.error("Test execution failed.")
return exec_retcode
else:
logging.info("Test execution completed successfully.")
if not analyze_retcode:
logging.info("Analysis completed successfully.")
return 0
def Main(self, args, check_sanity, min_runtime_in_seconds):
"""Call this to run through the whole process: Setup, Execute, Analyze"""
start_time = datetime.datetime.now()
retcode = -1
if self.Setup(args):
retcode = self.RunTestsAndAnalyze(check_sanity)
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.Cleanup()
else:
logging.error("Setup failed")
end_time = datetime.datetime.now()
runtime_in_seconds = (end_time - start_time).seconds
hours = runtime_in_seconds / 3600
seconds = runtime_in_seconds % 3600
minutes = seconds / 60
seconds = seconds % 60
logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
if (min_runtime_in_seconds > 0 and
runtime_in_seconds < min_runtime_in_seconds):
logging.error("Layout tests finished too quickly. "
"It should have taken at least %d seconds. "
"Something went wrong?" % min_runtime_in_seconds)
retcode = -1
return retcode
def Run(self, args, module, min_runtime_in_seconds=0):
MODULES_TO_SANITY_CHECK = ["base"]
# TODO(timurrrr): this is a temporary workaround for http://crbug.com/47844
if self.ToolName() == "tsan" and common.IsMac():
MODULES_TO_SANITY_CHECK = []
check_sanity = module in MODULES_TO_SANITY_CHECK
return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def __init__(self):
super(ValgrindTool, self).__init__()
self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
def UseXML(self):
# Override if tool prefers nonxml output
return True
def SelfContained(self):
    # Returns true iff the tool is distributed as a self-contained
# .sh script (e.g. ThreadSanitizer)
return False
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a valgrind suppression file")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running valgrind directly")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running Dr. Memory "
"directly.")
parser.add_option("", "--trace_children", action="store_true",
default=False,
help="also trace child processes")
parser.add_option("", "--num-callers",
dest="num_callers", default=30,
help="number of callers to show in stack traces")
parser.add_option("", "--generate_dsym", action="store_true",
default=False,
help="Generate .dSYM file on Mac if needed. Slow!")
def Setup(self, args):
if not BaseTool.Setup(self, args):
return False
if common.IsMac():
self.PrepareForTestMac()
return True
def PrepareForTestMac(self):
"""Runs dsymutil if needed.
Valgrind for Mac OS X requires that debugging information be in a .dSYM
bundle generated by dsymutil. It is not currently able to chase DWARF
data into .o files like gdb does, so executables without .dSYM bundles or
with the Chromium-specific "fake_dsym" bundles generated by
build/mac/strip_save_dsym won't give source file and line number
information in valgrind.
This function will run dsymutil if the .dSYM bundle is missing or if
it looks like a fake_dsym. A non-fake dsym that already exists is assumed
to be up-to-date.
"""
test_command = self._args[0]
dsym_bundle = self._args[0] + '.dSYM'
dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
os.path.basename(test_command))
dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
needs_dsymutil = True
saved_test_command = None
if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
# Look for the special fake_dsym tag in dsym_info_plist.
dsym_info_plist_contents = open(dsym_info_plist).read()
if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
re.MULTILINE):
# fake_dsym is not set, this is a real .dSYM bundle produced by
# dsymutil. dsymutil does not need to be run again.
needs_dsymutil = False
else:
# fake_dsym is set. dsym_file is a copy of the original test_command
# before it was stripped. Copy it back to test_command so that
# dsymutil has unstripped input to work with. Move the stripped
# test_command out of the way, it will be restored when this is
# done.
saved_test_command = test_command + '.stripped'
os.rename(test_command, saved_test_command)
shutil.copyfile(dsym_file, test_command)
shutil.copymode(saved_test_command, test_command)
if needs_dsymutil:
if self._options.generate_dsym:
# Remove the .dSYM bundle if it exists.
shutil.rmtree(dsym_bundle, True)
dsymutil_command = ['dsymutil', test_command]
# dsymutil is crazy slow. Ideally we'd have a timeout here,
# but common.RunSubprocess' timeout is only checked
# after each line of output; dsymutil is silent
# until the end, and is then killed, which is silly.
common.RunSubprocess(dsymutil_command)
if saved_test_command:
os.rename(saved_test_command, test_command)
else:
logging.info("No real .dSYM for test_command. Line numbers will "
"not be shown. Either tell xcode to generate .dSYM "
"file, or use --generate_dsym option to this tool.")
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if self.SelfContained():
proc = ["valgrind-%s.sh" % tool_name]
else:
proc = ["valgrind", "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % int(self._options.num_callers)]
if self._options.trace_children:
proc += ["--trace-children=yes"]
proc += ["--trace-children-skip='*dbus-daemon*'"]
proc += ["--trace-children-skip='*dbus-launch*'"]
proc += ["--trace-children-skip='*perl*'"]
proc += ["--trace-children-skip='*python*'"]
proc += self.ToolSpecificFlags()
proc += self._tool_flags
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
if self.UseXML():
proc += ["--xml=yes", "--xml-file=" + logfilename]
else:
proc += ["--log-file=" + logfilename]
# The Valgrind command is constructed.
# Handle --indirect_webkit_layout separately.
if self._options.indirect_webkit_layout:
# Need to create the wrapper before modifying |proc|.
wrapper = self.CreateBrowserWrapper(proc, webkit=True)
proc = self._args
proc.append("--wrapper")
proc.append(wrapper)
return proc
# Valgrind doesn't play nice with the Chrome sandbox. Empty this env var
# set by runtest.py to disable the sandbox.
if os.environ.get("CHROME_DEVEL_SANDBOX", None):
logging.info("Removing CHROME_DEVEL_SANDBOX fron environment")
os.environ["CHROME_DEVEL_SANDBOX"] = ''
if self._options.indirect:
wrapper = self.CreateBrowserWrapper(proc)
os.environ["BROWSER_WRAPPER"] = wrapper
logging.info('export BROWSER_WRAPPER=' + wrapper)
proc = []
proc += self._args
return proc
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def CreateBrowserWrapper(self, proc, webkit=False):
"""The program being run invokes Python or something else that can't stand
to be valgrinded, and also invokes the Chrome browser. In this case, use a
magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
Returns the path to the wrapper. It's up to the caller to use the wrapper
appropriately.
"""
command = " ".join(proc)
# Add the PID of the browser wrapper to the logfile names so we can
# separate log files for different UI tests at the analyze stage.
command = command.replace("%p", "$$.%p")
(fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
prefix="browser_wrapper.",
text=True)
f = os.fdopen(fd, "w")
f.write('#!/bin/bash\n'
'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
f.write('DIR=`dirname $0`\n'
'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
if webkit:
# Webkit layout_tests pass the URL as the first line of stdin.
f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
else:
# Try to get the test case name by looking at the program arguments.
# i.e. Chromium ui_tests used --test-name arg.
# TODO(timurrrr): This doesn't handle "--test-name Test.Name"
# TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
# wrapper now? browser_tests? What do they do?
f.write('for arg in $@\ndo\n'
' if [[ "$arg" =~ --test-name=(.*) ]]\n then\n'
' echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
' fi\n'
'done\n\n'
'%s "$@"\n' % command)
f.close()
os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
return indirect_fname
def CreateAnalyzer(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def GetAnalyzeResults(self, check_sanity=False):
# Glob all the files in the log directory
filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
    # If we have a browser wrapper, the logfiles are named as
# "toolname.wrapper_PID.valgrind_PID".
# Let's extract the list of wrapper_PIDs and name it ppids
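    # e.g. (illustrative) ".../memcheck.1234.5678" -> wrapper PID 1234; plain
    # ".../memcheck.5678" files written without a wrapper do not match the
    # regexp below and therefore contribute no ppid.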
ppids = set([int(f.split(".")[-2]) \
for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
analyzer = self.CreateAnalyzer()
if len(ppids) == 0:
# Fast path - no browser wrapper was set.
return analyzer.Report(filenames, None, check_sanity)
ret = 0
for ppid in ppids:
testcase_name = None
try:
f = open(self.log_dir + ("/testcase.%d.name" % ppid))
testcase_name = f.read().strip()
f.close()
wk_layout_prefix="third_party/WebKit/LayoutTests/"
wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
if wk_prefix_at != -1:
testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
except IOError:
pass
print "====================================================="
print " Below is the report for valgrind wrapper PID=%d." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
print " You can find the corresponding test"
print " by searching the above log for 'PID=%d'" % ppid
sys.stdout.flush()
ppid_filenames = [f for f in filenames \
if re.search("\.%d\.[0-9]+$" % ppid, f)]
# check_sanity won't work with browser wrappers
assert check_sanity == False
ret |= analyzer.Report(ppid_filenames, testcase_name)
print "====================================================="
sys.stdout.flush()
if ret != 0:
print ""
print "The Valgrind reports are grouped by test names."
print "Each test has its PID printed in the log when the test was run"
print "and at the beginning of its Valgrind report."
print "Hint: you can search for the reports by Ctrl+F -> `=#`"
sys.stdout.flush()
return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
"""Memcheck
Dynamic memory error detector for Linux & Mac
http://valgrind.org/info/tools.html#memcheck
"""
def __init__(self):
super(Memcheck, self).__init__()
self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)
def ToolName(self):
return "memcheck"
def ExtendOptionParser(self, parser):
parser.add_option("--leak-check", "--leak_check", type="string",
default="yes", # --leak-check=yes is equivalent of =full
help="perform leak checking at the end of the run")
parser.add_option("", "--show_all_leaks", action="store_true",
default=False,
help="also show less blatant leaks")
parser.add_option("", "--track_origins", action="store_true",
default=False,
help="Show whence uninitialized bytes came. 30% slower.")
def ToolSpecificFlags(self):
ret = ["--gen-suppressions=all", "--demangle=no"]
ret += ["--leak-check=%s" % self._options.leak_check]
if self._options.show_all_leaks:
ret += ["--show-reachable=yes"]
else:
ret += ["--show-possibly-lost=no"]
if self._options.track_origins:
ret += ["--track-origins=yes"]
# TODO(glider): this is a temporary workaround for http://crbug.com/51716
# Let's see whether it helps.
if common.IsMac():
ret += ["--smc-check=all"]
return ret
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
self._options.show_all_leaks,
use_gdb=use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-valgrind for the info on Memcheck/Valgrind")
return ret
class PinTool(BaseTool):
"""Abstract class for running PIN tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def PrepareForTest(self):
pass
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def ToolCommand(self):
"""Get the PIN command to run."""
# Construct the PIN command.
pin_cmd = os.getenv("PIN_COMMAND")
if not pin_cmd:
raise RuntimeError, "Please set PIN_COMMAND environment variable " \
"with the path to pin.exe"
proc = pin_cmd.split(" ")
proc += self.ToolSpecificFlags()
# The PIN command is constructed.
# PIN requires -- to separate PIN flags from the executable name.
# self._args begins with the exe to be run.
proc += ["--"]
proc += self._args
return proc
class ThreadSanitizerBase(object):
"""ThreadSanitizer
Dynamic data race detector for Linux, Mac and Windows.
http://code.google.com/p/data-race-test/wiki/ThreadSanitizer
Since TSan works on both Valgrind (Linux, Mac) and PIN (Windows), we need
to have multiple inheritance
"""
INFO_MESSAGE="Please see http://dev.chromium.org/developers/how-tos/" \
"using-valgrind/threadsanitizer for the info on " \
"ThreadSanitizer"
def __init__(self):
super(ThreadSanitizerBase, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerBase.ExtendOptionParser)
def ToolName(self):
return "tsan"
def UseXML(self):
return False
def SelfContained(self):
return True
def ExtendOptionParser(self, parser):
parser.add_option("", "--hybrid", default="no",
dest="hybrid",
help="Finds more data races, may give false positive "
"reports unless the code is annotated")
parser.add_option("", "--announce-threads", default="yes",
dest="announce_threads",
help="Show the the stack traces of thread creation")
parser.add_option("", "--free-is-write", default="no",
dest="free_is_write",
help="Treat free()/operator delete as memory write. "
"This helps finding more data races, but (currently) "
"this may give false positive reports on std::string "
"internals, see http://code.google.com/p/data-race-test"
"/issues/detail?id=40")
def EvalBoolFlag(self, flag_value):
if (flag_value in ["1", "true", "yes"]):
return True
elif (flag_value in ["0", "false", "no"]):
return False
raise RuntimeError, "Can't parse flag value (%s)" % flag_value
def ToolSpecificFlags(self):
ret = []
ignore_files = ["ignores.txt"]
for platform_suffix in common.PlatformNames():
ignore_files.append("ignores_%s.txt" % platform_suffix)
for ignore_file in ignore_files:
fullname = os.path.join(self._source_dir,
"tools", "valgrind", "tsan", ignore_file)
if os.path.exists(fullname):
fullname = common.NormalizeWindowsPath(fullname)
ret += ["--ignore=%s" % fullname]
# This should shorten filepaths for local builds.
ret += ["--file-prefix-to-cut=%s/" % self._source_dir]
# This should shorten filepaths on bots.
ret += ["--file-prefix-to-cut=build/src/"]
# This should shorten filepaths for functions intercepted in TSan.
ret += ["--file-prefix-to-cut=scripts/tsan/tsan/"]
ret += ["--gen-suppressions=true"]
if self.EvalBoolFlag(self._options.hybrid):
ret += ["--hybrid=yes"] # "no" is the default value for TSAN
if self.EvalBoolFlag(self._options.announce_threads):
ret += ["--announce-threads"]
if self.EvalBoolFlag(self._options.free_is_write):
ret += ["--free-is-write=yes"]
else:
ret += ["--free-is-write=no"]
# --show-pc flag is needed for parsing the error logs on Darwin.
if platform_suffix == 'mac':
ret += ["--show-pc=yes"]
ret += ["--show-pid=no"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
for bc in boring_callers:
ret += ["--cut_stack_below=%s" % bc]
return ret
class ThreadSanitizerPosix(ThreadSanitizerBase, ValgrindTool):
def ToolSpecificFlags(self):
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# The -v flag is needed for printing the list of used suppressions and
# obtaining addresses for loaded shared libraries on Mac.
proc += ["-v"]
return proc
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return tsan_analyze.TsanAnalyzer(self._source_dir, use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class ThreadSanitizerWindows(ThreadSanitizerBase, PinTool):
def __init__(self):
super(ThreadSanitizerWindows, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerWindows.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to TSan suppression file")
def ToolSpecificFlags(self):
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# On PIN, ThreadSanitizer has its own suppression mechanism
    # and --log-file flag which work exactly as they do on Valgrind.
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
suppression_file = common.NormalizeWindowsPath(suppression_file)
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + "/tsan.%p"
proc += ["--log-file=" + common.NormalizeWindowsPath(logfilename)]
# TODO(timurrrr): Add flags for Valgrind trace children analog when we
# start running complex tests (e.g. UI) under TSan/Win.
return proc
def Analyze(self, check_sanity=False):
filenames = glob.glob(self.log_dir + "/tsan.*")
analyzer = tsan_analyze.TsanAnalyzer(self._source_dir)
ret = analyzer.Report(filenames, None, check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
  It is not very mature at the moment; some things might not work properly.
"""
def __init__(self, full_mode, pattern_mode):
super(DrMemory, self).__init__()
self.full_mode = full_mode
self.pattern_mode = pattern_mode
self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
def ToolName(self):
return "drmemory"
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a drmemory suppression file")
parser.add_option("", "--follow_python", action="store_true",
default=False, dest="follow_python",
help="Monitor python child processes. If off, neither "
"python children nor any children of python children "
"will be monitored.")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running Dr. Memory directly on the harness")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running valgrind "
"directly.")
parser.add_option("", "--use_debug", action="store_true",
default=False, dest="use_debug",
help="Run Dr. Memory debug build")
parser.add_option("", "--trace_children", action="store_true",
default=True,
help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (http://code.google.com/p/drmemory/issues/detail?id=320) and
# tcmalloc (http://code.google.com/p/drmemory/issues/detail?id=314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # http://code.google.com/p/drmemory/issues/detail?id=540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (http://code.google.com/p/drmemory/issues/detail?id=334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
    symcache_dir = None
    if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
      # Anyway, if we omit -symcache_dir the -logdir's value is used, which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# DrM i#850/851: The new -callstack_use_top_fp_selectively has bugs.
proc += ["-no_callstack_use_top_fp_selectively"]
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect or self._options.indirect_webkit_layout:
# TODO(timurrrr): reuse for TSan on Windows
wrapper_path = os.path.join(self._source_dir,
"tools", "valgrind", "browser_wrapper_win.py")
wrapper = " ".join(["python", wrapper_path] + proc)
self.CreateBrowserWrapper(wrapper)
logging.info("browser wrapper = " + " ".join(proc))
if self._options.indirect_webkit_layout:
proc = self._args
# Layout tests want forward slashes.
wrapper = wrapper.replace('\\', '/')
proc += ["--wrapper", wrapper]
return proc
else:
proc = []
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
def Analyze(self, check_sanity=False):
# Use one analyzer for all the log files to avoid printing duplicate reports
#
# TODO(timurrrr): unify this with Valgrind and other tools when we have
# http://code.google.com/p/drmemory/issues/detail?id=684
analyzer = drmemory_analyze.DrMemoryAnalyzer()
ret = 0
if not self._options.indirect and not self._options.indirect_webkit_layout:
filenames = glob.glob(self.log_dir + "/*/results.txt")
ret = analyzer.Report(filenames, None, check_sanity)
else:
testcases = glob.glob(self.log_dir + "/testcase.*.logs")
      # If we have a browser wrapper, the per-test logdirs are named as
# "testcase.wrapper_PID.name".
# Let's extract the list of wrapper_PIDs and name it ppids.
# NOTE: ppids may contain '_', i.e. they are not ints!
ppids = set([f.split(".")[-2] for f in testcases])
for ppid in ppids:
testcase_name = None
try:
f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
testcase_name = f.read().strip()
f.close()
except IOError:
pass
print "====================================================="
print " Below is the report for drmemory wrapper PID=%s." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
# TODO(timurrrr): hm, the PID line is suppressed on Windows...
print " You can find the corresponding test"
print " by searching the above log for 'PID=%s'" % ppid
sys.stdout.flush()
ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
(self.log_dir, ppid))
ret |= analyzer.Report(ppid_filenames, testcase_name, False)
print "====================================================="
sys.stdout.flush()
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-drmemory for the info on Dr. Memory")
return ret
# RaceVerifier support. See
# http://code.google.com/p/data-race-test/wiki/RaceVerifier for more details.
class ThreadSanitizerRV1Analyzer(tsan_analyze.TsanAnalyzer):
""" TsanAnalyzer that saves race reports to a file. """
TMP_FILE = "rvlog.tmp"
def __init__(self, source_dir, use_gdb):
super(ThreadSanitizerRV1Analyzer, self).__init__(source_dir, use_gdb)
self.out = open(self.TMP_FILE, "w")
def Report(self, files, testcase, check_sanity=False):
reports = self.GetReports(files)
for report in reports:
print >>self.out, report
if len(reports) > 0:
logging.info("RaceVerifier pass 1 of 2, found %i reports" % len(reports))
return -1
return 0
def CloseOutputFile(self):
self.out.close()
class ThreadSanitizerRV1Mixin(object):
"""RaceVerifier first pass.
Runs ThreadSanitizer as usual, but hides race reports and collects them in a
temporary file"""
def __init__(self):
super(ThreadSanitizerRV1Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV1Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.set_defaults(hybrid="yes")
def CreateAnalyzer(self):
use_gdb = common.IsMac()
self.analyzer = ThreadSanitizerRV1Analyzer(self._source_dir, use_gdb)
return self.analyzer
def Cleanup(self):
super(ThreadSanitizerRV1Mixin, self).Cleanup()
self.analyzer.CloseOutputFile()
class ThreadSanitizerRV2Mixin(object):
"""RaceVerifier second pass."""
def __init__(self):
super(ThreadSanitizerRV2Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV2Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--race-verifier-sleep-ms",
dest="race_verifier_sleep_ms", default=10,
help="duration of RaceVerifier delays")
def ToolSpecificFlags(self):
proc = super(ThreadSanitizerRV2Mixin, self).ToolSpecificFlags()
proc += ['--race-verifier=%s' % ThreadSanitizerRV1Analyzer.TMP_FILE,
'--race-verifier-sleep-ms=%d' %
int(self._options.race_verifier_sleep_ms)]
return proc
def Cleanup(self):
super(ThreadSanitizerRV2Mixin, self).Cleanup()
os.unlink(ThreadSanitizerRV1Analyzer.TMP_FILE)
class ThreadSanitizerRV1Posix(ThreadSanitizerRV1Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV2Posix(ThreadSanitizerRV2Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV1Windows(ThreadSanitizerRV1Mixin,
ThreadSanitizerWindows):
pass
class ThreadSanitizerRV2Windows(ThreadSanitizerRV2Mixin,
ThreadSanitizerWindows):
pass
class RaceVerifier(object):
"""Runs tests under RaceVerifier/Valgrind."""
MORE_INFO_URL = "http://code.google.com/p/data-race-test/wiki/RaceVerifier"
def RV1Factory(self):
if common.IsWindows():
return ThreadSanitizerRV1Windows()
else:
return ThreadSanitizerRV1Posix()
def RV2Factory(self):
if common.IsWindows():
return ThreadSanitizerRV2Windows()
else:
return ThreadSanitizerRV2Posix()
def ToolName(self):
return "tsan"
def Main(self, args, check_sanity, min_runtime_in_seconds):
logging.info("Running a TSan + RaceVerifier test. For more information, " +
"see " + self.MORE_INFO_URL)
cmd1 = self.RV1Factory()
ret = cmd1.Main(args, check_sanity, min_runtime_in_seconds)
# Verify race reports, if there are any.
if ret == -1:
logging.info("Starting pass 2 of 2. Running the same binary in " +
"RaceVerifier mode to confirm possible race reports.")
logging.info("For more information, see " + self.MORE_INFO_URL)
cmd2 = self.RV2Factory()
ret = cmd2.Main(args, check_sanity, min_runtime_in_seconds)
else:
logging.info("No reports, skipping RaceVerifier second pass")
logging.info("Please see " + self.MORE_INFO_URL + " for more information " +
"on RaceVerifier")
return ret
def Run(self, args, module, min_runtime_in_seconds=0):
return self.Main(args, False, min_runtime_in_seconds)
class EmbeddedTool(BaseTool):
"""Abstract class for tools embedded directly into the test binary.
"""
# TODO(glider): need to override Execute() and support process chaining here.
def ToolCommand(self):
# In the simplest case just the args of the script.
return self._args
class Asan(EmbeddedTool):
"""AddressSanitizer, a memory error detector.
More information at
http://dev.chromium.org/developers/testing/addresssanitizer
"""
def __init__(self):
super(Asan, self).__init__()
self._timeout = 1200
if common.IsMac():
self._env["DYLD_NO_PIE"] = "1"
def ToolName(self):
return "asan"
def ToolCommand(self):
# TODO(glider): use pipes instead of the ugly wrapper here once they
# are supported.
procs = [os.path.join(self._source_dir, "tools", "valgrind",
"asan", "asan_wrapper.sh")]
procs.extend(self._args)
return procs
  def Analyze(self, unused_check_sanity):
return 0
class TsanGcc(EmbeddedTool):
"""ThreadSanitizer with compile-time instrumentation done using GCC.
More information at
code.google.com/p/data-race-test/wiki/GccInstrumentation
"""
def __init__(self):
super(TsanGcc, self).__init__()
self.RegisterOptionParserHook(TsanGcc.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to TSan suppression file")
def Setup(self, args):
if not super(TsanGcc, self).Setup(args):
return False
ld_library_paths = []
for tail in "lib32", "lib64":
ld_library_paths.append(
os.path.join(self._source_dir, "third_party",
"compiler-tsan", "gcc-current", tail))
    # LD_LIBRARY_PATH will be overridden.
self._env["LD_LIBRARY_PATH"] = ":".join(ld_library_paths)
# TODO(glider): this is a temporary solution until Analyze is implemented.
env_options = ["--error-exitcode=1"]
# TODO(glider): merge this with other TSan suppressions code.
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
env_options += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
self._env["TSAN_ARGS"] = " ".join(env_options)
return True
def ToolName(self):
return "tsan"
def Analyze(self, unused_check_sanity):
# TODO(glider): this should use tsan_analyze.TsanAnalyzer. As a temporary
# solution we set the exit code to 1 when a report occurs, because TSan-GCC
# does not support the --log-file flag yet.
return 0
class ToolFactory:
def Create(self, tool_name):
if tool_name == "memcheck":
return Memcheck()
if tool_name == "tsan":
if common.IsWindows():
return ThreadSanitizerWindows()
else:
return ThreadSanitizerPosix()
if tool_name == "drmemory" or tool_name == "drmemory_light":
# TODO(timurrrr): remove support for "drmemory" when buildbots are
# switched to drmemory_light OR make drmemory==drmemory_full the default
# mode when the tool is mature enough.
return DrMemory(False, False)
if tool_name == "drmemory_full":
return DrMemory(True, False)
if tool_name == "drmemory_pattern":
return DrMemory(False, True)
if tool_name == "tsan_rv":
return RaceVerifier()
if tool_name == "tsan_gcc":
return TsanGcc()
if tool_name == "asan":
return Asan()
try:
platform_name = common.PlatformNames()[0]
except common.NotImplementedError:
platform_name = sys.platform + "(Unknown)"
raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
platform_name)
def CreateTool(tool):
return ToolFactory().Create(tool)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoleInstancesOperations(object):
"""RoleInstancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~hybrid_network_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _start_initial(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/start'} # type: ignore
def begin_start(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts a role instance of a vendor network function.
:param location_name: The Azure region where the network function resource was created by
customer.
:type location_name: str
:param vendor_name: The name of the vendor.
:type vendor_name: str
:param service_key: The GUID for the vendor network function.
:type service_key: str
:param role_instance_name: The name of the role instance of the vendor network function.
:type role_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
location_name=location_name,
vendor_name=vendor_name,
service_key=service_key,
role_instance_name=role_instance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/start'} # type: ignore
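    # Usage sketch (illustrative only; building the service client and the
    # attribute name used to reach this operation group are assumptions, and
    # the argument values are placeholders):
    #
    #   poller = client.role_instances.begin_start(
    #       location_name="eastus",
    #       vendor_name="myVendor",
    #       service_key="<service key GUID>",
    #       role_instance_name="role1",
    #   )
    #   poller.result()  # block until the long-running operation completes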
def _stop_initial(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/stop'} # type: ignore
def begin_stop(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Powers off (stop) a role instance of a vendor network function.
:param location_name: The Azure region where the network function resource was created by
customer.
:type location_name: str
:param vendor_name: The name of the vendor.
:type vendor_name: str
:param service_key: The GUID for the vendor network function.
:type service_key: str
:param role_instance_name: The name of the role instance of the vendor network function.
:type role_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
location_name=location_name,
vendor_name=vendor_name,
service_key=service_key,
role_instance_name=role_instance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/stop'} # type: ignore
def _restart_initial(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._restart_initial.metadata['url'] # type: ignore
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/restart'} # type: ignore
def begin_restart(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Restarts a role instance of a vendor network function.
:param location_name: The Azure region where the network function resource was created by
customer.
:type location_name: str
:param vendor_name: The name of the vendor.
:type vendor_name: str
:param service_key: The GUID for the vendor network function.
:type service_key: str
:param role_instance_name: The name of the role instance of the vendor network function.
:type role_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._restart_initial(
location_name=location_name,
vendor_name=vendor_name,
service_key=service_key,
role_instance_name=role_instance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}/restart'} # type: ignore
def get(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
role_instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RoleInstance"
"""Gets the information of role instance of vendor network function.
:param location_name: The Azure region where the network function resource was created by
customer.
:type location_name: str
:param vendor_name: The name of the vendor.
:type vendor_name: str
:param service_key: The GUID for the vendor network function.
:type service_key: str
:param role_instance_name: The name of the role instance of the vendor network function.
:type role_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleInstance, or the result of cls(response)
:rtype: ~hybrid_network_management_client.models.RoleInstance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleInstance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleInstance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances/{roleInstanceName}'} # type: ignore
def list(
self,
location_name, # type: str
vendor_name, # type: str
service_key, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkFunctionRoleInstanceListResult"]
"""Lists the information of role instances of vendor network function.
:param location_name: The Azure region where the network function resource was created by
customer.
:type location_name: str
:param vendor_name: The name of the vendor.
:type vendor_name: str
:param service_key: The GUID for the vendor network function.
:type service_key: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkFunctionRoleInstanceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~hybrid_network_management_client.models.NetworkFunctionRoleInstanceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkFunctionRoleInstanceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'locationName': self._serialize.url("location_name", location_name, 'str'),
'vendorName': self._serialize.url("vendor_name", vendor_name, 'str'),
'serviceKey': self._serialize.url("service_key", service_key, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkFunctionRoleInstanceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}/roleInstances'} # type: ignore
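    # Illustrative consumption sketch for the paged operation above (the client object
    # and argument values below are hypothetical, not part of this module):
    #
    #     for role_instance in operations.list(location_name="eastus",
    #                                          vendor_name="contoso",
    #                                          service_key="<service-guid>"):
    #         print(role_instance.name)
    #
    # ItemPaged drives get_next/extract_data internally, following next_link until the
    # service stops returning one.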
|
|
import unittest
from dag import DfsNode, Dag, DagNode
#
# TestDfsNode
#
class TestDfsNode( unittest.TestCase ):
def test_init( self ):
node = DfsNode()
self.assertEqual( node.getColor(), DfsNode.White )
self.assertEqual( node.getPreVisit(), -1 )
self.assertEqual( node.getPostVisit(), -1 )
def test_preVisit( self ):
node = DfsNode()
node.setPreVisit( 11 )
self.assertEqual( node.getPreVisit(), 11 )
def test_postVisit( self ):
node = DfsNode()
node.setPostVisit( 11 )
self.assertEqual( node.getPostVisit(), 11 )
def test_pre_post( self ):
node = DfsNode()
node.setPreVisit( 11 )
node.setPostVisit( 22 )
self.assertEqual( node.getPreVisit(), 11 )
self.assertEqual( node.getPostVisit(), 22 )
def test_pre_post_raise( self ):
node = DfsNode()
node.setPreVisit( 11 )
self.assertRaises( Exception, node.setPostVisit, 10 )
self.assertRaises( Exception, node.setPostVisit, 11 )
#
# TestDagNode
#
class TestDagNode( unittest.TestCase ):
def test_init( self ):
node = DagNode( 11 )
self.assertEqual( node.getData(), 11 )
self.assertEqual( len( node.getChildren() ), 0 )
self.assertEqual( len( node.getParents() ), 0 )
def test_equal( self ):
a11 = DagNode( 11 )
b11 = DagNode( 11 )
a22 = DagNode( 22 )
b22 = DagNode( 22 )
self.assertTrue( a11 == b11 )
self.assertTrue( a22 == b22 )
self.assertFalse( a11 == a22 )
self.assertFalse( b11 == b22 )
def test_addChild( self ):
node1 = DagNode( 11 )
node2 = DagNode( 22 )
node1.addChild( node2 )
self.assertTrue( node2 in node1.getChildren() )
node2.addChild( node1 )
self.assertTrue( node1 in node2.getChildren() )
def test_addParent( self ):
node1 = DagNode( 11 )
node2 = DagNode( 22 )
node1.addParent( node2 )
self.assertTrue( node2 in node1.getParents() )
node2.addParent( node1 )
self.assertTrue( node1 in node2.getParents() )
def test_isRoot( self ):
node1 = DagNode( 11 )
node2 = DagNode( 22 )
node3 = DagNode( 33 )
self.assertTrue( node1.isRoot() )
node1.addChild( node2 )
self.assertTrue( node1.isRoot() )
node3.addChild( node1 )
self.assertTrue( node1.isRoot() )
node1.addParent( node3 )
self.assertFalse( node1.isRoot() )
def test_isLeaf( self ):
node1 = DagNode( 11 )
node2 = DagNode( 22 )
node3 = DagNode( 33 )
self.assertTrue( node1.isLeaf() )
self.assertTrue( node2.isLeaf() )
self.assertTrue( node3.isLeaf() )
node1.addChild( node2 )
self.assertFalse( node1.isLeaf() )
self.assertTrue( node2.isLeaf() )
node3.addChild( node1 )
self.assertFalse( node1.isLeaf() )
def test_setColorRecursively( self ):
node1 = DagNode( 11 )
node2 = DagNode( 22 )
node3 = DagNode( 33 )
        node1.addChild( node2 )
        node2.addChild( node3 )
self.assertEqual( node1.getColor(), DfsNode.White )
self.assertEqual( node2.getColor(), DfsNode.White )
self.assertEqual( node3.getColor(), DfsNode.White )
node1.setColorRecursively( DfsNode.Black )
self.assertEqual( node1.getColor(), DfsNode.Black )
self.assertEqual( node2.getColor(), DfsNode.Black )
self.assertEqual( node3.getColor(), DfsNode.Black )
#
# DAGTest
#
class DAGTest( unittest.TestCase ):
def test_add_raise( self ):
dag = Dag()
self.assertRaises( Exception, dag.add, -1, "filename" )
self.assertRaises( Exception, dag.add, 2, "filename" )
self.assertRaises( Exception, dag.add, 3, "filename" )
dag.add( 1, "filename" )
self.assertRaises( Exception, dag.add, 3, "filename" )
self.assertRaises( Exception, dag.add, 4, "filename" )
def test_add_1( self ):
dag = Dag()
# filename_1_1
# filename_2_1
# filename_3_1
# filename_2_2
# filename_3_2
# filename_1_2
filename_1_1 = DagNode( "filename_1_1" )
filename_2_1 = DagNode( "filename_2_1" )
filename_3_1 = DagNode( "filename_3_1" )
filename_2_2 = DagNode( "filename_2_2" )
filename_3_2 = DagNode( "filename_3_2" )
filename_1_2 = DagNode( "filename_1_2" )
dag.add( 1, "filename_1_1" )
dag.add( 2, "filename_2_1" )
dag.add( 3, "filename_3_1" )
dag.add( 2, "filename_2_2" )
dag.add( 3, "filename_3_2" )
dag.add( 1, "filename_1_2" )
self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1, filename_1_2 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_2_2 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_1_2" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_1_2" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_3_1, ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ dag.get( "filename_1_1" ) ] ) )
self.assertEqual( dag.get( "filename_2_2" ).getChildren(), set( [ filename_3_2, ] ) )
self.assertEqual( dag.get( "filename_2_2" ).getParents(), set( [ dag.get( "filename_1_1" ) ] ) )
self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ dag.get( "filename_2_1" ) ] ) )
self.assertEqual( dag.get( "filename_3_2" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_3_2" ).getParents(), set( [ dag.get( "filename_2_2" ) ] ) )
def test_add_2( self ):
dag = Dag()
# filename_1_1
# filename_2_1
# filename_leaf
# filename_leaf
filename_1_1 = DagNode( "filename_1_1" )
filename_2_1 = DagNode( "filename_2_1" )
filename_leaf = DagNode( "filename_leaf" )
dag.add( 1, "filename_1_1" )
dag.add( 2, "filename_2_1" )
dag.add( 3, "filename_leaf" )
dag.add( 1, "filename_leaf" )
self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1, filename_leaf ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_leaf, ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
self.assertEqual( dag.get( "filename_leaf" ).getChildren(), set() )
        self.assertEqual(
            dag.get( "filename_leaf" ).getParents(),
            set( [ filename_2_1, dag.getRoot() ] )
        )
def test_add_3( self ):
dag = Dag()
# filename_1_1
# filename_2_1
# filename_3_1
# filename_2_2
# filename_3_1
filename_1_1 = DagNode( "filename_1_1" )
filename_2_1 = DagNode( "filename_2_1" )
filename_3_1 = DagNode( "filename_3_1" )
filename_2_2 = DagNode( "filename_2_2" )
dag.add( 1, "filename_1_1" )
dag.add( 2, "filename_2_1" )
dag.add( 3, "filename_3_1" )
dag.add( 2, "filename_2_2" )
dag.add( 3, "filename_3_1" )
self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_2_2 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_3_1, ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
self.assertEqual( dag.get( "filename_2_2" ).getChildren(), set( [ filename_3_1, ] ) )
self.assertEqual( dag.get( "filename_2_2" ).getParents(), set( [ filename_1_1 ] ) )
self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ filename_2_1, filename_2_2 ] ) )
def test_cycle( self ):
dag = Dag()
# filename_1_1
# filename_2_1
# filename_1_1
filename_1_1 = DagNode( "filename_1_1" )
filename_2_1 = DagNode( "filename_2_1" )
dag.add( 1, "filename_1_1" )
dag.add( 2, "filename_2_1" )
dag.add( 3, "filename_1_1" )
self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
def test_one_node_twice( self ):
dag = Dag()
# filename_1_1
# filename_2_1
# filename_1_1
# filename_3_1
filename_1_1 = DagNode( "filename_1_1" )
filename_2_1 = DagNode( "filename_2_1" )
filename_3_1 = DagNode( "filename_3_1" )
dag.add( 1, "filename_1_1" )
dag.add( 2, "filename_2_1" )
dag.add( 3, "filename_1_1" )
dag.add( 4, "filename_3_1" )
self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_3_1 ] ) )
self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ filename_1_1 ] ) )
#
# main
#
if __name__ == "__main__":
unittest.main()
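# Summary of the Dag.add contract exercised above (a reading aid, not part of the
# tests): add(level, name) must start at level 1 and may only descend one level at a
# time, otherwise it raises; level-1 names hang off the implicit root; re-adding an
# existing name links it under the current parent unless that would form a cycle
# (see test_cycle). A minimal sketch, assuming the same dag module API:
#
#     dag = Dag()
#     dag.add( 1, "a.h" )   # root -> a.h
#     dag.add( 2, "b.h" )   # a.h  -> b.h
#     dag.add( 1, "c.h" )   # root -> c.h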
|
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python2.7
''' main.py '''
import argparse
import sys
import time
import heron.common.src.python.utils.log as log
import heron.tools.common.src.python.utils.config as config
import heron.tools.explorer.src.python.args as parse
import heron.tools.explorer.src.python.clusters as clusters
# pylint: disable=redefined-builtin
import heron.tools.explorer.src.python.help as help
import heron.tools.explorer.src.python.logicalplan as logicalplan
import heron.tools.explorer.src.python.opts as opts
import heron.tools.explorer.src.python.physicalplan as physicalplan
import heron.tools.explorer.src.python.topologies as topologies
import heron.tools.explorer.src.python.version as version
Log = log.Log
# pylint: disable=bad-super-call
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
""" subcommand help message formatter """
def _format_action(self, action):
parts = super(argparse.RawDescriptionHelpFormatter,
self)._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
################################################################################
# Main parser
################################################################################
def create_parser():
""" create parser """
help_epilog = '''Getting more help:
  heron-explorer help <command> Display help and options for <command>\n
For detailed documentation, go to http://heronstreaming.io'''
parser = argparse.ArgumentParser(
prog='heron-explorer',
epilog=help_epilog,
formatter_class=SubcommandHelpFormatter,
add_help=False)
# sub-commands
subparsers = parser.add_subparsers(
title="Available commands",
metavar='<command> <options>')
# subparser for subcommands related to clusters
clusters.create_parser(subparsers)
# subparser for subcommands related to logical plan
logicalplan.create_parser(subparsers)
# subparser for subcommands related to physical plan
physicalplan.create_parser(subparsers)
# subparser for subcommands related to displaying info
topologies.create_parser(subparsers)
# subparser for help subcommand
help.create_parser(subparsers)
# subparser for version subcommand
version.create_parser(subparsers)
return parser
################################################################################
# Run the command
################################################################################
# pylint: disable=too-many-return-statements
def run(command, *args):
""" run command """
# show all clusters
if command == 'clusters':
return clusters.run(command, *args)
# show topologies
elif command == 'topologies':
return topologies.run(command, *args)
# physical plan
elif command == 'containers':
return physicalplan.run_containers(command, *args)
elif command == 'metrics':
return physicalplan.run_metrics(command, *args)
# logical plan
elif command == 'components':
return logicalplan.run_components(command, *args)
elif command == 'spouts':
return logicalplan.run_spouts(command, *args)
elif command == 'bolts':
return logicalplan.run_bolts(command, *args)
# help
elif command == 'help':
return help.run(command, *args)
# version
elif command == 'version':
return version.run(command, *args)
return 1
def extract_common_args(command, parser, cl_args):
""" extract common args """
try:
# do not pop like cli because ``topologies`` subcommand still needs it
cluster_role_env = cl_args['cluster/[role]/[env]']
config_path = cl_args['config_path']
except KeyError:
# if some of the arguments are not found, print error and exit
subparser = config.get_subparser(parser, command)
    print(subparser.format_help())
return dict()
cluster = config.get_heron_cluster(cluster_role_env)
config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
new_cl_args = dict()
try:
cluster_tuple = config.parse_cluster_role_env(cluster_role_env, config_path)
new_cl_args['cluster'] = cluster_tuple[0]
new_cl_args['role'] = cluster_tuple[1]
new_cl_args['environ'] = cluster_tuple[2]
new_cl_args['config_path'] = config_path
except Exception as e:
Log.error("Unable to get valid topology location: %s", str(e))
return dict()
cl_args.update(new_cl_args)
return cl_args
################################################################################
# Run the command
################################################################################
def main(args):
""" main """
# create the argument parser
parser = create_parser()
# if no argument is provided, print help and exit
if not args:
parser.print_help()
return 0
# insert the boolean values for some of the options
all_args = parse.insert_bool_values(args)
# parse the args
args, unknown_args = parser.parse_known_args(args=all_args)
command_line_args = vars(args)
command = command_line_args['subcommand']
if unknown_args:
Log.error('Unknown argument: %s', unknown_args[0])
# show help message
command_line_args['help-command'] = command
command = 'help'
if command not in ['help', 'version']:
opts.set_tracker_url(command_line_args)
log.set_logging_level(command_line_args)
if command not in ['topologies', 'clusters']:
command_line_args = extract_common_args(command, parser, command_line_args)
if not command_line_args:
return 1
Log.info("Using tracker URL: %s", command_line_args["tracker_url"])
# timing command execution
start = time.time()
ret = run(command, parser, command_line_args, unknown_args)
end = time.time()
if command != 'help':
sys.stdout.flush()
Log.info('Elapsed time: %.3fs.', (end - start))
return 0 if ret else 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
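# Illustrative invocations (a sketch only; exact arguments are defined by the
# subcommand modules imported above, and the cluster name "local" is hypothetical):
#
#     heron-explorer clusters
#     heron-explorer topologies local
#     heron-explorer help topologies
#
# main() dispatches each subcommand through run() and converts its result into a
# shell exit code.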
|
|
"""Terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import argparse
import datetime
import inspect
import platform
import sys
import warnings
from functools import partial
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TextIO
from typing import Tuple
from typing import Union
import attr
import pluggy
import py
import pytest
from _pytest import nodes
from _pytest import timing
from _pytest._code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._io.wcwidth import wcswidth
from _pytest.compat import order_preserving_dict
from _pytest.compat import TYPE_CHECKING
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import Path
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
REPORT_COLLECTING_RESOLUTION = 0.5
KNOWN_TYPES = (
"failed",
"passed",
"skipped",
"deselected",
"xfailed",
"xpassed",
"warnings",
"error",
)
_REPORTCHARS_DEFAULT = "fE"
class MoreQuietAction(argparse.Action):
"""A modified copy of the argparse count action which counts down and updates
the legacy quiet attribute at the same time.
Used to unify verbosity handling.
"""
def __init__(
self,
option_strings: Sequence[str],
dest: str,
default: object = None,
required: bool = False,
help: Optional[str] = None,
) -> None:
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[object], None],
option_string: Optional[str] = None,
) -> None:
new_count = getattr(namespace, self.dest, 0) - 1
setattr(namespace, self.dest, new_count)
# todo Deprecate config.quiet
namespace.quiet = getattr(namespace, "quiet", 0) + 1
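# For example (illustrative): passing "-q -q" invokes __call__ twice, leaving
# namespace.verbose at -2 while the legacy namespace.quiet counter ends up at 2, so
# code still reading config.option.quiet keeps working.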
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="increase verbosity.",
)
group._addoption(
"--no-header",
action="store_true",
default=False,
dest="no_header",
help="disable header",
)
group._addoption(
"--no-summary",
action="store_true",
default=False,
dest="no_summary",
help="disable summary",
)
group._addoption(
"-q",
"--quiet",
action=MoreQuietAction,
default=0,
dest="verbose",
help="decrease verbosity.",
)
group._addoption(
"--verbosity",
dest="verbose",
type=int,
default=0,
help="set verbosity. Default is 0.",
)
group._addoption(
"-r",
action="store",
dest="reportchars",
default=_REPORTCHARS_DEFAULT,
metavar="chars",
help="show extra test summary info as specified by chars: (f)ailed, "
"(E)rror, (s)kipped, (x)failed, (X)passed, "
"(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
"(w)arnings are enabled by default (see --disable-warnings), "
"'N' can be used to reset the list. (default: 'fE').",
)
group._addoption(
"--disable-warnings",
"--disable-pytest-warnings",
default=False,
dest="disable_warnings",
action="store_true",
help="disable warnings summary",
)
group._addoption(
"-l",
"--showlocals",
action="store_true",
dest="showlocals",
default=False,
help="show locals in tracebacks (disabled by default).",
)
group._addoption(
"--tb",
metavar="style",
action="store",
dest="tbstyle",
default="auto",
choices=["auto", "long", "short", "no", "line", "native"],
help="traceback print mode (auto/long/short/line/native/no).",
)
group._addoption(
"--show-capture",
action="store",
dest="showcapture",
choices=["no", "stdout", "stderr", "log", "all"],
default="all",
help="Controls how captured stdout/stderr/log is shown on failed tests. "
"Default is 'all'.",
)
group._addoption(
"--fulltrace",
"--full-trace",
action="store_true",
default=False,
help="don't cut any tracebacks (default is to cut).",
)
group._addoption(
"--color",
metavar="color",
action="store",
dest="color",
default="auto",
choices=["yes", "no", "auto"],
help="color terminal output (yes/no/auto).",
)
group._addoption(
"--code-highlight",
default="yes",
choices=["yes", "no"],
help="Whether code should be highlighted (only if --color is also enabled)",
)
parser.addini(
"console_output_style",
help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
default="progress",
)
def pytest_configure(config: Config) -> None:
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, "terminalreporter")
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config: Config) -> str:
reportchars = config.option.reportchars # type: str
old_aliases = {"F", "S"}
reportopts = ""
for char in reportchars:
if char in old_aliases:
char = char.lower()
if char == "a":
reportopts = "sxXEf"
elif char == "A":
reportopts = "PpsxXEf"
elif char == "N":
reportopts = ""
elif char not in reportopts:
reportopts += char
if not config.option.disable_warnings and "w" not in reportopts:
reportopts = "w" + reportopts
elif config.option.disable_warnings and "w" in reportopts:
reportopts = reportopts.replace("w", "")
return reportopts
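# Worked examples for the expansion above (illustrative): the default "fE" becomes
# "wfE" once the implicit warnings entry is prepended; "-ra" turns "a" into "sxXEf"
# and ends up as "wsxXEf"; with --disable-warnings any "w" is stripped instead.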
@pytest.hookimpl(trylast=True) # after _pytest.runner
def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
letter = "F"
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
outcome = report.outcome # type: str
if report.when in ("collect", "setup", "teardown") and outcome == "failed":
outcome = "error"
letter = "E"
return outcome, letter, outcome.upper()
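# For example (illustrative): a failure in the test's own call phase maps to
# ("failed", "F", "FAILED"), a failure during collect/setup/teardown is promoted to
# ("error", "E", "ERROR"), and a skip yields ("skipped", "s", "SKIPPED").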
@attr.s
class WarningReport:
"""Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
:ivar str message:
User friendly message about the warning.
:ivar str|None nodeid:
nodeid that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
File system location of the source of the warning (see ``get_location``).
"""
message = attr.ib(type=str)
nodeid = attr.ib(type=Optional[str], default=None)
fslocation = attr.ib(
type=Optional[Union[Tuple[str, int], py.path.local]], default=None
)
count_towards_summary = True
def get_location(self, config: Config) -> Optional[str]:
"""Return the more user-friendly information about the location of a warning, or None."""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = bestrelpath(
config.invocation_params.dir, absolutepath(filename)
)
return "{}:{}".format(relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter:
def __init__(self, config: Config, file: Optional[TextIO] = None) -> None:
import _pytest.config
self.config = config
self._numcollected = 0
self._session = None # type: Optional[Session]
self._showfspath = None # type: Optional[bool]
self.stats = {} # type: Dict[str, List[Any]]
self._main_color = None # type: Optional[str]
self._known_types = None # type: Optional[List[str]]
self.startdir = config.invocation_dir
self.startpath = config.invocation_params.dir
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
self._screen_width = self._tw.fullwidth
self.currentfspath = None # type: Union[None, Path, str, int]
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported = set() # type: Set[str]
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write = None # type: Optional[float]
self._already_displayed_warnings = None # type: Optional[int]
self._keyboardinterrupt_memo = None # type: Optional[ExceptionRepr]
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
"""Return whether we should display progress information based on the current config."""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture", "no") == "no":
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow", False):
return False
cfg = self.config.getini("console_output_style") # type: str
if cfg == "progress":
return "progress"
elif cfg == "count":
return "count"
else:
return False
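    # For example (illustrative): running with -s (capture disabled) or --setup-show
    # suppresses the progress column entirely, while console_output_style = "count"
    # switches it from percentages to "[done/total]" counters.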
@property
def verbosity(self) -> int:
verbosity = self.config.option.verbose # type: int
return verbosity
@property
def showheader(self) -> bool:
return self.verbosity >= 0
@property
def no_header(self) -> bool:
return bool(self.config.option.no_header)
@property
def no_summary(self) -> bool:
return bool(self.config.option.no_summary)
@property
def showfspath(self) -> bool:
if self._showfspath is None:
return self.verbosity >= 0
return self._showfspath
@showfspath.setter
def showfspath(self, value: Optional[bool]) -> None:
self._showfspath = value
@property
def showlongtestinfo(self) -> bool:
return self.verbosity > 0
def hasopt(self, char: str) -> bool:
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None:
fspath = self.config.rootpath / nodeid.split("::")[0]
if self.currentfspath is None or fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
relfspath = bestrelpath(self.startpath, fspath)
self._tw.line()
self._tw.write(relfspath + " ")
self._tw.write(res, flush=True, **markup)
def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self) -> None:
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
self._tw.write(content, flush=flush, **markup)
def flush(self) -> None:
self._tw.flush()
def write_line(self, line: Union[str, bytes], **markup: bool) -> None:
if not isinstance(line, str):
line = str(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
"""Rewinds the terminal cursor to the beginning and writes the given line.
:param erase:
If True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(
self,
sep: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
**markup: bool
) -> None:
self.ensure_newline()
self._tw.sep(sep, title, fullwidth, **markup)
def section(self, title: str, sep: str = "=", **kw: bool) -> None:
self._tw.sep(sep, title, **kw)
def line(self, msg: str, **kw: bool) -> None:
self._tw.line(msg, **kw)
def _add_stats(self, category: str, items: Sequence[Any]) -> None:
set_main_color = category not in self.stats
self.stats.setdefault(category, []).extend(items)
if set_main_color:
self._set_main_color()
def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return True
def pytest_warning_recorded(
self, warning_message: warnings.WarningMessage, nodeid: str,
) -> None:
from _pytest.warnings import warning_record_to_str
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
self._add_stats("warnings", [warning_report])
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
msg = "PLUGIN registered: {}".format(plugin)
# XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line.
self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
self._add_stats("deselected", items)
def pytest_runtest_logstart(
self, nodeid: str, location: Tuple[str, Optional[int], str]
) -> None:
# Ensure that the path is printed before the
# 1st test of a module starts running.
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
self.flush()
elif self.showfspath:
self.write_fspath_result(nodeid, "")
self.flush()
def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tests_ran = True
rep = report
res = self.config.hook.pytest_report_teststatus(
report=rep, config=self.config
) # type: Tuple[str, str, Union[str, Tuple[str, Mapping[str, bool]]]]
category, letter, word = res
if not isinstance(word, tuple):
markup = None
else:
word, markup = word
self._add_stats(category, [rep])
if not letter and not word:
# Probably passed setup/teardown.
return
running_xdist = hasattr(rep, "node")
if markup is None:
was_xfail = hasattr(report, "wasxfail")
if rep.passed and not was_xfail:
markup = {"green": True}
elif rep.passed and was_xfail:
markup = {"yellow": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write("[%s]" % rep.node.gateway.id)
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
self.flush()
@property
def _is_last_item(self) -> bool:
assert self._session is not None
return len(self._progress_nodeids_reported) == self._session.testscollected
def pytest_runtest_logfinish(self, nodeid: str) -> None:
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
if self._show_progress_info == "count":
num_tests = self._session.testscollected
progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
else:
progress_length = len(" [100%]")
self._progress_nodeids_reported.add(nodeid)
if self._is_last_item:
self._write_progress_information_filling_space()
else:
main_color, _ = self._get_main_color()
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", **{main_color: True})
def _get_progress_information_message(self) -> str:
assert self._session
collected = self._session.testscollected
if self._show_progress_info == "count":
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = " [{}/{{}}]".format(counter_format)
return format_string.format(len(progress), collected)
return " [ {} / {} ]".format(collected, collected)
else:
if collected:
return " [{:3d}%]".format(
len(self._progress_nodeids_reported) * 100 // collected
)
return " [100%]"
def _write_progress_information_filling_space(self) -> None:
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), flush=True, **{color: True})
@property
def _width_of_current_line(self) -> int:
"""Return the width of the current line."""
return self._tw.width_of_current_line
def pytest_collection(self) -> None:
if self.isatty:
if self.config.option.verbose >= 0:
self.write("collecting ... ", flush=True, bold=True)
self._collect_report_last_write = timing.time()
elif self.config.option.verbose >= 1:
self.write("collecting ... ", flush=True, bold=True)
def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self._add_stats("error", [report])
elif report.skipped:
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
self.report_collect()
def report_collect(self, final: bool = False) -> None:
if self.config.option.verbose < 0:
return
if not final:
# Only write "collecting" report every 0.5s.
t = timing.time()
if (
self._collect_report_last_write is not None
and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
):
return
self._collect_report_last_write = t
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
selected = self._numcollected - errors - skipped - deselected
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d error%s" % (errors, "s" if errors != 1 else "")
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self._numcollected > selected > 0:
line += " / %d selected" % selected
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session: "Session") -> None:
self._session = session
self._sessionstarttime = timing.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
if not self.no_header:
msg = "platform {} -- Python {}".format(sys.platform, verinfo)
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info:
verinfo = ".".join(map(str, pypy_version_info[:3]))
msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3])
msg += ", pytest-{}, py-{}, pluggy-{}".format(
pytest.__version__, py.__version__, pluggy.__version__
)
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(
self, lines: Sequence[Union[str, Sequence[str]]]
) -> None:
for line_or_lines in reversed(lines):
if isinstance(line_or_lines, str):
self.write_line(line_or_lines)
else:
for line in line_or_lines:
self.write_line(line)
def pytest_report_header(self, config: Config) -> List[str]:
line = "rootdir: %s" % config.rootpath
if config.inipath:
line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)
testpaths = config.getini("testpaths")
if testpaths and config.args == testpaths:
rel_paths = [bestrelpath(config.rootpath, x) for x in testpaths]
line += ", testpaths: {}".format(", ".join(rel_paths))
result = [line]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return result
def pytest_collection_finish(self, session: "Session") -> None:
self.report_collect(True)
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items
)
self._write_report_lines_from_hooks(lines)
if self.config.getoption("collectonly"):
if session.items:
if self.config.option.verbose > -1:
self._tw.line("")
self._printcollecteditems(session.items)
failed = self.stats.get("failed")
if failed:
self._tw.sep("!", "collection failures")
for rep in failed:
rep.toterminal(self._tw)
def _printcollecteditems(self, items: Sequence[Item]) -> None:
# To print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway.
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {} # type: Dict[str, int]
for item in items:
name = item.nodeid.split("::", 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
self._tw.line(item.nodeid)
return
stack = [] # type: List[Node]
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
if col.name == "()": # Skip Instances.
continue
indent = (len(stack) - 1) * " "
self._tw.line("{}{}".format(indent, col))
if self.config.option.verbose >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None
if doc:
for line in doc.splitlines():
self._tw.line("{}{}".format(indent + " ", line))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(
self, session: "Session", exitstatus: Union[int, ExitCode]
):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
ExitCode.OK,
ExitCode.TESTS_FAILED,
ExitCode.INTERRUPTED,
ExitCode.USAGE_ERROR,
ExitCode.NO_TESTS_COLLECTED,
)
if exitstatus in summary_exit_codes and not self.no_summary:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus, config=self.config
)
if session.shouldfail:
self.write_sep("!", str(session.shouldfail), red=True)
if exitstatus == ExitCode.INTERRUPTED:
self._report_keyboardinterrupt()
self._keyboardinterrupt_memo = None
elif session.shouldstop:
self.write_sep("!", str(session.shouldstop), red=True)
self.summary_stats()
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(self) -> Generator[None, None, None]:
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
yield
self.short_test_summary()
# Display any extra warnings from teardown here (if any).
self.summary_warnings()
def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self) -> None:
if self._keyboardinterrupt_memo is not None:
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self) -> None:
excrepr = self._keyboardinterrupt_memo
assert excrepr is not None
assert excrepr.reprcrash is not None
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --full-trace)",
yellow=True,
)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path.
if fspath:
res = mkrel(nodeid)
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + bestrelpath(self.startpath, fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
head_line = rep.head_line
if head_line:
return head_line
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# Summaries for sessionfinish.
#
def getreports(self, name: str):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
def summary_warnings(self) -> None:
if self.hasopt("w"):
all_warnings = self.stats.get(
"warnings"
) # type: Optional[List[WarningReport]]
if not all_warnings:
return
final = self._already_displayed_warnings is not None
if final:
warning_reports = all_warnings[self._already_displayed_warnings :]
else:
warning_reports = all_warnings
self._already_displayed_warnings = len(warning_reports)
if not warning_reports:
return
reports_grouped_by_message = (
order_preserving_dict()
) # type: Dict[str, List[WarningReport]]
for wr in warning_reports:
reports_grouped_by_message.setdefault(wr.message, []).append(wr)
def collapsed_location_report(reports: List[WarningReport]) -> str:
locations = []
for w in reports:
location = w.get_location(self.config)
if location:
locations.append(location)
if len(locations) < 10:
return "\n".join(map(str, locations))
counts_by_filename = order_preserving_dict() # type: Dict[str, int]
for loc in locations:
key = str(loc).split("::", 1)[0]
counts_by_filename[key] = counts_by_filename.get(key, 0) + 1
return "\n".join(
"{}: {} warning{}".format(k, v, "s" if v > 1 else "")
for k, v in counts_by_filename.items()
)
title = "warnings summary (final)" if final else "warnings summary"
self.write_sep("=", title, yellow=True, bold=False)
for message, message_reports in reports_grouped_by_message.items():
maybe_location = collapsed_location_report(message_reports)
if maybe_location:
self._tw.line(maybe_location)
lines = message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line("-- Docs: https://docs.pytest.org/en/stable/warnings.html")
def summary_passes(self) -> None:
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports("passed") # type: List[TestReport]
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, green=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def _get_teardown_reports(self, nodeid: str) -> List[TestReport]:
reports = self.getreports("")
return [
report
for report in reports
if report.when == "teardown" and report.nodeid == nodeid
]
def _handle_teardown_sections(self, nodeid: str) -> None:
for report in self._get_teardown_reports(nodeid):
self.print_teardown_sections(report)
def print_teardown_sections(self, rep: TestReport) -> None:
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self) -> None:
if self.config.option.tbstyle != "no":
reports = self.getreports("failed") # type: List[BaseReport]
if not reports:
return
self.write_sep("=", "FAILURES")
if self.config.option.tbstyle == "line":
for rep in reports:
line = self._getcrashline(rep)
self.write_line(line)
else:
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def summary_errors(self) -> None:
if self.config.option.tbstyle != "no":
reports = self.getreports("error") # type: List[BaseReport]
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
msg = "ERROR at {} of {}".format(rep.when, msg)
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
def _outrep_summary(self, rep: BaseReport) -> None:
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self) -> None:
if self.verbosity < -1:
return
session_duration = timing.time() - self._sessionstarttime
(parts, main_color) = self.build_summary_stats_line()
line_parts = []
display_sep = self.verbosity >= 0
if display_sep:
fullwidth = self._tw.fullwidth
for text, markup in parts:
with_markup = self._tw.markup(text, **markup)
if display_sep:
fullwidth += len(with_markup) - len(text)
line_parts.append(with_markup)
msg = ", ".join(line_parts)
main_markup = {main_color: True}
duration = " in {}".format(format_session_duration(session_duration))
duration_with_markup = self._tw.markup(duration, **main_markup)
if display_sep:
fullwidth += len(duration_with_markup) - len(duration)
msg += duration_with_markup
if display_sep:
markup_for_end_sep = self._tw.markup("", **main_markup)
if markup_for_end_sep.endswith("\x1b[0m"):
markup_for_end_sep = markup_for_end_sep[:-4]
fullwidth += len(markup_for_end_sep)
msg += markup_for_end_sep
if display_sep:
self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
else:
self.write_line(msg, **main_markup)
def short_test_summary(self) -> None:
if not self.reportchars:
return
def show_simple(stat, lines: List[str]) -> None:
failed = self.stats.get(stat, [])
if not failed:
return
termwidth = self._tw.fullwidth
config = self.config
for rep in failed:
line = _get_line_with_reprcrash_message(config, rep, termwidth)
lines.append(line)
def show_xfailed(lines: List[str]) -> None:
xfailed = self.stats.get("xfailed", [])
for rep in xfailed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
lines.append("{} {}".format(verbose_word, pos))
reason = rep.wasxfail
if reason:
lines.append(" " + str(reason))
def show_xpassed(lines: List[str]) -> None:
xpassed = self.stats.get("xpassed", [])
for rep in xpassed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
reason = rep.wasxfail
lines.append("{} {} {}".format(verbose_word, pos, reason))
def show_skipped(lines: List[str]) -> None:
skipped = self.stats.get("skipped", []) # type: List[CollectReport]
fskips = _folded_skips(self.startpath, skipped) if skipped else []
if not fskips:
return
verbose_word = skipped[0]._get_verbose_word(self.config)
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
if lineno is not None:
lines.append(
"%s [%d] %s:%d: %s"
% (verbose_word, num, fspath, lineno, reason)
)
else:
lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
REPORTCHAR_ACTIONS = {
"x": show_xfailed,
"X": show_xpassed,
"f": partial(show_simple, "failed"),
"s": show_skipped,
"p": partial(show_simple, "passed"),
"E": partial(show_simple, "error"),
} # type: Mapping[str, Callable[[List[str]], None]]
lines = [] # type: List[str]
for char in self.reportchars:
action = REPORTCHAR_ACTIONS.get(char)
if action: # skipping e.g. "P" (passed with output) here.
action(lines)
if lines:
self.write_sep("=", "short test summary info")
for line in lines:
self.write_line(line)
def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types
def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color
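    # For example (illustrative): any "failed" or "error" entry forces red, warnings
    # or unexpected passes turn the summary yellow, a clean run with passes stays
    # green, and a session that collected nothing falls through to yellow.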
def _set_main_color(self) -> None:
unknown_types = [] # type: List[str]
for found_type in self.stats.keys():
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
unknown_types.append(found_type)
self._known_types = list(KNOWN_TYPES) + unknown_types
self._main_color = self._determine_main_color(bool(unknown_types))
def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
main_color, known_types = self._get_main_color()
parts = []
for key in known_types:
reports = self.stats.get(key, None)
if reports:
count = sum(
1 for rep in reports if getattr(rep, "count_towards_summary", True)
)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % _make_plural(count, key), markup))
if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]
return parts, main_color
def _get_pos(config: Config, rep: BaseReport):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
return nodeid
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, termwidth: int
) -> str:
"""Get summary line for a report, trying to add reprcrash message."""
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)
line = "{} {}".format(verbose_word, pos)
len_line = wcswidth(line)
ellipsis, len_ellipsis = "...", 3
if len_line > termwidth - len_ellipsis:
# No space for an additional message.
return line
try:
# Type ignored intentionally -- possible AttributeError expected.
msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
except AttributeError:
pass
else:
# Only use the first line.
i = msg.find("\n")
if i != -1:
msg = msg[:i]
len_msg = wcswidth(msg)
sep, len_sep = " - ", 3
max_len_msg = termwidth - len_line - len_sep
if max_len_msg >= len_ellipsis:
if len_msg > max_len_msg:
max_len_msg -= len_ellipsis
msg = msg[:max_len_msg]
while wcswidth(msg) > max_len_msg:
msg = msg[:-1]
msg += ellipsis
line += sep + msg
return line
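# For example (illustrative): a failed report is summarised as something like
# "FAILED test_mod.py::test_x - AssertionError: assert 1 == 2"; if the crash message
# would overflow the terminal it is truncated and suffixed with "...", and when even
# the "<verbose word> <nodeid>" prefix does not fit, the message is dropped entirely.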
def _folded_skips(
startpath: Path, skipped: Sequence[CollectReport],
) -> List[Tuple[int, str, Optional[int], str]]:
d = {} # type: Dict[Tuple[str, Optional[int], str], List[CollectReport]]
for event in skipped:
assert event.longrepr is not None
assert isinstance(event.longrepr, tuple), (event, event.longrepr)
assert len(event.longrepr) == 3, (event, event.longrepr)
fspath, lineno, reason = event.longrepr
# For consistency, report all fspaths in relative form.
fspath = bestrelpath(startpath, Path(fspath))
keywords = getattr(event, "keywords", {})
# Folding reports with global pytestmark variable.
# This is a workaround, because for now we cannot identify the scope of a skip marker
# TODO: Revisit after marks scope would be fixed.
if (
event.when == "setup"
and "skip" in keywords
and "pytestmark" not in keywords
):
key = (fspath, None, reason) # type: Tuple[str, Optional[int], str]
else:
key = (fspath, lineno, reason)
d.setdefault(key, []).append(event)
values = [] # type: List[Tuple[int, str, Optional[int], str]]
for key, events in d.items():
values.append((len(events), *key))
return values
_color_for_type = {
"failed": "red",
"error": "red",
"warnings": "yellow",
"passed": "green",
}
_color_for_type_default = "yellow"
def _make_plural(count: int, noun: str) -> Tuple[int, str]:
# No need to pluralize words such as `failed` or `passed`.
if noun not in ["error", "warnings"]:
return count, noun
# The `warnings` key is plural. To avoid API breakage, we keep it that way but
# set it to singular here so we can determine plurality in the same way as we do
# for `error`.
noun = noun.replace("warnings", "warning")
return count, noun + "s" if count != 1 else noun
def _plugin_nameversions(plugininfo) -> List[str]:
values = [] # type: List[str]
for plugin, dist in plugininfo:
# Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
        # We decided to print python package names since they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
return "{:.2f}s".format(seconds)
else:
dt = datetime.timedelta(seconds=int(seconds))
return "{:.2f}s ({})".format(seconds, dt)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
	Customize Form is a Single DocType used to mask the Property Setter,
	thus providing a better UI from the user's perspective
"""
import frappe
import frappe.translate
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.model import no_value_fields
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
from frappe.model.docfield import supports_translation
doctype_properties = {
'search_fields': 'Data',
'title_field': 'Data',
'image_field': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'read_only_onload': 'Check',
'allow_copy': 'Check',
'istable': 'Check',
'quick_entry': 'Check',
'editable_grid': 'Check',
'max_attachments': 'Int',
'image_view': 'Check',
'track_changes': 'Check',
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'fetch_from': 'Small Text',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'unique': 'Check',
'ignore_user_permissions': 'Check',
'in_list_view': 'Check',
'in_standard_filter': 'Check',
'in_global_search': 'Check',
'bold': 'Check',
'hidden': 'Check',
'collapsible': 'Check',
'collapsible_depends_on': 'Data',
'print_hide': 'Check',
'print_hide_if_no_value': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'translatable': 'Check',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text',
'precision': 'Select',
'read_only': 'Check',
'length': 'Int',
'columns': 'Int',
'remember_last_selected_value': 'Check',
'allow_bulk_edit': 'Check',
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Data'), ('Text', 'Text Editor', 'Code', 'Signature', 'HTML Editor'), ('Data', 'Select'),
('Text', 'Small Text'), ('Text', 'Data', 'Barcode'), ('Code', 'Geolocation'))
allowed_fieldtype_for_options_change = ('Read Only', 'HTML', 'Select', 'Data')
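# Reading of the two structures above (an interpretation, not code from this module):
# a standard field's type may only be switched between types that share a tuple in
# allowed_fieldtype_change, e.g. Currency <-> Float <-> Percent or Data <-> Select,
# and the 'options' property may only be customized for Read Only, HTML, Select and
# Data fields.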
class CustomizeForm(Document):
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
# doctype properties
for property in doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in docfield_properties:
new_d[property] = d.get(property)
self.append("fields", new_d)
# load custom translation
translation = self.get_name_translation()
self.label = translation.target_name if translation else ''
# NOTE doc is sent to clientside by run_method
def get_name_translation(self):
		'''Get the translation object for the current doctype name in the default language, if it exists'''
return frappe.get_value('Translation',
{'source_name': self.doc_type, 'language': frappe.local.lang or 'en'},
['name', 'target_name'], as_dict=True)
def set_name_translation(self):
		'''Create or update the custom translation for this doctype'''
current = self.get_name_translation()
if current:
if self.label and current.target_name != self.label:
frappe.db.set_value('Translation', current.name, 'target_name', self.label)
frappe.translate.clear_cache()
else:
# clear translation
frappe.delete_doc('Translation', current.name)
else:
if self.label:
frappe.get_doc(dict(doctype='Translation',
source_name=self.doc_type,
target_name=self.label,
language_code=frappe.local.lang or 'en')).insert()
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.flags.update_db = False
self.set_property_setters()
self.update_custom_fields()
self.set_name_translation()
validate_fields_for_doctype(self.doc_type)
if self.flags.update_db:
from frappe.model.db_schema import updatedb
updatedb(self.doc_type)
if not hasattr(self, 'hide_success') or not self.hide_success:
frappe.msgprint(_("{0} updated").format(_(self.doc_type)))
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=doctype_properties[property])
for df in self.get("fields"):
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in docfield_properties:
if property != "idx" and (df.get(property) or '') != (meta_df[0].get(property) or ''):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
elif property == "allow_on_submit" and df.get(property):
frappe.msgprint(_("Row {0}: Not allowed to enable Allow on Submit for standard fields")\
.format(df.idx))
continue
elif property == "reqd" and \
((frappe.db.get_value("DocField",
{"parent":self.doc_type,"fieldname":df.fieldname}, "reqd") == 1) \
and (df.get(property) == 0)):
frappe.msgprint(_("Row {0}: Not allowed to disable Mandatory for standard fields")\
.format(df.idx))
continue
elif property == "in_list_view" and df.get(property) \
and df.fieldtype!="Attach Image" and df.fieldtype in no_value_fields:
frappe.msgprint(_("'In List View' not allowed for type {0} in row {1}")
.format(df.fieldtype, df.idx))
continue
elif property == "precision" and cint(df.get("precision")) > 6 \
and cint(df.get("precision")) > cint(meta_df[0].get("precision")):
self.flags.update_db = True
elif property == "unique":
self.flags.update_db = True
elif (property == "read_only" and cint(df.get("read_only"))==0
and frappe.db.get_value("DocField", {"parent": self.doc_type, "fieldname": df.fieldname}, "read_only")==1):
# if docfield has read_only checked and user is trying to make it editable, don't allow it
frappe.msgprint(_("You cannot unset 'Read Only' for field {0}").format(df.label))
continue
elif property == "options" and df.get("fieldtype") not in allowed_fieldtype_for_options_change:
frappe.msgprint(_("You can't set 'Options' for field {0}").format(df.label))
continue
elif property == 'translatable' and not supports_translation(df.get('fieldtype')):
frappe.msgprint(_("You can't set 'Translatable' for field {0}").format(df.label))
continue
self.make_property_setter(property=property, value=df.get(property),
property_type=docfield_properties[property], fieldname=df.fieldname)
def update_custom_fields(self):
for i, df in enumerate(self.get("fields")):
if df.get("is_custom_field"):
if not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):
self.add_custom_field(df, i)
self.flags.update_db = True
else:
self.update_in_custom_field(df, i)
self.delete_custom_fields()
def add_custom_field(self, df, i):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in docfield_properties:
d.set(property, df.get(property))
if i!=0:
d.insert_after = self.fields[i-1].fieldname
d.idx = i
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df, i):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
# not a custom field
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
# check and update `insert_after` property
if i!=0:
insert_after = self.fields[i-1].fieldname
if custom_field.insert_after != insert_after:
custom_field.insert_after = insert_after
custom_field.idx = i
changed = True
if changed:
custom_field.db_update()
self.flags.update_db = True
#custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
# ignore validation because it will be done at end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.db.sql("delete from `tabProperty Setter` where name=%s", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
except Exception as e:
if e.args[0]==1054:
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
for allowed_changes in allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
break
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""delete from `tabProperty Setter` where doc_type=%s
and !(`field_name`='naming_series' and `property`='options')""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
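# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the allowed_fieldtype_change groups defined at
# the top of this module are interpreted by validate_fieldtype_change(): a
# fieldtype change is permitted only when the old and the new fieldtype appear
# together in at least one group. The helper name below is illustrative only.
def _example_fieldtype_change_check():
    def is_allowed_change(old_value, new_value):
        return any(old_value in group and new_value in group
            for group in allowed_fieldtype_change)

    assert is_allowed_change("Currency", "Float")      # same group
    assert is_allowed_change("Text", "HTML Editor")    # same group
    assert not is_allowed_change("Currency", "Data")   # no shared group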
|
|
"""This module implements decorators for implementing other decorators
as well as some commonly used decorators.
"""
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
import builtins
exec_ = getattr(builtins, "exec")
del builtins
else:
string_types = basestring,
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
from functools import partial
from inspect import ismethod, isclass, formatargspec
from collections import namedtuple
from threading import Lock, RLock
try:
from inspect import signature
except ImportError:
pass
from .wrappers import (FunctionWrapper, BoundFunctionWrapper, ObjectProxy,
CallableObjectProxy)
# Adapter wrapper for the wrapped function which will overlay certain
# properties from the adapter function onto the wrapped function so that
# functions such as inspect.getargspec(), inspect.getfullargspec(),
# inspect.signature() and inspect.getsource() return the correct results
# one would expect.
class _AdapterFunctionCode(CallableObjectProxy):
def __init__(self, wrapped_code, adapter_code):
super(_AdapterFunctionCode, self).__init__(wrapped_code)
self._self_adapter_code = adapter_code
@property
def co_argcount(self):
return self._self_adapter_code.co_argcount
@property
def co_code(self):
return self._self_adapter_code.co_code
@property
def co_flags(self):
return self._self_adapter_code.co_flags
@property
def co_kwonlyargcount(self):
return self._self_adapter_code.co_kwonlyargcount
@property
def co_varnames(self):
return self._self_adapter_code.co_varnames
class _AdapterFunctionSurrogate(CallableObjectProxy):
def __init__(self, wrapped, adapter):
super(_AdapterFunctionSurrogate, self).__init__(wrapped)
self._self_adapter = adapter
@property
def __code__(self):
return _AdapterFunctionCode(self.__wrapped__.__code__,
self._self_adapter.__code__)
@property
def __defaults__(self):
return self._self_adapter.__defaults__
@property
def __kwdefaults__(self):
return self._self_adapter.__kwdefaults__
@property
def __signature__(self):
if 'signature' not in globals():
return self._self_adapter.__signature__
else:
# Can't allow this to fail on Python 3 else it falls
# through to using __wrapped__, but that would be the
# wrong function to derive the signature from. Thus
# generate the signature ourselves.
return signature(self._self_adapter)
if PY2:
func_code = __code__
func_defaults = __defaults__
class _BoundAdapterWrapper(BoundFunctionWrapper):
@property
def __func__(self):
return _AdapterFunctionSurrogate(self.__wrapped__.__func__,
self._self_parent._self_adapter)
if PY2:
im_func = __func__
class AdapterWrapper(FunctionWrapper):
__bound_function_wrapper__ = _BoundAdapterWrapper
def __init__(self, *args, **kwargs):
adapter = kwargs.pop('adapter')
super(AdapterWrapper, self).__init__(*args, **kwargs)
self._self_surrogate = _AdapterFunctionSurrogate(
self.__wrapped__, adapter)
self._self_adapter = adapter
@property
def __code__(self):
return self._self_surrogate.__code__
@property
def __defaults__(self):
return self._self_surrogate.__defaults__
@property
def __kwdefaults__(self):
return self._self_surrogate.__kwdefaults__
if PY2:
func_code = __code__
func_defaults = __defaults__
@property
def __signature__(self):
return self._self_surrogate.__signature__
class AdapterFactory(object):
def __call__(self, wrapped):
raise NotImplementedError()
class DelegatedAdapterFactory(AdapterFactory):
def __init__(self, factory):
super(DelegatedAdapterFactory, self).__init__()
self.factory = factory
def __call__(self, wrapped):
return self.factory(wrapped)
adapter_factory = DelegatedAdapterFactory
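# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the adapter machinery above: AdapterWrapper (the
# same class the decorator factory below constructs in its _build() helper)
# overlays the signature of a prototype function onto the wrapped function,
# so introspection reports the prototype's arguments. All names below are
# illustrative only.
def _example_adapter_wrapper():
    import inspect

    def prototype(arg1, arg2):
        pass

    def wrapper(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)

    def target(*args, **kwargs):
        return args, kwargs

    proxied = AdapterWrapper(wrapped=target, wrapper=wrapper,
            enabled=None, adapter=prototype)

    # inspect.signature() now reflects the prototype, not *args/**kwargs.
    assert list(inspect.signature(proxied).parameters) == ['arg1', 'arg2']
    return proxied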
# Decorator for creating other decorators. This decorator and the
# wrappers which they use are designed to properly preserve any name
# attributes, function signatures etc, in addition to the wrappers
# themselves acting like a transparent proxy for the original wrapped
# function so the wrapper is effectively indistinguishable from the
# original wrapped function.
def decorator(wrapper=None, enabled=None, adapter=None):
# The decorator should be supplied with a single positional argument
# which is the wrapper function to be used to implement the
# decorator. This may be preceded by a step whereby the keyword
# arguments are supplied to customise the behaviour of the
# decorator. The 'adapter' argument is used to optionally denote a
# separate function which is notionally used by an adapter
# decorator. In that case parts of the function '__code__' and
# '__defaults__' attributes are used from the adapter function
# rather than those of the wrapped function. This allows for the
# argument specification from inspect.getargspec() and similar
# functions to be overridden with a prototype for a different
# function than what was wrapped. The 'enabled' argument provides a
# way to enable/disable the use of the decorator. If the type of
# 'enabled' is a boolean, then it is evaluated immediately and the
# wrapper not even applied if it is False. If not a boolean, it will
# be evaluated when the wrapper is called for an unbound wrapper,
# and when binding occurs for a bound wrapper. When being evaluated,
# if 'enabled' is callable it will be called to obtain the value to
# be checked. If False, the wrapper will not be called and instead
# the original wrapped function will be called directly instead.
if wrapper is not None:
# Helper function for creating wrapper of the appropriate
# type when we need it down below.
def _build(wrapped, wrapper, enabled=None, adapter=None):
if adapter:
if isinstance(adapter, AdapterFactory):
adapter = adapter(wrapped)
if not callable(adapter):
ns = {}
if not isinstance(adapter, string_types):
adapter = formatargspec(*adapter)
exec_('def adapter{0}: pass'.format(adapter), ns, ns)
adapter = ns['adapter']
return AdapterWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled, adapter=adapter)
return FunctionWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled)
# The wrapper has been provided so return the final decorator.
# The decorator is itself one of our function wrappers so we
# can determine when it is applied to functions, instance methods
# or class methods. This allows us to bind the instance or class
# method so the appropriate self or cls attribute is supplied
# when it is finally called.
def _wrapper(wrapped, instance, args, kwargs):
# We first check for the case where the decorator was applied
# to a class type.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass(arg=1)
# def function():
# pass
#
# In this case an instance of the class is to be used as the
# decorator wrapper function. If args was empty at this point,
# then it means that there were optional keyword arguments
# supplied to be used when creating an instance of the class
# to be used as the wrapper function.
if instance is None and isclass(wrapped) and not args:
# We still need to be passed the target function to be
# wrapped as yet, so we need to return a further function
# to be able to capture it.
def _capture(target_wrapped):
# Now have the target function to be wrapped and need
# to create an instance of the class which is to act
# as the decorator wrapper function. Before we do that,
# we need to first check that use of the decorator
# hadn't been disabled by a simple boolean. If it was,
# the target function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# Now create an instance of the class which is to act
# as the decorator wrapper function. Any arguments had
# to be supplied as keyword only arguments so that is
# all we pass when creating it.
target_wrapper = wrapped(**kwargs)
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper,
_enabled, adapter)
return _capture
# We should always have the target function to be wrapped at
# this point as the first (and only) value in args.
target_wrapped = args[0]
# Need to now check that use of the decorator hadn't been
# disabled by a simple boolean. If it was, then target
# function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# We now need to build the wrapper, but there are a couple of
# different cases we need to consider.
if instance is None:
if isclass(wrapped):
# In this case the decorator was applied to a class
# type but optional keyword arguments were not supplied
# for initialising an instance of the class to be used
# as the decorator wrapper function.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass
# def function():
# pass
#
# We still need to create an instance of the class to
# be used as the decorator wrapper function, but no
# arguments are passed.
target_wrapper = wrapped()
else:
# In this case the decorator was applied to a normal
# function, or possibly a static method of a class.
#
# @decorator
# def mydecoratorfunction(wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorfunction
# def function():
# pass
#
# That normal function becomes the decorator wrapper
# function.
target_wrapper = wrapper
else:
if isclass(instance):
# In this case the decorator was applied to a class
# method.
#
# class myclass(object):
# @decorator
# @classmethod
# def decoratorclassmethod(cls, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
# @instance.decoratorclassmethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
# wrapper function which the decorator factory
# was applied to.
target_wrapper = wrapper.__get__(None, instance)
else:
# In this case the decorator was applied to an instance
# method.
#
# class myclass(object):
# @decorator
# def decoratorclassmethod(self, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
# @instance.decoratorclassmethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
# wrapper function which the decorator factory
# was applied to.
target_wrapper = wrapper.__get__(instance, type(instance))
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper, _enabled, adapter)
# We first return our magic function wrapper here so we can
# determine in what context the decorator factory was used. In
# other words, it is itself a universal decorator.
return _build(wrapper, _wrapper)
else:
# The wrapper still has not been provided, so we are just
# collecting the optional keyword arguments. Return the
# decorator again wrapped in a partial using the collected
# arguments.
return partial(decorator, enabled=enabled, adapter=adapter)
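# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of the decorator factory above. The wrapper supplied to
# decorator() always receives (wrapped, instance, args, kwargs), and the
# resulting decorator behaves as a transparent proxy for whatever it wraps.
# The 'enabled' keyword shows the optional-keyword-argument form; all names
# below are illustrative only.
def _example_decorator_usage():
    @decorator
    def pass_through(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)

    @pass_through
    def add(a, b):
        return a + b

    assert add(2, 3) == 5
    assert add.__name__ == 'add'    # metadata of the original is preserved

    # When 'enabled' is a literal False the decorator is not applied at all
    # and the original function is returned unchanged.
    @decorator(enabled=False)
    def disabled(wrapped, instance, args, kwargs):
        raise AssertionError('never called')

    @disabled
    def sub(a, b):
        return a - b

    assert sub(5, 2) == 3
    return add, sub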
# Decorator for implementing thread synchronization. It can be used as a
# decorator, in which case the synchronization context is determined by
# what type of function is wrapped, or it can also be used as a context
# manager, where the user needs to supply the correct synchronization
# context. It is also possible to supply an object which appears to be a
# synchronization primitive of some sort, by virtue of having release()
# and acquire() methods. In that case that will be used directly as the
# synchronization primitive without creating a separate lock against the
# derived or supplied context.
def synchronized(wrapped):
# Determine if being passed an object which is a synchronization
# primitive. We can't check by type for Lock, RLock, Semaphore etc,
# as the means of creating them isn't the type. Therefore use the
# existence of acquire() and release() methods. This is more
# extensible anyway as it allows custom synchronization mechanisms.
if hasattr(wrapped, 'acquire') and hasattr(wrapped, 'release'):
# We remember what the original lock is and then return a new
# decorator which accesses and locks it. When returning the new
# decorator we wrap it with an object proxy so we can override
# the context manager methods in case it is being used to wrap
# synchronized statements with a 'with' statement.
lock = wrapped
@decorator
def _synchronized(wrapped, instance, args, kwargs):
# Execute the wrapped function while the original supplied
# lock is held.
with lock:
return wrapped(*args, **kwargs)
class _PartialDecorator(CallableObjectProxy):
def __enter__(self):
lock.acquire()
return lock
def __exit__(self, *args):
lock.release()
return _PartialDecorator(wrapped=_synchronized)
# Following only apply when the lock is being created automatically
# based on the context of what was supplied. In this case we supply
# a final decorator, but need to use FunctionWrapper directly as we
# want to derive from it to add context manager methods in case it is
# being used to wrap synchronized statements with a 'with' statement.
def _synchronized_lock(context):
# Attempt to retrieve the lock for the specific context.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
# There is no existing lock defined for the context we
# are dealing with so we need to create one. This needs
# to be done in a way to guarantee there is only one
# created, even if multiple threads try and create it at
# the same time. We can't always use the setdefault()
# method on the __dict__ for the context. This is the
# case where the context is a class, as __dict__ is
# actually a dictproxy. What we therefore do is use a
# meta lock on this wrapper itself, to control the
# creation and assignment of the lock attribute against
# the context.
meta_lock = vars(synchronized).setdefault(
'_synchronized_meta_lock', Lock())
with meta_lock:
# We need to check again for whether the lock we want
# exists in case two threads were trying to create it
# at the same time and were competing to create the
# meta lock.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
lock = RLock()
setattr(context, '_synchronized_lock', lock)
return lock
def _synchronized_wrapper(wrapped, instance, args, kwargs):
# Execute the wrapped function while the lock for the
# desired context is held. If instance is None then the
# wrapped function is used as the context.
with _synchronized_lock(instance or wrapped):
return wrapped(*args, **kwargs)
class _FinalDecorator(FunctionWrapper):
def __enter__(self):
self._self_lock = _synchronized_lock(self.__wrapped__)
self._self_lock.acquire()
return self._self_lock
def __exit__(self, *args):
self._self_lock.release()
return _FinalDecorator(wrapped=wrapped, wrapper=_synchronized_wrapper)
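# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of synchronized() above. Wrapping a plain function creates
# a lock against that function object; passing an existing lock-like object
# (anything with acquire()/release()) lets the same primitive be shared
# between a decorator and a 'with' block. All names below are illustrative.
def _example_synchronized_usage():
    from threading import Lock

    @synchronized
    def guarded():
        # Only one thread at a time can execute this body.
        return 'guarded'

    shared_lock = Lock()

    @synchronized(shared_lock)
    def also_guarded():
        return 'also guarded'

    with synchronized(shared_lock):
        # The same lock protects this block as well.
        pass

    return guarded(), also_guarded()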
|
|
<NotepadPlus>
<Project name="djdam">
<Folder name="apps">
<Folder name="admin">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\actions.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\base.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\base_site.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\filter.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\index.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\prepopulated_fields_js.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin\search_form.html" />
</Folder>
<Folder name="adminactions">
<Folder name="fixtures">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\fixtures\adminactions.json" />
</Folder>
<Folder name="static">
<Folder name="adminactions">
<Folder name="css">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\static\adminactions\css\adminactions.css" />
</Folder>
<Folder name="js">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\static\adminactions\js\export.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\static\adminactions\js\massupdate.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\static\adminactions\js\merge.js" />
</Folder>
</Folder>
</Folder>
<Folder name="templates">
<Folder name="adminactions">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\any_model.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\charts.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\export_csv.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\export_fixture.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\mass_update.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\merge.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\adminactions\merge_preview.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templates\500.html" />
</Folder>
<Folder name="templatetags">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templatetags\actions.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templatetags\massupdate.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templatetags\merge.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\templatetags\__init__.py" />
</Folder>
<Folder name="tests">
<Folder name="selenium_tests">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\selenium_tests\common.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\selenium_tests\export_csv.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\selenium_tests\importer.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\selenium_tests\mass_update.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\selenium_tests\__init__.py" />
</Folder>
<Folder name="templates">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\templates\base.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\common.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\exports.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\mass_update.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\merge.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\settings.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\tests\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\actions.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\any.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\api.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\exceptions.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\export.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\forms.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\graph.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\mass_update.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\merge.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\signals.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\utils.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\views.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\adminactions\__init__.py" />
</Folder>
<Folder name="admin_tools">
<Folder name="dashboard">
<Folder name="management">
<Folder name="commands">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\management\commands\customdashboard.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\management\commands\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\management\__init__.py" />
</Folder>
<Folder name="migrations">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\migrations\0001_initial.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\migrations\0002_auto__add_field_dashboardpreferences_dashboard_id.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\migrations\0003_auto__add_unique_dashboardpreferences_dashboard_id_user.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\migrations\__init__.py" />
</Folder>
<Folder name="static">
<Folder name="admin_tools">
<Folder name="css">
<Folder name="jquery">
<Folder name="images">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\animated-overlay.gif" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_flat_0_aaaaaa_40x100.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_flat_75_ffffff_40x100.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_glass_55_fbf9ee_1x400.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_glass_65_ffffff_1x400.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_glass_75_dadada_1x400.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_glass_75_e6e6e6_1x400.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_glass_95_fef1ec_1x400.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-bg_highlight-soft_75_cccccc_1x100.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-icons_222222_256x240.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-icons_2e83ff_256x240.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-icons_454545_256x240.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-icons_888888_256x240.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\images\ui-icons_cd0a0a_256x240.png" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\jquery\jquery-ui.css" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\dashboard-ie.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\css\dashboard.css" />
</Folder>
<Folder name="js">
<Folder name="jquery">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\js\jquery\jquery-ui.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\js\jquery\jquery.cookie.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\js\jquery\jquery.dashboard.js" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\static\admin_tools\js\dashboard.js" />
</Folder>
</Folder>
</Folder>
<Folder name="templates">
<Folder name="admin">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin\app_index.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin\index.html" />
</Folder>
<Folder name="admin_tools">
<Folder name="dashboard">
<Folder name="modules">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\app_list.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\feed.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\group.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\link_list.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\model_list.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\modules\recent_actions.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\css.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\dashboard.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\dashboard.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\dashboard_app_index.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\dummy.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\module.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templates\admin_tools\dashboard\preferences_form.html" />
</Folder>
</Folder>
</Folder>
<Folder name="templatetags">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templatetags\admin_tools_dashboard_tags.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\templatetags\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\dashboards.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\forms.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\modules.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\registry.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\tests.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\utils.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\views.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\dashboard\__init__.py" />
</Folder>
<Folder name="locale">
<Folder name="ar">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ar\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ar\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="bg">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\bg\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\bg\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="bn">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\bn\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\bn\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="ca">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ca\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ca\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="cs">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\cs\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\cs\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="da">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\da\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\da\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="de">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\de\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\de\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="el">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\el\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\el\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="en">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\en\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\en\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="es">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\es\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\es\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="es_AR">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\es_AR\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\es_AR\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="fi">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\fi\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\fi\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="fr">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\fr\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\fr\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="he">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\he\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\he\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="hu">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\hu\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\hu\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="it">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\it\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\it\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="ja">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ja\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ja\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="nl">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\nl\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\nl\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="pl">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pl\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pl\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="pt">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pt\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pt\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="pt_BR">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pt_BR\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\pt_BR\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="ru">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ru\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\ru\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="sk">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\sk\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\sk\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="sv">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\sv\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\sv\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="tr">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\tr\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\tr\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="zh_CN">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\zh_CN\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\zh_CN\LC_MESSAGES\django.po" />
</Folder>
</Folder>
<Folder name="zh_TW">
<Folder name="LC_MESSAGES">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\zh_TW\LC_MESSAGES\django.mo" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\locale\zh_TW\LC_MESSAGES\django.po" />
</Folder>
</Folder>
</Folder>
<Folder name="menu">
<Folder name="management">
<Folder name="commands">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\management\commands\custommenu.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\management\commands\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\management\__init__.py" />
</Folder>
<Folder name="migrations">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\migrations\0001_initial.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\migrations\__init__.py" />
</Folder>
<Folder name="static">
<Folder name="admin_tools">
<Folder name="css">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\static\admin_tools\css\menu-ie.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\static\admin_tools\css\menu.css" />
</Folder>
<Folder name="js">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\static\admin_tools\js\menu.js" />
</Folder>
</Folder>
</Folder>
<Folder name="templates">
<Folder name="admin">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin\base_site.html" />
</Folder>
<Folder name="admin_tools">
<Folder name="menu">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\add_bookmark_form.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\css.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\delete_confirm.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\dummy.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\form.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\item.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\menu.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\menu.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templates\admin_tools\menu\remove_bookmark_form.html" />
</Folder>
</Folder>
</Folder>
<Folder name="templatetags">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templatetags\admin_tools_menu_tags.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\templatetags\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\forms.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\items.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\menus.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\tests.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\utils.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\views.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\menu\__init__.py" />
</Folder>
<Folder name="static">
<Folder name="admin_tools">
<Folder name="images">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\static\admin_tools\images\admin-tools.png" />
</Folder>
<Folder name="js">
<Folder name="jquery">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\static\admin_tools\js\jquery\jquery.min.js" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\static\admin_tools\js\json.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\static\admin_tools\js\utils.js" />
</Folder>
</Folder>
</Folder>
<Folder name="theming">
<Folder name="static">
<Folder name="admin_tools">
<Folder name="css">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\static\admin_tools\css\theming.css" />
</Folder>
<Folder name="images">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\static\admin_tools\images\django.png" />
</Folder>
</Folder>
</Folder>
<Folder name="templates">
<Folder name="admin">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\templates\admin\base.html" />
</Folder>
</Folder>
<Folder name="templatetags">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\templatetags\theming_tags.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\templatetags\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\tests.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\theming\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\deprecate_utils.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\tests.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\utils.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\admin_tools\__init__.py" />
</Folder>
<Folder name="base">
<Folder name="static">
<Folder name="css">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\app.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\bootstrap-responsive.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\bootstrap-responsive.min.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\bootstrap.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\bootstrap.min.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\h5bp.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\site_stylesheet.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\style.css" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\css\theming.css" />
</Folder>
<Folder name="img">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\img\glyphicons-halflings-white.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\img\glyphicons-halflings.png" />
</Folder>
<Folder name="js">
<Folder name="libs">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\libs\bootstrap.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\libs\bootstrap.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\libs\jquery-1.9.1.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\libs\less-1.2.1.min.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\libs\modernizr-2.6.2-respond-1.1.0.min.js" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\main.js" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\js\plugins.js" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon-114x114-precomposed.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon-144x144-precomposed.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon-57x57-precomposed.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon-72x72-precomposed.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon-precomposed.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\apple-touch-icon.png" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\crossdomain.xml" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\favicon.ico" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\humans.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\static\robots.txt" />
</Folder>
<Folder name="templates">
<Folder name="base">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\base\home.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\base\jbhome.html" />
</Folder>
<Folder name="_layouts">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\_layouts\base.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\403.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\404.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\templates\500.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\forms.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\views.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\base\__init__.py" />
</Folder>
<Folder name="searcher">
<Folder name="templates">
<Folder name="searcher">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\album.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\footer.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\header.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\image.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\list.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\pbase.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\recommendations.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\resultbase.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\search.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\search_form.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\search_results.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\templates\searcher\tbase.html" />
</Folder>
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\old_models.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\tests.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\views.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\searcher\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\apps\__init__.py" />
</Folder>
<Folder name="bin">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\bin\git_precommit_pycheck.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\bin\gunicorn_start.sh" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\bin\jenkins.sh" />
</Folder>
<Folder name="conf">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\nginx-mime.types" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\nginx.conf" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\nginx_gunicorn.conf" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\supervdam.conf" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\supervisord.conf" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\upstart.conf" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\uwsgi.ini" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\conf\uwsgi_params" />
</Folder>
<Folder name="docs">
<Folder name="misc_docs">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\misc_docs\conf.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\misc_docs\index.rst" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\misc_docs\make.bat" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\misc_docs\Makefile_for_bat" />
</Folder>
<Folder name="_templates">
<Folder name="_static">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\_templates\_static\.keep" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\_templates\.keep" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\build-github.zsh" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\conf.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\deployment.rst" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\environments.rst" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\index.rst" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\install.rst" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\Makefile" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\docs\__init__.py" />
</Folder>
<Folder name="lib">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\lib\.keep" />
</Folder>
<Folder name="log" />
<Folder name="misc">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\dashboard.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\jb_fabfile.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\menu.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\new_fabfile.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\README.md" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\vagrantconfig.yaml" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\misc\Vagrantfile" />
</Folder>
<Folder name="requirements">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\requirements\compiled.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\requirements\local.txt" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\requirements\production.txt" />
</Folder>
<Folder name="settings">
<Folder name="apache">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\apache\django.wsgi" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\apache\wsgi.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\apache\__init__.py" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\base.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\dev.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\local-dist.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\local.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\test.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\settings\__init__.py" />
</Folder>
<Folder name="static">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\static\.gitignore" />
</Folder>
<Folder name="templates">
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\templates\404.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\templates\500.html" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\templates\base.html" />
</Folder>
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\urls.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\wsgi.py" />
<File name="C:\Documents and Settings\johnb\Desktop\GIT\djdam\__init__.py" />
</Project>
<Project name="Scripts">
<File name="walkdir_fix_exiv2_csv.py" />
<File name="walkdir_RAW-File6_exiv2.py" />
<File name="walkdir_zimages_raw_mysqldb.py" />
<File name="consigRename2Fuller.py" />
<File name="magick_RAW_JPG_convmog.py" />
<File name="mtags_multifile_RAWJPG.py" />
<File name="mtags_singlefile.py" />
<File name="mtags_singlefile_RAW.py" />
</Project>
<Project name="DJDAM" />
</NotepadPlus>
|
|
"""Tests of http client with custom Connector"""
import asyncio
import http.cookies
import gc
import socket
import unittest
import ssl
import tempfile
import shutil
import os.path
from unittest import mock
import aiohttp
from aiohttp import web
from aiohttp import client
from aiohttp.client import ClientResponse
from aiohttp.connector import Connection
class TestBaseConnector(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.transport = unittest.mock.Mock()
self.stream = aiohttp.StreamParser()
self.response = ClientResponse('get', 'http://base-conn.org')
self.response._post_init(self.loop)
def tearDown(self):
self.response.close()
self.loop.close()
gc.collect()
def test_del(self):
conn = aiohttp.BaseConnector(loop=self.loop)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
with self.assertWarns(ResourceWarning):
del conn
gc.collect()
self.assertFalse(conns_impl)
transp.close.assert_called_with()
msg = {'connector': unittest.mock.ANY, # conn was deleted
'connections': unittest.mock.ANY,
'message': 'Unclosed connector'}
if self.loop.get_debug():
msg['source_traceback'] = unittest.mock.ANY
exc_handler.assert_called_with(self.loop, msg)
def test_del_with_scheduled_cleanup(self):
conn = aiohttp.BaseConnector(loop=self.loop, keepalive_timeout=0.01)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
conn._start_cleanup_task()
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
with self.assertWarns(ResourceWarning):
del conn
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
gc.collect()
self.assertFalse(conns_impl)
transp.close.assert_called_with()
msg = {'connector': unittest.mock.ANY, # conn was deleted
'message': 'Unclosed connector'}
if self.loop.get_debug():
msg['source_traceback'] = unittest.mock.ANY
exc_handler.assert_called_with(self.loop, msg)
def test_del_with_closed_loop(self):
conn = aiohttp.BaseConnector(loop=self.loop)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
conn._start_cleanup_task()
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
self.loop.close()
with self.assertWarns(ResourceWarning):
del conn
gc.collect()
self.assertFalse(conns_impl)
self.assertFalse(transp.close.called)
self.assertTrue(exc_handler.called)
def test_del_empty_connector(self):
conn = aiohttp.BaseConnector(loop=self.loop)
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
del conn
self.assertFalse(exc_handler.called)
def test_create_conn(self):
@asyncio.coroutine
def go():
conn = aiohttp.BaseConnector(loop=self.loop)
with self.assertRaises(NotImplementedError):
yield from conn._create_connection(object())
self.loop.run_until_complete(go())
@unittest.mock.patch('aiohttp.connector.asyncio')
def test_ctor_loop(self, asyncio):
session = aiohttp.BaseConnector()
self.assertIs(session._loop, asyncio.get_event_loop.return_value)
def test_close(self):
tr = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertFalse(conn.closed)
conn._conns[1] = [(tr, object(), object())]
conn.close()
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
self.assertTrue(conn.closed)
def test_get(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertEqual(conn._get(1), (None, None))
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._conns[1] = [(tr, proto, self.loop.time())]
self.assertEqual(conn._get(1), (tr, proto))
conn.close()
def test_get_expired(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertEqual(conn._get(1), (None, None))
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._conns[1] = [(tr, proto, self.loop.time() - 1000)]
self.assertEqual(conn._get(1), (None, None))
self.assertFalse(conn._conns)
conn.close()
def test_release(self):
self.loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=self.loop)
conn._start_cleanup_task = unittest.mock.Mock()
req = unittest.mock.Mock()
resp = req.response = unittest.mock.Mock()
resp._should_close = False
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns[1][0], (tr, proto, 10))
self.assertTrue(conn._start_cleanup_task.called)
conn.close()
def test_release_close(self):
with self.assertWarns(DeprecationWarning):
conn = aiohttp.BaseConnector(share_cookies=True, loop=self.loop)
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
cookies = resp.cookies = http.cookies.SimpleCookie()
cookies['c1'] = 'cookie1'
cookies['c2'] = 'cookie2'
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
def test_get_pop_empty_conns(self):
# see issue #473
conn = aiohttp.BaseConnector(loop=self.loop)
key = ('127.0.0.1', 80, False)
conn._conns[key] = []
tr, proto = conn._get(key)
self.assertEqual((None, None), (tr, proto))
self.assertFalse(conn._conns)
def test_release_close_do_not_add_to_pool(self):
# see issue #473
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
key = ('127.0.0.1', 80, False)
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertFalse(conn._conns)
def test_release_close_do_not_delete_existing_connections(self):
key = ('127.0.0.1', 80, False)
tr1, proto1 = unittest.mock.Mock(), unittest.mock.Mock()
with self.assertWarns(DeprecationWarning):
conn = aiohttp.BaseConnector(share_cookies=True, loop=self.loop)
conn._conns[key] = [(tr1, proto1, 1)]
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._acquired[key].add(tr1)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns[key], [(tr1, proto1, 1)])
self.assertTrue(tr.close.called)
conn.close()
def test_release_not_started(self):
self.loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
req.response = None
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns, {1: [(tr, proto, 10)]})
self.assertFalse(tr.close.called)
conn.close()
def test_release_not_opened(self):
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
req.response = unittest.mock.Mock()
req.response.message = None
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertTrue(tr.close.called)
def test_connect(self):
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(loop=self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = self.loop.run_until_complete(conn.connect(Req()))
self.assertFalse(conn._create_connection.called)
self.assertEqual(connection._transport, tr)
self.assertEqual(connection._protocol, proto)
self.assertIsInstance(connection, Connection)
connection.close()
def test_connect_timeout(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(loop=self.loop)
conn._create_connection.return_value.set_exception(
asyncio.TimeoutError())
with self.assertRaises(aiohttp.ClientTimeoutError):
req = unittest.mock.Mock()
self.loop.run_until_complete(conn.connect(req))
def test_connect_oserr(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(loop=self.loop)
err = OSError(1, 'permission error')
conn._create_connection.return_value.set_exception(err)
with self.assertRaises(aiohttp.ClientOSError) as ctx:
req = unittest.mock.Mock()
self.loop.run_until_complete(conn.connect(req))
self.assertEqual(1, ctx.exception.errno)
self.assertTrue(ctx.exception.strerror.startswith('Cannot connect to'))
self.assertTrue(ctx.exception.strerror.endswith('[permission error]'))
def test_start_cleanup_task(self):
loop = unittest.mock.Mock()
loop.time.return_value = 1.5
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
self.assertIsNone(conn._cleanup_handle)
conn._start_cleanup_task()
self.assertIsNotNone(conn._cleanup_handle)
loop.call_at.assert_called_with(
12, conn._cleanup)
def test_cleanup(self):
testset = {
1: [(unittest.mock.Mock(), unittest.mock.Mock(), 10),
(unittest.mock.Mock(), unittest.mock.Mock(), 300),
(None, unittest.mock.Mock(), 300)],
}
testset[1][0][1].is_connected.return_value = True
testset[1][1][1].is_connected.return_value = False
loop = unittest.mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector(loop=loop)
conn._conns = testset
existing_handle = conn._cleanup_handle = unittest.mock.Mock()
conn._cleanup()
self.assertTrue(existing_handle.cancel.called)
self.assertEqual(conn._conns, {})
self.assertIsNone(conn._cleanup_handle)
def test_cleanup2(self):
testset = {1: [(unittest.mock.Mock(), unittest.mock.Mock(), 300)]}
testset[1][0][1].is_connected.return_value = True
loop = unittest.mock.Mock()
loop.time.return_value = 300.1
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
self.assertEqual(conn._conns, testset)
self.assertIsNotNone(conn._cleanup_handle)
loop.call_at.assert_called_with(
310, conn._cleanup)
conn.close()
def test_cleanup3(self):
testset = {1: [(unittest.mock.Mock(), unittest.mock.Mock(), 290.1),
(unittest.mock.Mock(), unittest.mock.Mock(), 305.1)]}
testset[1][0][1].is_connected.return_value = True
loop = unittest.mock.Mock()
loop.time.return_value = 308.5
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
self.assertEqual(conn._conns, {1: [testset[1][1]]})
self.assertIsNotNone(conn._cleanup_handle)
loop.call_at.assert_called_with(
316, conn._cleanup)
conn.close()
def test_tcp_connector_ctor(self):
conn = aiohttp.TCPConnector(loop=self.loop)
self.assertTrue(conn.verify_ssl)
self.assertIs(conn.fingerprint, None)
with self.assertWarns(DeprecationWarning):
self.assertFalse(conn.resolve)
self.assertFalse(conn.use_dns_cache)
self.assertEqual(conn.family, 0)
with self.assertWarns(DeprecationWarning):
self.assertEqual(conn.resolved_hosts, {})
self.assertEqual(conn.resolved_hosts, {})
def test_tcp_connector_ctor_fingerprint_valid(self):
valid = b'\xa2\x06G\xad\xaa\xf5\xd8\\J\x99^by;\x06='
conn = aiohttp.TCPConnector(loop=self.loop, fingerprint=valid)
self.assertEqual(conn.fingerprint, valid)
def test_tcp_connector_fingerprint_invalid(self):
invalid = b'\x00'
with self.assertRaises(ValueError):
aiohttp.TCPConnector(loop=self.loop, fingerprint=invalid)
def test_tcp_connector_clear_resolved_hosts(self):
conn = aiohttp.TCPConnector(loop=self.loop)
info = object()
conn._cached_hosts[('localhost', 123)] = info
conn._cached_hosts[('localhost', 124)] = info
conn.clear_resolved_hosts('localhost', 123)
self.assertEqual(
conn.resolved_hosts, {('localhost', 124): info})
conn.clear_resolved_hosts('localhost', 123)
self.assertEqual(
conn.resolved_hosts, {('localhost', 124): info})
with self.assertWarns(DeprecationWarning):
conn.clear_resolved_hosts()
self.assertEqual(conn.resolved_hosts, {})
def test_tcp_connector_clear_dns_cache(self):
conn = aiohttp.TCPConnector(loop=self.loop)
info = object()
conn._cached_hosts[('localhost', 123)] = info
conn._cached_hosts[('localhost', 124)] = info
conn.clear_dns_cache('localhost', 123)
self.assertEqual(
conn.cached_hosts, {('localhost', 124): info})
conn.clear_dns_cache('localhost', 123)
self.assertEqual(
conn.cached_hosts, {('localhost', 124): info})
conn.clear_dns_cache()
self.assertEqual(conn.cached_hosts, {})
def test_tcp_connector_clear_dns_cache_bad_args(self):
conn = aiohttp.TCPConnector(loop=self.loop)
with self.assertRaises(ValueError):
conn.clear_dns_cache('localhost')
def test_ambiguous_verify_ssl_and_ssl_context(self):
with self.assertRaises(ValueError):
aiohttp.TCPConnector(
verify_ssl=False,
ssl_context=ssl.SSLContext(ssl.PROTOCOL_SSLv23),
loop=self.loop)
def test_dont_recreate_ssl_context(self):
conn = aiohttp.TCPConnector(loop=self.loop)
ctx = conn.ssl_context
self.assertIs(ctx, conn.ssl_context)
def test_respect_precreated_ssl_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
conn = aiohttp.TCPConnector(loop=self.loop, ssl_context=ctx)
self.assertIs(ctx, conn.ssl_context)
def test_close_twice(self):
tr = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
conn._conns[1] = [(tr, object(), object())]
conn.close()
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
self.assertTrue(conn.closed)
conn._conns = 'Invalid' # fill with garbage
conn.close()
self.assertTrue(conn.closed)
def test_close_cancels_cleanup_handle(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._start_cleanup_task()
self.assertIsNotNone(conn._cleanup_handle)
conn.close()
self.assertIsNone(conn._cleanup_handle)
def test_ctor_with_default_loop(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.addCleanup(loop.close)
self.addCleanup(asyncio.set_event_loop, None)
conn = aiohttp.BaseConnector()
self.assertIs(loop, conn._loop)
def test_connect_with_limit(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(
loop=self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection1 = yield from conn.connect(Req())
self.assertEqual(connection1._transport, tr)
self.assertEqual(1, len(conn._acquired[key]))
acquired = False
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(Req())
acquired = True
self.assertEqual(1, len(conn._acquired[key]))
connection2.release()
task = asyncio.async(f(), loop=self.loop)
yield from asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(acquired)
connection1.release()
yield from asyncio.sleep(0, loop=self.loop)
self.assertTrue(acquired)
yield from task
conn.close()
self.loop.run_until_complete(go())
def test_connect_with_limit_cancelled(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(
loop=self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = yield from conn.connect(Req())
self.assertEqual(connection._transport, tr)
self.assertEqual(1, len(conn._acquired[key]))
with self.assertRaises(asyncio.TimeoutError):
# limit exhausted
yield from asyncio.wait_for(conn.connect(Req()), 0.01,
loop=self.loop)
connection.close()
self.loop.run_until_complete(go())
def test_connect_with_limit_concurrent(self):
@asyncio.coroutine
def go():
proto = unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock(_should_close=False)
max_connections = 2
num_connections = 0
conn = aiohttp.BaseConnector(limit=max_connections, loop=self.loop)
# Use a real coroutine for _create_connection; a mock would mask
# problems that only happen when the method yields.
@asyncio.coroutine
def create_connection(req):
nonlocal num_connections
num_connections += 1
yield from asyncio.sleep(0, loop=self.loop)
# Make a new transport mock each time because acquired
# transports are stored in a set. Reusing the same object
# messes with the count.
tr = unittest.mock.Mock()
return tr, proto
conn._create_connection = create_connection
# Simulate something like a crawler. It opens a connection, does
# something with it, closes it, then creates tasks that make more
# connections and waits for them to finish. The crawler is started
# with multiple concurrent requests and stops when it hits a
# predefined maximum number of requests.
max_requests = 10
num_requests = 0
start_requests = max_connections + 1
@asyncio.coroutine
def f(start=True):
nonlocal num_requests
if num_requests == max_requests:
return
num_requests += 1
if not start:
connection = yield from conn.connect(Req())
yield from asyncio.sleep(0, loop=self.loop)
connection.release()
tasks = [
asyncio.async(f(start=False), loop=self.loop)
for i in range(start_requests)
]
yield from asyncio.wait(tasks, loop=self.loop)
yield from f()
conn.close()
self.assertEqual(max_connections, num_connections)
self.loop.run_until_complete(go())
def test_close_with_acquired_connection(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = asyncio.Future(
loop=self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = yield from conn.connect(Req())
self.assertEqual(1, len(conn._acquired))
conn.close()
self.assertEqual(0, len(conn._acquired))
self.assertTrue(conn.closed)
tr.close.assert_called_with()
self.assertFalse(connection.closed)
connection.close()
self.assertTrue(connection.closed)
self.loop.run_until_complete(go())
def test_default_force_close(self):
connector = aiohttp.BaseConnector(loop=self.loop)
self.assertFalse(connector.force_close)
def test_limit_property(self):
conn = aiohttp.BaseConnector(loop=self.loop, limit=15)
self.assertEqual(15, conn.limit)
conn.close()
def test_limit_property_default(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertIsNone(conn.limit)
conn.close()
class TestHttpClientConnector(unittest.TestCase):
def setUp(self):
self.handler = None
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
if self.handler:
self.loop.run_until_complete(self.handler.finish_connections())
self.loop.stop()
self.loop.run_forever()
self.loop.close()
gc.collect()
def find_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
port = s.getsockname()[1]
s.close()
return port
@asyncio.coroutine
def create_server(self, method, path, handler):
app = web.Application(loop=self.loop)
app.router.add_route(method, path, handler)
port = self.find_unused_port()
self.handler = app.make_handler(keep_alive_on=False)
srv = yield from self.loop.create_server(
self.handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port) + path
self.addCleanup(srv.close)
return app, srv, url
@asyncio.coroutine
def create_unix_server(self, method, path, handler):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
app = web.Application(loop=self.loop)
app.router.add_route(method, path, handler)
self.handler = app.make_handler(keep_alive_on=False, access_log=None)
sock_path = os.path.join(tmpdir, 'socket.sock')
srv = yield from self.loop.create_unix_server(
self.handler, sock_path)
url = "http://127.0.0.1" + path
self.addCleanup(srv.close)
return app, srv, url, sock_path
def test_tcp_connector(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url = self.loop.run_until_complete(
self.create_server('get', '/', handler))
conn = aiohttp.TCPConnector(loop=self.loop)
r = self.loop.run_until_complete(
aiohttp.request(
'get', url,
connector=conn,
loop=self.loop))
self.loop.run_until_complete(r.release())
self.assertEqual(r.status, 200)
r.close()
conn.close()
def test_tcp_connector_uses_provided_local_addr(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url = self.loop.run_until_complete(
self.create_server('get', '/', handler)
)
port = self.find_unused_port()
conn = aiohttp.TCPConnector(loop=self.loop,
local_addr=('127.0.0.1', port))
r = self.loop.run_until_complete(
aiohttp.request(
'get', url,
connector=conn
))
self.loop.run_until_complete(r.release())
first_conn = next(iter(conn._conns.values()))[0][0]
self.assertEqual(first_conn._sock.getsockname(), ('127.0.0.1', port))
r.close()
conn.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'requires unix')
def test_unix_connector(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url, sock_path = self.loop.run_until_complete(
self.create_unix_server('get', '/', handler))
connector = aiohttp.UnixConnector(sock_path, loop=self.loop)
self.assertEqual(sock_path, connector.path)
r = self.loop.run_until_complete(
client.request(
'get', url,
connector=connector,
loop=self.loop))
self.assertEqual(r.status, 200)
r.close()
def test_connector_cookie_deprecation(self):
with self.assertWarnsRegex(DeprecationWarning,
"^Using `share_cookies` is deprecated"):
conn = aiohttp.TCPConnector(share_cookies=True, loop=self.loop)
conn.close()
def test_ambiguous_ctor_params(self):
with self.assertRaises(ValueError):
aiohttp.TCPConnector(resolve=True, use_dns_cache=False,
loop=self.loop)
def test_both_resolve_and_use_dns_cache(self):
conn = aiohttp.TCPConnector(resolve=True, use_dns_cache=True,
loop=self.loop)
self.assertTrue(conn.use_dns_cache)
with self.assertWarns(DeprecationWarning):
self.assertTrue(conn.resolve)
def test_both_use_dns_cache_only(self):
conn = aiohttp.TCPConnector(use_dns_cache=True,
loop=self.loop)
self.assertTrue(conn.use_dns_cache)
with self.assertWarns(DeprecationWarning):
self.assertTrue(conn.resolve)
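# --- Editor's sketch (not part of the original test module) ---
# A minimal, hedged illustration of the pattern exercised above: a
# TCPConnector with a connection limit is shared across requests via
# aiohttp.request() and then closed explicitly. The helper name and the
# URL below are made up for illustration; the calls mirror the old-style
# API already used elsewhere in this file.
def _example_connector_usage():
    loop = asyncio.new_event_loop()
    conn = aiohttp.TCPConnector(limit=5, loop=loop)

    @asyncio.coroutine
    def fetch(url):
        # request through the shared connector, read the body, release
        resp = yield from aiohttp.request('get', url, connector=conn, loop=loop)
        body = yield from resp.read()
        resp.close()
        return body

    try:
        # hypothetical local endpoint, for illustration only
        return loop.run_until_complete(fetch('http://127.0.0.1:8080/'))
    finally:
        conn.close()
        loop.close()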
|
|
#!/usr/bin/env python
import effess
import os
import os.path as p
import re
import shutil
import string
import sys
import tempfile
import types
import uuid
import subprocess
from app import App
from xml.sax.saxutils import quoteattr
win32_dir = p.join(p.dirname(__file__), 'win32')
if p.exists(win32_dir):
sys.path.append(win32_dir)
import PyRTF
class Win32App(App):
def stage(self, stage_dir, bundle):
App.stage(self, stage_dir, bundle=bundle)
contents = self.get_contents_dir()
self.env.log(u'Copying kboot.exe to %s' % contents)
self.executable_path = p.join(contents, '%s.exe' % self.name)
effess.copy(p.join(self.sdk_dir, 'kboot.exe'), self.executable_path)
# The .installed file for Windows should always exist,
# since we only ever install via the MSI installer.
open(p.join(contents, '.installed'), 'a').close()
self.set_executable_icon()
def set_executable_icon(self):
if not hasattr(self, 'image'):
return
icon_path = str(p.join(self.contents, 'Resources', self.image))
if not p.exists(icon_path):
return
if not icon_path.lower().endswith('.ico'):
# Assume that ImageMagick's convert.exe is on the PATH for now. This will
# change once the packaging server setup has been improved (running on drive C:\).
convert = 'convert.exe'
try:
subprocess.check_call([convert, '-version'], shell=True)
except subprocess.CalledProcessError:
# If Magick is not installed, skip icon conversion
print 'ImageMagick not installed, aborting icon conversion.'
return
temp_dir = tempfile.mkdtemp()
new_ico_file = p.join(self.contents, 'Resources', '_converted_icon.ico')
ico_command = [convert]
for size in [16, 32, 64, 128]:
resolution = "%dx%d" % (size, size)
args = [convert, icon_path, '-resize', resolution + "^",
"-gravity", "center", "-background", "transparent",
"-extent", resolution, "%s\\%d.png" % (temp_dir, size)]
subprocess.check_call(args, shell=True)
ico_command.append('%s\\%d.png' % (temp_dir, size))
ico_command.append(new_ico_file)
subprocess.check_call(ico_command, shell=True)
icon_path = new_ico_file
self.env.run('%s "%s" "%s"' % (p.join(self.sdk_dir, 'ReplaceVistaIcon.exe'), self.executable_path, icon_path))
def package(self, package_dir, bundle):
contents = self.get_contents_dir()
target = p.join(package_dir, self.name + '.msi')
wxs_path = p.join(package_dir, 'installer.wxs')
template_args = {}
try:
template_args['app_name'] = quoteattr(self.name)
app_version = self.version
version_parts = len(app_version.split("."))
if version_parts < 3:
app_version += ('.0' * (3 - version_parts))  # pad to MAJOR.MINOR.PATCH
template_args['app_version'] = quoteattr(app_version)
template_args['app_guid'] = quoteattr(self.guid)
template_args['app_id'] = quoteattr(self.id)
template_args['app_publisher'] = quoteattr('None provided')
if hasattr(self, 'publisher'):
template_args['app_publisher'] = quoteattr(self.publisher)
template_args['app_description'] = quoteattr('None provided')
if hasattr(self, 'description'):
template_args['app_description'] = quoteattr(self.description)
template_args['app_exe'] = quoteattr(p.join(contents, self.name + '.exe'))
license_rtf_path = p.join(contents, 'LICENSE.rtf')
self.write_license_rtf(license_rtf_path)
template_args['license_file'] = quoteattr(license_rtf_path)
template_args['crt_msm'] = quoteattr(p.join(self.sdk_dir, 'installer',
'Microsoft_VC80_CRT_x86.msm'))
template_args['titanium_installer_dll'] = quoteattr(p.join(
self.sdk_dir, 'installer', 'titanium_installer.dll'))
template_args['dialog_bmp'] = quoteattr(self.get_installer_image(
'dialog-bmp', p.join(self.sdk_dir, 'default_dialog.bmp')))
template_args['banner_bmp'] = quoteattr(self.get_installer_image(
'banner-bmp', p.join(self.sdk_dir, 'default_banner.bmp')))
(app_language, app_codepage) = self.get_app_language()
template_args['app_language'] = quoteattr(app_language)
template_args['app_codepage'] = quoteattr(app_codepage)
root_dir = Directory(self, '.', is_root=True)
walk_dir(contents, root_dir)
template_args["app_dirs"] = root_dir.to_xml()
template_args['component_refs'] = "\n"
for id in Directory.component_ids:
template_args['component_refs'] += \
'\t\t<ComponentRef Id="' + id + '"/>\n'
template_args['dependencies'] = ''
if not bundle:
template_args['dependencies'] = ";".join(self.encode_manifest())
# Render the WXS template and write it to the WXS file
# after converting all template arguments to UTF-8
for (key, value) in template_args.iteritems():
if type(template_args[key]) == types.UnicodeType:
template_args[key] = template_args[key].encode('utf8')
template = string.Template(open(
p.join(self.sdk_dir, 'installer_template.wxs')).read())
wxs_file = open(wxs_path, 'w+')
wxs_text = template.safe_substitute(template_args)
wxs_file.write(wxs_text)
wxs_file.close()
self.env.log(wxs_text.decode('utf8'))
wix_bin_dir = self.get_wix_bin_directory()
self.env.run([
p.join(wix_bin_dir, 'candle.exe'),
'-out',
'%s.wixobj' % wxs_path,
wxs_path,
])
self.env.run([
p.join(wix_bin_dir, 'light.exe'),
'-ext', 'WixUIExtension',
'-out', '%s' % target,
'%s.wixobj' % wxs_path
])
finally:
self.env.ignore_errors(lambda: os.unlink(wxs_path))
self.env.ignore_errors(lambda: os.unlink(wxs_path + '.wixobj'))
def get_wix_bin_directory(self):
path = p.join("C:\\", "Program Files", "Windows Installer XML v3", "bin")
if not p.exists(path):
path = p.join("C:\\", "Program Files (x86)", "Windows Installer XML v3", "bin")
if not p.exists(path):
raise Exception('Could not find WiX v3 bin directory')
return path
def get_app_language(self):
self.language = 'en-us'
self.get_tiapp_element_as_prop('language', 'language')
langs = {
"cs-cz": ("1029", "1250"),
"nl-nl": ("1043", "1252"),
"en-us": ("1033", "1252"),
"fr-fr": ("1036", "1252"),
"de-de": ("1031", "1252"),
"hu-hu": ("1038", "1250"),
"it-it": ("1040", "1252"),
"ja-jp": ("1041", "932" ),
"pl-pl": ("1045", "1250"),
"ru-ru": ("1049", "1251"),
"es-es": ("3082", "1252"),
"uk-ua": ("1058", "1251")
}
if self.language in langs:
return langs[self.language]
else:
return langs['en-us']
def write_license_rtf(self, rtf_path):
# PyRTF and PyRTF-ng do not appear to support Unicode strings.
# When they do, this code should read the file with the codecs
# module and create a Unicode RTF.
if not p.exists(rtf_path):
license_file = p.join(self.get_contents_dir(), 'LICENSE.txt')
if p.exists(license_file):
license_text = open(license_file, 'r').read()
else:
license_text = 'This software was not shipped with a license.'
doc = PyRTF.Document()
section = PyRTF.Section()
doc.Sections.append(section)
for paragraph in re.split("\n\n|\r\n\r\n", license_text):
section.append(paragraph)
renderer = PyRTF.Renderer()
renderer.Write(doc, open(rtf_path, 'w'))
def encode_manifest(self):
output = []
def write_line(str):
output.append(str.encode('utf8'))
write_line(u'#appname:' + self.name)
write_line(u'#appid:' + self.id)
write_line(u'#guid:' + self.guid)
write_line(u'#version:' + self.version)
if hasattr(self, 'image'):
write_line(u'#image:' + self.image)
if hasattr(self, 'publisher'):
write_line(u'#publisher:' + self.publisher)
if hasattr(self, 'description'):
write_line(u'#description:' + self.description)
if hasattr(self, 'url'):
write_line(u'#url:' + self.url)
if hasattr(self, 'loglevel'):
write_line(u'#loglevel:' + self.loglevel)
if hasattr(self, 'stream'):
write_line(u'#stream:' + self.stream)
write_line(u'runtime:' + self.runtime_version)
if hasattr(self, 'sdk_version'):
write_line(u'sdk:' + self.sdk_version)
if hasattr(self, 'mobilesdk_version'):
write_line(u'mobilesdk:' + self.mobilesdk_version)
for module in self.modules:
write_line(module[0] + ':' + module[1])
return output
file_template = """
%(indent)s <File Id="%(id)s_file" Source=%(full_path)s KeyPath="%(keypath)s">
%(shortcuts)s
%(indent)s </File>
"""
shortcut_template = """
<Shortcut Id="%(id)s" Directory="%(directory)s" Name="%(name)s"
WorkingDirectory="%(working_dir)s" Icon="ApplicationIcon.exe"
IconIndex="0" Advertise="yes" />
"""
def id_generator():
file_id = 1
while True:
yield "_" + str(file_id)
file_id += 1
unique_ids = id_generator()
class Shortcut(object):
@classmethod
def create_start_menu_shortcut(cls, app):
return Shortcut("ProgramMenuDir", app.name, "INSTALLDIR")
@classmethod
def create_desktop_shortcut(cls, app):
return Shortcut("DesktopFolder", app.name, "INSTALLDIR")
def __init__(self, directory, name, working_dir):
self.directory = directory
self.name = name
self.working_dir = working_dir
def to_xml(self):
return shortcut_template % {
"id": unique_ids.next(),
"directory": self.directory,
"name": self.name,
"working_dir": self.working_dir}
class Directory(object):
component_ids = []
def __init__(self, app, relative_path, is_root=False):
self.app = app
self.relative_path = relative_path
self.name = os.path.basename(relative_path)
self.files = []
self.dirs = []
self.is_root = is_root
def add_file(self, relative_path, full_path, shortcuts=None):
file = {
"filename": quoteattr(os.path.basename(relative_path)),
"relative_path": relative_path,
"id": unique_ids.next(),
"full_path": quoteattr(full_path)
}
# The File element containing these shortcuts must be the KeyPath
# of its containing component for shortcut advertising to work properly.
file['shortcuts'] = ''
file['keypath'] = 'no'
if relative_path == self.app.name + ".exe":
file['shortcuts'] += Shortcut.create_start_menu_shortcut(self.app).to_xml()
file['shortcuts'] += Shortcut.create_desktop_shortcut(self.app).to_xml()
file['keypath'] = 'yes'
self.files.append(file)
def add_dir(self, dir):
self.dirs.append(dir)
def to_xml(self, indent=4):
xml = ""
if not self.is_root:
xml += ("\t" * indent) + '<Directory Id="%s" Name="%s">\n' % \
(unique_ids.next(), self.name)
if len(self.files) > 0:
component_id = unique_ids.next()
Directory.component_ids.append(component_id)
xml += '<Component Id="%s" Guid="%s">' % \
(component_id, str(uuid.uuid4()).upper())
for file in self.files:
file['indent'] = "\t" * indent
xml += file_template % file
xml += '</Component>'
for dir in self.dirs:
xml += dir.to_xml(indent+1)
if not self.is_root:
xml += ("\t" * indent) + "</Directory>\n"
return xml
def walk_dir(path, current_dir, relative_path=""):
for file in os.listdir(path):
if file == "*" or file == "*.*" or file == "." or file == "..":
continue
file_relative_path = os.path.join(relative_path, file)
file_full_path = os.path.join(path, file)
if relative_path == "":
file_relative_path = file
if os.path.isfile(file_full_path):
current_dir.add_file(file_relative_path, file_full_path)
else:
newdir = Directory(current_dir.app, file_relative_path)
current_dir.add_dir(newdir)
walk_dir(file_full_path, newdir, file_relative_path)
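# --- Editor's sketch (not part of the original script) ---
# A minimal, hedged illustration of the template step performed by
# Win32App.package() above: values are XML-escaped with quoteattr() and
# substituted into a WiX .wxs template via string.Template.safe_substitute().
# The one-line template here is a made-up fragment; the real template ships
# with the SDK as 'installer_template.wxs'.
def _render_wxs_fragment(app_name, app_version):
    template = string.Template(
        '<Product Name=$app_name Version=$app_version />')
    args = {
        'app_name': quoteattr(app_name),
        'app_version': quoteattr(app_version),
    }
    return template.safe_substitute(args)
# _render_wxs_fragment('My App', '1.0.0') returns
#   '<Product Name="My App" Version="1.0.0" />'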
|
|
import os
from io import StringIO
import shutil
from unittest import mock
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from mapentity.tests.factories import UserFactory, SuperUserFactory
from mapentity.views.generic import MapEntityList
from geotrek.common.mixins import CustomColumnsMixin
from geotrek.common.parsers import Parser
from geotrek.common.tasks import launch_sync_rando
from geotrek.trekking.models import Path
class ViewsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory.create(username='homer', password='dooh')
def setUp(self):
self.client.force_login(user=self.user)
def test_settings_json(self):
url = reverse('common:settings_json')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_admin_check_extents(self):
url = reverse('common:check_extents')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.user.is_superuser = True
self.user.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(COLUMNS_LISTS={})
@mock.patch('geotrek.common.mixins.logger')
def test_custom_columns_mixin_error_log(self, mock_logger):
# Create view where columns fields are omitted
class MissingColumns(CustomColumnsMixin, MapEntityList):
model = Path
MissingColumns()
# Assert logger raises error message
message = "Cannot build columns for class MissingColumns.\nPlease define on this class either : \n - a field 'columns'\nOR \n - two fields 'mandatory_columns' AND 'default_extra_columns'"
mock_logger.error.assert_called_with(message)
class ViewsImportTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory.create(username='homer', password='dooh')
def setUp(self):
self.client.force_login(user=self.user)
def test_import_form_access(self):
url = reverse('common:import_dataset')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_import_update_access(self):
url = reverse('common:import_update_json')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_import_from_file_good_file(self):
self.user.is_superuser = True
self.user.save()
with open('geotrek/common/tests/data/test.zip', 'rb') as real_archive:
url = reverse('common:import_dataset')
response_real = self.client.post(
url, {
'upload-file': 'Upload',
'with-file-parser': '1',
'with-file-zipfile': real_archive,
'with-file-encoding': 'UTF-8'
}
)
self.assertEqual(response_real.status_code, 200)
self.assertNotContains(response_real, "File must be of ZIP type.")
def test_import_from_file_bad_file(self):
self.user.is_superuser = True
self.user.save()
Parser.label = "Test"
fake_archive = SimpleUploadedFile(
"file.doc", b"file_content", content_type="application/msword")
url = reverse('common:import_dataset')
response_fake = self.client.post(
url, {
'upload-file': 'Upload',
'with-file-parser': '1',
'with-file-zipfile': fake_archive,
'with-file-encoding': 'UTF-8'
}
)
self.assertEqual(response_fake.status_code, 200)
self.assertContains(response_fake, "File must be of ZIP type.", 1)
Parser.label = None
def test_import_form_no_parser_no_superuser(self):
self.user.is_superuser = False
self.user.save()
real_archive = open('geotrek/common/tests/data/test.zip', 'rb+')
url = reverse('common:import_dataset')
response_real = self.client.post(
url, {
'upload-file': 'Upload',
'with-file-parser': '1',
'with-file-zipfile': real_archive,
'with-file-encoding': 'UTF-8'
}
)
self.assertEqual(response_real.status_code, 200)
self.assertNotContains(response_real, '<form method="post"')
def test_import_from_web_bad_parser(self):
self.user.is_superuser = True
self.user.save()
url = reverse('common:import_dataset')
response_real = self.client.post(
url, {
'import-web': 'Upload',
'without-file-parser': '99',
}
)
self.assertEqual(response_real.status_code, 200)
self.assertContains(response_real, "Select a valid choice. 99 is not one of the available choices.")
# There is no parser available for user not superuser
def test_import_from_web_good_parser(self):
self.user.is_superuser = True
self.user.save()
url = reverse('common:import_dataset')
real_key = self.client.get(url).context['form_without_file'].fields['parser'].choices[0][0]
response_real = self.client.post(
url, {
'import-web': 'Upload',
'without-file-parser': real_key,
}
)
self.assertEqual(response_real.status_code, 200)
self.assertNotContains(response_real, "Select a valid choice. {real_key} "
"is not one of the available choices.".format(real_key=real_key))
class SyncRandoViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.super_user = SuperUserFactory.create(username='admin', password='super')
cls.simple_user = User.objects.create_user(username='homer', password='doooh')
def setUp(self):
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
if os.path.exists(os.path.join('var', 'tmp')):
shutil.rmtree(os.path.join('var', 'tmp'))
def test_get_sync_superuser(self):
self.client.login(username='admin', password='super')
response = self.client.get(reverse('common:sync_randos_view'))
self.assertEqual(response.status_code, 200)
def test_post_sync_superuser(self):
"""
Test that a superuser POST launches the sync.
"""
self.client.login(username='admin', password='super')
response = self.client.post(reverse('common:sync_randos'), data={})
self.assertRedirects(response, '/commands/syncview')
def test_get_sync_simpleuser(self):
self.client.login(username='homer', password='doooh')
response = self.client.get(reverse('common:sync_randos_view'))
self.assertRedirects(response, '/login/?next=/commands/syncview')
def test_post_sync_simpleuser(self):
"""
Test that a simple (non-super) user POST is redirected to login.
"""
self.client.login(username='homer', password='doooh')
response = self.client.post(reverse('common:sync_randos'), data={})
self.assertRedirects(response, '/login/?next=/commands/sync')
def test_get_sync_states_superuser(self):
self.client.login(username='admin', password='super')
response = self.client.post(reverse('common:sync_randos_state'), data={})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'[]')
def test_get_sync_states_simpleuser(self):
self.client.login(username='homer', password='doooh')
response = self.client.post(reverse('common:sync_randos_state'), data={})
self.assertRedirects(response, '/login/?next=/commands/statesync/')
@mock.patch('sys.stdout', new_callable=StringIO)
@override_settings(CELERY_ALWAYS_EAGER=False,
SYNC_RANDO_ROOT='var/tmp', SYNC_RANDO_OPTIONS={'url': 'http://localhost:8000',
'skip_tiles': True, 'skip_pdf': True,
'skip_dem': True, 'skip_profile_png': True})
def test_get_sync_rando_states_superuser_with_sync_rando(self, mocked_stdout):
self.client.login(username='admin', password='super')
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
launch_sync_rando.apply()
response = self.client.post(reverse('common:sync_randos_state'), data={})
self.assertEqual(response.status_code, 200)
self.assertIn(b'"infos": "Sync ended"', response.content)
@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('geotrek.common.management.commands.sync_rando.Command.handle', return_value=None,
side_effect=Exception('This is a test'))
@override_settings(CELERY_ALWAYS_EAGER=False,
SYNC_RANDO_ROOT='tmp', SYNC_RANDO_OPTIONS={'url': 'http://localhost:8000',
'skip_tiles': True, 'skip_pdf': True,
'skip_dem': True, 'skip_profile_png': True})
def test_get_sync_rando_states_superuser_with_sync_mobile_fail(self, mocked_stdout, command):
self.client.login(username='admin', password='super')
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
launch_sync_rando.apply()
response = self.client.post(reverse('common:sync_randos_state'), data={})
self.assertEqual(response.status_code, 200)
self.assertIn(b'"exc_message": "This is a test"', response.content)
@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('geotrek.trekking.models.Trek.prepare_map_image')
@mock.patch('landez.TilesManager.tile', return_value=b'I am a png')
@override_settings(SYNC_RANDO_ROOT='var/tmp', SYNC_RANDO_OPTIONS={'url': 'http://localhost:8000', 'skip_tiles': False,
'skip_pdf': False,
'skip_dem': False, 'skip_profile_png': False})
def test_launch_sync_rando(self, mock_tile, mock_map_image, mocked_stdout):
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
task = launch_sync_rando.apply()
log = mocked_stdout.getvalue()
self.assertIn("Done", log)
self.assertEqual(task.status, "SUCCESS")
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
@mock.patch('geotrek.common.management.commands.sync_rando.Command.handle', return_value=None,
side_effect=Exception('This is a test'))
@mock.patch('sys.stdout', new_callable=StringIO)
def test_launch_sync_rando_fail(self, mocked_stdout, command):
task = launch_sync_rando.apply()
log = mocked_stdout.getvalue()
self.assertNotIn("Done", log)
self.assertNotIn('Sync ended', log)
self.assertEqual(task.status, "FAILURE")
@mock.patch('geotrek.common.management.commands.sync_rando.Command.handle', return_value=None,
side_effect=Exception('This is a test'))
@override_settings(SYNC_RANDO_ROOT='tmp')
@mock.patch('sys.stdout', new_callable=StringIO)
def test_launch_sync_rando_no_rando_root(self, mocked_stdout, command):
if os.path.exists('tmp'):
shutil.rmtree('tmp')
task = launch_sync_rando.apply()
log = mocked_stdout.getvalue()
self.assertNotIn("Done", log)
self.assertNotIn('Sync rando ended', log)
self.assertEqual(task.status, "FAILURE")
def tearDown(self):
if os.path.exists(os.path.join('var', 'tmp_sync_rando')):
shutil.rmtree(os.path.join('var', 'tmp_sync_rando'))
if os.path.exists(os.path.join('var', 'tmp')):
shutil.rmtree(os.path.join('var', 'tmp'))
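# --- Editor's sketch (not part of the original test module) ---
# The sync tests above capture command output by patching sys.stdout with a
# StringIO. A self-contained version of that pattern, with a hypothetical
# _noisy_task() standing in for launch_sync_rando.apply():
def _noisy_task():
    print("Done")

class StdoutCaptureExample(TestCase):
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_output_is_captured(self, mocked_stdout):
        _noisy_task()
        self.assertIn("Done", mocked_stdout.getvalue())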
|
|
"""Illustrates the same UPDATE into INSERT technique of ``versioned_rows.py``,
but also emits an UPDATE on the **old** row to affect a change in timestamp.
Also includes a :meth:`.SessionEvents.do_orm_execute` hook to limit queries
to only the most recent version.
"""
import datetime
import time
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import DateTime
from sqlalchemy import event
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import make_transient_to_detached
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import with_loader_criteria
Base = declarative_base()
# this will be the current time as the test runs
now = None
# in practice this would be a real "now" function
def current_time():
return now
class VersionedStartEnd(object):
start = Column(DateTime, primary_key=True)
end = Column(DateTime, primary_key=True)
def __init__(self, **kw):
# reduce some verbosity when we make a new object
kw.setdefault("start", current_time() - datetime.timedelta(days=3))
kw.setdefault("end", current_time() + datetime.timedelta(days=3))
super(VersionedStartEnd, self).__init__(**kw)
def new_version(self, session):
# our current identity key, which will be used on the "old"
# version of us to emit an UPDATE. this is just for assertion purposes
old_identity_key = inspect(self).key
# make sure self.start / self.end are not expired
self.id, self.start, self.end
# turn us into an INSERT
make_transient(self)
# make the "old" version of us, which we will turn into an
# UPDATE
old_copy_of_us = self.__class__(
id=self.id, start=self.start, end=self.end
)
# turn old_copy_of_us into an UPDATE
make_transient_to_detached(old_copy_of_us)
# the "old" object has our old identity key (that we no longer have)
assert inspect(old_copy_of_us).key == old_identity_key
# now put it back in the session
session.add(old_copy_of_us)
# now update the 'end' - SQLAlchemy sees this as a PK switch
old_copy_of_us.end = current_time()
# fun fact! the new_version() routine is *not* called for
# old_copy_of_us! because we are already in the before_flush() hook!
# this surprised even me. I was thinking we had to guard against
# it. Still might be a good idea to do so.
self.start = current_time()
self.end = current_time() + datetime.timedelta(days=2)
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
for instance in session.dirty:
if not isinstance(instance, VersionedStartEnd):
continue
if not session.is_modified(instance):
continue
if not attributes.instance_state(instance).has_identity:
continue
# make it transient
instance.new_version(session)
# re-add
session.add(instance)
@event.listens_for(Session, "do_orm_execute", retval=True)
def do_orm_execute(execute_state):
"""ensure all queries for VersionedStartEnd include criteria"""
ct = current_time() + datetime.timedelta(seconds=1)
execute_state.statement = execute_state.statement.options(
with_loader_criteria(
VersionedStartEnd,
lambda cls: and_(ct > cls.start, ct < cls.end),
include_aliases=True,
)
)
class Parent(VersionedStartEnd, Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
start = Column(DateTime, primary_key=True)
end = Column(DateTime, primary_key=True)
data = Column(String)
child_n = Column(Integer)
child = relationship(
"Child",
primaryjoin=("Child.id == foreign(Parent.child_n)"),
# note the primaryjoin can also be:
#
# "and_(Child.id == foreign(Parent.child_n), "
# "func.now().between(Child.start, Child.end))"
#
# however the do_orm_execute() hook above will take care of this for us in
# all cases except for joinedload. You *can* use the above primaryjoin
# as well, it just means the criteria will be present twice for most
# parent->child load operations
#
uselist=False,
backref=backref("parent", uselist=False),
)
class Child(VersionedStartEnd, Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
start = Column(DateTime, primary_key=True)
end = Column(DateTime, primary_key=True)
data = Column(String)
def new_version(self, session):
# expire parent's reference to us
session.expire(self.parent, ["child"])
# create new version
VersionedStartEnd.new_version(self, session)
# re-add ourselves to the parent
self.parent.child = self
times = []
def time_passes(s):
"""keep track of timestamps in terms of the database and allow time to
pass between steps."""
# close the transaction, if any, since PG time doesn't increment in the
# transaction
s.commit()
# get "now" in terms of the DB so we can keep the ranges low and
# still have our assertions pass
if times:
time.sleep(1)
times.append(datetime.datetime.now())
if len(times) > 1:
assert times[-1] > times[-2]
return times[-1]
e = create_engine("sqlite://", echo="debug")
Base.metadata.create_all(e)
s = Session(e)
now = time_passes(s)
c1 = Child(id=1, data="child 1")
p1 = Parent(id=1, data="c1", child=c1)
s.add(p1)
s.commit()
# assert raw DB data
assert s.query(Parent.__table__).all() == [
(
1,
times[0] - datetime.timedelta(days=3),
times[0] + datetime.timedelta(days=3),
"c1",
1,
)
]
assert s.query(Child.__table__).all() == [
(
1,
times[0] - datetime.timedelta(days=3),
times[0] + datetime.timedelta(days=3),
"child 1",
)
]
now = time_passes(s)
p1_check = s.query(Parent).first()
assert p1_check is p1
assert p1_check.child is c1
p1.child.data = "elvis presley"
s.commit()
p2_check = s.query(Parent).first()
assert p2_check is p1_check
c2_check = p2_check.child
# same object
assert p2_check.child is c1
# new data
assert c1.data == "elvis presley"
# new end time
assert c1.end == now + datetime.timedelta(days=2)
# assert raw DB data
assert s.query(Parent.__table__).all() == [
(
1,
times[0] - datetime.timedelta(days=3),
times[0] + datetime.timedelta(days=3),
"c1",
1,
)
]
assert s.query(Child.__table__).order_by(Child.end).all() == [
(1, times[0] - datetime.timedelta(days=3), times[1], "child 1"),
(1, times[1], times[1] + datetime.timedelta(days=2), "elvis presley"),
]
now = time_passes(s)
p1.data = "c2 elvis presley"
s.commit()
# assert raw DB data. now there are two parent rows.
assert s.query(Parent.__table__).order_by(Parent.end).all() == [
(1, times[0] - datetime.timedelta(days=3), times[2], "c1", 1),
(
1,
times[2],
times[2] + datetime.timedelta(days=2),
"c2 elvis presley",
1,
),
]
assert s.query(Child.__table__).order_by(Child.end).all() == [
(1, times[0] - datetime.timedelta(days=3), times[1], "child 1"),
(1, times[1], times[1] + datetime.timedelta(days=2), "elvis presley"),
]
# add some more rows to test that these aren't coming back for
# queries
s.add(Parent(id=2, data="unrelated", child=Child(id=2, data="unrelated")))
s.commit()
# Query only knows about one parent for id=1
p3_check = s.query(Parent).filter_by(id=1).one()
assert p3_check is p1
assert p3_check.child is c1
# and one child.
c3_check = s.query(Child).filter(Child.parent == p3_check).one()
assert c3_check is c1
# one child one parent....
c3_check = (
s.query(Child).join(Parent.child).filter(Parent.id == p3_check.id).one()
)
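# --- Editor's sketch (not part of the original example) ---
# A hedged variant of the do_orm_execute() hook above: in a real application
# this function would replace that hook (registered with the same
# @event.listens_for(Session, "do_orm_execute") call) and let callers opt out
# of the time-window criteria through a custom execution option. The option
# name "include_all_versions" is made up for illustration.
def do_orm_execute_with_escape_hatch(execute_state):
    if execute_state.execution_options.get("include_all_versions", False):
        # leave the statement untouched; every version remains visible
        return
    ct = current_time() + datetime.timedelta(seconds=1)
    execute_state.statement = execute_state.statement.options(
        with_loader_criteria(
            VersionedStartEnd,
            lambda cls: and_(ct > cls.start, ct < cls.end),
            include_aliases=True,
        )
    )
# If registered, s.query(Child).execution_options(include_all_versions=True)
# would return historical Child rows as well.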
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: group_search_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='group_search_service.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x1agroup_search_service.proto\"%\n\x14PublicGroupJoinToken\x12\r\n\x05token\x18\x01 \x01(\x0c\"\xb3\x01\n\x12\x46indGroupsResponse\x12*\n\x06result\x18\x01 \x01(\x0e\x32\x1a.FindGroupsResponse.Result\x12#\n\x05match\x18\x02 \x03(\x0b\x32\x14.LimitedGroupDetails\x12!\n\x19is_available_for_creation\x18\x03 \x01(\x08\")\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x17\n\x13RATE_LIMIT_EXCEEDED\x10\x01\" \n\nXiGroupJid\x12\x12\n\nlocal_part\x18\x01 \x01(\t\"z\n\x10GroupDisplayData\x12\x0f\n\x07hashtag\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x1c\n\x14\x64isplay_pic_base_url\x18\x04 \x01(\t\x12!\n\x19\x64isplay_pic_last_modified\x18\x05 \x01(\x04\"\x9f\x01\n\x13LimitedGroupDetails\x12\x18\n\x03jid\x18\x01 \x01(\x0b\x32\x0b.XiGroupJid\x12\'\n\x0c\x64isplay_data\x18\x02 \x01(\x0b\x32\x11.GroupDisplayData\x12\x14\n\x0cmember_count\x18\x03 \x01(\r\x12/\n\x10group_join_token\x18\x64 \x01(\x0b\x32\x15.PublicGroupJoinTokenb\x06proto3')
)
_FINDGROUPSRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='FindGroupsResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RATE_LIMIT_EXCEEDED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=208,
serialized_end=249,
)
_sym_db.RegisterEnumDescriptor(_FINDGROUPSRESPONSE_RESULT)
_PUBLICGROUPJOINTOKEN = _descriptor.Descriptor(
name='PublicGroupJoinToken',
full_name='PublicGroupJoinToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='PublicGroupJoinToken.token', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=67,
)
_FINDGROUPSRESPONSE = _descriptor.Descriptor(
name='FindGroupsResponse',
full_name='FindGroupsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='FindGroupsResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='match', full_name='FindGroupsResponse.match', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_available_for_creation', full_name='FindGroupsResponse.is_available_for_creation', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FINDGROUPSRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=249,
)
_XIGROUPJID = _descriptor.Descriptor(
name='XiGroupJid',
full_name='XiGroupJid',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local_part', full_name='XiGroupJid.local_part', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=251,
serialized_end=283,
)
_GROUPDISPLAYDATA = _descriptor.Descriptor(
name='GroupDisplayData',
full_name='GroupDisplayData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hashtag', full_name='GroupDisplayData.hashtag', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='GroupDisplayData.display_name', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_pic_base_url', full_name='GroupDisplayData.display_pic_base_url', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_pic_last_modified', full_name='GroupDisplayData.display_pic_last_modified', index=3,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=285,
serialized_end=407,
)
_LIMITEDGROUPDETAILS = _descriptor.Descriptor(
name='LimitedGroupDetails',
full_name='LimitedGroupDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='jid', full_name='LimitedGroupDetails.jid', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_data', full_name='LimitedGroupDetails.display_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='member_count', full_name='LimitedGroupDetails.member_count', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group_join_token', full_name='LimitedGroupDetails.group_join_token', index=3,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=410,
serialized_end=569,
)
_FINDGROUPSRESPONSE.fields_by_name['result'].enum_type = _FINDGROUPSRESPONSE_RESULT
_FINDGROUPSRESPONSE.fields_by_name['match'].message_type = _LIMITEDGROUPDETAILS
_FINDGROUPSRESPONSE_RESULT.containing_type = _FINDGROUPSRESPONSE
_LIMITEDGROUPDETAILS.fields_by_name['jid'].message_type = _XIGROUPJID
_LIMITEDGROUPDETAILS.fields_by_name['display_data'].message_type = _GROUPDISPLAYDATA
_LIMITEDGROUPDETAILS.fields_by_name['group_join_token'].message_type = _PUBLICGROUPJOINTOKEN
DESCRIPTOR.message_types_by_name['PublicGroupJoinToken'] = _PUBLICGROUPJOINTOKEN
DESCRIPTOR.message_types_by_name['FindGroupsResponse'] = _FINDGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['XiGroupJid'] = _XIGROUPJID
DESCRIPTOR.message_types_by_name['GroupDisplayData'] = _GROUPDISPLAYDATA
DESCRIPTOR.message_types_by_name['LimitedGroupDetails'] = _LIMITEDGROUPDETAILS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PublicGroupJoinToken = _reflection.GeneratedProtocolMessageType('PublicGroupJoinToken', (_message.Message,), dict(
DESCRIPTOR = _PUBLICGROUPJOINTOKEN,
__module__ = 'group_search_service_pb2'
# @@protoc_insertion_point(class_scope:PublicGroupJoinToken)
))
_sym_db.RegisterMessage(PublicGroupJoinToken)
FindGroupsResponse = _reflection.GeneratedProtocolMessageType('FindGroupsResponse', (_message.Message,), dict(
DESCRIPTOR = _FINDGROUPSRESPONSE,
__module__ = 'group_search_service_pb2'
# @@protoc_insertion_point(class_scope:FindGroupsResponse)
))
_sym_db.RegisterMessage(FindGroupsResponse)
XiGroupJid = _reflection.GeneratedProtocolMessageType('XiGroupJid', (_message.Message,), dict(
DESCRIPTOR = _XIGROUPJID,
__module__ = 'group_search_service_pb2'
# @@protoc_insertion_point(class_scope:XiGroupJid)
))
_sym_db.RegisterMessage(XiGroupJid)
GroupDisplayData = _reflection.GeneratedProtocolMessageType('GroupDisplayData', (_message.Message,), dict(
DESCRIPTOR = _GROUPDISPLAYDATA,
__module__ = 'group_search_service_pb2'
# @@protoc_insertion_point(class_scope:GroupDisplayData)
))
_sym_db.RegisterMessage(GroupDisplayData)
LimitedGroupDetails = _reflection.GeneratedProtocolMessageType('LimitedGroupDetails', (_message.Message,), dict(
DESCRIPTOR = _LIMITEDGROUPDETAILS,
__module__ = 'group_search_service_pb2'
# @@protoc_insertion_point(class_scope:LimitedGroupDetails)
))
_sym_db.RegisterMessage(LimitedGroupDetails)
# @@protoc_insertion_point(module_scope)
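# A minimal usage sketch of the generated classes (illustrative only; the field
# values below are made up and are not part of the protoc output):
#
#   resp = FindGroupsResponse()
#   resp.result = FindGroupsResponse.RATE_LIMIT_EXCEEDED
#   detail = resp.match.add()                 # repeated LimitedGroupDetails
#   detail.jid.local_part = 'example-group'   # hypothetical value
#   detail.member_count = 12
#   payload = resp.SerializeToString()
#   round_tripped = FindGroupsResponse.FromString(payload)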
|
|
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma,
hyperexpand, exp, simplify, sqrt, pi, erf, sin, cos,
exp_polar, polar_lift, polygamma, hyper, log, expand_func)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities import default_sort_key
from sympy.utilities.randtest import (test_numerically,
random_complex_number as randcplx)
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
def t(expr, c, m):
e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
assert e is not None
assert isinstance(e[0][0][2], meijerg)
assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))
def tn(expr):
assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None
t(x, 1, x)
t(x**2, 1, x**2)
t(x**2 + y*x**2, y + 1, x**2)
tn(x**2 + x)
tn(x**y)
def u(expr, x):
from sympy import Add, exp, exp_polar
r = _rewrite_single(expr, x)
e = Add(*[res[0]*res[2] for res in r[0]]).replace(
exp_polar, exp) # XXX Hack?
assert test_numerically(e, expr, x)
u(exp(-x)*sin(x), x)
# The following has stopped working because hyperexpand changed slightly.
# It is probably not worth fixing
#u(exp(-x)*sin(x)*cos(x), x)
# This one cannot be done numerically, since it comes out as a g-function
# of argument 4*pi
# NOTE This also tests a bug in inverse mellin transform (which used to
# turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
# exp_polar).
#u(exp(x)*sin(x), x)
assert _rewrite_single(exp(x)*sin(x), x) == \
([(-sqrt(2)/(2*sqrt(pi)), 0,
meijerg(((-S(1)/2, 0, S(1)/4, S(1)/2, S(3)/4), (1,)),
((), (-S(1)/2, 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) == \
(5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True)
def test_meijerint_indefinite_numerically():
def t(fac, arg):
g = meijerg([a], [b], [c], [d], arg)*fac
subs = {a: randcplx()/10, b: randcplx()/10 + I,
c: randcplx(), d: randcplx()}
integral = meijerint_indefinite(g, x)
assert integral is not None
assert test_numerically(g.subs(subs), integral.diff(x).subs(subs), x)
t(1, x)
t(2, x)
t(1, 2*x)
t(1, x**2)
t(5, x**S('3/2'))
t(x**3, x)
t(3*x**S('3/2'), 4*x**S('7/3'))
def test_inflate():
subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
d: randcplx(), y: randcplx()/10}
def t(a, b, arg, n):
from sympy import Mul
m1 = meijerg(a, b, arg)
m2 = Mul(*_inflate_g(m1, n))
# NOTE: (the random number)**9 must still be on the principal sheet.
# Thus make b&d small to create random numbers of small imaginary part.
return test_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
assert t([[a], [b]], [[c], [d]], x, 3)
assert t([[a, y], [b]], [[c], [d]], x, 3)
assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
from sympy import symbols, exp_polar, expand
a, b, c = symbols('a b c', positive=True)
r = exp(-(x - a)**2)*exp(-(x - b)**2)
e = integrate(r, (x, 0, oo), meijerg=True)
assert simplify(e.expand()) == (
sqrt(2)*sqrt(pi)*(
(erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4)
e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True)
assert simplify(e) == (
sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2
+ (2*a + 2*b + c)**2/8)/4)
assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \
sqrt(pi)/2*(1 + erf(a + b + c))
assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True)) == \
sqrt(pi)/2*(1 - erf(a + b + c))
def test_meijerint():
from sympy import symbols, expand, arg
s, t, mu = symbols('s t mu', real=True)
assert integrate(meijerg([], [], [0], [], s*t)
*meijerg([], [], [mu/2], [-mu/2], t**2/4),
(t, 0, oo)).is_Piecewise
s = symbols('s', positive=True)
assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \
gamma(s + 1)
assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
meijerg=True) == gamma(s + 1)
assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
(x, 0, oo), meijerg=False),
Integral)
assert meijerint_indefinite(exp(x), x) == exp(x)
# TODO what simplifications should be done automatically?
# This tests "extra case" for antecedents_1.
a, b = symbols('a b', positive=True)
assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \
b**(a + 1)/(a + 1)
# This tests various conditions and expansions:
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)
# Again, how about simplifications?
sigma, mu = symbols('sigma mu', positive=True)
i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
assert simplify(i) == sqrt(pi)*sigma*(erf(mu/(2*sigma)) + 1)
assert c == True
i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
# TODO it would be nice to test the condition
assert simplify(i) == 1/(mu - sigma)
# Test substitutions to change limits
assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)
assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
1 - exp(-exp(I*arg(x))*abs(x))
# Test -oo to oo
assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
(sqrt(pi)/2, True)
assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
x, -oo, oo) == (1, True)
    # Test one of the extra conditions for 2 g-functions
assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S(1)/2, True)
# Test a bug
def res(n):
return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
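    # Why res(n) gives the integral: int_0^oo exp(-t*x)*sin(x) dx = 1/(1 + t**2),
    # and differentiating n times under the integral sign w.r.t. t brings down a
    # factor of (-x)**n, so evaluating at t = 1 yields
    # int_0^oo x**n*exp(-x)*sin(x) dx = (-1)**n * d^n/dt^n [1/(1 + t**2)] at t = 1.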
for n in range(6):
assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
res(n)
# This used to test trigexpand... now it is done by linear substitution
assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
) == sqrt(2)*sin(a + pi/4)/2
    # Test condition 14 from Prudnikov.
# (This is besselj*besselj in disguise, to stop the product from being
# recognised in the tables.)
a, b, s = symbols('a b s')
from sympy import And, re
assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4)
*meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
(4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))
# test a bug
assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
Integral(sin(x**a)*sin(x**b), (x, 0, oo))
# test better hyperexpand
assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
(sqrt(pi)*polygamma(0, S(1)/2)/4).expand()
# Test hyperexpand bug.
from sympy import lowergamma
n = symbols('n', integer=True)
assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
lowergamma(n + 1, x)
# Test a bug with argument 1/x
alpha = symbols('alpha', positive=True)
assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
(sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S(1)/2,
alpha/2 + 1)), ((0, 0, S(1)/2), (-S(1)/2,)), alpha**S(2)/16)/4, True)
# test a bug related to 3016
a, s = symbols('a s', positive=True)
assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
a**(-s/2 - S(1)/2)*((-1)**s + 1)*gamma(s/2 + S(1)/2)/2
def test_bessel():
from sympy import (besselj, Heaviside, besseli, polar_lift, exp_polar,
powdenest)
assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
meijerg=True, conds='none')) == \
2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
meijerg=True, conds='none')) == 1/(2*a)
# TODO more orthogonality integrals
assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S(1)/2)),
(x, 1, oo), meijerg=True, conds='none')
*2/((z/2)**y*sqrt(pi)*gamma(S(1)/2 - y))) == \
besselj(y, z)
# Werner Rosenheinrich
# SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS
assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
# TODO can do higher powers, but come out as high order ... should they be
# reduced to order 0, 1?
assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
-(besselj(0, x)**2 + besselj(1, x)**2)/2
# TODO more besseli when tables are extended or recursive mellin works
assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
-2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
+ 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
-besselj(0, x)**2/2
assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
x**2*besselj(1, x)**2/2
assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
(x*besselj(0, x)**2 + x*besselj(1, x)**2 -
besselj(0, x)*besselj(1, x))
# TODO how does besselj(0, a*x)*besselj(0, b*x) work?
# TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
# TODO sin(x)*besselj(0, x) etc come out a mess
# TODO can x*log(x)*besselj(0, x) be done?
# TODO how does besselj(1, x)*besselj(0, x+a) work?
# TODO more indefinite integrals when struve functions etc are implemented
# test a substitution
assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
-besselj(0, x**2)/2
def test_inversion():
from sympy import piecewise_fold, besselj, sqrt, I, sin, cos, Heaviside
def inv(f):
return piecewise_fold(meijerint_inversion(f, s, t))
assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t)
assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t)
assert inv(exp(-s)/s) == Heaviside(t - 1)
assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t)
    # Test some antecedents checking.
assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
assert inv(exp(s**2)) is None
assert meijerint_inversion(exp(-s**2), s, t) is None
def test_lookup_table():
from random import uniform, randrange
from sympy import Add, unpolarify, exp_polar, exp
from sympy.integrals.meijerint import z as z_dummy
table = {}
_create_lookup_table(table)
for _, l in sorted(table.items()):
for formula, terms, cond, hint in sorted(l, key=default_sort_key):
subs = {}
for a in list(formula.free_symbols) + [z_dummy]:
if hasattr(a, 'properties') and a.properties:
# these Wilds match positive integers
subs[a] = randrange(1, 10)
else:
subs[a] = uniform(1.5, 2.0)
if not isinstance(terms, list):
terms = terms(subs)
# First test that hyperexpand can do this.
expanded = [hyperexpand(g) for (_, g) in terms]
assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded)
# Now test that the meijer g-function is indeed as advertised.
expanded = Add(*[f*x for (f, x) in terms])
a, b = formula.n(subs=subs), expanded.n(subs=subs)
r = min(abs(a), abs(b))
if r < 1:
assert abs(a - b).n() <= 1e-10
else:
assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
from sympy import powdenest, lowergamma
# TODO combsimp cannot prove that the factor is unity
assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
polar=True) == 2*erf(x**3)*gamma(S(2)/3)/3/gamma(S(5)/3)
assert integrate(erf(x**3), x, meijerg=True) == \
2*x*erf(x**3)*gamma(S(2)/3)/(3*gamma(S(5)/3)) \
- 2*gamma(S(2)/3)*lowergamma(S(2)/3, x**6)/(3*sqrt(pi)*gamma(S(5)/3))
def test_linear_subs():
from sympy import besselj
assert integrate(sin(x - 1), x, meijerg=True) == -cos(1 - x)
assert integrate(besselj(1, x - 1), x, meijerg=True) == -besselj(0, 1 - x)
def test_probability():
# various integrals from probability theory
from sympy.abc import x, y, z
from sympy import symbols, Symbol, Abs, expand_mul, combsimp, powsimp, sin
mu1, mu2 = symbols('mu1 mu2', real=True, finite=True, bounded=True)
sigma1, sigma2 = symbols('sigma1 sigma2', real=True, finite=True,
bounded=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, bounded=True)
def normal(x, mu, sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
def exponential(x, rate):
return rate*exp(-rate*x)
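    # Closed forms exercised below: for the normal density E[x] = mu and
    # E[x**2] = mu**2 + sigma**2, while for the exponential density with rate
    # lambda, E[x**n] = n!/lambda**n (hence 1/rate and 2/rate**2 for n = 1, 2).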
assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
mu1
assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**2 + sigma1**2
assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**3 + 3*mu1*sigma1**2
assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
-1 + mu1 + mu2
i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True)
assert not i.has(Abs)
assert simplify(i) == mu1**2 + sigma1**2
assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
sigma2**2 + mu2**2
assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
1/rate
assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \
2/rate**2
def E(expr):
res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(x, 0, oo), (y, -oo, oo), meijerg=True)
res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(y, -oo, oo), (x, 0, oo), meijerg=True)
assert expand_mul(res1) == expand_mul(res2)
return res1
assert E(1) == 1
assert E(x*y) == mu1/rate
assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
ans = sigma1**2 + 1/rate**2
assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
assert simplify(E((x + y)**2) - E(x + y)**2) == ans
# Beta' distribution
alpha, beta = symbols('alpha beta', positive=True)
betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \
/gamma(alpha)/gamma(beta)
assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert (combsimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert j[1] == (1 < beta - 1)
assert combsimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
/(beta - 2)/(beta - 1)**2
# Beta distribution
# NOTE: this is evaluated using antiderivatives. It also tests that
# meijerint_indefinite returns the simplest possible answer.
a, b = symbols('a b', positive=True)
betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
a/(a + b)
assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
a*(a + 1)/(a + b)/(a + b + 1)
assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
# Chi distribution
k = Symbol('k', integer=True, positive=True)
chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
# Chi^2 distribution
chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2)
assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
k*(k + 2)
assert combsimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
meijerg=True)) == 2*sqrt(2)/sqrt(k)
# Dagum distribution
a, b, p = symbols('a b p', positive=True)
# XXX (x/b)**a does not work
dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1)
assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
# XXX conditions are a mess
arg = x*dagum
assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/(
(a*p + 1)*gamma(p))
assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/(
(a*p + 2)*gamma(p))
# F-distribution
d1, d2 = symbols('d1 d2', positive=True)
f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \
/gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
# TODO conditions are a mess
assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
) == d2/(d2 - 2)
assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
# TODO gamma, rayleigh
# inverse gaussian
lamda, mu = symbols('lamda mu', positive=True)
dist = sqrt(lamda/2/pi)*x**(-S(3)/2)*exp(-lamda*(x - mu)**2/x/2/mu**2)
mysimp = lambda expr: simplify(expr.rewrite(exp))
assert mysimp(integrate(dist, (x, 0, oo))) == 1
assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
    # Levy distribution
c = Symbol('c', positive=True)
assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
(x, mu, oo)) == 1
    # higher moments are oo (the Levy distribution has no finite mean or variance)
# log-logistic
distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \
(1 + x**beta/alpha**beta)**2
assert simplify(integrate(distn, (x, 0, oo))) == 1
# NOTE the conditions are a mess, but correctly state beta > 1
assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
pi*alpha/beta/sin(pi/beta)
# (similar comment for conditions applies)
assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
pi*alpha**y*y/beta/sin(pi*y/beta)
# weibull
k = Symbol('k', positive=True)
n = Symbol('n', positive=True)
distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k)
assert simplify(integrate(distn, (x, 0, oo))) == 1
assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
lamda**n*gamma(1 + n/k)
# rice distribution
from sympy import besseli
nu, sigma = symbols('nu sigma', positive=True)
rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
assert integrate(rice, (x, 0, oo), meijerg=True) == 1
# can someone verify higher moments?
# Laplace distribution
mu = Symbol('mu', real=True)
b = Symbol('b', positive=True)
laplace = exp(-abs(x - mu)/b)/2/b
assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
2*b**2 + mu**2
# TODO are there other distributions supported on (-oo, oo) that we can do?
# misc tests
k = Symbol('k', positive=True)
assert combsimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
(x, 0, oo)))) == polygamma(0, k)
def test_expint():
""" Test various exponential integrals. """
from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
sin, cos, sinh, cosh, Ei)
assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo),
meijerg=True, conds='none'
).rewrite(expint).expand(func=True))) == expint(y, z)
assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(1, z)
assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(2, z).rewrite(Ei).rewrite(expint)
assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(3, z).rewrite(Ei).rewrite(expint).expand()
t = Symbol('t', positive=True)
assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t)
assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \
Si(t) - pi/2
assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)
assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
I*pi - expint(1, x)
assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
== expint(1, x) - exp(-x)/x - I*pi
u = Symbol('u', polar=True)
assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
== Ci(u)
assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
== Chi(u)
assert integrate(expint(1, x), x, meijerg=True
).rewrite(expint).expand() == x*expint(1, x) - exp(-x)
assert integrate(expint(2, x), x, meijerg=True
).rewrite(expint).expand() == \
-x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
assert simplify(unpolarify(integrate(expint(y, x), x,
meijerg=True).rewrite(expint).expand(func=True))) == \
-expint(y + 1, x)
assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)
assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
from sympy import (laplace_transform, Si, Ci, Shi, Chi, atan, Piecewise,
atanh, acoth, E1, besselj, acosh, asin, Ne, And, re,
fourier_transform, sqrt, Abs)
assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)
# where should the logs be simplified?
assert laplace_transform(Chi(x), x, s) == \
((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)
# TODO maybe simplify the inequalities?
assert laplace_transform(besselj(a, x), x, s)[1:] == \
(0, And(S(0) < re(a/2) + S(1)/2, S(0) < re(a/2) + 1))
# NOTE s < 0 can be done, but argument reduction is not good enough yet
assert fourier_transform(besselj(1, x)/x, x, s, noconds=False) == \
(Piecewise((0, 1 < 4*abs(pi**2*s**2)),
(2*sqrt(-4*pi**2*s**2 + 1), True)), 0 < s)
# TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
# - folding could be better
assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \
log(1 + sqrt(2))
assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \
log(S(1)/2 + sqrt(2)/2)
assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
Piecewise((-acosh(1/x), 1 < abs(x**(-2))), (I*asin(1/x), True))
def test_issue_6122():
assert integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True) == \
-I*sqrt(pi)*exp(I*pi/4)
def test_issue_6252():
expr = 1/x/(a + b*x)**(S(1)/3)
anti = integrate(expr, x, meijerg=True)
    assert not anti.has(hyper)
# XXX the expression is a mess, but actually upon differentiation and
# putting in numerical values seems to work...
def test_issue_6348():
assert integrate(exp(I*x)/(1 + x**2), (x, -oo, oo)).simplify().rewrite(exp) \
== pi*exp(-1)
def test_fresnel():
from sympy import fresnels, fresnelc
assert expand_func(integrate(sin(pi*x**2/2), x)) == fresnels(x)
assert expand_func(integrate(cos(pi*x**2/2), x)) == fresnelc(x)
def test_issue_6860():
assert meijerint_indefinite(x**x**x, x) is None
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-asset documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "4.0.1"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
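# Note: `source_parsers` was deprecated and later removed from Sphinx; with
# recent Sphinx/recommonmark releases the markdown parser is typically enabled
# by adding "recommonmark" to `extensions` instead. The mapping above reflects
# the recommonmark setup this configuration was originally written against.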
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = u"google-cloud-asset"
copyright = u"2022, Google, LLC"
author = u"Google APIs" # TODO: autogenerate this bit
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-asset-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-asset.tex",
u"google-cloud-asset Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-asset",
u"Google Cloud Asset Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-asset",
u"google-cloud-asset Documentation",
author,
"google-cloud-asset",
"GAPIC library for Google Cloud Asset API",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("http://requests.kennethreitz.org/en/stable/", None),
"proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
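# With the mapping above, docstrings and .rst files can cross-reference objects
# in the listed projects directly, e.g. ":class:`requests.Session`" or
# ":class:`google.api_core.retry.Retry`" resolve to the external documentation.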
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
# -*- coding: utf-8 -*-
#
# Utilities for dealing with HTTP requests
#
# RateLimiter helps to only make a certain number of calls per second.
# MultiRequest wraps grequests and issues multiple requests at once with an easy to use interface.
# SSLAdapter helps force use of the highest possible version of TLS.
#
import ssl
import time
from collections import namedtuple
import grequests
from requests import Session
from requests.adapters import HTTPAdapter
from requests import ConnectionError
from threat_intel.exceptions import InvalidRequestError
from threat_intel.util.error_messages import write_error_message
from threat_intel.util.error_messages import write_exception
class SSLAdapter(HTTPAdapter):
"""Attempt to use the highest possible TLS version for HTTPS connections.
    By explicitly controlling which TLS version is used when connecting, this avoids the client offering only SSLv2 or SSLv3.
    While it may seem counterintuitive, the best version specifier to pass is `ssl.PROTOCOL_SSLv23`.
This will actually choose the highest available protocol compatible with both client and server.
For details see the documentation for `ssl.wrap_socket` https://docs.python.org/2/library/ssl.html#socket-creation
    To use this class, mount it to a `requests.Session` and then make HTTPS requests through the session object.
.. code-block:: python
# Mount an SSLAdapter in a Session
session = requests.Session()
session.mount('https://', SSLAdapter())
# Make a requests call through the session
session.get('https://api.github.com/events')
# Make a grequests call through the session
grequests.get('https://api.github.com/events', session=session)
"""
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
"""Called to initialize the HTTPAdapter when no proxy is used."""
pool_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Called to initialize the HTTPAdapter when a proxy is used."""
proxy_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
return super(SSLAdapter, self).proxy_manager_for(proxy, **proxy_kwargs)
class RateLimiter(object):
"""Limits how many calls can be made per second"""
CallRecord = namedtuple('CallRecord', ['time', 'num_calls'])
def __init__(self, calls_per_sec):
self._max_calls_per_second = calls_per_sec
self._call_times = []
self._outstanding_calls = 0
def make_calls(self, num_calls=1):
"""Adds appropriate sleep to avoid making too many calls.
Args:
num_calls: int the number of calls which will be made
"""
self._cull()
while self._outstanding_calls + num_calls > self._max_calls_per_second:
time.sleep(0) # yield
self._cull()
self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))
self._outstanding_calls += num_calls
def _cull(self):
"""Remove calls more than 1 second old from the queue."""
right_now = time.time()
cull_from = -1
for index in xrange(len(self._call_times)):
if right_now - self._call_times[index].time >= 1.0:
cull_from = index
self._outstanding_calls -= self._call_times[index].num_calls
else:
break
if cull_from > -1:
self._call_times = self._call_times[cull_from + 1:]
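# A minimal usage sketch for RateLimiter (illustrative; `issue_batch` is a
# hypothetical callable standing in for whatever actually sends the requests):
#
#   limiter = RateLimiter(calls_per_sec=5)
#   for batch in batches_of_calls:
#       limiter.make_calls(num_calls=len(batch))
#       issue_batch(batch)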
class MultiRequest(object):
"""Wraps grequests to make simultaneous HTTP requests.
Can use a RateLimiter to limit # of outstanding requests.
`multi_get` and `multi_post` try to be smart about how many requests to issue:
* One url & one param - One request will be made.
* Multiple url & one query param - Multiple requests will be made, with differing urls and the same query param.
    * One url & multiple query params - Multiple requests will be made, with the same url and differing query params.
"""
_VERB_GET = 'GET'
_VERB_POST = 'POST'
def __init__(self, default_headers=None, max_requests=20, rate_limit=0, req_timeout=25.0, max_retry=10):
"""Create the MultiRequest.
Args:
default_headers - A dict of headers which will be added to every request
max_requests - Maximum number of requests to issue at once
rate_limit - Maximum number of requests to issue per second
req_timeout - Maximum number of seconds to wait without reading a response byte before deciding an error has occurred
"""
self._default_headers = default_headers
self._max_requests = max_requests
self._req_timeout = req_timeout
self._max_retry = max_retry
self._rate_limiter = RateLimiter(rate_limit) if rate_limit else None
self._session = Session()
self._session.mount('https://', SSLAdapter())
def multi_get(self, urls, query_params=None, to_json=True):
"""Issue multiple GET requests.
Args:
urls - A string URL or list of string URLs
query_params - None, a dict, or a list of dicts representing the query params
to_json - A boolean, should the responses be returned as JSON blobs
Returns:
            a list of dicts if to_json is set, a list of grequests.response objects otherwise.
Raises:
InvalidRequestError - Can not decide how many requests to issue.
"""
return self._multi_request(MultiRequest._VERB_GET, urls, query_params, None, to_json=to_json)
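    # Illustrative call (hypothetical URL and params): a single URL fanned out
    # over several query param dicts issues one GET per dict:
    #
    #   reqs = MultiRequest(default_headers={'User-Agent': 'example-client'})
    #   results = reqs.multi_get(
    #       'https://api.example.com/lookup',
    #       query_params=[{'q': 'a'}, {'q': 'b'}, {'q': 'c'}])
    #   # with to_json=True (the default) `results` is a list of three dicts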
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
"""Issue multiple POST requests.
Args:
urls - A string URL or list of string URLs
query_params - None, a dict, or a list of dicts representing the query params
data - None, a dict or string, or a list of dicts and strings representing the data body.
to_json - A boolean, should the responses be returned as JSON blobs
send_as_file - A boolean, should the data be sent as a file.
Returns:
            a list of dicts if to_json is set, a list of grequests.response objects otherwise.
Raises:
InvalidRequestError - Can not decide how many requests to issue.
"""
return self._multi_request(MultiRequest._VERB_POST, urls, query_params, data, to_json=to_json, send_as_file=send_as_file)
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
"""Helper method to create a single `grequests.post` or `grequests.get`.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in.
"""
# Prepare a set of kwargs to make it easier to avoid missing default params.
kwargs = {
'headers': self._default_headers,
'params': query_params,
'timeout': self._req_timeout,
'session': self._session
}
if MultiRequest._VERB_POST == verb:
if not send_as_file:
return grequests.post(url, data=data, **kwargs)
else:
return grequests.post(url, files={'file': data}, **kwargs)
elif MultiRequest._VERB_GET == verb:
return grequests.get(url, data=data, **kwargs)
else:
raise InvalidRequestError('Invalid verb {0}'.format(verb))
def _zip_request_params(self, urls, query_params, data):
"""Massages inputs and returns a list of 3-tuples zipping them up.
This is all the smarts behind deciding how many requests to issue.
It's fine for an input to have 0, 1, or a list of values.
        If there are two inputs each with a list of values, the cardinality of those lists must match.
Args:
urls - 1 string URL or a list of URLs
query_params - None, 1 dict, or a list of dicts
data - None, 1 dict or string, or a list of dicts or strings
Returns:
A list of 3-tuples (url, query_param, data)
Raises:
InvalidRequestError - if cardinality of lists does not match
"""
# Everybody gets to be a list
if not isinstance(urls, list):
urls = [urls]
if not isinstance(query_params, list):
query_params = [query_params]
if not isinstance(data, list):
data = [data]
# Counts must not mismatch
url_count = len(urls)
query_param_count = len(query_params)
data_count = len(data)
max_count = max(url_count, query_param_count, data_count)
if ((url_count < max_count and url_count > 1) or
(query_param_count < max_count and query_param_count > 1) or
(data_count < max_count and data_count > 1)):
raise InvalidRequestError('Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}',
url_count, query_param_count, data_count, max_count)
# Pad out lists
if url_count < max_count:
urls = urls * max_count
if query_param_count < max_count:
query_params = query_params * max_count
if data_count < max_count:
data = data * max_count
return zip(urls, query_params, data)
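    # Example of the padding behaviour (hypothetical values): one URL with three
    # query param dicts expands to three (url, query_param, data) tuples:
    #
    #   _zip_request_params('https://api.example.com', [{'q': 1}, {'q': 2}, {'q': 3}], None)
    #   -> [('https://api.example.com', {'q': 1}, None),
    #       ('https://api.example.com', {'q': 2}, None),
    #       ('https://api.example.com', {'q': 3}, None)]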
class _FakeResponse(object):
"""_FakeResponse looks enough like a response from grequests to handle when grequests has no response.
Attributes:
request - The request object
status_code - The HTTP response status code
"""
def __init__(self, request, status_code):
self._request = request
self._status_code = status_code
@property
def request(self):
return self._request
@property
def status_code(self):
return self._status_code
def json(self):
"""Convert the response body to a dict."""
return {}
def _wait_for_response(self, requests, to_json):
"""Issue a batch of requests and wait for the responses.
Args:
requests - A list of requests
to_json - A boolean, should the responses be returned as JSON blobs
Returns:
A list of dicts if to_json, a list of grequest.response otherwise
"""
        all_responses = []
        valid_responses = []
for retry in range(self._max_retry):
try:
responses = grequests.map(requests)
valid_responses = [response for response in responses if response]
                failed_auth_responses = [response for response in valid_responses if response.status_code == 403]
if failed_auth_responses:
raise ConnectionError('Credentials not authorized to access URL')
if len(valid_responses) != len(requests):
continue
else:
break
except ConnectionError:
raise
            except Exception:
                pass
        # range() tops out at self._max_retry - 1, so check whether the final
        # attempt still failed to produce a full set of valid responses.
        if retry == self._max_retry - 1 and len(valid_responses) != len(requests):
            raise ConnectionError('Unable to complete batch of requests within max_retry retries')
for request, response in zip(requests, responses):
if not response:
# should have caught this earlier, but if not ...
raise ConnectionError('Request to {0} had an empty response'.format(request.url))
if 200 != response.status_code:
write_error_message('url[{0}] status_code[{1}]'.format(response.request.url, response.status_code))
if to_json:
all_responses.append(response.json())
else:
all_responses.append(response)
return all_responses
def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False):
"""Issues multiple batches of simultaneous HTTP requests and waits for responses.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
urls - A string URL or list of string URLs
query_params - None, a dict, or a list of dicts representing the query params
data - None, a dict or string, or a list of dicts and strings representing the data body.
to_json - A boolean, should the responses be returned as JSON blobs
Returns:
If multiple requests are made - a list of dicts if to_json, a list of grequest.response otherwise
If a single request is made, the return is not a list
Raises:
InvalidRequestError - if no URL is supplied
"""
if not urls:
raise InvalidRequestError('No URL supplied')
# Break the params into batches of request_params
request_params = self._zip_request_params(urls, query_params, data)
batch_of_params = [request_params[pos:pos + self._max_requests] for pos in xrange(0, len(request_params), self._max_requests)]
# Iteratively issue each batch, applying the rate limiter if necessary
all_responses = []
for param_batch in batch_of_params:
if self._rate_limiter:
self._rate_limiter.make_calls(num_calls=len(param_batch))
requests = []
for url, query_param, datum in param_batch:
requests.append(self._create_request(verb, url, query_params=query_param, data=datum, send_as_file=send_as_file))
all_responses.extend(self._wait_for_response(requests, to_json))
return all_responses
    def post_file(self, url, file, to_json=True):
        """POST a single file upload and wait for the parsed response."""
        request = self._create_request(MultiRequest._VERB_POST, url, data=file, send_as_file=True)
        return self._wait_for_response([request], to_json)[0]
@classmethod
def error_handling(cls, fn):
"""Decorator to handle errors while calling out to grequests."""
def wrapper(*args, **kwargs):
try:
result = fn(*args, **kwargs)
return result
except InvalidRequestError as e:
write_exception(e)
if hasattr(e, 'request'):
write_error_message('request {0}'.format(repr(e.request)))
if hasattr(e, 'response'):
write_error_message('response {0}'.format(repr(e.response)))
raise e
return wrapper
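    # Typical use of the decorator (illustrative; `fetch_reports` is a
    # hypothetical helper):
    #
    #   @MultiRequest.error_handling
    #   def fetch_reports(reqs, urls):
    #       return reqs.multi_get(urls)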
|