#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Mongo DB"""
from ssl import CERT_NONE
from types import TracebackType
from typing import List, Optional, Type, Union
import pymongo
from pymongo import MongoClient, ReplaceOne
from airflow.hooks.base import BaseHook
class MongoHook(BaseHook):
"""
    Interact with MongoDB. This hook uses the Mongo conn_id.
    A PyMongo wrapper for interacting with a Mongo database.
    Mongo Connection Documentation
    https://docs.mongodb.com/manual/reference/connection-string/index.html
    You can specify connection string options in the extra field of your connection
    https://docs.mongodb.com/manual/reference/connection-string/index.html#connection-string-options
    If you want to use the DNS seedlist connection format, set `srv` to True.
    ex.
    {"srv": true, "replicaSet": "test", "ssl": true, "connectTimeoutMS": 30000}
:param mongo_conn_id: The :ref:`Mongo connection id <howto/connection:mongo>` to use
when connecting to MongoDB.
"""
conn_name_attr = 'conn_id'
default_conn_name = 'mongo_default'
conn_type = 'mongo'
hook_name = 'MongoDB'
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__()
self.mongo_conn_id = conn_id
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson.copy()
self.client = None
srv = self.extras.pop('srv', False)
scheme = 'mongodb+srv' if srv else 'mongodb'
self.uri = '{scheme}://{creds}{host}{port}/{database}'.format(
scheme=scheme,
creds=f'{self.connection.login}:{self.connection.password}@' if self.connection.login else '',
host=self.connection.host,
port='' if self.connection.port is None else f':{self.connection.port}',
database=self.connection.schema,
)
def __enter__(self):
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self.client is not None:
self.close_conn()
def get_conn(self) -> MongoClient:
"""Fetches PyMongo Client"""
if self.client is not None:
return self.client
# Mongo Connection Options dict that is unpacked when passed to MongoClient
options = self.extras
        # If we are using SSL, disable requiring certs from a specific hostname
if options.get('ssl', False):
options.update({'ssl_cert_reqs': CERT_NONE})
self.client = MongoClient(self.uri, **options)
return self.client
def close_conn(self) -> None:
"""Closes connection"""
client = self.client
if client is not None:
client.close()
self.client = None
def get_collection(
self, mongo_collection: str, mongo_db: Optional[str] = None
) -> pymongo.collection.Collection:
"""
Fetches a mongo collection object for querying.
Uses connection schema as DB unless specified.
"""
mongo_db = mongo_db if mongo_db is not None else self.connection.schema
mongo_conn: MongoClient = self.get_conn()
return mongo_conn.get_database(mongo_db).get_collection(mongo_collection)
def aggregate(
self, mongo_collection: str, aggregate_query: list, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.command_cursor.CommandCursor:
"""
Runs an aggregation pipeline and returns the results
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
https://pymongo.readthedocs.io/en/stable/examples/aggregation.html
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.aggregate(aggregate_query, **kwargs)
def find(
self,
mongo_collection: str,
query: dict,
find_one: bool = False,
mongo_db: Optional[str] = None,
projection: Optional[Union[list, dict]] = None,
**kwargs,
) -> pymongo.cursor.Cursor:
"""
Runs a mongo find query and returns the results
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.find
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if find_one:
return collection.find_one(query, projection, **kwargs)
else:
return collection.find(query, projection, **kwargs)
def insert_one(
self, mongo_collection: str, doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.InsertOneResult:
"""
Inserts a single document into a mongo collection
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.insert_one
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_one(doc, **kwargs)
def insert_many(
        self, mongo_collection: str, docs: List[dict], mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.InsertManyResult:
"""
Inserts many docs into a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.insert_many
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_many(docs, **kwargs)
def update_one(
self,
mongo_collection: str,
filter_doc: dict,
update_doc: dict,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Updates a single document in a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.update_one
:param mongo_collection: The name of the collection to update.
:param filter_doc: A query that matches the documents to update.
:param update_doc: The modifications to apply.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_one(filter_doc, update_doc, **kwargs)
def update_many(
self,
mongo_collection: str,
filter_doc: dict,
update_doc: dict,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Updates one or more documents in a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.update_many
:param mongo_collection: The name of the collection to update.
:param filter_doc: A query that matches the documents to update.
:param update_doc: The modifications to apply.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_many(filter_doc, update_doc, **kwargs)
def replace_one(
self,
mongo_collection: str,
doc: dict,
filter_doc: Optional[dict] = None,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Replaces a single document in a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
.. note::
        If no ``filter_doc`` is given, it is assumed that the replacement
        document contains the ``_id`` field, which is then used as the filter.
:param mongo_collection: The name of the collection to update.
:param doc: The new document.
:param filter_doc: A query that matches the documents to replace.
Can be omitted; then the _id field from doc will be used.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_doc:
filter_doc = {'_id': doc['_id']}
return collection.replace_one(filter_doc, doc, **kwargs)
def replace_many(
self,
mongo_collection: str,
docs: List[dict],
filter_docs: Optional[List[dict]] = None,
mongo_db: Optional[str] = None,
upsert: bool = False,
collation: Optional[pymongo.collation.Collation] = None,
**kwargs,
) -> pymongo.results.BulkWriteResult:
"""
Replaces many documents in a mongo collection.
Uses bulk_write with multiple ReplaceOne operations
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
.. note::
        If no ``filter_docs`` are given, it is assumed that all replacement
        documents contain the ``_id`` field, which is then used as the
        filter.
:param mongo_collection: The name of the collection to update.
:param docs: The new documents.
:param filter_docs: A list of queries that match the documents to replace.
Can be omitted; then the _id fields from docs will be used.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:param upsert: If ``True``, perform an insert if no documents
match the filters for the replace operation.
:param collation: An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_docs:
filter_docs = [{'_id': doc['_id']} for doc in docs]
requests = [
ReplaceOne(filter_docs[i], docs[i], upsert=upsert, collation=collation) for i in range(len(docs))
]
return collection.bulk_write(requests, **kwargs)
def delete_one(
self, mongo_collection: str, filter_doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.DeleteResult:
"""
Deletes a single document in a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.delete_one
:param mongo_collection: The name of the collection to delete from.
:param filter_doc: A query that matches the document to delete.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_one(filter_doc, **kwargs)
def delete_many(
self, mongo_collection: str, filter_doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.DeleteResult:
"""
Deletes one or more documents in a mongo collection.
https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:param filter_doc: A query that matches the documents to delete.
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs)
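# Usage sketch (illustrative, not part of the original hook): assumes an
# Airflow connection named 'mongo_default' exists; the 'app' database and
# 'users' collection below are hypothetical.
def _mongo_hook_example():
    # The context manager closes the client on exit via __exit__.
    with MongoHook(conn_id='mongo_default') as hook:
        hook.insert_one('users', {'_id': 1, 'name': 'ada'}, mongo_db='app')
        # find_one=True returns a single document instead of a cursor
        return hook.find('users', {'name': 'ada'}, find_one=True, mongo_db='app')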
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import native
import datetime
import logging
import re
import time
from guessit.rules import rebulk_builder
from guessit.api import GuessItApi, GuessitException
from rebulk import Rebulk
from rebulk.pattern import RePattern
from flexget import plugin
from flexget.event import event
from flexget.utils import qualities
from .parser_common import old_assume_quality
from .parser_common import ParsedEntry, ParsedVideoQuality, ParsedVideo, ParsedSerie, ParsedMovie
log = logging.getLogger('parser_guessit')
logging.getLogger('rebulk').setLevel(logging.WARNING)
logging.getLogger('guessit').setLevel(logging.WARNING)
class GuessitParsedEntry(ParsedEntry):
def __init__(self, data, name, guess_result, **kwargs):
ParsedEntry.__init__(self, data, name, **kwargs)
self._guess_result = guess_result
@property
def parsed_group(self):
return self._guess_result.get('release_group')
@property
def parsed_type(self):
parsed_type = self._guess_result.get('type', self.type)
if parsed_type == 'episode':
return 'series'
return parsed_type
@property
def proper_count(self):
        # TODO: deprecated. We should remove this field from the rest of the code.
version = self._guess_result.get('version')
if version is None:
version = 0
elif version <= 0:
version = -1
else:
version = version - 1
proper_count = self._guess_result.get('proper_count', 0)
fastsub = 'Fastsub' in self._guess_result.get('other', [])
return version + proper_count - (5 if fastsub else 0)
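    # Worked example (illustrative): a release guessit parses with version=3
    # and proper_count=1 yields (3 - 1) + 1 = 3; a 'Fastsub' tag would
    # subtract a further 5.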
@property
def properties(self):
return self._guess_result
class GuessitParsedVideoQuality(ParsedVideoQuality):
def __init__(self, guess_result):
self._guess_result = guess_result
@property
def video_codec(self):
return self._guess_result.get('video_codec')
@property
def source(self):
return self._guess_result.get('source')
@property
def format(self):
return self._guess_result.get('format')
@property
def audio_codec(self):
return self._guess_result.get('audio_codec')
@property
def video_profile(self):
return self._guess_result.get('video_profile')
@property
def screen_size(self):
return self._guess_result.get('screen_size')
@property
def audio_channels(self):
return self._guess_result.get('audio_channels')
@property
def audio_profile(self):
return self._guess_result.get('audio_profile')
@property
def old_resolution(self):
        if self.screen_size:
            return self.screen_size
        return 'HR' if 'HR' in self._guess_result.get('other', []) else None
@property
def old_source(self):
"""
        These properties should really be extracted to another category of quality ...
"""
        if 'Screener' in self._guess_result.get('other', []):
            if self.format == 'BluRay':
                return 'bdscr'
            return 'dvdscr'
        if 'Preair' in self._guess_result.get('other', []):
            return 'preair'
        if 'R5' in self._guess_result.get('other', []):
            return 'r5'
        return self.format.replace('-', '') if self.format else None
@property
def old_codec(self):
if self.video_profile == '10bit':
return '10bit'
return self.video_codec
@property
def old_audio(self):
if self.audio_codec == 'DTS' and (self.audio_profile in ['HD', 'HDMA']):
return 'dtshd'
        elif self.audio_channels == '5.1' and (self.audio_codec is None or self.audio_codec == 'DolbyDigital'):
return 'dd5.1'
return self.audio_codec
def to_old_quality(self, assumed_quality=None):
resolution = self.old_resolution
source = self.old_source
codec = self.old_codec
audio = self.old_audio
old_quality = qualities.Quality(' '.join([_f for _f in [resolution, source, codec, audio] if _f]))
old_quality = old_assume_quality(old_quality, assumed_quality)
return old_quality
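    # Example (illustrative values): screen_size '1080p', format 'BluRay',
    # video_codec 'h264' and DTS-HD audio join into
    # qualities.Quality('1080p BluRay h264 dtshd').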
class GuessitParsedVideo(GuessitParsedEntry, ParsedVideo):
def __init__(self, data, name, guess_result, **kwargs):
GuessitParsedEntry.__init__(self, data, name, guess_result, **kwargs)
self._quality = None
@property
def is_3d(self):
        return '3D' in self._guess_result.get('other', [])
@property
def quality2(self):
if self._quality is None:
self._quality = GuessitParsedVideoQuality(self._guess_result)
return self._quality
@property
def subtitle_languages(self):
return self._guess_result.get('subtitle_language')
@property
def languages(self):
return self._guess_result.get('language')
@property
def year(self):
return self._guess_result.get('year')
class GuessitParsedMovie(GuessitParsedVideo, ParsedMovie):
def __init__(self, data, name, guess_result, **kwargs):
GuessitParsedVideo.__init__(self, data, name, guess_result, **kwargs)
@property
def title(self):
return self._guess_result.get('title')
@property
def fields(self):
"""
Return a dict of all parser fields
"""
return {
'movie_parser': self,
'movie_name': self.name,
'movie_year': self.year,
'proper': self.proper,
'proper_count': self.proper_count,
'release_group': self.parsed_group,
'is_3d': self.is_3d,
'subtitle_languages': self.subtitle_languages,
'languages': self.languages,
'video_codec': self.quality2.video_codec,
'format': self.quality2.format,
'audio_codec': self.quality2.audio_codec,
'video_profile': self.quality2.video_profile,
'screen_size': self.quality2.screen_size,
'audio_channels': self.quality2.audio_channels,
'audio_profile': self.quality2.audio_profile
}
class GuessitParsedSerie(GuessitParsedVideo, ParsedSerie):
    part_re = re.compile(r'part\s?(\d+)', re.IGNORECASE)
def __init__(self, data, name, guess_result, **kwargs):
GuessitParsedVideo.__init__(self, data, name, guess_result, **kwargs)
@property
def series(self):
if self._guess_result.get('country') and hasattr(self._guess_result.get('country'), 'alpha2'):
return "%s (%s)" % (self._guess_result.get('title'), self._guess_result.get('country').alpha2)
return self._guess_result.get('title')
@property
def country(self):
return str(self._guess_result.get('country')) if 'country' in self._guess_result else None
@property
def complete(self):
return 'Complete' in self._guess_result.get('other', [])
@property
def regexp_id(self):
regexp_id = [match.value for match in self._guess_result.matches['regexpId']]
if isinstance(regexp_id, list):
return '-'.join(regexp_id)
else:
return regexp_id
@property
def title(self):
return self._guess_result.get('episode_title')
@property
def special(self):
return (self.episode_details and len(self.episode_details) > 0 or
(self.title and self.title.lower().strip() == 'special'))
@property
def episode_details(self):
return self._guess_result.get('episode_details')
@property
def episode(self):
episode = self._guess_result.get('episode')
if episode is None and 'part' in self._guess_result and not self.date:
return self._guess_result.get('part')
if episode is None and self.title:
matched = self.part_re.search(self.title)
if matched:
return int(matched.group(1))
return episode
@property
def episodes(self):
if 'episode' not in self._guess_result.values_list:
return len(self._guess_result.values_list.get('part', []))
return len(self._guess_result.values_list['episode'])
@property
def date(self):
d = self._guess_result.get('date')
if d:
if d > datetime.date.today() + datetime.timedelta(days=1):
return None
# Don't accept dates that are too old
if d < datetime.date(1970, 1, 1):
return None
return d
@property
def parsed_season(self):
season = self._guess_result.get('season')
if season is None and self.episode and not self.allow_seasonless:
if 'part' in self._guess_result:
return 1
episode_raw = self._guess_result.matches['episode'][0].initiator.raw
if episode_raw and any(c.isalpha() and c.lower() != 'v' for c in episode_raw):
return 1
return season
@property
def valid_strict(self):
return True
def _id_regexps_function(input_string, context):
ret = []
for regexp in context.get('id_regexps'):
for match in RePattern(regexp, children=True).matches(input_string, context):
ret.append(match.span)
return ret
_id_regexps = Rebulk().functional(_id_regexps_function, name='regexpId',
disabled=lambda context: not context.get('id_regexps'))
guessit_api = GuessItApi(rebulk_builder().rebulk(_id_regexps))
class ParserGuessit(object):
def _guessit_options(self, options):
settings = {'name_only': True, 'allowed_languages': ['en', 'fr'], 'allowed_countries': ['us', 'uk', 'gb']}
# 'clean_function': clean_value
        options['episode_prefer_number'] = options.get('identified_by') != 'ep'
if options.get('allow_groups'):
options['expected_group'] = options['allow_groups']
if 'date_yearfirst' in options:
options['date_year_first'] = options['date_yearfirst']
if 'date_dayfirst' in options:
options['date_day_first'] = options['date_dayfirst']
settings.update(options)
return settings
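    # Example (illustrative): {'identified_by': 'ep', 'date_yearfirst': True}
    # yields settings with episode_prefer_number=False and date_year_first=True
    # on top of the defaults above.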
# movie_parser API
def parse_movie(self, data, **kwargs):
log.debug('Parsing movie: `%s` [options: %s]', data, kwargs)
start = time.clock()
guessit_options = self._guessit_options(kwargs)
guessit_options['type'] = 'movie'
guess_result = guessit_api.guessit(data, options=guessit_options)
# NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of future.utils.native
parsed = GuessitParsedMovie(native(data), kwargs.pop('name', None), guess_result, **kwargs)
end = time.clock()
log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
return parsed
# series_parser API
def parse_series(self, data, **kwargs):
log.debug('Parsing series: `%s` [options: %s]', data, kwargs)
guessit_options = self._guessit_options(kwargs)
if kwargs.get('name') and not guessit_options.get('strict_name'):
expected_title = kwargs['name']
expected_title = expected_title.replace('\'', '(?:\'|\\\'|\\\\\'|-|)?') # apostrophe support
guessit_options['expected_title'] = ['re:' + expected_title]
if kwargs.get('id_regexps'):
guessit_options['id_regexps'] = kwargs.get('id_regexps')
start = time.clock()
        # If no series name is provided, we don't tell guessit what kind of match we are looking for.
        # This prevents guessit from classifying overly general matches as series.
parse_type = 'episode' if kwargs.get('name') else None
if parse_type:
guessit_options['type'] = parse_type
# NOTE: Guessit expects str on PY3 and unicode on PY2 hence the use of future.utils.native
try:
guess_result = guessit_api.guessit(native(data), options=guessit_options)
except GuessitException:
log.warning('Parsing %s with guessit failed. Most likely a unicode error.', data)
guess_result = {}
parsed = GuessitParsedSerie(data, kwargs.pop('name', None), guess_result, **kwargs)
end = time.clock()
log.debug('Parsing result: %s (in %s ms)', parsed, (end - start) * 1000)
return parsed
@event('plugin.register')
def register_plugin():
plugin.register(ParserGuessit, 'parser_guessit', groups=['movie_parser', 'series_parser'], api_ver=2)
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.safestring import SafeUnicode, SafeString
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
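# Usage sketch (illustrative): the wrapper is constructed around a plain
# psycopg2 cursor (see DatabaseWrapper._cursor below) and is transparent to
# callers, which see Django's exception types instead of psycopg2's:
#
#     cursor = CursorWrapper(connection.cursor())
#     cursor.execute("SELECT 1")
#     row = cursor.fetchone()  # proxied to the real cursor via __getattr__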
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With psycopg2, cursor objects have a "query" attribute that is the
# exact query sent to the database. See docs here:
# http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
                # No savepoint support for earlier versions of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
                    # FIXME: Eventually we'll enable this by default for
                    # versions that support it, but, right now, that's hard to
                    # do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
"""
This contains classes used for analyzing the sentiments of input texts
"""
import re
import pprint
import shelve
# import IOMDataService as DS
# from TextFiltration import Sentences, Words, Lemmatized, Bigrams, Trigrams
import numpy as np
from senti_classifier import senti_classifier
import nltk
from nltk.corpus import sentiwordnet as swn
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
class SentiSynsetTools(object):
"""
Tools for loading and working with SentiWordNet stuff
"""
def load_senti_synsets_for_word(self, word):
"""
Get a list of senti_synsets for the word
Args:
word: String to lookup
Returns:
List of senti_synsets
Example:
input: slow
result:
SentiSynset('decelerate.v.01'),
SentiSynset('slow.v.02'),
SentiSynset('slow.v.03'),
SentiSynset('slow.a.01'),
SentiSynset('slow.a.02'),
SentiSynset('slow.a.04'),
SentiSynset('slowly.r.01'),
SentiSynset('behind.r.03')]
"""
        return list(swn.senti_synsets(word))
def get_scores_from_senti_synset(self, string_name_of_synset, return_format=tuple):
"""
Args:
            string_name_of_synset: The string name of the synset that we want scores for
return_format: What kind of object to return. Allowed values are tuple, dict
Returns:
On default of tuple returns (positiveScore, negativeScore, objScore)
"""
breakdown = swn.senti_synset(string_name_of_synset)
if return_format is tuple:
return (breakdown.pos_score(), breakdown.neg_score(), breakdown.obj_score())
elif return_format is dict:
return {
'posScore': breakdown.pos_score(),
'negScore': breakdown.neg_score(),
'objScore': breakdown.obj_score()
}
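    # Usage sketch (illustrative; the scores shown are made up, not real
    # SentiWordNet data):
    #
    #     tools = SentiSynsetTools()
    #     pos, neg, obj = tools.get_scores_from_senti_synset('slow.a.01')
    #     # e.g. (0.0, 0.25, 0.75); the three scores always sum to 1.0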
class DisambiguationTools(object):
"""
"""
def disambiguate_word_senses(self, sentence, word):
"""
Attempts to determine the proper sense of the target
word from the sentence in which it appears.
Args:
sentence: String representation of the sentence
            word: String representation of the word
Returns:
Returns a synset which is the best guess.
Example:
            disambiguate_word_senses('A cat is a good pet', 'cat')
OUT: Synset('cat.v.01')
"""
wordsynsets = wn.synsets(word)
bestScore = 0.0
result = None
for synset in wordsynsets:
for w in nltk.word_tokenize(sentence):
score = 0.0
for wsynset in wn.synsets(w):
                    sim = wn.path_similarity(wsynset, synset)
                    if sim is not None:
                        score += sim
if (score > bestScore):
bestScore = score
result = synset
return result
class TextPrepare(object):
"""
All tools for preparing text for processing
"""
def __init__(self):
self.stop_words = set(stopwords.words('english'))
        self.stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}'])  # remove these if you need punctuation
def prepare_text(self, tweet_text):
"""
        Returns a bag of words.
        Prospective improvement: remove emoticons.
:param tweet_text:
:return: list
"""
return [i.lower() for i in wordpunct_tokenize(tweet_text) if i.lower() not in self.stop_words]
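    # Example: prepare_text("I love NLTK!") tokenizes to
    # ['I', 'love', 'NLTK', '!'], then lowercasing and stop-word/punctuation
    # filtering leaves ['love', 'nltk'].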
class ComputeSentiments(object):
"""
"""
def __init__(self):
self.text_preparer = TextPrepare()
self.disambiguator = DisambiguationTools()
self.sentitools = SentiSynsetTools()
    def compute_sentiments(self, tweet_text):
        """
        Computes the summed (positive, negative) sentiment scores of a tweet.
        :param tweet_text:
        :return: (pos_total, neg_total) tuple
        """
        tokens = self.text_preparer.prepare_text(tweet_text)
        pos_total, neg_total = 0.0, 0.0
        for word in tokens:
            best_synset = self.disambiguator.disambiguate_word_senses(tweet_text, word)
            if best_synset is None:
                continue
            # Compute the scores for the disambiguated sense
            pos, neg, _obj = self.sentitools.get_scores_from_senti_synset(best_synset.name())
            pos_total += pos
            neg_total += neg
        return pos_total, neg_total
class ItemSentimentAnalyzer(object):
"""
This analyzes and returns the sentiment scores for a particular item
"""
def __init__(self):
pass
# DS.IOMService.__init__(self)
def computeSentimentScores(self, record, tokenizer):
"""
        record is a dict which must have record['quote_text']; it normally also has
        record['quote_id'] or record['vin_id'].
        tokenizer is any object with a tokenize method; the unit of analysis
        (e.g. word, ngram, sentence) is determined by the tokenizer passed in.
self.text = record['quote_text']
# To allow this to be used with arbitrary inputs
        try:
            self.quoteID = record['quote_id']
        except KeyError:
            try:
                self.quoteID = record['vin_id']
            except KeyError:
                # Make a random ID if none exists
                self.quoteID = 'ID' + str(np.random.rand())
# Tokenize the text into the appropriate units
self.tokens = tokenizer.tokenize(self.text)
# Calc number of tokens in the record
self.numTokens = len(self.tokens)
# Calc sentiment scores
self.pos_score, self.neg_score = senti_classifier.polarity_scores(self.tokens)
# Averages are needed because otherwise the score will vary with number of sentences
# Average positive sentiment score of the record
self.avgPos = self.pos_score / self.numTokens
# Average negative sentiment of the record
self.avgNeg = (self.neg_score / self.numTokens) * -1
# Net average sentiment of the record
self.netSent = self.avgPos + self.avgNeg
        # Objectivity score (from Chris Potts)
self.obj_score = 1.0 - self.netSent
# Put the results in a dictionary
self.scores = dict(quoteID=self.quoteID, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
return self.scores
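    # Usage sketch (illustrative): any object with a .tokenize(text) method
    # works as the tokenizer, e.g. NLTK's sentence tokenizer:
    #
    #     from nltk.tokenize import PunktSentenceTokenizer
    #     analyzer = ItemSentimentAnalyzer()
    #     scores = analyzer.computeSentimentScores(
    #         {'quote_text': 'Great service. Slow delivery.', 'quote_id': 42},
    #         PunktSentenceTokenizer())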
#def makeDict(self):
# """
# Makes a dictionary for the result
# Keys: quote_id, avgPos, avgNeg, netSent
# """
# self.result_dict = dict(quote_id=self.quote_id, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
# return self.result_dict
def saveSentiments(self, filepath):
"""
Saves the results
Args:
filepath: the path to the shelve file where the data is / is to be stored
"""
        #self.makeDict()
        self.to_save = self.scores
        # save_sentiment_data_to_file comes from the IOMDataService mixin,
        # whose import is commented out at the top of this module.
        self.save_sentiment_data_to_file(filepath)
class GroupSentiments:
"""
This is used to compute the sentiment scores for a group of items
"""
def __init__(self, data, groupname):
"""
Args:
data: a list of dictionaries that have been prepared by ItemSentiments to be saved
groupname: the name that the result will be stored with/ or the name to retrieve
"""
self.name = groupname
#self.datafile = datafile
self.quoteIDs = []
self.avgPos = []
self.avgNeg = []
self.netSent = []
for d in data:
            self.quoteIDs.append(d['quoteID'])
self.avgPos.append(d['avgPos'])
self.avgNeg.append(d['avgNeg'])
self.netSent.append(d['netSent'])
self.overallpos = np.average(self.avgPos)
self.overallneg = np.average(self.avgNeg)
self.overallsent = np.average(self.netSent)
def saveSentiments(self, filepath):
"""
Saves the results
@param filepath The path to the saved data or to where it should be saved
@type string
"""
self.sentiments = dict(name=self.name, overallpos=self.overallpos, overallneg=self.overallneg,
overallsent=self.overallsent)
db = shelve.open(filepath)
db[str(self.sentiments['name'])] = self.sentiments
db.close()
print(self.sentiments)
class MultiItemSentimentAnalyzer(ItemSentimentAnalyzer):
def __init__(self, data_to_analyze, tokenizer, filepath, label):
"""
        @param data_to_analyze List of dictionaries with items that ItemSentimentAnalyzer can operate on
        @type list
"""
ItemSentimentAnalyzer.__init__(self)
self.to_save = []
for record in data_to_analyze:
self.computeSentimentScores(record, tokenizer)
self.to_save.append(self.scores)
self.save_sentiment_data_to_file(filepath, label)
import pytest
from moto.dynamodb2.exceptions import (
AttributeIsReservedKeyword,
ExpressionAttributeValueNotDefined,
AttributeDoesNotExist,
ExpressionAttributeNameNotDefined,
IncorrectOperandType,
InvalidUpdateExpressionInvalidDocumentPath,
)
from moto.dynamodb2.models import Item, DynamoType
from moto.dynamodb2.parsing.ast_nodes import (
NodeDepthLeftTypeFetcher,
UpdateExpressionSetAction,
DDBTypedValue,
)
from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
from moto.dynamodb2.parsing.validators import UpdateExpressionValidator
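# NOTE: these tests rely on a pytest fixture named `table` (supplied by the
# suite's conftest, not shown here) that provides a moto table model.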
def test_valid_update_expression(table):
update_expression = "set forum_desc=:Desc, forum_type=:NewType"
update_expression_values = {
":Desc": {"S": "AmazingForum"},
":NewType": {"S": "BASIC"},
}
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "forum_name"}),
range_key=DynamoType({"S": "forum_type"}),
attrs={"forum_name": {"S": "hello"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=update_expression_values,
item=item,
table=table,
).validate()
def test_validation_of_update_expression_with_keyword(table):
try:
update_expression = "SET myNum = path + :val"
update_expression_values = {":val": {"N": "3"}}
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "path": {"N": "3"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=update_expression_values,
item=item,
table=table,
).validate()
assert False, "No exception raised"
except AttributeIsReservedKeyword as e:
assert e.keyword == "path"
@pytest.mark.parametrize(
"update_expression", ["SET a = #b + :val2", "SET a = :val2 + #b",],
)
def test_validation_of_a_set_statement_with_incorrect_passed_value(
update_expression, table
):
"""
    Running both permutations shows that values are replaced prior to resolving attributes.
An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression:
An expression attribute value used in expression is not defined; attribute value: :val2
"""
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "b": {"N": "3"}},
)
try:
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names={"#b": "ok"},
expression_attribute_values={":val": {"N": "3"}},
item=item,
table=table,
        ).validate()
        assert False, "No exception raised"
    except ExpressionAttributeValueNotDefined as e:
assert e.attribute_value == ":val2"
def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item(
table,
):
"""
When an update expression tries to get an attribute that does not exist it must throw the appropriate exception.
An error occurred (ValidationException) when calling the UpdateItem operation:
The provided expression refers to an attribute that does not exist in the item
"""
try:
update_expression = "SET a = nonexistent"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "path": {"N": "3"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
assert False, "No exception raised"
except AttributeDoesNotExist:
assert True
@pytest.mark.parametrize(
"update_expression", ["SET a = #c", "SET a = #c + #d",],
)
def test_validation_of_update_expression_with_attribute_name_that_is_not_defined(
update_expression, table,
):
"""
When an update expression tries to get an attribute name that is not provided it must throw an exception.
An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression:
An expression attribute name used in the document path is not defined; attribute name: #c
"""
try:
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "path": {"N": "3"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names={"#b": "ok"},
expression_attribute_values=None,
item=item,
table=table,
).validate()
assert False, "No exception raised"
except ExpressionAttributeNameNotDefined as e:
assert e.not_defined_attribute_name == "#c"
def test_validation_of_if_not_exists_not_existing_invalid_replace_value(table):
try:
update_expression = "SET a = if_not_exists(b, a.c)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "a": {"S": "A"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
assert False, "No exception raised"
except AttributeDoesNotExist:
assert True
def get_first_node_of_type(ast, node_type):
return next(NodeDepthLeftTypeFetcher(node_type, ast))
def get_set_action_value(ast):
"""
Helper that takes an AST and gets the first UpdateExpressionSetAction and retrieves the value of that action.
This should only be called on validated expressions.
Args:
ast(Node):
Returns:
DynamoType: The DynamoType object representing the Dynamo value.
"""
set_action = get_first_node_of_type(ast, UpdateExpressionSetAction)
typed_value = set_action.children[1]
assert isinstance(typed_value, DDBTypedValue)
dynamo_value = typed_value.children[0]
assert isinstance(dynamo_value, DynamoType)
return dynamo_value
def test_validation_of_if_not_exists_not_existing_value(table):
update_expression = "SET a = if_not_exists(b, a)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "a": {"S": "A"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"S": "A"})
def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute(
table,
):
update_expression = "SET a = if_not_exists(b, a)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"S": "B"})
def test_validation_of_if_not_exists_with_existing_attribute_should_return_value(table):
update_expression = "SET a = if_not_exists(b, :val)"
update_expression_values = {":val": {"N": "4"}}
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "b": {"N": "3"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=update_expression_values,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"N": "3"})
def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value(
table,
):
update_expression = "SET a = if_not_exists(b, :val)"
update_expression_values = {":val": {"N": "4"}}
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}), range_key=None, attrs={"id": {"S": "1"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=update_expression_values,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"N": "4"})
def test_validation_of_sum_operation(table):
update_expression = "SET a = a + b"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"N": "7"})
def test_validation_homogeneous_list_append_function(table):
update_expression = "SET ri = list_append(ri, :vals)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":vals": {"L": [{"S": "i3"}, {"S": "i4"}]}},
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType(
{"L": [{"S": "i1"}, {"S": "i2"}, {"S": "i3"}, {"S": "i4"}]}
)
def test_validation_heterogeneous_list_append_function(table):
update_expression = "SET ri = list_append(ri, :vals)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":vals": {"L": [{"N": "3"}]}},
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"L": [{"S": "i1"}, {"S": "i2"}, {"N": "3"}]})
def test_validation_list_append_function_with_non_list_arg(table):
"""
Must error out:
Invalid UpdateExpression: Incorrect operand type for operator or function;
operator or function: list_append, operand type: S'
Returns:
"""
try:
update_expression = "SET ri = list_append(ri, :vals)"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":vals": {"S": "N"}},
item=item,
table=table,
        ).validate()
        assert False, "No exception raised"
    except IncorrectOperandType as e:
assert e.operand_type == "S"
assert e.operator_or_function == "list_append"
def test_sum_with_incompatible_types(table):
"""
Must error out:
Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S'
Returns:
"""
try:
update_expression = "SET ri = :val + :val2"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}},
item=item,
table=table,
        ).validate()
        assert False, "No exception raised"
    except IncorrectOperandType as e:
assert e.operand_type == "S"
assert e.operator_or_function == "+"
def test_validation_of_subtraction_operation(table):
update_expression = "SET ri = :val - :val2"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":val": {"N": "1"}, ":val2": {"N": "3"}},
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"N": "-2"})
def test_cannot_index_into_a_string(table):
"""
Must error out:
The document path provided in the update expression is invalid for update'
"""
try:
update_expression = "set itemstr[1]=:Item"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values={":Item": {"S": "string_update"}},
item=item,
table=table,
).validate()
assert False, "Must raise exception"
except InvalidUpdateExpressionInvalidDocumentPath:
assert True
def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute(
table,
):
"""If this step just passes we are happy enough"""
update_expression = "set d=a"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "foo2"}, "a": {"N": "3"}},
)
validated_ast = UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
dynamo_value = get_set_action_value(validated_ast)
assert dynamo_value == DynamoType({"N": "3"})
def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute(
table,
):
try:
update_expression = "set d.e=a"
update_expression_ast = UpdateExpressionParser.make(update_expression)
item = Item(
hash_key=DynamoType({"S": "id"}),
range_key=None,
attrs={"id": {"S": "foo2"}, "a": {"N": "3"}},
)
UpdateExpressionValidator(
update_expression_ast,
expression_attribute_names=None,
expression_attribute_values=None,
item=item,
table=table,
).validate()
assert False, "Must raise exception"
except InvalidUpdateExpressionInvalidDocumentPath:
assert True
'''
Logger object
=============
Different levels are available: trace, debug, info, warning, error, critical.
Examples of usage::
from kivy.logger import Logger
Logger.info('title: This is a info')
Logger.debug('title: This is a debug')
try:
raise Exception('bleh')
except Exception:
        Logger.exception('Something happened!')
The message passed to the logger is split on the first colon. The left part is
used as the title and the right part as the message. This way, you can
"categorize" your messages easily::
Logger.info('Application: This is a test')
# will appear as
[INFO ] [Application ] This is a test
Logger configuration
--------------------
The logger can be controlled in the Kivy configuration file::
[kivy]
log_level = info
log_enable = 1
log_dir = logs
log_name = kivy_%y-%m-%d_%_.txt
More information about the allowed values is described in :mod:`kivy.config`
module.
Logger history
--------------
Even if the logger is not enabled, you can still access the history of the
latest 100 messages::
from kivy.logger import LoggerHistory
print(LoggerHistory.history)
'''
import logging
import os
import sys
import kivy
from kivy.compat import PY2
from random import randint
from functools import partial
__all__ = ('Logger', 'LOG_LEVELS', 'COLORS', 'LoggerHistory')
Logger = None
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
previous_stderr = sys.stderr
def formatter_message(message, use_color=True):
if use_color:
message = message.replace("$RESET", RESET_SEQ)
message = message.replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
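# Example (illustrative): with use_color=True,
# formatter_message('$BOLD[%(levelname)s]$RESET %(message)s') returns
# '\x1b[1m[%(levelname)s]\x1b[0m %(message)s'.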
COLORS = {
'TRACE': MAGENTA,
'WARNING': YELLOW,
'INFO': GREEN,
'DEBUG': CYAN,
'CRITICAL': RED,
'ERROR': RED}
logging.TRACE = 9
LOG_LEVELS = {
'trace': logging.TRACE,
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
class FileHandler(logging.Handler):
history = []
filename = 'log.txt'
fd = None
def purge_logs(self, directory):
        '''Purge logs is called randomly to prevent the log directory from
        filling up with lots and lots of log files. Each call has roughly a
        1 in 20 chance of firing a purge.
        '''
if randint(0, 20) != 0:
return
# Use config ?
maxfiles = 100
print('Purge log fired. Analysing...')
join = os.path.join
unlink = os.unlink
# search all log files
l = [join(directory, x) for x in os.listdir(directory)]
if len(l) > maxfiles:
# get creation time on every files
l = [{'fn': x, 'ctime': os.path.getctime(x)} for x in l]
# sort by date
l = sorted(l, key=lambda x: x['ctime'])
# get the oldest (keep last maxfiles)
l = l[:-maxfiles]
print('Purge %d log files' % len(l))
# now, unlink every files in the list
for filename in l:
unlink(filename['fn'])
print('Purge finished !')
def _configure(self):
from time import strftime
from kivy.config import Config
log_dir = Config.get('kivy', 'log_dir')
log_name = Config.get('kivy', 'log_name')
_dir = kivy.kivy_home_dir
if len(log_dir) and log_dir[0] == '/':
_dir = log_dir
else:
_dir = os.path.join(_dir, log_dir)
if not os.path.exists(_dir):
os.mkdir(_dir)
self.purge_logs(_dir)
pattern = log_name.replace('%_', '@@NUMBER@@')
pattern = os.path.join(_dir, strftime(pattern))
n = 0
while True:
filename = pattern.replace('@@NUMBER@@', str(n))
if not os.path.exists(filename):
break
n += 1
            if n > 10000:  # guard against flooding the directory
                raise Exception('Too many log files, remove them')
FileHandler.filename = filename
FileHandler.fd = open(filename, 'w')
Logger.info('Logger: Record log in %s' % filename)
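    # Example (illustrative): with log_name = 'kivy_%y-%m-%d_%_.txt', the
    # '%_' placeholder becomes the first free integer, producing e.g.
    # 'kivy_24-01-31_0.txt' on the first run of the day and
    # 'kivy_24-01-31_1.txt' on the next.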
def _write_message(self, record):
if FileHandler.fd in (None, False):
return
FileHandler.fd.write('[%-18s] ' % record.levelname)
try:
FileHandler.fd.write(record.msg)
except UnicodeEncodeError:
if PY2:
FileHandler.fd.write(record.msg.encode('utf8'))
FileHandler.fd.write('\n')
FileHandler.fd.flush()
def emit(self, message):
# during the startup, store the message in the history
if Logger.logfile_activated is None:
FileHandler.history += [message]
return
# startup done, if the logfile is not activated, avoid history.
if Logger.logfile_activated is False:
FileHandler.history = []
return
if FileHandler.fd is None:
try:
self._configure()
except Exception:
# deactivate filehandler...
FileHandler.fd = False
Logger.exception('Error while activating FileHandler logger')
return
while FileHandler.history:
_message = FileHandler.history.pop()
self._write_message(_message)
self._write_message(message)
class LoggerHistory(logging.Handler):
history = []
def emit(self, message):
LoggerHistory.history = [message] + LoggerHistory.history[:100]
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
        # XXX Hack to avoid showing the traceback for the Numeric handler.
        # A lot of people were complaining about that; now we hide it.
if 'Unable to load registered array format handler' in record.msg:
if record.args and record.args[0] == 'numeric':
return
try:
msg = record.msg.split(':', 1)
if len(msg) == 2:
record.msg = '[%-12s]%s' % (msg[0], msg[1])
except:
pass
levelname = record.levelname
if record.levelno == logging.TRACE:
levelname = 'TRACE'
record.levelname = levelname
if self.use_color and levelname in COLORS:
levelname_color = (
COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ)
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class ConsoleHandler(logging.StreamHandler):
def filter(self, record):
try:
msg = record.msg
k = msg.split(':', 1)
if k[0] == 'stderr' and len(k) == 2:
previous_stderr.write(k[1] + '\n')
return False
except:
pass
return True
class LogFile(object):
def __init__(self, channel, func):
self.buffer = ''
self.func = func
self.channel = channel
def write(self, s):
s = self.buffer + s
self.flush()
f = self.func
channel = self.channel
lines = s.split('\n')
for l in lines[:-1]:
f('%s: %s' % (channel, l))
self.buffer = lines[-1]
def flush(self):
return
def logger_config_update(section, key, value):
if LOG_LEVELS.get(value) is None:
        raise AttributeError('Loglevel {0!r} doesn\'t exist'.format(value))
Logger.setLevel(level=LOG_LEVELS.get(value))
#: Kivy default logger instance
Logger = logging.getLogger('kivy')
Logger.logfile_activated = None
Logger.trace = partial(Logger.log, logging.TRACE)
# set the Kivy logger as the default
logging.root = Logger
# add default kivy logger
Logger.addHandler(LoggerHistory())
if 'KIVY_NO_FILELOG' not in os.environ:
Logger.addHandler(FileHandler())
# Use the custom handler instead of streaming one.
if 'KIVY_NO_CONSOLELOG' not in os.environ:
if hasattr(sys, '_kivy_logging_handler'):
Logger.addHandler(getattr(sys, '_kivy_logging_handler'))
else:
use_color = os.name != 'nt'
if os.environ.get('KIVY_BUILD') in ('android', 'ios'):
use_color = False
color_fmt = formatter_message(
'[%(levelname)-18s] %(message)s', use_color)
formatter = ColoredFormatter(color_fmt, use_color=use_color)
console = ConsoleHandler()
console.setFormatter(formatter)
Logger.addHandler(console)
# install stderr handlers
sys.stderr = LogFile('stderr', Logger.warning)
#: Kivy history handler
LoggerHistory = LoggerHistory
from FrontEnd.models import *
from FrontEnd.serializers import *
from FrontEnd import utility
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponse, HttpResponseRedirect
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from FrontEnd.permissions import *
from rest_framework import authentication, permissions, status, generics, mixins
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
from datetime import datetime, date, timedelta
from django.db.models import Count, Q, Sum
from rest_framework.authtoken.models import Token
from django.contrib.auth import authenticate
import json
import urllib2
from BeautifulSoup import BeautifulSoup
class DomainList(APIView):
#Return the list of domains the user's company has access to
def get(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
userExt = request.user.ExtUser
domains = SupportedDomain.objects.filter(Q(Company=userExt.Company))
serializer = DomainSerializer(domains, many=True)
return Response(serializer.data)
#For creating and updating domains
def post(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
dData = request.DATA.copy()
dData['Company'] = request.user.ExtUser.Company.id
if 'id' in dData:
domain = get_object_or_404(SupportedDomain, pk=dData['id'])
if domain.Company != request.user.ExtUser.Company:
return HttpResponse('', status=404)
serializer = DomainSerializer(domain, dData)
if serializer.is_valid():
serializer.save()
interData = dData['Intersticial']
if interData:
inter = get_object_or_404(Intersticial, pk=interData['id'])
if inter.Company != domain.Company:
return HttpResponse('', status=401)
if domain.Intersticial != inter:
domain.Intersticial = inter
serializer = DomainSerializer(domain)
else:
domain.Intersticial = None
domain.save()
return Response(serializer.data)
else:
return Response(serializer.errors)
else:
serializer = DomainSerializer(dData)
if serializer.is_valid():
if 'goli.us' in dData['Domain']:
return HttpResponse('', status=401)
serializer.save()
interData = dData['Intersticial']
if interData:
inter = get_object_or_404(Intersticial, pk=interData['id'])
nDomain = serializer.object
if inter.Company != nDomain.Company:
return HttpResponse('', status=401)
if nDomain.Intersticial != inter:
nDomain.Intersticial = inter
nDomain.save()
serializer = DomainSerializer(nDomain)
return Response(serializer.data)
else:
return Response(serializer.errors)
class DomainNode(APIView):
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
domain = get_object_or_404(SupportedDomain, pk=pk)
if domain.Company != request.user.ExtUser.Company:
return HttpResponse('', status=401)
serializer = DomainSerializer(domain)
return Response(serializer.data)
class RedirectUrlsForDomain(APIView):
#TODO Sort by newest first
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
userExt = request.user.ExtUser
domain = get_object_or_404(SupportedDomain, pk=pk)
if userExt.Company != domain.Company and domain.Domain != 'goli.us':
return HttpResponse('', status=401)
links = RedirectLink.objects.filter(Domain=domain).filter(IsActive=True)
if domain.Domain == 'goli.us':
links = links.filter(User=userExt)
if 'q' in request.GET:
q = request.GET['q']
links = links.filter(Q(RedirectUrl__contains=q) | Q(LinkTitle__contains=q))
links = links.order_by('-TimeGenerated')
serializer = LinkSerializer(links, many=True)
return Response(serializer.data)
class SingleRedirectUrl(APIView):
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
link = get_object_or_404(RedirectLink, pk=pk)
if link.User.Company != request.user.ExtUser.Company:
return HttpResponse('', status=401)
serializer = LinkSerializer(link)
return Response(serializer.data)
def delete(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
link = get_object_or_404(RedirectLink, pk=pk)
if link.User.Company != request.user.ExtUser.Company:
return HttpResponse('', status=401)
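# Design note: deletion is "soft" -- the row is kept with IsActive=False so
# the short link stops resolving without destroying its click history.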
link.IsActive = False
link.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class RedirectUrl(APIView):
def get(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
links = RedirectLink.objects.filter(User=request.user.ExtUser)
serializer = LinkSerializer(links, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
mData = request.DATA.copy()
hasDomain = False
userExt = request.user.ExtUser
#Make sure the requested domain belongs to the user's company
if SupportedDomain.objects.get(id=mData['Domain']).Company != userExt.Company:
return HttpResponse('', status=401)
#If the link has already been generated, return the existing object
prevLink = RedirectLink.objects.filter(RedirectUrl=mData['RedirectUrl']).filter(Domain__id=mData['Domain'])
if hasDomain:
prevLink = prevLink.filter(User=userExt)
if prevLink.count() > 0:
if not prevLink[0].IsActive:
prevLink[0].IsActive = True
prevLink[0].save()
serializer = LinkSerializer(prevLink[0])
return Response(serializer.data)
#Grab the ExtUser id and a new unique URL key
mData['User'] = userExt.id
if 'UrlKey' in mData:
prevLink = RedirectLink.objects.filter(Domain__id=mData['Domain']).filter(UrlKey=mData['UrlKey'])
if prevLink.count() > 0:
#Key already used for this domain, reject
return HttpResponse('', status=400)
else:
mData['UrlKey'] = utility.getUniqueRedirectKeyForDomain(mData['Domain'])
#Serialize and send the response
try:
title = BeautifulSoup(urllib2.urlopen(mData['RedirectUrl'])).title.string
mData['LinkTitle'] = title
except Exception, e:
mData['LinkTitle'] = ''
print "Error getting link title"
print e
serializer = LinkSerializer(data=mData)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors, status=400)
def getLinkStats(self, request, pk, dateStart=date(1970,1,1), format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
link = get_object_or_404(RedirectLink, pk=pk)
if link.User != request.user.ExtUser:
return HttpResponse('', status=401)
stats = LinkClickTotal.objects.filter(Link=link).filter(Date__gte=dateStart)
clicks = LinkStat.objects.filter(Link=link).filter(TimeClicked__gte=dateStart)
countryClicks = clicks.values('Country','CountryCode').annotate(Clicks=Count('CountryCode'))
countrySerializer = LinkCountryStatsSerializer(countryClicks, many=True)
refererClicks = clicks.values('Referer').annotate(Clicks=Count('Referer'))
refererSerializer = RefererStatsSerializer(refererClicks, many=True)
agentTypes = LinkAgentType.objects.filter(Stat__Link=link).filter(Stat__TimeClicked__gte=dateStart)
browsers = agentTypes.values('Browser').annotate(count=Count('Browser'))
operatingSystems = agentTypes.values('OS').annotate(count=Count('OS'))
devices = agentTypes.values('Device').annotate(count=Count('Device'))
clickSerializer = ClickTotalSerializer(stats, many=True)
nDict = {
'Clicks': clickSerializer.data,
'Referers': refererSerializer.data,
'Countries': countrySerializer.data,
'Browsers': browsers,
'OS': operatingSystems,
'Devices': devices
}
return Response(nDict)
class LinkStatistics(APIView):
def get(self, request, pk, format=None):
return getLinkStats(self, request, pk, format=format)
class MonthLinkStatistics(APIView):
def get(self, request, pk, format=None):
return getLinkStats(self, request, pk, utility.monthdelta(datetime.now(), -1), format)
class ThreeMonthLinkStatistics(APIView):
def get(self, request, pk, format=None):
return getLinkStats(self, request, pk, utility.monthdelta(datetime.now(), -3), format)
class DomainStats(APIView):
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
domain = get_object_or_404(SupportedDomain, pk=pk)
if domain.Domain == 'goli.us':
statDict = {
'TotalClicks': self.get_num_clicks(domain, request.user.ExtUser),
'CountriesReached': self.get_unique_countries(domain, request.user.ExtUser),
'UniqueVisitors': self.get_num_unique_visitors(domain, request.user.ExtUser),
'UniqueSources': self.get_unique_sources(domain, request.user.ExtUser)
}
return Response(statDict)
else:
if request.user.ExtUser.Company != domain.Company:
return HttpResponse('', status=403)
serializer = DomainStatsSerializer2(domain)
return Response(serializer.data)
def get_num_clicks(self, obj, user):
return LinkClickTotal.objects.filter(Link__Domain=obj).filter(Link__User=user).filter(Date__gte=(date.today() - timedelta(days=1))).aggregate(Sum('TotalClicked'))['TotalClicked__sum']
def get_num_unique_visitors(self, obj, user):
return LinkStat.objects.filter(Link__Domain=obj).filter(Link__User=user).filter(TimeClicked__gte=(date.today() - timedelta(days=1))).values('IpAddress').distinct().count()
def get_unique_sources(self, obj, user):
return LinkStat.objects.filter(Link__Domain=obj).filter(Link__User=user).filter(TimeClicked__gte=(date.today() - timedelta(days=1))).values('Referer').distinct().count()
def get_unique_countries(self, obj, user):
return LinkStat.objects.filter(Link__Domain=obj).filter(Link__User=user).filter(TimeClicked__gte=(date.today() - timedelta(days=1))).values('CountryCode').distinct().count()
class CompanySerializer(APIView):
def get(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
company = request.user.ExtUser.Company
serializer = CompanyInfoSerializer(company)
return Response(serializer.data)
#just returns currently logged in user
class MeSerializer(APIView):
def get(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
serializer = UserSerializer(request.user.ExtUser)
return Response(serializer.data)
class InterstitialSingle(generics.RetrieveUpdateDestroyAPIView):
queryset = Intersticial.objects.all()
serializer_class = InterstitialSerializer
permission_classes = (IsCompanies, IsAuthenticated, )
def pre_save(self, obj):
obj.Company = self.request.user.ExtUser.Company
class InterstitialList(APIView):
def get(self, request, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
interstitials = Intersticial.objects.filter(Company=request.user.ExtUser.Company)
serializer = InterstitialSerializer(interstitials, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
cpy = request.DATA.copy()
cpy['Company'] = request.user.ExtUser.Company.id
serializer = InterstitialSerializer(data=cpy)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=400)
class LoginSerializer(APIView):
def get(self, request, format=None):
if not ('username' in request.GET) or not ('password' in request.GET):
return HttpResponse('', status=403)
user = authenticate(username=request.GET['username'],password=request.GET['password'])
if user is not None:
token, created = Token.objects.get_or_create(user=user)
data = {
'token':token.key
}
return Response(data)
else:
return HttpResponse('', status=401)
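# Hedged client-side sketch: assuming this view is routed at /api/login/
# (the path is an assumption), a client exchanges credentials for a DRF
# token and sends it in the Authorization header afterwards:
#
#     import requests
#     r = requests.get('https://example.com/api/login/',
#                      params={'username': 'alice', 'password': 'secret'})
#     token = r.json()['token']
#     requests.get('https://example.com/api/domains/',
#                  headers={'Authorization': 'Token %s' % token})
#
# Note that passing passwords as GET query parameters leaks them into server
# logs; accepting them in a POST body would be the safer design.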
class AnonUrl(APIView):
def post(self, request, format=None):
mData = request.DATA.copy()
mData['Domain'] = SupportedDomain.objects.get(Domain='t.goli.us').id
prevLink = RedirectLink.objects.filter(RedirectUrl=mData['RedirectUrl']).filter(Domain__id=mData['Domain'])
if prevLink.count() > 0:
serializer = LinkSerializer(prevLink[0])
return Response(serializer.data)
mData['User'] = User.objects.get(username='System').ExtUser.id
if 'UrlKey' in mData:
prevLink = RedirectLink.objects.filter(Domain__id=mData['Domain']).filter(UrlKey=mData['UrlKey'])
if prevLink.count() > 0:
#Key already used for this domain, reject
return HttpResponse('', status=400)
else:
mData['UrlKey'] = utility.getUniqueRedirectKeyForDomain(mData['Domain'])
title = ''
try:
title = BeautifulSoup(urllib2.urlopen(mData['RedirectUrl'])).title.string
except Exception, e:
print "Error getting link title"
print e
mData['LinkTitle'] = title
serializer = LinkSerializer(data=mData)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors, status=400)
class InterstitialStatView(APIView):
def get(self, request, format=None):
mData = {}
mData['ActionTaken'] = request.GET['action_taken']
mData['Link'] = request.GET['linkid']
mData['Intersticial'] = request.GET['inter_id']
mData['TimeTaken'] = request.GET['time_taken']
serializer = InterstitialStatSerializer(data=mData)
if serializer.is_valid():
serializer.save()
stitial = serializer.object
#save aggregate stat for the interstitial
aggrStats = AggregateInterstitialStat.objects.filter(Intersticial=stitial.Intersticial).filter(Date=date.today())
if len(aggrStats) != 0:
aggrStat = aggrStats[0]
else:
aggrStat = AggregateInterstitialStat(Intersticial=stitial.Intersticial, Date=date.today())
aggrStat.incrementAction(stitial.ActionTaken, stitial.TimeTaken)
aggrStat.save()
return Response('', status=200)
else:
return Response(serializer.errors, status=400)
class DomainInterstitialStat(APIView):
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
inter = get_object_or_404(Intersticial, pk=pk)
if inter.Company != request.user.ExtUser.Company:
return HttpResponse('', status=401)
stats = InterstitialStat.objects.filter(Intersticial=inter).filter(TimeGathered__gte=datetime.fromtimestamp(int(request.GET['from'])))
serializer = InterstitialStatSerializer(stats, many=True)
return Response(serializer.data)
class OverallInterstitialStat(APIView):
def get(self, request, pk, format=None):
if not request.user.is_authenticated():
return HttpResponse('', status=401)
inter = get_object_or_404(Intersticial, pk=pk)
if inter.Company != request.user.ExtUser.Company:
return HttpResponse('', status=401)
stats = AggregateInterstitialStat.objects.filter(Intersticial=inter).filter(Date__gte=datetime.fromtimestamp(int(request.GET['from'])))
aggrData = {
'AdsClicked': 0,
'ButtonsClicked': 0,
'RedirectOcurred': 0
}
for stat in stats:
aggrData['AdsClicked'] += stat.AdClicked
aggrData['ButtonsClicked'] += stat.ButtonClicked
aggrData['RedirectOcurred'] += stat.RedirectOcurred
serializer = OverallInterStatAggregateSerializer(aggrData)
return Response(serializer.data)
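# Hedged wiring sketch (not part of this module): a hypothetical urls.py
# mapping these APIViews; every route path below is an assumption.
#
#     from django.conf.urls import url
#     from FrontEnd import views
#
#     urlpatterns = [
#         url(r'^api/domains/$', views.DomainList.as_view()),
#         url(r'^api/domains/(?P<pk>\d+)/$', views.DomainNode.as_view()),
#         url(r'^api/links/$', views.RedirectUrl.as_view()),
#         url(r'^api/links/(?P<pk>\d+)/$', views.SingleRedirectUrl.as_view()),
#         url(r'^api/login/$', views.LoginSerializer.as_view()),
#     ]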
| |
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import os
def load_text(n,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
while i < len(dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
_map,rev_map=get_one_hot_map(word_list,def_list,n)
if num_samples is None:
num_samples=len(word_list)
# X = (36665, 56210)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
# np.save('X',X)
# np.save('y',y)
# np.save('mask',mask)
X=np.load('X.npy','r')
y=np.load('y.npy','r')
mask=np.load('mask.npy','r')
print (np.max(y))
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(lambda :n+1)
rev_map=defaultdict(lambda:"<UNK>")
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
# for num_bits in range(binary_dim):
# for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
# bitmap=np.zeros(binary_dim)
# bitmap[np.array(bit_config)]=1
# num=bitmap*(2** np.arange(binary_dim ))
# num=np.sum(num)
# num=int(num)
# word=words[i]
# _map[word]=num
# rev_map[num]=word
# i+=1
# if i>=len(words):
# break
# if i>=len(words):
# break
for word in words:
i+=1
_map[word]=i
rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
print (len(rev_map.keys()))
print(len(_map.keys()))
print ('one-hot map built')
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
num_failed=0
num_counted=0
for word in corpus:
w=word.lower()
num_counted+=1
if w not in _map:
num_failed+=1
mapped=_map[w]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
print 'unmapped word fraction:', num_failed/float(num_counted)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
if not onehot:
rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32)
for l,line in enumerate(corpus):
# if len(line)==0:
# rtn[l]=n+2
# else:
# if line not in _map:
# total_not+=1
mapped=_map[line]
if mapped==75001:
total_not+=1
if onehot:
binrep=np.zeros(2**binary_dim)
print line
binrep[mapped]=1
else:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l]=binrep
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2,binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
if form2:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l,i+1,:]=binrep
else:
rtn[l,i+1]=mapped
if mapped==75001:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
if form2:
rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
else:
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
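# Usage sketch: xavier_init returns an initial-value tensor, so in TF 1.x it
# is passed straight to tf.Variable; the sizes below are hypothetical. Note
# the unusually small default constant=1e-4, which scales the usual Xavier
# range down by four orders of magnitude.
#
#     w = tf.Variable(xavier_init(256, 128), name='example_weight')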
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
global_step=tf.Variable(0,trainable=False)
self.global_step=global_step
self.no_reload=[self.global_step]
# tf Graph input
self.n_words=network_architecture['n_input']
# Both forms use the same input shape, so no branch is needed here.
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.float32, [None, network_architecture["maxlen"],self.n_words],name='caption_placeholder')
print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.untrainable_variables=[x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if x not in self.no_reload]
for var in self.untrainable_variables:
if var not in self.var_embs:
var.trainable=embeddings_trainable
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
to_restore=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.untrainable_variables=[x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if x not in self.no_reload]
for var in self.untrainable_variables:
if var not in self.var_embs:
var.trainable=embeddings_trainable
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
print (model_path)
if embeddings_trainable:
print [x.name for x in to_restore]
print 'went into right place'
self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
print [x.name for x in self.untrainable_variables]
self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path.replace('vaedef','defdef'))
self.sess.run(init)
if ctrain:
print(saved_path)
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
print [x.name for x in self.saver._var_list]
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
KLD_penalty=tf.tanh(tf.cast(self.timestep,tf.float32)/1600.0)
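# KLD_penalty anneals the KL term in over training: tanh(t/1600) rises
# smoothly from 0 toward 1, a common warm-up that keeps the posterior from
# collapsing early (the 1600 horizon is this code's choice, not a standard).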
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
if not same_embedding:
input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
else:
input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.network_architecture['maxlen']):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
if form3:
current_embedding,KLD_loss=self._get_word_embedding([network_weights['LSTM']],network_weights['input_meaning'], self.caption_placeholder[:,i-1,:],logit=True)
elif form2:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1,:],logit=True)
else:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
if transfertype2:
current_embedding=tf.stop_gradient(current_embedding)
loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i > 0:
if not form2:
labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
ix_range=tf.range(0, self.batch_size, 1)
ixs = tf.expand_dims(ix_range, 1)
concat = tf.concat([ixs, labels],1)
onehot = tf.sparse_to_dense(
concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
else:
onehot=self.caption_placeholder[:,i,:]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not use_ctc:
if form2:
# best_word=tf.nn.softmax(logit)
# best_word=tf.round(best_word)
# all_the_f_one_h.append(best_word)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy=tf.reduce_sum(xentropy,reduction_indices=-1)
else:
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
self.debug+=xentropy
loss += xentropy
else:
probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
if not use_ctc:
loss_ctc=0
self.debug=self.debug/tf.reduce_sum(self.mask[:,1:])
else:
probs=tf.concat(probs,axis=1)
self.debug=probs[0,2]
probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
# self.debug=tf.reduce_sum(input_embedding_KLD_loss)/self.batch_size*KLD_penalty+loss_ctc
loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(input_embedding_KLD_loss)/self.batch_size*KLD_penalty+loss_ctc
self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
n_input, n_z, n_z_m,n_z_m_2):
all_weights = dict()
if not form3:
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias')}
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_input, n_z),name='out_log_sigma')}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean')}
self.no_reload+=all_weights['input_meaning'].values()
self.var_embs=[]
if transfertype2:
self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
if lstm_stack>1:
self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
'lstm': self.lstm}
else:
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_input, n_z),name='affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_z),name='affine_bias')}
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_lstm_input], dtype=tf.float32),name='out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_lstm_input], dtype=tf.float32),name='out_log_sigmab')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_z, n_lstm_input),name='out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_z, n_lstm_input),name='out_log_sigma')}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_lstm_input], dtype=tf.float32),name='out_meanb')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_z, n_lstm_input),name='out_mean')}
self.no_reload+=all_weights['input_meaning'].values()
self.var_embs=[]
if transfertype2:
self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
if lstm_stack>1:
self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
'lstm': self.lstm}
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
if not form3:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
else:
embedding=tf.matmul(self.x,aff_weights['affine_weight'])+aff_weights['affine_bias']
embedding,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],embedding)
return embedding,vae_loss
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
if not form3:
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
else:
embedding=tf.matmul(x,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
embedding=tf.matmul(embedding,ve_weights[0]['affine_weight'])+ve_weights[0]['affine_bias']
vae_loss=0
# if logit:
# z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],embedding)
# else:
# if not form2:
# z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],embedding, True)
# else:
# z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(embedding,depth=self.network_architecture['n_input']))
# all_the_f_one_h.append(tf.one_hot(embedding,depth=self.network_architecture['n_input']))
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if not vanilla:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
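# The sampling above is the standard reparameterization trick:
# z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, I), which keeps the sample
# differentiable w.r.t. mu and logvar. A minimal numpy sketch with toy values:
#
#     import numpy as np
#     mu, logvar = np.zeros(4), np.zeros(4)
#     eps = np.random.randn(4)
#     z = mu + np.exp(0.5 * logvar) * eps  # one latent sample
#     kld = -0.5 * np.sum(1 + logvar - mu**2 - np.exp(logvar))  # matches KLD above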
def _vae_sample_mid(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if mid_vae:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if mid_vae:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _create_loss_optimizer(self):
if clip_grad:
opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
else:
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def _create_loss_test(self):
self.test_op = \
tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False,timestep=0):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
if self.test and testify:
print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
exit()
else:
opt, cost, dbg = self.sess.run((self.optimizer, self.loss, self.debug),
feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:np.array(timestep).astype(np.float32)})
# print dbg
return cost, dbg
def _build_gen(self):
#same setup as `_create_network` function
network_weights = self._initialize_weights(**self.network_architecture)
if form2:
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
else:
start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
self.network_weights=network_weights
if not same_embedding:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['input_meaning'])
else:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
print input_embedding.shape
# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
num=0
loss=0
debloss=0
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(input_embedding, state)
print state,output.shape
if form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
print previous_word.shape
# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(self.network_architecture['maxlen']):
tf.get_variable_scope().reuse_variables()
print i
out, state = self.lstm(previous_word, state)
# get a one-hot word encoding from the output of the LSTM
logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not form2:
best_word = tf.argmax(logit, 1)
else:
# best_word=tf.exp(logit)
# best_word=best_word/(1+best_word)
best_word=tf.nn.sigmoid(logit)
best_word=tf.round(best_word)
# with tf.device("/cpu:0"):
# # get the embedding of the best_word to use as input to the next iteration of our LSTM
# previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
# previous_word += self.embedding_bias
print logit.shape
if form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
print previous_word.shape
all_words.append(best_word)
onehot=self.caption_placeholder[:,i,:]
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy1=tf.reduce_sum(xentropy,reduction_indices=-1)
mask=tf.cast(tf.not_equal(best_word, tf.zeros_like(best_word)),dtype=tf.float32)
num+=tf.reduce_sum(mask)
xentropy = xentropy1 * mask
xentropy=tf.reduce_sum(xentropy)
loss += xentropy
xentropy=xentropy1*self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
debloss+=xentropy
self.loss=loss/num
self.debug=debloss/tf.reduce_sum(self.mask[:,1:])
self.generated_words=all_words
def generate(self, _map, x,y,mask):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
# """
# if z_mu is None:
# z_mu = np.random.normal(size=self.network_architecture["n_z"])
# # Note: This maps to mean of distribution, we could alternatively
# # sample from Gaussian distribution
# return self.sess.run(self.x_reconstr_mean,
# feed_dict={self.z: z_mu})
# saver = tf.train.Saver()
# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
generated_word_index,f_it,deb= self.sess.run([self.generated_words,self.loss,self.debug], feed_dict={self.x:x,self.caption_placeholder:y,self.mask:mask})
print f_it
print deb
print generated_word_index
if form2:
generated_word_index=np.array(bin_to_int(generated_word_index))
generated_word_index=np.rollaxis(generated_word_index,1)
else:
generated_word_index=np.array(generated_word_index)
return generated_word_index
# generated_sentence = ixtoword(_map,generated_word_index)
# return generated_sentence
def ixtoword(_map,ixs):
return [[_map[x] for x in y] for y in ixs]
def bin_to_int(a):
return [(x*(2** np.arange(x.shape[-1] ))).sum(axis=-1).astype(np.uint32) for x in a]
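# Worked example: bin_to_int reads the last axis as little-endian bits, so the
# row [1, 0, 1] maps to 1*1 + 0*2 + 1*4 = 5:
#
#     bin_to_int([np.array([[1., 0., 1.]])])  # -> [array([5], dtype=uint32)]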
def train(network_architecture, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
total_batch = int(n_samples / batch_size)
if should_decay and not gen:
global_step=tf.Variable(0*total_batch,trainable=False)
learning_rate = tf.train.exponential_decay(learning_rate, global_step,
all_samps, 0.95, staircase=True)
else:
global_step=tf.Variable(0,trainable=False)
vae = VariationalAutoencoder(network_architecture,
learning_rate=learning_rate,
batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
# Training cycle
# if test:
# maxlen=network_architecture['maxlen']
# return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
if gen:
return vae
costs=[]
indlist=np.arange(all_samps).astype(int)
for epoch in range(training_epochs):
avg_cost = 0.
# Loop over all batches
np.random.shuffle(indlist)
testify=False
avg_loss=0
for i in range(total_batch):
batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
# Fit training using batch data
# if epoch==2 and i ==0:
# testify=True
cost,loss = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],timestep=epoch*total_batch+i,testify=testify)
# Compute average loss
avg_cost = avg_cost * i /(i+1) +cost/(i+1)
avg_loss=avg_loss*i/(i+1)+loss/(i+1)
# if i% display_step==0:
print avg_cost,avg_loss
if epoch==0 and i==0:
costs.append(avg_cost)
costs.append(avg_cost)
# Display logs per epoch step
if epoch % display_step == 0 or epoch==1:
if should_save:
print 'saving'
vae.saver.save(vae.sess, os.path.join(model_path,'model'))
pkl.dump(costs,open(loss_output_path,'wb'))
print("Epoch:", '%04d' % (epoch+1),
"cost=", avg_cost)
return vae
if __name__ == "__main__":
import sys
form2=True
form3=True
vanilla=True
if sys.argv[1]!='vanilla':
vanilla=False
mid_vae=False
if sys.argv[2]=='mid_vae':
mid_vae=True
same_embedding=False
clip_grad=True
if sys.argv[3]!='clip':
clip_grad=False
should_save=True
should_train=True
# should_train=not should_train
should_continue=False
should_decay=True
zero_end_tok=True
training_epochs=int(sys.argv[14])
batch_size=int(sys.argv[4])
onehot=False
embeddings_trainable=False
if sys.argv[5]!='transfer':
print('not transfering')
embeddings_trainable=True
transfertype2=True
binary_dim=int(sys.argv[6])
all_the_f_one_h=[]
if not zero_end_tok:
X, y, mask, _map = load_text(2**binary_dim-4)
else:
X, y, mask, _map = load_text(2**binary_dim-3)
n_input =binary_dim
n_samples = 30000
lstm_dim=int(sys.argv[7])
model_path = sys.argv[8]
vartype='van'
transfertype=''
maxlen=int(sys.argv[9])+2
n_z=int(sys.argv[10])
n_z_m=int(sys.argv[11])
n_z_m_2=int(sys.argv[12])
if sys.argv[13]!='2':
transfertype2=False
if not vanilla:
vartype='var'
if not embeddings_trainable:
transfertype='transfer'
if transfertype2:
transfertype+='2'
cliptype=''
if clip_grad:
cliptype='clip'
use_ctc=False
losstype=''
if sys.argv[15]=='ctc_loss':
use_ctc=True
losstype='ctc'
lstm_stack=int(sys.argv[16])
use_bdlstm=False
loss_output_path= 'losses/%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%svaedef%s.pkl'%(str(lstm_stack),str(batch_size),str(maxlen-2),str(lstm_dim),str(n_input),str(n_z),str(n_z_m),str(losstype),str(cliptype),str(vartype),str(transfertype))
all_samps=len(X)
n_samples=all_samps
# X, y = X[:n_samples, :], y[:n_samples, :]
network_architecture = \
dict(maxlen=maxlen, # 2nd layer decoder neurons
n_input=n_input, # One hot encoding input
n_lstm_input=lstm_dim, # LSTM cell size
n_z=n_z, # dimensionality of latent space
n_z_m=n_z_m,
n_z_m_2=n_z_m_2
)
if should_train:
# vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
# print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
else:
#vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
# # vae_2d._build_gen()
ind_list=np.arange(len(X)).astype(int)
#np.random.shuffle(ind_list)
print batch_size
x_sample = X[ind_list[:batch_size]]
print x_sample
y_sample = y[ind_list[:batch_size]]
print y_sample
y_hat = vae_2d.generate(_map,x_sample,y_sample,mask[ind_list[:batch_size]])
y_hat=y_hat[:10]
# print y_hat
y_hat_words=ixtoword(_map,y_hat)
print y_hat_words
if form2:
y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
else:
y_words=ixtoword(_map,y_sample)
print(y_hat)
print(y_hat_words)
print(y_words)
print(ixtoword(_map,bin_to_int(np.expand_dims(x_sample[:10],axis=0))))
# # plt.figure(figsize=(8, 6))
# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
# plt.colorbar()
# plt.grid()
# plt.show()
| |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova import exception
from nova.objects import flavor as flavor_obj
from nova.tests.unit.objects import test_objects
fake_flavor = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 1,
'name': 'm1.foo',
'memory_mb': 1024,
'vcpus': 4,
'root_gb': 20,
'ephemeral_gb': 0,
'flavorid': 'm1.foo',
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'},
}
class _TestFlavor(object):
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(value, obj[field])
def test_get_by_id(self):
with mock.patch.object(db, 'flavor_get') as get:
get.return_value = fake_flavor
flavor = flavor_obj.Flavor.get_by_id(self.context, 1)
self._compare(self, fake_flavor, flavor)
def test_get_by_name(self):
with mock.patch.object(db, 'flavor_get_by_name') as get_by_name:
get_by_name.return_value = fake_flavor
flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo')
self._compare(self, fake_flavor, flavor)
def test_get_by_flavor_id(self):
with mock.patch.object(db, 'flavor_get_by_flavor_id') as get_by_id:
get_by_id.return_value = fake_flavor
flavor = flavor_obj.Flavor.get_by_flavor_id(self.context,
'm1.foo')
self._compare(self, fake_flavor, flavor)
def test_add_access(self):
elevated = self.context.elevated()
flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
with mock.patch.object(db, 'flavor_access_add') as add:
flavor.add_access('456')
add.assert_called_once_with(elevated, '123', '456')
def test_add_access_with_dirty_projects(self):
flavor = flavor_obj.Flavor(context=self.context, projects=['1'])
self.assertRaises(exception.ObjectActionError,
flavor.add_access, '2')
def test_remove_access(self):
elevated = self.context.elevated()
flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
with mock.patch.object(db, 'flavor_access_remove') as remove:
flavor.remove_access('456')
remove.assert_called_once_with(elevated, '123', '456')
def test_create(self):
flavor = flavor_obj.Flavor()
flavor.name = 'm1.foo'
flavor.extra_specs = fake_flavor['extra_specs']
with mock.patch.object(db, 'flavor_create') as create:
create.return_value = fake_flavor
flavor.create(self.context)
self.assertEqual(self.context, flavor._context)
# NOTE(danms): Orphan this to avoid lazy-loads
flavor._context = None
self._compare(self, fake_flavor, flavor)
def test_create_with_projects(self):
context = self.context.elevated()
flavor = flavor_obj.Flavor()
flavor.name = 'm1.foo'
flavor.extra_specs = fake_flavor['extra_specs']
flavor.projects = ['project-1', 'project-2']
db_flavor = dict(fake_flavor, projects=list(flavor.projects))
with mock.patch.multiple(db, flavor_create=mock.DEFAULT,
flavor_access_get_by_flavor_id=mock.DEFAULT
) as methods:
methods['flavor_create'].return_value = db_flavor
methods['flavor_access_get_by_flavor_id'].return_value = [
{'project_id': 'project-1'},
{'project_id': 'project-2'}]
flavor.create(context)
methods['flavor_create'].assert_called_once_with(
context,
{'name': 'm1.foo',
'extra_specs': fake_flavor['extra_specs']},
projects=['project-1', 'project-2'])
self.assertEqual(context, flavor._context)
# NOTE(danms): Orphan this to avoid lazy-loads
flavor._context = None
self._compare(self, fake_flavor, flavor)
self.assertEqual(['project-1', 'project-2'], flavor.projects)
def test_create_with_id(self):
flavor = flavor_obj.Flavor(id=123)
self.assertRaises(exception.ObjectActionError, flavor.create,
self.context)
@mock.patch('nova.db.flavor_access_add')
@mock.patch('nova.db.flavor_access_remove')
@mock.patch('nova.db.flavor_extra_specs_delete')
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_save(self, mock_update, mock_delete, mock_remove, mock_add):
ctxt = self.context.elevated()
extra_specs = {'key1': 'value1', 'key2': 'value2'}
projects = ['project-1', 'project-2']
flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo',
extra_specs=extra_specs, projects=projects)
flavor.obj_reset_changes()
# Test deleting an extra_specs key and project
del flavor.extra_specs['key1']
del flavor.projects[-1]
self.assertEqual(set(['extra_specs', 'projects']),
flavor.obj_what_changed())
flavor.save()
self.assertEqual({'key2': 'value2'}, flavor.extra_specs)
mock_delete.assert_called_once_with(ctxt, 'foo', 'key1')
self.assertEqual(['project-1'], flavor.projects)
mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2')
# Test updating an extra_specs key value
flavor.extra_specs['key2'] = 'foobar'
self.assertEqual(set(['extra_specs']), flavor.obj_what_changed())
flavor.save()
self.assertEqual({'key2': 'foobar'}, flavor.extra_specs)
mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'})
# Test adding an extra_specs and project
flavor.extra_specs['key3'] = 'value3'
flavor.projects.append('project-3')
self.assertEqual(set(['extra_specs', 'projects']),
flavor.obj_what_changed())
flavor.save()
self.assertEqual({'key2': 'foobar', 'key3': 'value3'},
flavor.extra_specs)
mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar',
'key3': 'value3'})
self.assertEqual(['project-1', 'project-3'], flavor.projects)
mock_add.assert_called_once_with(ctxt, 'foo', 'project-3')
@mock.patch('nova.db.flavor_create')
@mock.patch('nova.db.flavor_extra_specs_delete')
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_save_deleted_extra_specs(self, mock_update, mock_delete,
mock_create):
mock_create.return_value = dict(fake_flavor,
extra_specs={'key1': 'value1'})
ctxt = self.context.elevated()
flavor = flavor_obj.Flavor(context=ctxt)
flavor.flavorid = 'test'
flavor.extra_specs = {'key1': 'value1'}
flavor.create()
flavor.extra_specs = {}
flavor.save()
mock_delete.assert_called_once_with(ctxt, flavor.flavorid,
'key1')
self.assertFalse(mock_update.called)
def test_save_invalid_fields(self):
flavor = flavor_obj.Flavor(id=123)
self.assertRaises(exception.ObjectActionError, flavor.save)
def test_destroy(self):
flavor = flavor_obj.Flavor(id=123, name='foo')
with mock.patch.object(db, 'flavor_destroy') as destroy:
flavor.destroy(self.context)
destroy.assert_called_once_with(self.context, flavor.name)
def test_load_projects(self):
flavor = flavor_obj.Flavor(context=self.context, flavorid='foo')
with mock.patch.object(db, 'flavor_access_get_by_flavor_id') as get:
get.return_value = [{'project_id': 'project-1'}]
projects = flavor.projects
self.assertEqual(['project-1'], projects)
self.assertNotIn('projects', flavor.obj_what_changed())
def test_load_anything_else(self):
flavor = flavor_obj.Flavor()
self.assertRaises(exception.ObjectActionError,
getattr, flavor, 'name')
class TestFlavor(test_objects._LocalTest, _TestFlavor):
pass
class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor):
pass
class _TestFlavorList(object):
def test_get_all(self):
with mock.patch.object(db, 'flavor_get_all') as get_all:
get_all.return_value = [fake_flavor]
filters = {'min_memory_mb': 4096}
flavors = flavor_obj.FlavorList.get_all(self.context,
inactive=False,
filters=filters,
sort_key='id',
sort_dir='asc')
self.assertEqual(1, len(flavors))
_TestFlavor._compare(self, fake_flavor, flavors[0])
get_all.assert_called_once_with(self.context, inactive=False,
filters=filters, sort_key='id',
sort_dir='asc', limit=None,
marker=None)
class TestFlavorList(test_objects._LocalTest, _TestFlavorList):
pass
class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList):
pass
| |
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
import logging
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .base import * # noqa
# Explicitly import reused variables to prevent flake8 errors:
from .base import DATABASES, env, INSTALLED_APPS, MIDDLEWARE, TEMPLATES
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
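# Note: both middleware lists are prepended so WhiteNoise can short-circuit
# static-file requests early and Sentry can stamp error IDs on responses
# before the rest of the middleware stack runs.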
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
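# With AWS_EXPIRY = 60 * 60 * 24 * 7 = 604800 seconds (one week), the header
# sent is: Cache-Control: max-age=604800, s-maxage=604800, must-revalidate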
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='scrappyr <noreply@example.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[scrappyr]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
(
'django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
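# Example (illustrative, not part of this config): a URL such as
#   DATABASE_URL=postgres://user:pass@host:5432/name
# is parsed by env.db() into roughly:
#   {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'name',
#    'USER': 'user', 'PASSWORD': 'pass', 'HOST': 'host', 'PORT': 5432}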
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior # noqa
}
}
}
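# Example (illustrative): with REDIS_URL=redis://:secret@redis.example.com:6379,
# REDIS_LOCATION above evaluates to 'redis://:secret@redis.example.com:6379/0';
# the format() call appends the '/0' database suffix that Heroku's URL omits.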
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT',
default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import threadgroup
rpc = importutils.try_import('nova.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_('Exception during rpc cleanup.'))
return status, signo
def wait(self, ready_callback=None):
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This allows workers to
            # start up quickly, but ensures we don't keep re-forking
            # children that die instantly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(.01)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=None):
if workers:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
else:
launcher = ServiceLauncher()
launcher.launch_service(service)
return launcher
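# Illustrative usage sketch (assumed caller code, not part of this module).
# `MyService` is a hypothetical Service subclass; launch() picks a
# ProcessLauncher when workers are requested, else a ServiceLauncher:
#
#     class MyService(Service):
#         def start(self):
#             self.tg.add_thread(self._run)   # hypothetical worker green thread
#
#     launcher = launch(MyService(), workers=4)  # parent forks 4 children
#     launcher.wait()  # respawns children that die; SIGTERM stops them all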
""" Cisco_IOS_XR_ncs1k_mxp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs1k\-mxp package configuration.
This module contains definitions
for the following management objects\:
hardware\-module\: NCS1k HW module config
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ClientDataRateEnum(Enum):
"""
ClientDataRateEnum
Client data rate
.. data:: TEN_GIG = 1
TenGig
.. data:: FORTY_GIG = 2
FortyGig
.. data:: HUNDRED_GIG = 3
HundredGig
"""
TEN_GIG = 1
FORTY_GIG = 2
HUNDRED_GIG = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['ClientDataRateEnum']
class FecEnum(Enum):
"""
FecEnum
Fec
.. data:: SD7 = 1
SoftDecision7
.. data:: SD20 = 2
SoftDecision20
"""
SD7 = 1
SD20 = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['FecEnum']
class TrunkDataRateEnum(Enum):
"""
TrunkDataRateEnum
Trunk data rate
.. data:: HUNDRED_GIG = 2
HundredGig
.. data:: TWO_HUNDRED_GIG = 3
TwoHundredGig
.. data:: TWO_HUNDRED_FIFTY_GIG = 4
TwoHundredFiftyGig
"""
HUNDRED_GIG = 2
TWO_HUNDRED_GIG = 3
TWO_HUNDRED_FIFTY_GIG = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['TrunkDataRateEnum']
class HardwareModule(object):
"""
NCS1k HW module config
.. attribute:: node
Node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
Node
.. attribute:: location <key>
Fully qualified line card specification
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: values
Slice to be Provisioned
**type**\: :py:class:`Values <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Values>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.location = None
self.values = HardwareModule.Node.Values()
self.values.parent = self
class Values(object):
"""
Slice to be Provisioned
.. attribute:: value
Data rates & FEC
**type**\: list of :py:class:`Value <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Values.Value>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.value = YList()
self.value.parent = self
self.value.name = 'value'
class Value(object):
"""
Data rates & FEC
.. attribute:: slice_id <key>
Set Slice
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: client_rate
Client Rate
**type**\: :py:class:`ClientDataRateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.ClientDataRateEnum>`
.. attribute:: fec
FEC
**type**\: :py:class:`FecEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.FecEnum>`
.. attribute:: trunk_rate
TrunkRate
**type**\: :py:class:`TrunkDataRateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.TrunkDataRateEnum>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.slice_id = None
self.client_rate = None
self.fec = None
self.trunk_rate = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
if self.slice_id is None:
raise YPYModelError('Key property slice_id is None')
return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-cfg:value[Cisco-IOS-XR-ncs1k-mxp-cfg:slice-id = ' + str(self.slice_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.slice_id is not None:
return True
if self.client_rate is not None:
return True
if self.fec is not None:
return True
if self.trunk_rate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['HardwareModule.Node.Values.Value']['meta_info']
@property
def _common_path(self):
if self.parent is None:
                    raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-cfg:values'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.value is not None:
for child_ref in self.value:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['HardwareModule.Node.Values']['meta_info']
@property
def _common_path(self):
if self.location is None:
raise YPYModelError('Key property location is None')
return '/Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module/Cisco-IOS-XR-ncs1k-mxp-cfg:node[Cisco-IOS-XR-ncs1k-mxp-cfg:location = ' + str(self.location) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.location is not None:
return True
if self.values is not None and self.values._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['HardwareModule.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
return meta._meta_table['HardwareModule']['meta_info']
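# Illustrative usage sketch (assumed YDK service wiring; not part of the
# generated bindings): provision a slice at 100G client / 200G trunk rates.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#
#     hw = HardwareModule()
#     node = HardwareModule.Node()
#     node.location = '0/1'                       # assumed line-card location
#     val = HardwareModule.Node.Values.Value()
#     val.slice_id = '0'
#     val.client_rate = ClientDataRateEnum.HUNDRED_GIG
#     val.trunk_rate = TrunkDataRateEnum.TWO_HUNDRED_GIG
#     val.fec = FecEnum.SD20
#     node.values.value.append(val)
#     hw.node.append(node)
#     provider = NetconfServiceProvider(address='10.0.0.1', username='admin',
#                                       password='admin')   # assumed device
#     CRUDService().create(provider, hw)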
''' @Summary: API endpoint for controlling functionality of deployed agents. '''
from flask import jsonify, request
from flask_login import login_required
from flask_restful import Resource, reqparse
from datetime import datetime, timezone, timedelta
from api import db
from api.sql.models import * #import all of the models from models.py
from api.util.parse_json import json_decode, json_encode #for json request parsing
## for agent-ops ##
class piController(Resource):
def get(self, _mac_address_=None):
URL = request.url
# time sync
if URL.find("api/picontroller/time") > 0 and _mac_address_ == None:
try:
dtz = timezone(-timedelta(hours=4))
dtUTC = datetime.now(dtz)
dtfUTC = datetime.strftime(dtUTC, '%Y-%m-%d %H:%M:%S')
return jsonify(
status = 200,
datetime = dtfUTC
)
except Exception as e:
return {'status' : 400}
# get agent settings
        elif URL.find("api/picontroller") > 0 and _mac_address_ is not None:
            try:
                x = agent_data.query.filter_by(mac_address=_mac_address_).first()
                # check for a missing agent before touching its attributes;
                # otherwise a None result raises AttributeError first and the
                # failure branch below could never run
                if x is None:
                    return {'status' : 400}
                return jsonify(
                    status = 200,
                    mode = x.mode,
                    cmd = x.cmd,
                    time_setting = x.time_setting
                )
except Exception as e:
return {'status' : 400}
else:
return {'status' : 404}
@login_required
def post(self, _mac_address_): # update ip / status
try:
parser = reqparse.RequestParser()
parser.add_argument('ip_address', type=str, location='json')
            parser.add_argument('active', type=int, location='json') # TINYINT column; parse the value as int
args = parser.parse_args()
_ip_address = args['ip_address']
active = args['active']
curr_session = db.session # open database session
try:
x = agent_data.query.filter_by(mac_address=_mac_address_).first() #fetch the agent to be updated
x.ip_address = _ip_address # update the row
x.active = active
curr_session.commit() #commit changes
return {
'status' : 200,
'message' : 'Agent update successful'
}
            except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Agent update failure'
}
except Exception as e:
return {'status' : 400}
class manageAgent(Resource):
@login_required
def get(self, _mac_address_):
        try:
            x = agent_data.query.filter_by(mac_address=_mac_address_).first()
            # check for a missing agent before touching its attributes;
            # a None result would raise AttributeError and fall through to
            # the generic except handler instead of the message below
            if x is None:
                return {
                    'status' : 400,
                    'message' : 'Agent search failure'
                }
            return jsonify(
                status = 200,
                message = 'Agent search success!',
                agent_id = x.agent_id,
                mac_address = x.mac_address,
                ip_address = x.ip_address,
                active = x.active,
                company_id = x.company_id,
                site = x.site,
                mode = x.mode,
                cmd = x.cmd,
                time_setting = x.time_setting
            )
except Exception as e:
return {'status' : 400}
@login_required
def put(self, _mac_address_):
try:
parser = reqparse.RequestParser()
# conditionally replace agent data if arg exists #
parser.add_argument('agent_id', type=int, help='Agent_id for agent', location='json')
parser.add_argument('ip_address', type=str, help='IP Address for agent', location='json')
            parser.add_argument('active', type=int, help='Is agent active or not', location='json') # TINYINT column; parse as int
parser.add_argument('company_id', type=int, help='Company ID associated with agent', location='json')
parser.add_argument('site', type=str, help='Site agent is deployed at', location='json')
parser.add_argument('mode', type=str, help='Mode agent is operating in', location='json')
parser.add_argument('cmd', type=str, help='Current cmd selection for agent', location='json')
parser.add_argument('time_setting', type=json_encode, help='Time settings for the agent', location='json')
args = parser.parse_args()#strict=True, require=True
curr_session = db.session # open database session
try:
x = agent_data.query.filter_by(mac_address=_mac_address_).first() #fetch the agent to be updated
                if args['agent_id'] is not None:
                    x.agent_id = args['agent_id']
                if args['ip_address'] is not None:
                    x.ip_address = args['ip_address']
                if args['active'] is not None:
                    x.active = args['active']
                if args['company_id'] is not None:
                    x.company_id = args['company_id']
                if args['site'] is not None:
                    x.site = args['site']
                if args['mode'] is not None:
                    x.mode = args['mode']
                if args['cmd'] is not None:
                    x.cmd = args['cmd']
                if args['time_setting'] is not None:
                    x.time_setting = json_decode(args['time_setting'])
curr_session.commit() #commit changes
return {
'status' : 200,
'message' : 'Agent update successful'
}
except Exception as ex:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Agent update failure'
}
except Exception as e:
return {'status' : 400}
@login_required
def delete(self, _mac_address_):
try:
            curr_session = db.session #open database session
            x = agent_data.query.filter_by(mac_address=_mac_address_).first()
            if x is None:
                return {
                    'status' : 400,
                    'message' : 'Agent delete failure'
                }
            try:
                curr_session.delete(x) # use the same session handle throughout
                curr_session.commit()
return {
'status' : 200,
'message' : 'Agent delete successful'
}
            except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Agent delete failure'
}
except Exception as e:
return {'status' : 400}
class manageAgentList(Resource):
@login_required
def get(self):
try:
            x = agent_data.query.all()
            if x: # .all() returns a list (never None); test for emptiness
results = []
for agent in x:
results.append( {
'agent_id' : agent.agent_id,
'mac_address' : agent.mac_address,
'ip_address' : agent.ip_address,
'active' : agent.active,
'company_id' : agent.company_id,
'site' : agent.site,
'mode' : agent.mode,
'cmd' : agent.cmd,
'time_setting' : agent.time_setting #json_encode() / jsonify()
} )
return jsonify(
status = 200,
message = 'Agent search success!',
agent_list = results
)
else:
return {
'status' : 400,
'message' : 'Agent search failure'
}
except Exception as e:
return {'status' : 400}
@login_required
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('agent_id', type=int, help='Agent_id for agent', location='json')
parser.add_argument('mac_address', type=str, help='Mac Address for agent', location='json')
parser.add_argument('ip_address', type=str, help='IP Address for agent', location='json')
            parser.add_argument('active', type=int, help='Is agent active or not', location='json') # TINYINT column; parse as int
parser.add_argument('company_id', type=int, help='Company ID associated with agent', location='json')
parser.add_argument('site', type=str, help='Site agent is deployed at', location='json')
parser.add_argument('mode', type=str, help='Mode agent is operating in', location='json')
parser.add_argument('cmd', type=str, help='Current cmd selection for agent', location='json')
parser.add_argument('time_setting', type=json_encode, help='Time settings for the agent', location='json')
args = parser.parse_args()#strict=True
_agent_id = args['agent_id']
_mac_address = args['mac_address']
_ip_address = args['ip_address']
_active = args['active']
_company_id = args['company_id']
_site = args['site']
_mode = args['mode']
_cmd = args['cmd']
_time_setting = args['time_setting']
query = agent_data(agent_id=_agent_id, mac_address=_mac_address, ip_address=_ip_address,
active=_active, company_id=_company_id, site=_site,
mode=_mode, cmd=_cmd, time_setting=json_decode(_time_setting))
curr_session = db.session #open database session
try:
curr_session.add(query) #add prepared statement to opened session
curr_session.commit() #commit changes
return {
'status' : 200,
'message' : 'Agent creation successful'
}
            except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Agent creation failure'
}
except Exception as e:
return {'status' : 400}
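# Illustrative wiring sketch (assumed, not shown in this module; the URL rules
# here are hypothetical): these Resource classes would typically be registered
# on a flask_restful Api in the application factory, e.g.:
#
#     from flask_restful import Api
#     api = Api(app)
#     api.add_resource(piController, '/api/picontroller/time',
#                      '/api/picontroller/<string:_mac_address_>')
#     api.add_resource(manageAgent, '/api/manageagent/<string:_mac_address_>')
#     api.add_resource(manageAgentList, '/api/manageagents')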
#!/usr/bin/env python
"""usage.py: usage of symbols, including substitutions on pairwise alphabets.
Revision History
Created 10/12/04 by Rob Knight.
9/14/05 Rob Knight: Changed Usage constructor to allow Alphabet on the instance
level, and to eliminate the precalculated flag which was not used. Added
entropy method.
7/20/07 Mike Robeson: Under PairMatrix.__init__ changed 'if data:' to
'if data != None:'
8/3/07 Daniel McDonald: Code now relies on numpy and cogent with the exception
of the one scipy function that still needs to be removed
"""
from cogent.maths.scipy_optimize import fmin, brent
from cogent.util.array import scale_trace, norm_diff, \
has_neg_off_diags, sum_neg_off_diags, with_diag, without_diag
from cogent.core.alphabet import get_array_type
from cogent.core.usage import RnaBases, DnaBases, DnaPairs, RnaPairs, Codons
from cogent.core.sequence import ModelSequence, ModelDnaSequence, \
ModelRnaSequence
from operator import add, sub, mul, div
from cogent.maths.matrix_logarithm import logm
from cogent.maths.stats.util import FreqsI
from cogent.maths.matrix_exponentiation import FastExponentiator as expm
from numpy import zeros, array, max, diag, log, nonzero, product, cumsum, \
    searchsorted, exp, diagonal, choose, less, repeat, average, \
    logical_and, logical_or, logical_not, transpose, compress, \
    ravel, concatenate, equal, dot, identity, \
    newaxis as NewAxis, sum, take, reshape, any, all, asarray
from numpy.linalg import eig
from numpy.linalg import inv as inverse
from numpy.random import random as randarray
ARRAY_TYPE = type(array([0]))
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Rob Knight", "Mike Robeson", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Rob Knight"
__email__ = "rob@spot.colorado.edu"
__status__ = "Production"
class Usage(FreqsI):
"""Stores usage on a particular alphabet. Abstract class.
Note: Usage is abstract because most subclasses (e.g. CodonUsage,
AminoAcidUsage) have specific methods that depend on their alphabets.
    Generic Usage objects are disallowed to enforce use of the
    appropriate Usage object for each specific situation.
Supports most of the Cogent FreqsI interface.
"""
Alphabet = None # concrete subclasses have specific alphabets
def __init__(self, data=None, Alphabet=None):
"""Returns a new Usage object from array of symbol freqs.
Will interpret many different kinds of data, including precalculated
frequencies, arrays of symbols, and cogent.core.sequence.ModelSequence
objects.
Warning: it guesses whether you passed in frequencies or symbols based
on the length of the array, so for example Usage(DnaSequence('ATCG'))
will _not_ give the result you expect. If you know the data type,
use the alternative class method constructors.
"""
if Alphabet is not None:
self.Alphabet = Alphabet
if not self.Alphabet:
raise TypeError, "Usage subclasses must define alphabet."""
if isinstance(data, Usage):
self._data = data._data
else:
self._data = zeros(len(self), 'float64')
if any(data):
self += data
def __getitem__(self, i):
"""Returns item based on alphabet."""
return self._data[self.Alphabet.index(i)]
def __setitem__(self, key, val):
"""Sets item based on alphabet."""
self._data[self.Alphabet.index(key)] = val
def __str__(self):
"""Prints as though it were a tuple of key,value pairs."""
return str(self.items())
def __repr__(self):
"""String representation of self."""
return ''.join([self.__class__.__name__, '(', repr(self._data), ')'])
def __iter__(self):
"""Iterates over keys, like a dict."""
return iter(self.Alphabet)
def __eq__(self, other):
"""Tests whether two Usage objects have the same data."""
if hasattr(other, '_data'):
return all(self._data == other._data)
#if we get here, didn't compare equal
try:
return all(self._data == self.__class__(other)._data)
except:
return False
def __ne__(self, other):
"""Returns True if self and other are not equal."""
if hasattr(other, '_data'):
return any(self._data != other._data)
#if we get here, didn't compare equal
try:
return any(self._data != self.__class__(other)._data)
except:
return True
def __iadd__(self, other):
"""Adds data to self in-place."""
#check if other is nonzero; skip if it isn't
try:
if not other:
return self
except ValueError:
if not any(other):
return self
#first, check if it's a Usage object
if isinstance(other, Usage):
self._data += other._data
return self
#then, check if it's one of our ModelSequence objects
ac = self.Alphabet.counts
if isinstance(other, ModelSequence):
self._data += ac(other._data)
return self
#if it's the same length as self, try to add it as frequencies
try:
if len(other) == len(self):
self._data += other
return self
except TypeError:
pass
#then try to convert it using the alphabet
#WARNING: this will silently ignore unknown keys!
        #since we know other was nonzero, we won't accept
#the result if we can't convert anything.
try:
other_freqs = ac(other)
#check if we actually converted anything...
if any(other_freqs):
self._data += other_freqs
return self
except (IndexError, KeyError, TypeError):
pass
#then use the generic conversion function
f = self._find_conversion_function(other)
if f:
f(other, op=add)
return self
else:
raise TypeError, "Could not convert this to freqs: %s" % other
def __isub__(self, other):
"""Subtracts data from self in-place."""
#check if other is nonzero; skip if it isn't
try:
if not other:
return self
except ValueError:
if not any(other):
return self
#first, check if it's a Usage object
if isinstance(other, Usage):
self._data -= other._data
return self
#then, check if it's one of our ModelSequence objects
ac = self.Alphabet.counts
if isinstance(other, ModelSequence):
self._data -= ac(other._data)
return self
        #if it's the same length as self, try to subtract it as frequencies
try:
if len(other) == len(self):
self._data -= other
return self
except TypeError:
pass
#then try to convert it using the alphabet
#WARNING: this will silently ignore unknown keys!
        #since we know other was nonzero, we won't accept
#the result if we can't convert anything.
try:
other_freqs = ac(other)
#check if we actually converted anything...
if other_freqs.any():
self._data -= other_freqs
return self
except (IndexError, KeyError, TypeError):
pass
#then use the generic conversion function
f = self._find_conversion_function(other)
if f:
f(other, op=sub)
return self
else:
raise TypeError, "Could not convert this to freqs: %s" % other
def __mul__(self, other):
"""Multiplies self by other (assumed scalar)."""
return self.__class__(self._data * other)
    def __imul__(self, other):
        """Multiplies self by other in-place (assumed scalar)."""
        self._data *= other
        return self # augmented assignment rebinds to the return value
    def __div__(self, other):
        """Divides self by other (assumed scalar). Always true division."""
        return self.__class__(self._data / (other))
    def __idiv__(self, other):
        """Divides self by other (assumed scalar) inplace. Maybe int division."""
        self._data /= other
        return self # without this, u /= x would rebind u to None
def scale_sum(self, sum_=1.0):
"""Returns copy of self scaled to specified sum."""
return self.__class__(self._data * (sum_/sum(self._data)))
def scale_max(self, max_=1.0):
"""Returns copy of self scaled to specified maximum (default 1)."""
return self.__class__(self._data * (max_/max(self._data)))
def probs(self):
"""Returns copy of self scaled so that the sum is 1."""
return self.__class__(self._data / (sum(self._data)))
def randomIndices(self, length, random_vector=None):
"""Produces random indices according to symbol freqs."""
freqs = cumsum(self._data/sum(self._data))[:-1]
if random_vector is None:
random_vector=randarray(length)
return searchsorted(freqs, random_vector)
def fromSeqData(cls, seq, Alphabet=None):
"""Returns new Usage object from Sequence object."""
return cls.fromArray(seq._data, Alphabet=Alphabet)
def fromArray(cls, a, Alphabet=None):
"""Returns new Usage object from array."""
return cls(cls.Alphabet.counts(a), Alphabet=Alphabet)
fromSeqData = classmethod(fromSeqData)
fromArray = classmethod(fromArray)
#following code is to support FreqsI
def get(self, key, default):
"""Returns self._data[self.Alphabet.index(key) if present, or default."""
try:
return self._data[self.Alphabet.index(key)]
except (KeyError, IndexError, TypeError):
return default
    def values(self):
        """Returns list of values in self (i.e. the data)."""
        return list(self._data)
    def keys(self):
        """Returns list of keys in self (i.e. the alphabet)."""
        return list(self.Alphabet)
def items(self):
"""Returns list of (key, value) pairs in self."""
return zip(self.Alphabet, self._data)
def isValid(self):
"""Always valid (except for negative numbers), so override."""
return min(self._data) >= 0
def copy(self):
"""Return copy of self with same alphabet, not sharing data."""
return self.__class__(self._data.copy())
def __delitem__(self, key):
"""Can't really delete items, but raise error if in alphabet."""
if key in self.Alphabet:
raise KeyError, "May not delete required key %s" % key
def purge(self):
"""Can't contain anything not in alphabet, so do nothing."""
pass
def normalize(self, total=1.0, purge=True):
"""Converts counts into probabilities, normalized to 1 in-place.
Changes result to Float64. Purge is always treated as True.
"""
if self._data is not None and self._data.any():
self._data = self._data / (total * sum(self._data))
def choice(self, prob):
"""Returns item corresponding to Pr(prob)."""
if prob > 1:
return self.Alphabet[-1]
summed = cumsum(self._data/sum(self._data))
return self.Alphabet[searchsorted(summed, prob)]
def randomSequence(self, n):
"""Returns list of n random choices, with replacement."""
if not self:
raise IndexError, "All frequencies are zero."
return list(choose(self.randomIndices(n), self.Alphabet))
def subset(self, items, keep=True):
"""Sets all frequencies not in items to 0.
If keep is False, sets all frequencies in items to 0.
"""
if keep:
for i in self.Alphabet:
if i not in items:
self[i] = 0
else:
for i in items:
try:
self[i] = 0
except KeyError:
pass
def scale(self, factor=1, offset=0):
"""Linear transform of values in freqs where val= factor*val + offset."""
self._data = factor * self._data + offset
def __len__(self):
"""Returns length of alphabet."""
return len(self.Alphabet)
def setdefault(self, key, default):
"""Returns self[key] or sets self[key] to default."""
if self[key]:
return self[key]
else:
self[key] = default
return default
def __contains__(self, key):
"""Returns True if key in self."""
try:
return key in self.Alphabet
except TypeError:
return False
def __nonzero__(self):
"""Returns True if self is nonzero."""
return bool(sum(self._data) != 0)
def rekey(self, key_map, default=None, constructor=None):
"""Returns new Freqs with keys remapped using key_map.
key_map should be a dict of {old_key:new_key}.
Values are summed across all keys that map to the same new value.
Keys that are not in the key_map are omitted (if default is None),
or set to the default.
constructor defaults to self.__class__. However, if you're doing
something like mapping amino acid frequencies onto charge frequencies,
you probably want to specify the constructor since the result won't
be valid on the alphabet of the current class.
Note that the resulting Freqs object is not required to contain
values for all the possible keys.
"""
if constructor is None:
constructor = self.__class__
result = constructor()
for key, val in self.items():
new_key = key_map.get(key, default)
curr = result.get(new_key, 0)
try:
result[new_key] = curr + val
except KeyError:
pass
return result
def entropy(self, base=2):
"""Returns Shannon entropy of usage: sum of p log p."""
ln_base = log(base)
flat = ravel(self._data)
total = sum(flat)
if not total:
return 0
        flat = flat / total # avoid mutating self._data through a ravel view
ok_indices = nonzero(flat)[0]
ok_vals = take(flat, ok_indices, axis=0)
return -sum(ok_vals * log(ok_vals))/ln_base
class DnaUsage(Usage):
"""Stores usage on the DNA alphabet."""
Alphabet = DnaBases
class RnaUsage(Usage):
"""Stores usage on the RNA alphabet."""
Alphabet = RnaBases
class CodonUsage(Usage):
"""Stores usage on the Codon alphabet."""
Alphabet = Codons
class DnaPairUsage(Usage):
"""Stores usage on the DnaPairs alphabet."""
Alphabet = DnaPairs
class RnaPairUsage(Usage):
"""Stores usage on the RnaPairs alphabet."""
Alphabet = RnaPairs
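# Illustrative sketch (assumed data, not part of the library): counting an
# eight-base sequence gives a uniform usage whose Shannon entropy is 2 bits.
#
#     u = DnaUsage('AACCGGTT')   # counted via the alphabet: freqs [2,2,2,2]
#     u.entropy()                # -> 2.0
#     u.probs()                  # DnaUsage rescaled so the freqs sum to 1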
class PairMatrix(object):
"""Base class for Counts, Probs, and Rates matrices. Immutable.
Holds any numeric relationship between pairs of objects on a JointAlphabet.
Note that the two SubEnumerations of the JointAlphabet need not be the same,
although many subclasses of PairMatrix will require that the two
SubEnumerations _are_ the same because their methods assume square matrices.
"""
def __init__(self, data, Alphabet, Name=None):
"""Returns new PairMatrix object containing data.
WARNING: Alphabet must be a JointAlphabet where the two SubEnumerations
are the same.
"""
self.Alphabet = Alphabet
if any(data):
self._data = reshape(array(data, 'd'), Alphabet.Shape)
else:
self._data = zeros(Alphabet.Shape, 'd')
self.Name = Name
def toMatlab(self):
"""Returns Matlab-formatted string representation."""
if self.Name is None:
name = 'm'
else:
name = str(self.Name)
return ''.join([name, '=', '[', \
';\n'.join([' '.join(map(str, r)) for r in self._data]), '];\n'])
def __str__(self):
"""Returns string representation of array held in self."""
return str(self._data)
def __repr__(self):
"""Returns string representation of self."""
return ''.join([self.__class__.__name__, '(', repr(self._data), \
',', repr(self.Alphabet), ',', repr(self.Name), ')'])
def __getitem__(self, args):
"""__getitem__ passes everything to internal array.
WARNING: m[a,b] will work where a and b are symbols in the alphabet,
but m[a][b] will fail. This is because m[a] produces an array object
with the corresponding row, which is then passed b as an index. Because
the array object doesn't have the alphabet, it can't map the index into
a number.
Slicing is not supported.
"""
        # First, test whether args index the JointAlphabet (args arrives as a
        # tuple when two symbols are supplied).
if isinstance(args, tuple):
try:
return ravel(self._data)[self.Alphabet.index(tuple(args))]
except (KeyError, TypeError):
pass
return self._data[self.Alphabet.SubEnumerations[0].index(args)]
def __len__(self):
"""Returns number of rows."""
return len(self._data)
def empty(cls, Alphabet):
"""Class method: returns empty matrix sized for alphabet."""
return cls(zeros(Alphabet.Shape), Alphabet)
empty = classmethod(empty)
def __eq__(self, other):
"""Tests whether two Usage objects have the same data."""
try:
return all(self._data == other._data)
#return not bool(all(self._data != other._data))
except:
return False
def __ne__(self, other):
"""Returns True if self and other are not equal."""
try:
return any(self._data != other._data)
#return bool(all(self._data != other._data))
except:
return False
def __iter__(self):
"""Iterates over rows in data."""
return iter(self._data)
class Counts(PairMatrix):
"""Holds the data for a matrix of counts. Immutable.
"""
def toProbs(self):
"""Returns copy of self where rows sum to 1."""
return Probs(self._data/ (sum(self._data, 1)[:,NewAxis]), \
self.Alphabet)
def fromPair(cls, first, second, Alphabet, average=True):
"""Class method: returns new Counts from two sequences.
"""
size = len(Alphabet.SubEnumerations[-1])
#if they're ModelSequence objects, use the _data attribute
if hasattr(first, '_data'):
first, second = first._data, second._data
#figure out what size we need the result to go in: note that the
#result is on a pair alphabet, so the data type of the single
#alphabet (that the sequence starts off in) might not work.
data_type = get_array_type(product(map(len, Alphabet.SubEnumerations)))
first = asarray(first, data_type)
second = asarray(second, data_type)
items = first * size + second
counts = reshape(Alphabet.counts(items), Alphabet.Shape)
if average:
return cls((counts + transpose(counts))/2.0, Alphabet)
else:
return cls(counts, Alphabet)
fromPair = classmethod(fromPair)
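    # Illustrative sketch (assumed RNA data, not part of the library): pair
    # counts between two aligned sequences, symmetrized by average=True.
    #
    #     s1 = ModelRnaSequence('UCAG')
    #     s2 = ModelRnaSequence('UCGA')
    #     c = Counts.fromPair(s1, s2, RnaPairs)  # 4x4 UCAG-pair count matrix
    #     p = c.toProbs()                        # rows rescaled to sum to 1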
def _from_triple_small(cls, first, second, outgroup, Alphabet):
"""Class method: returns new Counts for first from three sequences.
Sequence order is first, second, outgroup.
Use this method when the sequences are short and/or the alphabet is
small: relatively memory intensive because it makes an array the size
of the seq x the alphabet for each sequence. Fast on short sequences,
though.
NOTE: requires input to either all be ModelSequence objects, or all not
be ModelSequence objects. Could change this if desirable.
"""
#if they've got data, assume ModelSequence objects. Otherwise, arrays.
if hasattr(first, '_data'):
first, second, outgroup = first._data, second._data, outgroup._data
size = len(Alphabet.SubEnumerations[-1])
a_eq_b = equal(first, second)
a_ne_b = logical_not(a_eq_b)
a_eq_x = equal(first, outgroup)
b_eq_x = equal(second, outgroup)
#figure out what size we need the result to go in: note that the
#result is on a pair alphabet, so the data type of the single
#alphabet (that the sequence starts off in) might not work.
data_type = get_array_type(product(map(len, Alphabet.SubEnumerations)))
first = asarray(first, data_type)
second = asarray(second, data_type)
b_to_a = second*size + first
a_to_a = first*size + first
b_to_a_items = compress(logical_and(b_eq_x, a_ne_b), b_to_a)
a_to_a_items = compress(logical_or(a_eq_b, a_eq_x), a_to_a)
items = concatenate((b_to_a_items, a_to_a_items))
counts = reshape(Alphabet.counts(items), Alphabet.Shape)
return cls(counts, Alphabet)
def _from_triple_large(cls, first, second, outgroup, Alphabet):
"""Same as _from_triple except copes with very long sequences.
Specifically, allocates an array for the frequencies of each type,
walks through the triple one base at a time, and updates the
appropriate cell. Faster when alphabet and/or sequences are large;
also avoids memory issues because it doesn't allocate the seq x
alphabet array.
NOTE: requires input to either all be ModelSequence objects, or all not
be ModelSequence objects. Could change this if desirable.
WARNING: uses float, not int, as datatype in return value.
"""
#figure out if we already have the data in terms of alphabet indices.
#if not, we need to convert it.
if hasattr(first, '_data'):
first, second, outgroup = first._data, second._data, outgroup._data
else:
if hasattr(Alphabet, 'toIndices'):
converter = Alphabet.toIndices
else:
converter = Alphabet.fromSequenceToArray
# convert to alphabet indices
first, second, outgroup = map(asarray, map(converter,
[first, second, outgroup]))
# only include positions where all three not different
valid_posn = logical_not(logical_and(logical_and(first != outgroup,
second != outgroup),
first != second))
valid_pos = [index for index, val in enumerate(valid_posn) if val]
first = first.take(valid_pos)
second = second.take(valid_pos)
outgroup = outgroup.take(valid_pos)
out_diffs = logical_and(first == second, first != outgroup)
counts = zeros((len(Alphabet.SubEnumerations[0]), \
len(Alphabet.SubEnumerations[0])))
for x, y, out_diff in zip(outgroup, first,
out_diffs):
if out_diff:
counts[y,y] += 1
else:
counts[x,y] += 1
return cls(counts, Alphabet)
def fromTriple(cls, first, second, outgroup, Alphabet, threshold=1e6):
"""Reads counts from triple of sequences, method chosen by data size."""
if len(first) * len(Alphabet) > threshold:
return cls._from_triple_large(first, second, outgroup, Alphabet)
else:
return cls._from_triple_small(first, second, outgroup, Alphabet)
fromTriple = classmethod(fromTriple)
_from_triple_small = classmethod(_from_triple_small)
_from_triple_large = classmethod(_from_triple_large)
class Probs(PairMatrix):
"""Holds the data for a probability matrix. Immutable."""
def isValid(self):
"""Returns True if all values positive and each row sums to 1."""
for row in self:
if sum(row) != 1.0 or min(row) < 0.0:
return False
return True
def makeModel(self, seq):
"""Returns substitution model for seq based on self's rows."""
return take(self._data, seq, axis=0)
def mutate(self, seq, random_vector=None):
"""Returns mutated version of seq, according to self.
seq should behave like a Numeric array.
random_vector should be vector of 0 and 1 of same length as sequence,
if supplied.
Result is always an array, not coerced into seq's class.
"""
sums = cumsum(self._data, 1)
model = take(sums, seq, axis=0)
if random_vector is None:
random_vector = randarray(seq.shape)
return sum(transpose(model)[:-1] < random_vector, axis=0)
#transpose needed to align frames
def toCounts(self, num):
"""Returns count matrix with approximately num counts.
Rounding error may prevent counts from summing exactly to num.
"""
        num_rows = len(self)
        # float division: under Python 2, num/num_rows would truncate to zero
        # whenever num < num_rows
        return Counts(self._data * (num/float(num_rows)), self.Alphabet)
def toRates(self, normalize=False):
"""Returns rate matrix. Does not normalize by default."""
return Rates(logm(self._data), self.Alphabet, self.Name, normalize)
def random(cls, Alphabet, diags=None):
"""Makes random P-matrix with specified diag elements and size.
diags can be a single float, or vector of values with same number
of chars as individual alphabet (e.g. list of 4 elements will act
as elements for the 4 bases).
"""
shape = Alphabet.Shape
if diags is None:
result = randarray(shape)
return cls(result/sum(result, 1)[:,NewAxis], Alphabet)
else:
single_size = shape[0]
diags = array(diags, 'd')
#handle scalar case
if not diags.shape:
diags = reshape(diags, (1,))
if len(diags) == 1:
diags = repeat(diags, single_size)
temp = randarray((single_size, single_size-1))
temp *= ((1.0-diags)/sum(temp, 1))[:,NewAxis]
result = diag(diags)
for r, row in enumerate(temp):
result[r][:r] = row[:r]
result[r][r+1:] = row[r:]
return cls(result, Alphabet)
random = classmethod(random)
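    # Illustrative sketch (assumed inputs, not part of the library):
    #
    #     p = Probs.random(DnaPairs, diags=0.9)  # ~90% chance each base is kept
    #     seq = array([0, 1, 2, 3])              # indices into the DNA alphabet
    #     mutated = p.mutate(seq)                # array of mutated indices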
class Rates(PairMatrix):
"""Holds the data for a rate matrix. Immutable."""
def __init__(self, data, Alphabet, name=None, normalize=False):
"""Returns new Rates matrix, normalizing trace to -1 if necessary."""
data = array(data)
#check for complex input array
if data.dtype == 'complex128':
self.imag = data.imag
data = data.real
        super(Rates, self).__init__(data, Alphabet, name) # forward name so self.Name is kept
if normalize:
self._normalize_inplace()
def isComplex(self):
"""Returns True if self has a complex component."""
return hasattr(self, 'imag')
def isSignificantlyComplex(self, threshold=0.1):
"""Returns True if complex component is above threshold."""
if hasattr(self, 'imag'):
return sum(ravel(self.imag)) > threshold
else:
return False
def isValid(self, threshold=1e-7):
"""Rate matrix is valid if rows sum to 0 and no negative off-diags.
threshold gives maximum error allowed in row sums.
"""
if max(abs(sum(self._data, -1)) > threshold):
return False
return not has_neg_off_diags(self._data)
def _normalize_inplace(self):
"""Normalizes trace to -1, in-place.
Should only call during __init__, since it mutates the object.
WARNING: Only normalizes real component.
"""
scale_trace(self._data)
def normalize(self):
"""Returns normalized copy of self where trace is -1.
WARNING: Only normalizes real component.
"""
return Rates(self._data, self.Alphabet, normalize=True)
def _get_diagonalized(self):
"""Gets diagonalization of self as u, v, w; caches values."""
if not hasattr(self, '_diag_cache'):
error_tolerance = 1e-4 #amount of error allowed in product
eigenvalues, eigenvectors = eig(self._data)
u = transpose(eigenvectors)
v = eigenvalues
w = inverse(u)
#check that the diagonalization actually worked by multiplying
#the results back together
            result = dot(dot(u, diag(v)), w) # diag(v), not v: rebuild the full matrix
            if abs(sum(ravel(result))) > error_tolerance:
                raise ValueError, "Diagonalization failed with erroneous result."
self._diag_cache = u, v, w
return self._diag_cache
_diagonalized = property(_get_diagonalized)
def toProbs(self, time=1.0):
"""Returns probs at exp(self*scale_factor).
The way this works is by diagonalizing the rate matrix so that u is
the matrix with eigenvectors as columns, v is a vector of eigenvalues,
and w is the inverse of u. u * diag(v) * w reconstructs the original
rate matrix. u * diag(exp(v*t)) * w exponentiates the rate matrix to
time t.
This is more expensive than a single exponentiation if the rate matrix
        is going to be exponentiated only once, but faster if it is to be
exponentiated to many different time points.
Note that the diagonalization is not the same as the svd.
If the diagonalization fails, we use the naive version of just
multiplying the rate matrix by the time and exponentiating.
"""
try:
u, v, w = self._diagonalized
#scale v to the right time by exp(v_0*t)
v = diag(exp(v * time))
return Probs(dot(dot(u,v), w), self.Alphabet)
        except Exception:
return Probs(expm(self._data)(time), self.Alphabet)
def _timeForSimilarity_naive(self, similarity, freqs=None):
"""Returns time exponent so that exp(q*time) diverges to right distance.
Takes symbol freqs into account if specified; otherwise assumes equal.
freqs: vector of frequencies, applied to each row successively.
WARNING: Factor of 5 slower than timeForSimilarity. Included for
testing that results are identical.
"""
q = self._data
if freqs is None:
def similarity_f(t):
return abs(average(diagonal(expm(q)(t)))-similarity)
else:
def similarity_f(t):
return abs(sum(diagonal(expm(q)(t)*freqs)) - similarity)
initial_guess = array([1.0])
result = fmin(similarity_f, initial_guess, disp=0)
#disp=0 turns off fmin messages
return result
def timeForSimilarity(self, similarity, freqs=None):
"""Returns time exponent so that exp(q*time) diverges to right distance.
Takes symbol freqs into account if specified; otherwise assumes equal.
freqs: vector of frequencies, applied to each row successively.
NOTE: harder to understand, but a factor of 5 faster than the naive
version. The nested matrixmultiply calls have the same effect as
exponentiating the matrix.
"""
#if there's no change, the time is 0
if similarity == 1:
return 0.0
#try fast version first, but if it fails we'll use the naive version.
try:
u, v, w = self._diagonalized
if freqs is None:
def similarity_f(t):
return abs(average(diagonal(dot(u, \
dot(diag(exp(v*t)), w)))) - similarity)
else:
def similarity_f(t):
return abs(sum(diagonal(dot(u, \
dot(diag(exp(v*t)), w)))*freqs) - similarity)
except (TypeError, ValueError):
#get here if diagonalization fails
q = self._data
if freqs is None:
def similarity_f(t):
return abs(average(diagonal(expm(q)(t)))-similarity)
else:
def similarity_f(t):
return abs(sum(diagonal(expm(q)(t)*freqs))-similarity)
return brent(similarity_f)
def toSimilarProbs(self, similarity, freqs=None):
"""Returns Probs at specified divergence.
Convenience wrapper for toProbs and timeForSimilarity.
"""
return self.toProbs(self.timeForSimilarity(similarity, freqs))
def random(cls, Alphabet, diags=None):
"""Makes random Q-matrix with specified diag elements and size.
diags can be a single float, or vector of values with same number
of chars as individual alphabet (e.g. list of 4 elements will act
as elements for the 4 bases).
"""
shape = Alphabet.Shape
single_size = shape[0]
if diags is None:
diags = -randarray(single_size)
else:
diags = array(diags, 'd')
#handle scalar case
if not diags.shape:
diags = reshape(diags, (1,))
if len(diags) == 1:
diags = repeat(diags, single_size)
temp = randarray((single_size, single_size-1))
temp *= ((-diags)/sum(temp, 1))[:,NewAxis]
result = diag(diags)
for r, row in enumerate(temp):
result[r][:r] = row[:r]
result[r][r+1:] = row[r:]
return cls(result, Alphabet)
random = classmethod(random)
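    # Illustrative sketch (assumed inputs, not part of the library):
    #
    #     q = Rates.random(RnaPairs)     # random rate matrix, rows sum to 0
    #     p = q.toProbs(0.5)             # substitution probs at time t=0.5
    #     p2 = q.toSimilarProbs(0.8)     # probs whose mean identity is ~0.8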
def hasNegOffDiags(self):
"""Returns True if any off-diagonal elements negative."""
return has_neg_off_diags(self._data)
def sumNegOffDiags(self):
"""Returns sum of negative off-diagonal elements."""
return sum_neg_off_diags(self._data)
def fixNegsDiag(self):
"""Returns copy of self w/o negative off-diags, using 'diag' heuristic.
If a negative off-diagonal element is encountered, sets it to 0.
Subtracts all the negative off-diagonals from the diagonal to preserve
row sum = 0.
"""
m = self._data.copy()
#clip to 0
m = choose(less(m, 0.), (m, 0.))
for i, row in enumerate(m):
row[i] = -sum(row)
return self.__class__(m, self.Alphabet)
def fixNegsEven(self):
"""Returns copy of self w/o negative off-diags, using 'even' heuristic.
If a negative off-diagonal is encountered, sets it to 0.
Distributes the negative score evenly among the other elements.
"""
m = without_diag(self._data)
for i, row in enumerate(m):
is_neg = row < 0
if any(is_neg):
num_negs = sum(is_neg)
sum_negs = sum(is_neg*row)
is_not_neg = logical_not(is_neg)
num_not_neg = sum(is_not_neg)
new_row = (row + (sum_negs/(num_not_neg+1)))*is_not_neg
m[i] = new_row
return self.__class__(with_diag(m, -sum(m,1)), self.Alphabet)
def _make_error_f(self, to_minimize):
"""Make error function whose minimization estimates q = ln(p)."""
p = expm(self._data)(t=1)
BIG = 1e10
def result(q):
new_q = reshape(q, self.Alphabet.Shape) #not hardcoded to 4x4
neg_sum = sum_neg_off_diags(new_q)
p_new = expm(new_q)(t=1)
return to_minimize(ravel(p), ravel(p_new)) - (BIG * neg_sum) \
+ (BIG * sum(abs(sum(new_q,1))))
return result
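#The two BIG terms steer fmin toward valid rate matrices without hard
#constraints: neg_sum is non-positive, so -BIG*neg_sum penalizes negative
#off-diagonals, and BIG*sum(abs(sum(new_q,1))) penalizes rows that do not
#sum to zero.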
def fixNegsFmin(self, method=fmin, to_minimize=norm_diff, debug=False):
"""Uses an fmin method to find a good approximate q matrix.
Possible values for method:
fmin: simplex method (the default)
fmin_bfgs: bfgs optimizer #always produces negative elements!
fmin_cg: cg optimizer #doesn't work!
fmin_powell: powell method #doesn't work!
"""
q = self._data
#bail out if q is already ok to start with
if not sum_neg_off_diags(q):
return self
err_f = self._make_error_f(to_minimize)
initial_guess = q.copy()
xmin = method(err_f, initial_guess.flat, disp=0)
#disp=0 turns off messages
new_q = reshape(xmin, self.Alphabet.Shape)[:]
if debug:
if sum_neg_off_diags(new_q):
raise Exception, 'Made invalid Q matrix: %s' % new_q
return self.__class__(new_q, self.Alphabet)
def fixNegsConstrainedOpt(self, to_minimize=norm_diff, badness=1e6):
"""Uses constrained minimization to find approx q matrix.
to_minimize: metric for comparing orig result and new result.
badness: scale factor for penalizing negative off-diagonal values.
"""
if not sum_neg_off_diags(self._data):
return self
n = self._data.shape[0] #alphabet size, so shapes below aren't DNA-only
q = ravel(without_diag(self._data))
p = expm(self._data)(t=1)
def err_f(q):
new_q = reshape(array(q), (n, n-1))
new_q = with_diag(new_q, -sum(new_q, 1))
p_new = expm(new_q)(t=1)
result = to_minimize(ravel(p), ravel(p_new))
if q.min() < 0:
result += -q.min() * badness
return result
a = array(q)
xmin = fmin(func=err_f, x0=a, disp=0)
r = reshape(xmin, (n, n-1))
new_q = with_diag(r, -sum(r, 1))
return self.__class__(new_q, self.Alphabet)
def fixNegsReflect(self):
"""Fixes negative off-diagonals by subtracting m[i][j] from m[j][i].
Specifically, if m[i][j] is negative, subtracts this value from
m[i][j] and m[i][i] to keep the row total at 0, and then subtracts
it from m[j][i] and m[j][j] to convert a negative flux in the forward
direction into a positive flux in the reverse direction. If both
m[i][j] and m[j][i] are negative, this algorithm converts them both
into positive values, effectively exchanging the magnitudes of the
changes and making the signs positive.
NOTE: It's important to iterate over the original and make changes to
the copy to avoid incorrect results in cases where both m[i][j] and
m[j][i] are negative.
"""
orig = self._data
result = orig.copy()
for i, row in enumerate(orig):
for j, val in enumerate(row):
#skip diagonal
if i == j:
continue
#only make changes if element < 0
if val < 0:
result[i][j] -= val
result[i][i] += val
result[j][i] -= val
result[j][j] += val
return self.__class__(result, self.Alphabet)
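#Worked 2-state example of the reflect heuristic (illustrative numbers):
#    m = [[ 0.3, -0.3],
#         [ 0.4, -0.4]]
#The negative flux m[0][1] = -0.3 is cleared to 0 (with m[0][0]
#compensating) and reflected into m[1][0] (with m[1][1] compensating):
#    [[ 0.0,  0.0],
#     [ 0.7, -0.7]]
#Row sums stay 0 and no negative off-diagonals remain.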
def goldman_q_rna_triple(seq1, seq2, outgroup):
"""Returns the Goldman rate matrix for seq1"""
if not (len(seq1) == len(seq2) == len(outgroup)):
raise ValueError, "seq1, seq2 and outgroup are not the same length!"
seq1 = ModelRnaSequence(seq1)
seq2 = ModelRnaSequence(seq2)
outgroup = ModelRnaSequence(outgroup)
m = Counts.fromTriple(seq1, seq2, outgroup, RnaPairs)._data
q = m / m.sum(axis=1)[:,NewAxis]
new_diag = -(q.sum(axis=1) - diag(q))
for i,v in enumerate(new_diag):
q[i,i] = v
return q
def goldman_q_dna_triple(seq1, seq2, outgroup):
"""Returns the Goldman rate matrix for seq1"""
if not (len(seq1) == len(seq2) == len(outgroup)):
raise ValueError, "seq1, seq2 and outgroup are not the same length!"
seq1 = ModelDnaSequence(seq1)
seq2 = ModelDnaSequence(seq2)
outgroup = ModelDnaSequence(outgroup)
m = Counts.fromTriple(seq1, seq2, outgroup, DnaPairs)._data
q = m / m.sum(axis=1)[:,NewAxis]
new_diag = -(q.sum(axis=1) - diag(q))
for i,v in enumerate(new_diag):
q[i,i] = v
return q
def goldman_q_dna_pair(seq1, seq2):
"""Returns the Goldman rate matrix"""
if len(seq1) != len(seq2):
raise ValueError, "seq1 and seq2 are not the same length!"
seq1, seq2 = ModelDnaSequence(seq1), ModelDnaSequence(seq2)
m = Counts.fromPair(seq1, seq2, DnaPairs,average=True)._data
q = m / m.sum(axis=1)[:,NewAxis]
new_diag = -(q.sum(axis=1) - diag(q))
for i,v in enumerate(new_diag):
q[i,i] = v
return q
def goldman_q_rna_pair(seq1, seq2):
"""Returns the Goldman rate matrix"""
if len(seq1) != len(seq2):
raise ValueError, "seq1 and seq2 are not the same length!"
seq1, seq2 = ModelRnaSequence(seq1), ModelRnaSequence(seq2)
m = Counts.fromPair(seq1, seq2, RnaPairs,average=True)._data
q = m / m.sum(axis=1)[:,NewAxis]
new_diag = -(q.sum(axis=1) - diag(q))
for i,v in enumerate(new_diag):
q[i,i] = v
return q
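#Hypothetical usage sketch (sequences are illustrative; each function
#raises ValueError when the lengths differ):
#    q = goldman_q_dna_pair('ACGTACGT', 'ACGAACGT')
#    q3 = goldman_q_dna_triple('ACGTACGT', 'ACGAACGT', 'ACGTACGA')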
def make_random_from_file(lines):
"""Simulates array random() using values from an iterator."""
def result(shape):
size = product(shape)
items = map(float, [lines.next() for s in range(size)])
a = reshape(array(items), shape)
return a
return result
#randarray = make_random_from_file(open('/Users/rob/random.txt'))
def test_heuristics(p_range=None, num_to_do=71, heuristics=None):
"""Compares the fixNegs* heuristics on random P matrices whose naive Q estimates have negative off-diagonals."""
if p_range is None:
p_range = [0.6]
if heuristics is None:
heuristics = ['fixNegsDiag', 'fixNegsEven', 'fixNegsReflect', 'fixNegsConstrainedOpt']
num_heuristics = len(heuristics)
print '\t'.join(['p'] + heuristics)
for p in p_range:
result = zeros((num_to_do, num_heuristics), Float64)
has_nonzero = 0
i = 0
while i < num_to_do:
curr_row = result[i]
random_p = Probs.random(DnaPairs, p)
q = random_p.toRates()
if not q.hasNegOffDiags():
continue
has_nonzero += 1
#print "P:"
#print random_p._data
#print "Q:"
#print q._data
i += 1
for j, h in enumerate(heuristics):
#print "HEURISTIC: ", h
q_corr = getattr(q, h)()
#print "CORRECTED Q: "
#print q_corr._data
p_corr = expm(q_corr._data)(t=1)
#print "CORRECTED P:"
#print p_corr
dist = norm_diff(p_corr, random_p._data)
#print "DISTANCE: ", dist
curr_row[j] = dist
averages = average(result)
print p, '\t', '\t'.join(map(str, averages))
if __name__ == '__main__':
test_heuristics()
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the send RPC command."""
from decimal import Decimal, getcontext
from itertools import product
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_raises_rpc_error,
)
class WalletSendTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["-whitelist=127.0.0.1","-walletrbf=1"],
["-whitelist=127.0.0.1","-walletrbf=1"],
]
getcontext().prec = 8 # Satoshi precision for Decimal
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,
arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,
conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,
inputs=None, add_inputs=None, include_unsafe=None, change_address=None, change_position=None, change_type=None,
include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,
expect_error=None):
assert (amount is None) != (data is None)
from_balance_before = from_wallet.getbalances()["mine"]["trusted"]
if include_unsafe:
from_balance_before += from_wallet.getbalances()["mine"]["untrusted_pending"]
if to_wallet is None:
assert amount is None
else:
to_untrusted_pending_before = to_wallet.getbalances()["mine"]["untrusted_pending"]
if amount:
dest = to_wallet.getnewaddress()
outputs = {dest: amount}
else:
outputs = {"data": data}
# Construct options dictionary
options = {}
if add_to_wallet is not None:
options["add_to_wallet"] = add_to_wallet
else:
if psbt:
add_to_wallet = False
else:
add_to_wallet = from_wallet.getwalletinfo()["private_keys_enabled"] # Default value
if psbt is not None:
options["psbt"] = psbt
if conf_target is not None:
options["conf_target"] = conf_target
if estimate_mode is not None:
options["estimate_mode"] = estimate_mode
if fee_rate is not None:
options["fee_rate"] = fee_rate
if inputs is not None:
options["inputs"] = inputs
if add_inputs is not None:
options["add_inputs"] = add_inputs
if include_unsafe is not None:
options["include_unsafe"] = include_unsafe
if change_address is not None:
options["change_address"] = change_address
if change_position is not None:
options["change_position"] = change_position
if change_type is not None:
options["change_type"] = change_type
if include_watching is not None:
options["include_watching"] = include_watching
if locktime is not None:
options["locktime"] = locktime
if lock_unspents is not None:
options["lock_unspents"] = lock_unspents
if replaceable is None:
replaceable = True # default
else:
options["replaceable"] = replaceable
if subtract_fee_from_outputs is not None:
options["subtract_fee_from_outputs"] = subtract_fee_from_outputs
if len(options.keys()) == 0:
options = None
if expect_error is None:
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
else:
try:
assert_raises_rpc_error(expect_error[0], expect_error[1], from_wallet.send,
outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
except AssertionError:
# Provide debug info if the test fails
self.log.error("Unexpected successful result:")
self.log.error(arg_conf_target)
self.log.error(arg_estimate_mode)
self.log.error(arg_fee_rate)
self.log.error(options)
res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)
self.log.error(res)
if "txid" in res and add_to_wallet:
self.log.error("Transaction details:")
try:
tx = from_wallet.gettransaction(res["txid"])
self.log.error(tx)
self.log.error("testmempoolaccept (transaction may already be in mempool):")
self.log.error(from_wallet.testmempoolaccept([tx["hex"]]))
except JSONRPCException as exc:
self.log.error(exc)
raise
return
if locktime:
return res
if from_wallet.getwalletinfo()["private_keys_enabled"] and not include_watching:
assert_equal(res["complete"], True)
assert "txid" in res
else:
assert_equal(res["complete"], False)
assert not "txid" in res
assert "psbt" in res
from_balance = from_wallet.getbalances()["mine"]["trusted"]
if include_unsafe:
from_balance += from_wallet.getbalances()["mine"]["untrusted_pending"]
if add_to_wallet and not include_watching:
# Ensure transaction exists in the wallet:
tx = from_wallet.gettransaction(res["txid"])
assert tx
assert_equal(tx["bip125-replaceable"], "yes" if replaceable else "no")
# Ensure transaction exists in the mempool:
tx = from_wallet.getrawtransaction(res["txid"], True)
assert tx
if amount:
if subtract_fee_from_outputs:
assert_equal(from_balance_before - from_balance, amount)
else:
assert_greater_than(from_balance_before - from_balance, amount)
else:
assert next((out for out in tx["vout"] if out["scriptPubKey"]["asm"] == "OP_RETURN 35"), None)
else:
assert_equal(from_balance_before, from_balance)
if to_wallet:
self.sync_mempools()
if add_to_wallet:
if not subtract_fee_from_outputs:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before + Decimal(amount if amount else 0))
else:
assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before)
return res
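# Note on test_send's contract: on success it returns the send RPC result
# (a dict containing "txid" and/or "psbt"); when locktime is set it returns
# the result before the balance checks; and when expect_error fires with
# the expected RPC error, it returns None.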
def run_test(self):
self.log.info("Setup wallets...")
# w0 is a wallet with coinbase rewards
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
# w1 is a regular wallet
self.nodes[1].createwallet(wallet_name="w1")
w1 = self.nodes[1].get_wallet_rpc("w1")
# w2 contains the private keys for w3
self.nodes[1].createwallet(wallet_name="w2", blank=True)
w2 = self.nodes[1].get_wallet_rpc("w2")
xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg"
if self.options.descriptors:
w2.importdescriptors([{
"desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"),
"timestamp": "now",
"range": [0, 100],
"active": True
},{
"desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"),
"timestamp": "now",
"range": [0, 100],
"active": True,
"internal": True
}])
else:
w2.sethdseed(True)
# w3 is a watch-only wallet, based on w2
self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True)
w3 = self.nodes[1].get_wallet_rpc("w3")
if self.options.descriptors:
# Match the privkeys in w2 for descriptors
res = w3.importdescriptors([{
"desc": descsum_create("wpkh(" + xpub + "/0/0/*)"),
"timestamp": "now",
"range": [0, 100],
"keypool": True,
"active": True,
"watchonly": True
},{
"desc": descsum_create("wpkh(" + xpub + "/0/1/*)"),
"timestamp": "now",
"range": [0, 100],
"keypool": True,
"active": True,
"internal": True,
"watchonly": True
}])
assert_equal(res, [{"success": True}, {"success": True}])
for _ in range(3):
a2_receive = w2.getnewaddress()
if not self.options.descriptors:
# Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors
a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
res = w3.importmulti([{
"desc": w2.getaddressinfo(a2_receive)["desc"],
"timestamp": "now",
"keypool": True,
"watchonly": True
},{
"desc": w2.getaddressinfo(a2_change)["desc"],
"timestamp": "now",
"keypool": True,
"internal": True,
"watchonly": True
}])
assert_equal(res, [{"success": True}, {"success": True}])
w0.sendtoaddress(a2_receive, 10) # fund w3
self.nodes[0].generate(1)
self.sync_blocks()
if not self.options.descriptors:
# w4 has private keys enabled, but only contains watch-only keys (from w2)
# This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet.
self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)
w4 = self.nodes[1].get_wallet_rpc("w4")
for _ in range(3):
a2_receive = w2.getnewaddress()
res = w4.importmulti([{
"desc": w2.getaddressinfo(a2_receive)["desc"],
"timestamp": "now",
"keypool": False,
"watchonly": True
}])
assert_equal(res, [{"success": True}])
w0.sendtoaddress(a2_receive, 10) # fund w4
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Send to address...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True)
self.log.info("Don't broadcast...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)
assert(res["hex"])
self.log.info("Return PSBT...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, psbt=True)
assert(res["psbt"])
self.log.info("Create transaction that spends to address, but don't broadcast...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)
# conf_target & estimate_mode can be set as argument or option
res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical", add_to_wallet=False)
res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=1, estimate_mode="economical", add_to_wallet=False)
assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"],
self.nodes[1].decodepsbt(res2["psbt"])["fee"])
# but not at the same time
for mode in ["unset", "economical", "conservative"]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical",
conf_target=1, estimate_mode=mode, add_to_wallet=False,
expect_error=(-8, "Pass conf_target and estimate_mode either as arguments or in the options object, but not both"))
self.log.info("Create PSBT from watch-only wallet w3, sign with w2...")
res = self.test_send(from_wallet=w3, to_wallet=w1, amount=1)
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
if not self.options.descriptors:
# Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet.
# This is specifically testing that w4 ignores its own private keys and creates a psbt with send
# which is not something that needs to be tested in descriptor wallets.
self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")
self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))
res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
self.log.info("Create OP_RETURN...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
self.test_send(from_wallet=w0, data="Hello World", expect_error=(-8, "Data must be hexadecimal string (not 'Hello World')"))
self.test_send(from_wallet=w0, data="23")
res = self.test_send(from_wallet=w3, data="23")
res = w2.walletprocesspsbt(res["psbt"])
assert res["complete"]
self.log.info("Test setting explicit fee rate")
res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate="1", add_to_wallet=False)
res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate="1", add_to_wallet=False)
assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], self.nodes[1].decodepsbt(res2["psbt"])["fee"])
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00007"))
# "unset" and None are treated the same for estimate_mode
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=2, estimate_mode="unset", add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00002"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=4.531, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00004531"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=3, add_to_wallet=False)
fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.00003"))
# Test that passing fee_rate as both an argument and an option raises.
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=1, fee_rate=1, add_to_wallet=False,
expect_error=(-8, "Pass the fee_rate either as an argument, or in the options object, but not both"))
assert_raises_rpc_error(-8, "Use fee_rate (sat/vB) instead of feeRate", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"feeRate": 0.01})
assert_raises_rpc_error(-3, "Unexpected key totalFee", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"totalFee": 0.01})
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode,
expect_error=(-8, "Invalid conf_target, must be between 1 and 1008")) # max value of 1008 per src/policy/fees.h
msg = 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"'
for target, mode in product([-1, 0], ["btc/kvb", "sat/vb"]):
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode, expect_error=(-8, msg))
for mode in ["", "foo", Decimal("3.141592")]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode=mode, expect_error=(-8, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=0.1, arg_estimate_mode=mode, expect_error=(-8, msg))
assert_raises_rpc_error(-8, msg, w0.send, {w1.getnewaddress(): 1}, 0.1, mode)
for mode in ["economical", "conservative"]:
for k, v in {"string": "true", "bool": True, "object": {"foo": "bar"}}.items():
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=v, estimate_mode=mode,
expect_error=(-3, f"Expected type number for conf_target, got {k}"))
# Test setting explicit fee rate just below the minimum of 1 sat/vB.
self.log.info("Explicit fee rate raises RPC error 'fee rate too low' if fee_rate of 0.99999999 is passed")
msg = "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=0.999, expect_error=(-4, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=0.999, expect_error=(-4, msg))
self.log.info("Explicit fee rate raises if invalid fee_rate is passed")
# Test fee_rate with zero values.
msg = "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=zero_value, expect_error=(-4, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=zero_value, expect_error=(-4, msg))
msg = "Invalid amount"
# Test fee_rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
# Test fee_rate out of range (negative number).
msg = "Amount out of range"
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=-1, expect_error=(-3, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=-1, expect_error=(-3, msg))
# Test type error.
msg = "Amount is not a number or string"
for invalid_value in [True, {"foo": "bar"}]:
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
# TODO: Return hex if fee rate is below -maxmempool
# res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode="sat/b", add_to_wallet=False)
# assert res["hex"]
# hex = res["hex"]
# res = self.nodes[0].testmempoolaccept([hex])
# assert not res[0]["allowed"]
# assert_equal(res[0]["reject-reason"], "...") # low fee
# assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.000001"))
self.log.info("If inputs are specified, do not automatically add more...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[], add_to_wallet=False)
assert res["complete"]
utxo1 = w0.listunspent()[0]
assert_equal(utxo1["amount"], 50)
self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1],
expect_error=(-4, "Insufficient funds"))
self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=False,
expect_error=(-4, "Insufficient funds"))
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=True, add_to_wallet=False)
assert res["complete"]
self.log.info("Manual change address and position...")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, change_address="not an address",
expect_error=(-5, "Change address must be a valid bitcoin address"))
change_address = w0.getnewaddress()
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address)
assert res["complete"]
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address, change_position=0)
assert res["complete"]
assert_equal(self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"], change_address)
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_type="legacy", change_position=0)
assert res["complete"]
change_address = self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"]
assert change_address[0] == "m" or change_address[0] == "n"
self.log.info("Set lock time...")
height = self.nodes[0].getblockchaininfo()["blocks"]
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, locktime=height + 1)
assert res["complete"]
assert res["txid"]
txid = res["txid"]
# Although the wallet finishes the transaction, it can't be added to the mempool yet:
hex = self.nodes[0].gettransaction(res["txid"])["hex"]
res = self.nodes[0].testmempoolaccept([hex])
assert not res[0]["allowed"]
assert_equal(res[0]["reject-reason"], "non-final")
# It shouldn't be confirmed in the next block
self.nodes[0].generate(1)
assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 0)
# The mempool should allow it now:
res = self.nodes[0].testmempoolaccept([hex])
assert res[0]["allowed"]
# Don't wait for wallet to add it to the mempool:
res = self.nodes[0].sendrawtransaction(hex)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1)
self.sync_all()
self.log.info("Lock unspents...")
utxo1 = w0.listunspent()[0]
assert_greater_than(utxo1["amount"], 1)
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False, lock_unspents=True)
assert res["complete"]
locked_coins = w0.listlockunspent()
assert_equal(len(locked_coins), 1)
# Locked coins are automatically unlocked when manually selected
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False)
assert res["complete"]
self.log.info("Replaceable...")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=True)
assert res["complete"]
assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "yes")
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=False)
assert res["complete"]
assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "no")
self.log.info("Subtract fee from output")
self.test_send(from_wallet=w0, to_wallet=w1, amount=1, subtract_fee_from_outputs=[0])
self.log.info("Include unsafe inputs")
self.nodes[1].createwallet(wallet_name="w5")
w5 = self.nodes[1].get_wallet_rpc("w5")
self.test_send(from_wallet=w0, to_wallet=w5, amount=2)
self.test_send(from_wallet=w5, to_wallet=w0, amount=1, expect_error=(-4, "Insufficient funds"))
res = self.test_send(from_wallet=w5, to_wallet=w0, amount=1, include_unsafe=True)
assert res["complete"]
if __name__ == '__main__':
WalletSendTest().main()
#############################################################################
#
# Voronoi diagram calculator/ Delaunay triangulator
# Translated to Python by Bill Simons
# September, 2005
#
# Calculate Delaunay triangulation or the Voronoi polygons for a set of
# 2D input points.
#
# Derived from code bearing the following notice:
#
# The author of this software is Steven Fortune. Copyright (c) 1994 by AT&T
# Bell Laboratories.
# Permission to use, copy, modify, and distribute this software for any
# purpose without fee is hereby granted, provided that this entire notice
# is included in all copies of any software which is or includes a copy
# or modification of this software and in all copies of the supporting
# documentation for such software.
# THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR AT&T MAKE ANY
# REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
# OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
#
# Comments were incorporated from Shane O'Sullivan's translation of the
# original code into C++ (http://mapviewer.skynet.ie/voronoi.html)
#
# Steve Fortune's homepage: http://netlib.bell-labs.com/cm/cs/who/sjf/index.html
#
#############################################################################
def usage():
print """
voronoi - compute Voronoi diagram or Delaunay triangulation
voronoi [-t -p -d] [filename]
Voronoi reads from filename (or standard input if no filename given) for a set
of points in the plane and writes either the Voronoi diagram or the Delaunay
triangulation to the standard output. Each input line should consist of two
real numbers, separated by white space.
If option -t is present, the Delaunay triangulation is produced.
Each output line is a triple i j k, which are the indices of the three points
in a Delaunay triangle. Points are numbered starting at 0.
If option -t is not present, the Voronoi diagram is produced.
There are four output record types.
s a b indicates that an input point at coordinates a b was seen.
l a b c indicates a line with equation ax + by = c.
v a b indicates a vertex at a b.
e l v1 v2 indicates a Voronoi segment which is a subsegment of line number l
with endpoints numbered v1 and v2. If v1 or v2 is -1, the line
extends to infinity.
Other options include:
d Print debugging info
p Produce output suitable for input to plot (1), rather than the forms
described above.
On unsorted data uniformly distributed in the unit square, voronoi uses about
20n+140 bytes of storage.
AUTHOR
Steve J. Fortune (1987) A Sweepline Algorithm for Voronoi Diagrams,
Algorithmica 2, 153-174.
"""
#############################################################################
#
# For programmatic use two functions are available:
#
# computeVoronoiDiagram(points)
#
# Takes a list of point objects (which must have x() and y() methods).
# Returns a 4-tuple of:
#
# (1) a list of 2-tuples, which are the x,y coordinates of the
# Voronoi diagram vertices
# (2) a list of 3-tuples (a,b,c) which are the equations of the
# lines in the Voronoi diagram: a*x + b*y = c
# (3) a list of 3-tuples, (l, v1, v2) representing edges of the
# Voronoi diagram. l is the index of the line, v1 and v2 are
# the indices of the vertices at the end of the edge. If
# v1 or v2 is -1, the line extends to infinity.
# (4) a 4-tuple (xmin, ymin, xmax, ymax) bounding the vertices.
#
# computeDelaunayTriangulation(points):
#
# Takes a list of point objects (which must have x() and y() methods).
# Returns a list of 3-tuples: the indices of the points that form a
# Delaunay triangle.
#
#############################################################################
import math
import sys
import getopt
TOLERANCE = 1e-9
BIG_FLOAT = 1e38
#------------------------------------------------------------------
class Context( object ):
def __init__(self):
self.doPrint = 0
self.debug = 0
self.plot = 0
self.triangulate = False
self.vertices = [] # list of vertex 2-tuples: (x,y)
self.lines = [] # equation of line 3-tuple (a b c), for the equation of the line a*x+b*y = c
self.edges = [] # edge 3-tuple: (line index, vertex 1 index, vertex 2 index); if either vertex index is -1, the edge extends to infinity
self.triangles = [] # 3-tuple of vertex indices
# self.extra_edges = [] # list of additional vertex 2-tuples (x,y) based on bounded voronoi tessellation
# self.set_bounds(None)
# self.use_bound = False
self.xmin = self.ymin = self.xmax = self.ymax = None
def circle(self,x,y,rad):
pass
def clip_line(self,edge,lid,rid):
pass
# here is where I will create false vertices if
# the voronoi line extends to infinity...
# the extra vertices will be added to the
# extra edges list as 2-tuples
# a,b,c = edge.a,edge.b,edge.c
# if lid == -1:
# x = self.xMin
# y = (c-a*x) / b
# if y < self.yMin or y > self.yMax:
# if y < self.yMin: y = self.yMin
# elif y > self.yMax: y = self.yMax
# x = (c-b*y) / a
# self.extra_edges.append((x,y))
# lid = -(len(self.extra_edges)-1)
# if rid == -1:
# x = self.xMax
# y = (c-a*x) / b
# if y < self.yMin or y > self.yMax:
# if y < self.yMin: y = self.yMin
# elif y > self.yMax: y = self.yMax
# x = (c-b*y) / a
# self.extra_edges.append((x,y))
# rid = -(len(self.extra_edges)-1)
# print lid,rid
# return (lid,rid)
def line(self,x0,y0,x1,y1):
pass
def outSite(self,s):
if(self.debug):
print "site (%d) at %f %f" % (s.sitenum, s.x, s.y)
elif(self.triangulate):
pass
elif(self.plot):
self.circle(s.x, s.y, 3) #cradius
elif(self.doPrint):
print "s %f %f" % (s.x, s.y)
def outVertex(self,s):
self.vertices.append((s.x,s.y))
if s.x < self.xmin: self.xmin = s.x
elif s.x > self.xmax: self.xmax = s.x
if s.y < self.ymin: self.ymin = s.y
elif s.y > self.ymax: self.ymax = s.y
if(self.debug):
print "vertex(%d) at %f %f" % (s.sitenum, s.x, s.y)
elif(self.triangulate):
pass
elif(self.doPrint and not self.plot):
print "v %f %f" % (s.x,s.y)
def outTriple(self,s1,s2,s3):
self.triangles.append((s1.sitenum, s2.sitenum, s3.sitenum))
if(self.debug):
print "circle through left=%d right=%d bottom=%d" % (s1.sitenum, s2.sitenum, s3.sitenum)
elif(self.triangulate and self.doPrint and not self.plot):
print "%d %d %d" % (s1.sitenum, s2.sitenum, s3.sitenum)
def outBisector(self,edge):
self.lines.append((edge.a, edge.b, edge.c))
if(self.debug):
print "line(%d) %gx+%gy=%g, bisecting %d %d" % (edge.edgenum, edge.a, edge.b, edge.c, edge.reg[0].sitenum, edge.reg[1].sitenum)
elif(self.triangulate):
if(self.plot):
self.line(edge.reg[0].x, edge.reg[0].y, edge.reg[1].x, edge.reg[1].y)
elif(self.doPrint and not self.plot):
print "l %f %f %f" % (edge.a, edge.b, edge.c)
def outEdge(self,edge):
sitenumL = -1
if edge.ep[Edge.LE] is not None:
sitenumL = edge.ep[Edge.LE].sitenum
sitenumR = -1
if edge.ep[Edge.RE] is not None:
sitenumR = edge.ep[Edge.RE].sitenum
# if sitenumL == -1 or sitenumR == -1 and self.use_bound:
# sitenumL,sitenumR = self.clip_line(edge,sitenumL,sitenumR)
self.edges.append((edge.edgenum,sitenumL,sitenumR))
if(not self.triangulate):
if self.plot:
self.clip_line(edge, sitenumL, sitenumR)
elif(self.doPrint):
print "e %d" % edge.edgenum,
print " %d " % sitenumL,
print "%d" % sitenumR
def set_bounds(self,bounds):
if bounds is not None:
self.xmin = bounds.xmin
self.ymin = bounds.ymin
self.xmax = bounds.xmax
self.ymax = bounds.ymax
else:
self.xmin = self.ymin = self.xmax = self.ymax = None
#------------------------------------------------------------------
def voronoi(siteList,context):
edgeList = EdgeList(siteList.xmin,siteList.xmax,len(siteList))
priorityQ = PriorityQueue(siteList.ymin,siteList.ymax,len(siteList))
siteIter = siteList.iterator()
bottomsite = siteIter.next()
context.outSite(bottomsite)
newsite = siteIter.next()
minpt = Site(-BIG_FLOAT,-BIG_FLOAT)
while True:
if not priorityQ.isEmpty():
minpt = priorityQ.getMinPt()
if (newsite and (priorityQ.isEmpty() or cmp(newsite,minpt) < 0)):
# newsite is smallest - this is a site event
context.outSite(newsite)
# get first Halfedge to the LEFT and RIGHT of the new site
lbnd = edgeList.leftbnd(newsite)
rbnd = lbnd.right
# if this halfedge has no edge, bot = bottom site (whatever that is)
# create a new edge that bisects
bot = lbnd.rightreg(bottomsite)
edge = Edge.bisect(bot,newsite)
context.outBisector(edge)
# create a new Halfedge, setting its pm field to 0 and insert
# this new bisector edge between the left and right vectors in
# a linked list
bisector = Halfedge(edge,Edge.LE)
edgeList.insert(lbnd,bisector)
# if the new bisector intersects with the left edge, remove
# the left edge's vertex, and put in the new one
p = lbnd.intersect(bisector)
if p is not None:
priorityQ.delete(lbnd)
priorityQ.insert(lbnd,p,newsite.distance(p))
# create a new Halfedge, setting its pm field to 1
# insert the new Halfedge to the right of the original bisector
lbnd = bisector
bisector = Halfedge(edge,Edge.RE)
edgeList.insert(lbnd,bisector)
# if this new bisector intersects with the right Halfedge
p = bisector.intersect(rbnd)
if p is not None:
# push the Halfedge into the ordered linked list of vertices
priorityQ.insert(bisector,p,newsite.distance(p))
newsite = siteIter.next()
elif not priorityQ.isEmpty():
# intersection is smallest - this is a vector (circle) event
# pop the Halfedge with the lowest vector off the ordered list of
# vectors. Get the Halfedge to the left and right of the above HE
# and also the Halfedge to the right of the right HE
lbnd = priorityQ.popMinHalfedge()
llbnd = lbnd.left
rbnd = lbnd.right
rrbnd = rbnd.right
# get the Site to the left of the left HE and to the right of
# the right HE which it bisects
bot = lbnd.leftreg(bottomsite)
top = rbnd.rightreg(bottomsite)
# output the triple of sites, stating that a circle goes through them
mid = lbnd.rightreg(bottomsite)
context.outTriple(bot,top,mid)
# get the vertex that caused this event and set the vertex number
# couldn't do this earlier since we didn't know when it would be processed
v = lbnd.vertex
siteList.setSiteNumber(v)
context.outVertex(v)
# set the endpoint of the left and right Halfedge to be this vector
if lbnd.edge.setEndpoint(lbnd.pm,v):
context.outEdge(lbnd.edge)
if rbnd.edge.setEndpoint(rbnd.pm,v):
context.outEdge(rbnd.edge)
# delete the lowest HE, remove all vertex events to do with the
# right HE and delete the right HE
edgeList.delete(lbnd)
priorityQ.delete(rbnd)
edgeList.delete(rbnd)
# if the site to the left of the event is higher than the Site
# to the right of it, then swap them and set 'pm' to RIGHT
pm = Edge.LE
if bot.y > top.y:
bot,top = top,bot
pm = Edge.RE
# Create an Edge (or line) that is between the two Sites. This
# creates the formula of the line, and assigns a line number to it
edge = Edge.bisect(bot, top)
context.outBisector(edge)
# create a HE from the edge
bisector = Halfedge(edge, pm)
# insert the new bisector to the right of the left HE
# set one endpoint to the new edge to be the vector point 'v'
# If the site to the left of this bisector is higher than the right
# Site, then this endpoint is put in position 0; otherwise in pos 1
edgeList.insert(llbnd, bisector)
if edge.setEndpoint(Edge.RE - pm, v):
context.outEdge(edge)
# if the left HE and the new bisector intersect, delete the left HE's
# pending event and reinsert it keyed on the new intersection point
p = llbnd.intersect(bisector)
if p is not None:
priorityQ.delete(llbnd)
priorityQ.insert(llbnd, p, bot.distance(p))
# if the right HE and the new bisector intersect, insert a new vector event
p = bisector.intersect(rrbnd)
if p is not None:
priorityQ.insert(bisector, p, bot.distance(p))
else:
break
he = edgeList.leftend.right
while he is not edgeList.rightend:
context.outEdge(he.edge)
he = he.right
#------------------------------------------------------------------
def isEqual(a,b,relativeError=TOLERANCE):
# is nearly equal to within the allowed relative error
norm = max(abs(a),abs(b))
return (norm < relativeError) or (abs(a - b) < (relativeError * norm))
#------------------------------------------------------------------
class Site(object):
def __init__(self,x=0.0,y=0.0,sitenum=0):
self.x = x
self.y = y
self.sitenum = sitenum
def dump(self):
print "Site #%d (%g, %g)" % (self.sitenum,self.x,self.y)
def __cmp__(self,other):
if self.y < other.y:
return -1
elif self.y > other.y:
return 1
elif self.x < other.x:
return -1
elif self.x > other.x:
return 1
else:
return 0
def distance(self,other):
dx = self.x - other.x
dy = self.y - other.y
return math.sqrt(dx*dx + dy*dy)
#------------------------------------------------------------------
class Edge(object):
LE = 0
RE = 1
EDGE_NUM = 0
DELETED = {} # marker value
def __init__(self):
self.a = 0.0
self.b = 0.0
self.c = 0.0
self.ep = [None,None]
self.reg = [None,None]
self.edgenum = 0
def dump(self):
print "(#%d a=%g, b=%g, c=%g)" % (self.edgenum,self.a,self.b,self.c)
print "ep",self.ep
print "reg",self.reg
def setEndpoint(self, lrFlag, site):
self.ep[lrFlag] = site
if self.ep[Edge.RE - lrFlag] is None:
return False
return True
@staticmethod
def bisect(s1,s2):
newedge = Edge()
newedge.reg[0] = s1 # store the sites that this edge is bisecting
newedge.reg[1] = s2
# to begin with, there are no endpoints on the bisector - it goes to infinity
# ep[0] and ep[1] are None
# get the difference in x dist between the sites
dx = float(s2.x - s1.x)
dy = float(s2.y - s1.y)
adx = abs(dx) # make sure that the difference is positive
ady = abs(dy)
# compute c for the bisector equation a*x + b*y = c
newedge.c = float(s1.x * dx + s1.y * dy + (dx*dx + dy*dy)*0.5)
if adx > ady :
# set formula of line, with x fixed to 1
newedge.a = 1.0
newedge.b = dy/dx
newedge.c /= dx
else:
# set formula of line, with y fixed to 1
newedge.b = 1.0
newedge.a = dx/dy
newedge.c /= dy
newedge.edgenum = Edge.EDGE_NUM
Edge.EDGE_NUM += 1
return newedge
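# Derivation sketch for bisect(): the perpendicular bisector of s1 and s2
# satisfies |p - s1|**2 == |p - s2|**2, which expands to
#     dx*x + dy*y = s1.x*dx + s1.y*dy + (dx*dx + dy*dy)/2
# i.e. a*x + b*y = c once divided through by dx (when adx > ady) or by dy,
# which is exactly the normalization performed above.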
#------------------------------------------------------------------
class Halfedge(object):
def __init__(self,edge=None,pm=Edge.LE):
self.left = None # left Halfedge in the edge list
self.right = None # right Halfedge in the edge list
self.qnext = None # priority queue linked list pointer
self.edge = edge # edge list Edge
self.pm = pm
self.vertex = None # Site()
self.ystar = BIG_FLOAT
def dump(self):
print "Halfedge--------------------------"
print "left: ", self.left
print "right: ", self.right
print "edge: ", self.edge
print "pm: ", self.pm
print "vertex: ",
if self.vertex: self.vertex.dump()
else: print "None"
print "ystar: ", self.ystar
def __cmp__(self,other):
if self.ystar > other.ystar:
return 1
elif self.ystar < other.ystar:
return -1
elif self.vertex.x > other.vertex.x:
return 1
elif self.vertex.x < other.vertex.x:
return -1
else:
return 0
def leftreg(self,default):
if not self.edge:
return default
elif self.pm == Edge.LE:
return self.edge.reg[Edge.LE]
else:
return self.edge.reg[Edge.RE]
def rightreg(self,default):
if not self.edge:
return default
elif self.pm == Edge.LE:
return self.edge.reg[Edge.RE]
else:
return self.edge.reg[Edge.LE]
# returns True if pt is to the right of halfedge self
def isPointRightOf(self,pt):
e = self.edge
topsite = e.reg[1]
right_of_site = pt.x > topsite.x
if(right_of_site and self.pm == Edge.LE):
return True
if(not right_of_site and self.pm == Edge.RE):
return False
if(e.a == 1.0):
dyp = pt.y - topsite.y
dxp = pt.x - topsite.x
fast = 0
if ((not right_of_site and e.b < 0.0) or (right_of_site and e.b >= 0.0)):
above = dyp >= e.b * dxp
fast = above
else:
above = pt.x + pt.y * e.b > e.c
if(e.b < 0.0):
above = not above
if (not above):
fast = 1
if (not fast):
dxs = topsite.x - (e.reg[0]).x
above = e.b * (dxp*dxp - dyp*dyp) < dxs*dyp*(1.0+2.0*dxp/dxs + e.b*e.b)
if(e.b < 0.0):
above = not above
else: # e.b == 1.0
yl = e.c - e.a * pt.x
t1 = pt.y - yl
t2 = pt.x - topsite.x
t3 = yl - topsite.y
above = t1*t1 > t2*t2 + t3*t3
if(self.pm==Edge.LE):
return above
else:
return not above
#--------------------------
# create a new site where the Halfedges el1 and el2 intersect
def intersect(self,other):
e1 = self.edge
e2 = other.edge
if (e1 is None) or (e2 is None):
return None
# if the two edges bisect the same parent return None
if e1.reg[1] is e2.reg[1]:
return None
d = e1.a * e2.b - e1.b * e2.a
if isEqual(d,0.0):
return None
xint = (e1.c*e2.b - e2.c*e1.b) / d
yint = (e2.c*e1.a - e1.c*e2.a) / d
if(cmp(e1.reg[1],e2.reg[1]) < 0):
he = self
e = e1
else:
he = other
e = e2
rightOfSite = xint >= e.reg[1].x
if((rightOfSite and he.pm == Edge.LE) or
(not rightOfSite and he.pm == Edge.RE)):
return None
# create a new site at the point of intersection - this is a new
# vector event waiting to happen
return Site(xint,yint)
#------------------------------------------------------------------
class EdgeList(object):
def __init__(self,xmin,xmax,nsites):
if xmin > xmax: xmin,xmax = xmax,xmin
self.hashsize = int(2*math.sqrt(nsites+4))
self.xmin = xmin
self.deltax = float(xmax - xmin)
self.hash = [None]*self.hashsize
self.leftend = Halfedge()
self.rightend = Halfedge()
self.leftend.right = self.rightend
self.rightend.left = self.leftend
self.hash[0] = self.leftend
self.hash[-1] = self.rightend
def insert(self,left,he):
he.left = left
he.right = left.right
left.right.left = he
left.right = he
def delete(self,he):
he.left.right = he.right
he.right.left = he.left
he.edge = Edge.DELETED
# Get entry from hash table, pruning any deleted nodes
def gethash(self,b):
if(b < 0 or b >= self.hashsize):
return None
he = self.hash[b]
if he is None or he.edge is not Edge.DELETED:
return he
# Hash table points to deleted half edge. Patch as necessary.
self.hash[b] = None
return None
def leftbnd(self,pt):
# Use hash table to get close to desired halfedge
bucket = int(((pt.x - self.xmin)/self.deltax * self.hashsize))
if(bucket < 0):
bucket = 0
if(bucket >= self.hashsize):
bucket = self.hashsize-1
he = self.gethash(bucket)
if(he is None):
i = 1
while True:
he = self.gethash(bucket-i)
if (he is not None): break
he = self.gethash(bucket+i)
if (he is not None): break
i += 1
# Now search linear list of halfedges for the correct one
if (he is self.leftend) or (he is not self.rightend and he.isPointRightOf(pt)):
he = he.right
while he is not self.rightend and he.isPointRightOf(pt):
he = he.right
he = he.left
else:
he = he.left
while (he is not self.leftend and not he.isPointRightOf(pt)):
he = he.left
# Update hash table (the reference counts of the C original aren't needed)
if(bucket > 0 and bucket < self.hashsize-1):
self.hash[bucket] = he
return he
#------------------------------------------------------------------
class PriorityQueue(object):
def __init__(self,ymin,ymax,nsites):
self.ymin = ymin
self.deltay = ymax - ymin
self.hashsize = int(4 * math.sqrt(nsites))
self.count = 0
self.minidx = 0
self.hash = []
for i in range(self.hashsize):
self.hash.append(Halfedge())
def __len__(self):
return self.count
def isEmpty(self):
return self.count == 0
def insert(self,he,site,offset):
he.vertex = site
he.ystar = site.y + offset
last = self.hash[self.getBucket(he)]
next = last.qnext
while((next is not None) and cmp(he,next) > 0):
last = next
next = last.qnext
he.qnext = last.qnext
last.qnext = he
self.count += 1
def delete(self,he):
if (he.vertex is not None):
last = self.hash[self.getBucket(he)]
while last.qnext is not he:
last = last.qnext
last.qnext = he.qnext
self.count -= 1
he.vertex = None
def getBucket(self,he):
bucket = int(((he.ystar - self.ymin) / self.deltay) * self.hashsize)
if bucket < 0: bucket = 0
if bucket >= self.hashsize: bucket = self.hashsize-1
if bucket < self.minidx: self.minidx = bucket
return bucket
def getMinPt(self):
while(self.hash[self.minidx].qnext is None):
self.minidx += 1
he = self.hash[self.minidx].qnext
x = he.vertex.x
y = he.ystar
return Site(x,y)
def popMinHalfedge(self):
curr = self.hash[self.minidx].qnext
self.hash[self.minidx].qnext = curr.qnext
self.count -= 1
return curr
#------------------------------------------------------------------
class SiteList(object):
def __init__(self,pointList):
self.__sites = []
self.__sitenum = 0
self.__xmin = pointList[0].x()
self.__ymin = pointList[0].y()
self.__xmax = pointList[0].x()
self.__ymax = pointList[0].y()
for i,pt in enumerate(pointList):
self.__sites.append(Site(pt.x(),pt.y(),i))
if pt.x() < self.__xmin: self.__xmin = pt.x()
if pt.y() < self.__ymin: self.__ymin = pt.y()
if pt.x() > self.__xmax: self.__xmax = pt.x()
if pt.y() > self.__ymax: self.__ymax = pt.y()
self.__sites.sort()
def setSiteNumber(self,site):
site.sitenum = self.__sitenum
self.__sitenum += 1
class Iterator(object):
def __init__(self,lst): self.generator = (s for s in lst)
def __iter__(self): return self
def next(self):
try:
return self.generator.next()
except StopIteration:
return None
def iterator(self):
return SiteList.Iterator(self.__sites)
def __iter__(self):
return SiteList.Iterator(self.__sites)
def __len__(self):
return len(self.__sites)
def _getxmin(self): return self.__xmin
def _getymin(self): return self.__ymin
def _getxmax(self): return self.__xmax
def _getymax(self): return self.__ymax
xmin = property(_getxmin)
ymin = property(_getymin)
xmax = property(_getxmax)
ymax = property(_getymax)
#------------------------------------------------------------------
def computeVoronoiDiagram( points ):
""" Takes a list of point objects (which must have x and y fields).
Returns a 3-tuple of:
(1) a list of 2-tuples, which are the x,y coordinates of the
Voronoi diagram vertices
(2) a list of 3-tuples (a,b,c) which are the equations of the
lines in the Voronoi diagram: a*x + b*y = c
(3) a list of 3-tuples, (l, v1, v2) representing edges of the
Voronoi diagram. l is the index of the line, v1 and v2 are
the indices of the vetices at the end of the edge. If
v1 or v2 is -1, the line extends to infinity.
"""
siteList = SiteList( points )
context = Context()
context.set_bounds( siteList )
voronoi( siteList, context )
return ( context.vertices, context.lines, context.edges, (context.xmin,context.ymin,context.xmax,context.ymax))
#------------------------------------------------------------------
def computeDelaunayTriangulation( points ):
""" Takes a list of point objects (which must have x and y fields).
Returns a list of 3-tuples: the indices of the points that form a
Delaunay triangle.
"""
siteList = SiteList( points )
context = Context()
context.triangulate = True
voronoi( siteList, context )
return context.triangles
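# Minimal programmatic sketch (hypothetical Point class; any object whose
# x() and y() methods return coordinates works, since SiteList calls
# pt.x() and pt.y()):
#
# class Point(object):
#     def __init__(self,x,y): self._x,self._y = float(x),float(y)
#     def x(self): return self._x
#     def y(self): return self._y
#
# verts,lines,edges,bounds = computeVoronoiDiagram([Point(0,0),Point(1,0),Point(0,1)])
# tris = computeDelaunayTriangulation([Point(0,0),Point(1,0),Point(0,1)])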
#-----------------------------------------------------------------------------
if __name__=="__main__":
try:
optlist,args = getopt.getopt(sys.argv[1:],"thdp")
except getopt.GetoptError:
usage()
sys.exit(2)
doHelp = 0
c = Context()
c.doPrint = 1
for opt in optlist:
if opt[0] == "-d": c.debug = 1
if opt[0] == "-p": c.plot = 1
if opt[0] == "-t": c.triangulate = 1
if opt[0] == "-h": doHelp = 1
if not doHelp:
pts = []
fp = sys.stdin
if len(args) > 0:
fp = open(args[0],'r')
for line in fp:
fld = line.split()
x = float(fld[0])
y = float(fld[1])
pts.append(Site(x,y))
if len(args) > 0: fp.close()
if doHelp or len(pts) == 0:
usage()
sys.exit(2)
sl = SiteList(pts)
voronoi(sl,c)
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1.
See https://arxiv.org/abs/1708.02002 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD FPN feature extractor based on Resnet v1 architecture."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
fpn_scope_name,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
resnet_base_fn: base resnet network to use.
resnet_scope_name: scope name under which to construct resnet
fpn_scope_name: scope name under which to construct the feature pyramid
network.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
FPN. FPN construction uses feature maps starting from fpn_min_level up
to fpn_max_level. If there are not enough feature maps in the backbone
network, additional feature maps are created by applying stride-2
convolutions until the desired number of FPN levels is reached.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.resize_nearest_neighbor
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: On supplying invalid arguments for unused arguments.
"""
super(SSDResnetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if self._use_explicit_padding is True:
raise ValueError('Explicit padding is not a valid option.')
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._fpn_scope_name = fpn_scope_name
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md.
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _filter_features(self, image_features):
# TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
# of munging the scope here.
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block1', 'block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
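# For example, a ResNet endpoint key such as 'resnet_v1_50/block1' is
# reduced to 'block1' before it is handed to the FPN generator.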
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
min_base_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
image_features = self._filter_features(image_features)
depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope(self._fpn_scope_name,
reuse=self._reuse_weights):
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=depth_fn(self._additional_layer_depth),
use_native_resize_op=self._use_native_resize_op)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(
fpn_features['top_down_block{}'.format(level - 1)])
last_feature_map = fpn_features['top_down_block{}'.format(
base_fpn_max_level - 1)]
# Construct coarse features
for i in range(base_fpn_max_level, self._fpn_max_level):
last_feature_map = slim.conv2d(
last_feature_map,
num_outputs=depth_fn(self._additional_layer_depth),
kernel_size=[3, 3],
stride=2,
padding='SAME',
scope='bottom_up_block{}'.format(i))
feature_maps.append(last_feature_map)
return feature_maps
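# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): extract_features above
# maps pyramid levels to Resnet block endpoints as 'block{level - 1}' and then
# stacks stride-2 "coarse" layers for levels beyond the backbone. The helper
# below reproduces just that index arithmetic in plain Python so the layout is
# easy to inspect; the helper name is hypothetical.
def _sketch_fpn_level_layout(fpn_min_level=3, fpn_max_level=7):
  base_fpn_max_level = min(fpn_max_level, 5)
  # Backbone blocks fed into the top-down FPN pathway.
  fpn_blocks = ['block{}'.format(level - 1)
                for level in range(fpn_min_level, base_fpn_max_level + 1)]
  # Extra stride-2 convolutions stacked on the last FPN output.
  coarse_layers = ['bottom_up_block{}'.format(i)
                   for i in range(base_fpn_max_level, fpn_max_level)]
  return fpn_blocks, coarse_layers

# With the defaults (min level 3, max level 7) this yields
# (['block2', 'block3', 'block4'], ['bottom_up_block5', 'bottom_up_block6']),
# i.e. five feature maps in total, matching extract_features above.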
class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet50 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_50,
'resnet_v1_50',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet101 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_101,
'resnet_v1_101',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet152 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet152V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_152,
'resnet_v1_152',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
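# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): preprocess above is
# plain VGG channel-mean subtraction, skipped for non-3-channel inputs. The
# numpy equivalent below shows the same broadcasting; numpy is assumed to be
# importable here and the helper name is hypothetical.
import numpy as np

def _sketch_vgg_preprocess(images):
  # images: [batch, height, width, channels] float array.
  if images.shape[3] == 3:
    # Broadcasts the per-channel means across batch, height and width.
    return images - np.array([123.68, 116.779, 103.939])
  return images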
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-21 07:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ActiveExceptionModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('class_name', models.CharField(max_length=255)),
('method_name', models.CharField(max_length=255)),
('route', models.TextField()),
('log_data', models.BinaryField()),
('submitted_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ActiveHttpHostModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('host', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('random_id', models.CharField(default='', max_length=6)),
('secure', models.BooleanField(default=False)),
('port', models.IntegerField()),
('framework', models.CharField(max_length=255)),
('report_type', models.IntegerField(choices=[(0, 'exception')], default=0)),
('package_count', models.IntegerField(default=0)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='ActivePackageModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='CommonHostModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('host', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('frequency', models.IntegerField(default=900)),
('random_id', models.CharField(default='', max_length=6)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='CommonMessageModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('type', models.IntegerField(choices=[(0, 'Custom'), (1, 'Brief Message'), (2, 'Detailed Message'), (3, 'Auto Message')], default=0)),
('status', models.IntegerField(choices=[(0, 'Custom'), (1, 'Success'), (2, 'Warning'), (3, 'Notice'), (4, 'Error')], default=0)),
('content', models.TextField(default='')),
('power', models.IntegerField(default=0)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='CommonOriginModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('origin', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('power', models.IntegerField()),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('secret', models.CharField(default='', max_length=32)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='HttpDataModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('succeed', models.BooleanField(default=False)),
('code', models.IntegerField(default=200)),
('header', models.TextField()),
('delay_std', models.FloatField()),
('timestamp', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='HttpHostModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('host', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('random_id', models.CharField(default='', max_length=6)),
('secure', models.BooleanField(default=False)),
('port', models.IntegerField()),
('frequency', models.IntegerField(default=900)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='HttpOriginModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('origin', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('power', models.IntegerField()),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('secret', models.CharField(default='', max_length=32)),
('ua', models.CharField(default='', max_length=512)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='PingDataModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('transmitted_times', models.IntegerField()),
('received_times', models.IntegerField()),
('delay_min', models.FloatField()),
('delay_avg', models.FloatField()),
('delay_max', models.FloatField()),
('delay_std', models.FloatField()),
('timestamp', models.DateTimeField()),
('submitted_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PingHostModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('host', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('random_id', models.CharField(default='', max_length=6)),
('frequency', models.IntegerField(default=900)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='PingOriginModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('origin', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('power', models.IntegerField()),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('secret', models.CharField(default='', max_length=32)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='RespDataModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('succeed', models.BooleanField(default=False)),
('code', models.IntegerField(default=200)),
('header', models.TextField()),
('delay_std', models.FloatField()),
('timestamp', models.DateTimeField(auto_now=True)),
('passed', models.BooleanField(default=False)),
('size', models.IntegerField(default=0)),
('contents', models.TextField()),
],
),
migrations.CreateModel(
name='RespHostModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('host', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('random_id', models.CharField(default='', max_length=6)),
('url', models.CharField(max_length=255)),
('method', models.IntegerField(choices=[(0, 'HEAD'), (1, 'GET'), (2, 'POST')], default=0)),
('headers', models.TextField(blank=True)),
('body', models.TextField(blank=True)),
('expected_method', models.IntegerField(choices=[(0, 'Simple'), (1, 'Regexp')], default=0)),
('expected_contents', models.TextField()),
('frequency', models.IntegerField(default=900)),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='RespOriginModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('origin', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=255, unique=True)),
('power', models.IntegerField()),
('enabled', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('edited_at', models.DateTimeField()),
('secret', models.CharField(default='', max_length=32)),
('bandwidth', models.FloatField()),
('comments', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='SiteConfigCategoryModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='SiteConfigModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('key', models.CharField(max_length=64, unique=True)),
('value', models.CharField(max_length=256)),
('comments', models.TextField(blank=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.SiteConfigCategoryModel')),
],
),
migrations.CreateModel(
name='SiteReportModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('started_at', models.DateTimeField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('type', models.IntegerField(choices=[(0, 'Custom'), (1, 'Ping Success'), (2, 'Ping Delay'), (3, 'Http Success'), (4, 'Http Delay'), (5, 'Response Success'), (6, 'Response Delay'), (7, 'Exceptions')], default=0)),
('value', models.FloatField(default=0.0)),
('comments', models.TextField(default='No comment')),
],
),
migrations.CreateModel(
name='SiteStatusModel',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('status', models.IntegerField(choices=[(0, 'Custom'), (1, 'Success'), (2, 'Warning'), (3, 'Notice'), (4, 'Error')], default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ActiveExceptionPackageModel',
fields=[
('activepackagemodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hello_app.ActivePackageModel')),
],
bases=('hello_app.activepackagemodel',),
),
migrations.CreateModel(
name='HttpReportModel',
fields=[
('sitereportmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hello_app.SiteReportModel')),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.HttpHostModel')),
],
bases=('hello_app.sitereportmodel',),
),
migrations.CreateModel(
name='PingReportModel',
fields=[
('sitereportmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hello_app.SiteReportModel')),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.PingHostModel')),
],
bases=('hello_app.sitereportmodel',),
),
migrations.CreateModel(
name='RespReportModel',
fields=[
('sitereportmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hello_app.SiteReportModel')),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.RespHostModel')),
],
bases=('hello_app.sitereportmodel',),
),
migrations.CreateModel(
name='SiteMessageModel',
fields=[
('commonmessagemodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='hello_app.CommonMessageModel')),
],
bases=('hello_app.commonmessagemodel',),
),
migrations.AddField(
model_name='respdatamodel',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.RespHostModel'),
),
migrations.AddField(
model_name='respdatamodel',
name='origin',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.RespOriginModel'),
),
migrations.AddField(
model_name='pingdatamodel',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.PingHostModel'),
),
migrations.AddField(
model_name='pingdatamodel',
name='origin',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.PingOriginModel'),
),
migrations.AddField(
model_name='httpdatamodel',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.HttpHostModel'),
),
migrations.AddField(
model_name='httpdatamodel',
name='origin',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.HttpOriginModel'),
),
migrations.AddField(
model_name='activeexceptionmodel',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello_app.ActiveHttpHostModel'),
),
]
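# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated migration): if default site
# configuration rows were needed, a follow-up data migration could seed them
# with RunPython. `seed_defaults` and the literal values below are
# hypothetical; note these models use plain IntegerField primary keys, so ids
# must be supplied explicitly.
def seed_defaults(apps, schema_editor):
    SiteConfigCategoryModel = apps.get_model('hello_app', 'SiteConfigCategoryModel')
    SiteConfigModel = apps.get_model('hello_app', 'SiteConfigModel')
    category = SiteConfigCategoryModel.objects.create(id=1, name='general')
    SiteConfigModel.objects.create(id=1, key='site_title', value='Status', category=category)

# Such a function would be wired into a later migration as:
#     operations = [migrations.RunPython(seed_defaults, migrations.RunPython.noop)]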
from tapiriik.settings import WEB_ROOT, ENDOMONDO_CLIENT_KEY, ENDOMONDO_CLIENT_SECRET, SECRET_KEY
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, Lap
from tapiriik.services.api import APIException, APIExcludeActivity, UserException, UserExceptionType
from tapiriik.database import redis
from django.core.urlresolvers import reverse
from datetime import timedelta, datetime
from requests_oauthlib import OAuth1Session
import logging
import pytz
import json
import os
import hashlib
logger = logging.getLogger(__name__)
class EndomondoService(ServiceBase):
ID = "endomondo"
DisplayName = "Endomondo"
DisplayAbbreviation = "EN"
AuthenticationType = ServiceAuthenticationType.OAuth
UserProfileURL = "http://www.endomondo.com/profile/{0}"
UserActivityURL = "http://www.endomondo.com/workouts/{1}/{0}"
PartialSyncRequiresTrigger = True
AuthenticationNoFrame = True
ConfigurationDefaults = {
"DeviceRegistered": False,
}
# The complete list:
# running,cycling transportation,cycling sport,mountain biking,skating,roller skiing,skiing cross country,skiing downhill,snowboarding,kayaking,kite surfing,rowing,sailing,windsurfing,fitness walking,golfing,hiking,orienteering,walking,riding,swimming,spinning,other,aerobics,badminton,baseball,basketball,boxing,stair climbing,cricket,cross training,dancing,fencing,american football,rugby,soccer,handball,hockey,pilates,polo,scuba diving,squash,table tennis,tennis,beach volley,volleyball,weight training,yoga,martial arts,gymnastics,step counter,crossfit,treadmill running,skateboarding,surfing,snowshoeing,wheelchair,climbing,treadmill walking
_activityMappings = {
"running": ActivityType.Running,
"cycling transportation": ActivityType.Cycling,
"cycling sport": ActivityType.Cycling,
"mountain biking": ActivityType.MountainBiking,
"skating": ActivityType.Skating,
"skiing cross country": ActivityType.CrossCountrySkiing,
"skiing downhill": ActivityType.DownhillSkiing,
"snowboarding": ActivityType.Snowboarding,
"rowing": ActivityType.Rowing,
"fitness walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"orienteering": ActivityType.Walking,
"walking": ActivityType.Walking,
"swimming": ActivityType.Swimming,
"other": ActivityType.Other,
"treadmill running": ActivityType.Running,
"snowshoeing": ActivityType.Walking,
"wheelchair": ActivityType.Wheelchair,
"climbing": ActivityType.Climbing,
"roller skiing": ActivityType.RollerSkiing,
"treadmill walking": ActivityType.Walking
}
_reverseActivityMappings = {
"running": ActivityType.Running,
"cycling sport": ActivityType.Cycling,
"mountain biking": ActivityType.MountainBiking,
"skating": ActivityType.Skating,
"skiing cross country": ActivityType.CrossCountrySkiing,
"skiing downhill": ActivityType.DownhillSkiing,
"snowboarding": ActivityType.Snowboarding,
"rowing": ActivityType.Rowing,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"swimming": ActivityType.Swimming,
"other": ActivityType.Other,
"wheelchair": ActivityType.Wheelchair,
"climbing" : ActivityType.Climbing,
"roller skiing": ActivityType.RollerSkiing
}
_activitiesThatDontRoundTrip = {
ActivityType.Cycling,
ActivityType.Running,
ActivityType.Walking
}
SupportedActivities = list(_activityMappings.values())
ReceivesNonGPSActivitiesWithOtherSensorData = False
def WebInit(self):
self.UserAuthorizationURL = reverse("oauth_redirect", kwargs={"service": "endomondo"})
def _rateLimitBailout(self, response):
if response.status_code == 503 and "user_refused" in response.text:
raise APIException("Endomondo user token rate limit reached", user_exception=UserException(UserExceptionType.RateLimited))
def _oauthSession(self, connection=None, **params):
if connection:
params["resource_owner_key"] = connection.Authorization["Token"]
params["resource_owner_secret"] = connection.Authorization["Secret"]
return OAuth1Session(ENDOMONDO_CLIENT_KEY, client_secret=ENDOMONDO_CLIENT_SECRET, **params)
def GenerateUserAuthorizationURL(self, session, level=None):
oauthSession = self._oauthSession(callback_uri=WEB_ROOT + reverse("oauth_return", kwargs={"service": "endomondo"}))
tokens = oauthSession.fetch_request_token("https://api.endomondo.com/oauth/request_token")
redis_token_key = 'endomondo:oauth:%s' % tokens["oauth_token"]
redis.setex(redis_token_key, tokens["oauth_token_secret"], timedelta(hours=24))
return oauthSession.authorization_url("https://www.endomondo.com/oauth/authorize")
def RetrieveAuthorizationToken(self, req, level):
redis_token_key = "endomondo:oauth:%s" % req.GET["oauth_token"]
secret = redis.get(redis_token_key)
assert secret
redis.delete(redis_token_key)
oauthSession = self._oauthSession(resource_owner_secret=secret)
oauthSession.parse_authorization_response(req.get_full_path())
tokens = oauthSession.fetch_access_token("https://api.endomondo.com/oauth/access_token")
userInfo = oauthSession.get("https://api.endomondo.com/api/1/user")
userInfo = userInfo.json()
return (userInfo["id"], {"Token": tokens["oauth_token"], "Secret": tokens["oauth_token_secret"]})
def RevokeAuthorization(self, serviceRecord):
pass
def _parseDate(self, date):
return datetime.strptime(date, "%Y-%m-%d %H:%M:%S UTC").replace(tzinfo=pytz.utc)
def _formatDate(self, date):
return datetime.strftime(date.astimezone(pytz.utc), "%Y-%m-%d %H:%M:%S UTC")
def DownloadActivityList(self, serviceRecord, exhaustive=False):
oauthSession = self._oauthSession(serviceRecord)
activities = []
exclusions = []
page_url = "https://api.endomondo.com/api/1/workouts"
while True:
resp = oauthSession.get(page_url)
try:
respList = resp.json()["data"]
except ValueError:
self._rateLimitBailout(resp)
raise APIException("Error decoding activity list resp %s %s" % (resp.status_code, resp.text))
for actInfo in respList:
activity = UploadedActivity()
activity.StartTime = self._parseDate(actInfo["start_time"])
logger.debug("Activity s/t %s" % activity.StartTime)
if "is_tracking" in actInfo and actInfo["is_tracking"]:
exclusions.append(APIExcludeActivity("Not complete", activity_id=actInfo["id"], permanent=False, user_exception=UserException(UserExceptionType.LiveTracking)))
continue
if "end_time" in actInfo:
activity.EndTime = self._parseDate(actInfo["end_time"])
if actInfo["sport"] in self._activityMappings:
activity.Type = self._activityMappings[actInfo["sport"]]
# "duration" is timer time
if "duration_total" in actInfo:
activity.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=float(actInfo["duration_total"]))
if "distance_total" in actInfo:
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=float(actInfo["distance_total"]))
if "calories_total" in actInfo:
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=float(actInfo["calories_total"]))
activity.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters)
if "altitude_max" in actInfo:
activity.Stats.Elevation.Max = float(actInfo["altitude_max"])
if "altitude_min" in actInfo:
activity.Stats.Elevation.Min = float(actInfo["altitude_min"])
if "total_ascent" in actInfo:
activity.Stats.Elevation.Gain = float(actInfo["total_ascent"])
if "total_descent" in actInfo:
activity.Stats.Elevation.Loss = float(actInfo["total_descent"])
activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour)
if "speed_max" in actInfo:
activity.Stats.Speed.Max = float(actInfo["speed_max"])
if "heart_rate_avg" in actInfo:
activity.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(actInfo["heart_rate_avg"]))
if "heart_rate_max" in actInfo:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=float(actInfo["heart_rate_max"])))
if "cadence_avg" in actInfo:
activity.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=int(actInfo["cadence_avg"]))
if "cadence_max" in actInfo:
activity.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, max=int(actInfo["cadence_max"])))
if "title" in actInfo:
activity.Name = actInfo["title"]
activity.ServiceData = {"WorkoutID": int(actInfo["id"]), "Sport": actInfo["sport"]}
activity.CalculateUID()
activities.append(activity)
paging = resp.json()["paging"]
if "next" not in paging or not paging["next"] or not exhaustive:
break
else:
page_url = paging["next"]
return activities, exclusions
def SubscribeToPartialSyncTrigger(self, serviceRecord):
resp = self._oauthSession(serviceRecord).put("https://api.endomondo.com/api/1/subscriptions/workout/%s" % serviceRecord.ExternalID)
try:
assert resp.status_code in [200, 201] # Created, or already existed
        except AssertionError:
            raise APIException("Could not subscribe - received unknown result %s - %s" % (resp.status_code, resp.text))
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
resp = self._oauthSession(serviceRecord).delete("https://api.endomondo.com/api/1/subscriptions/workout/%s" % serviceRecord.ExternalID)
try:
assert resp.status_code in [204, 500] # Docs say otherwise, but no-subscription-found is 500
        except AssertionError:
raise APIException("Could not unsubscribe - received unknown result %s - %s" % (resp.status_code, resp.text))
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def ExternalIDsForPartialSyncTrigger(self, req):
data = json.loads(req.body.decode("UTF-8"))
delta_external_ids = [int(x["id"]) for x in data["data"]]
return delta_external_ids
def DownloadActivity(self, serviceRecord, activity):
resp = self._oauthSession(serviceRecord).get("https://api.endomondo.com/api/1/workouts/%d" % activity.ServiceData["WorkoutID"], params={"fields": "points"})
try:
resp = resp.json()
except ValueError:
self._rateLimitBailout(resp)
            raise APIException("Parse failure in Endomondo activity download: %s %s" % (resp.status_code, resp.text))
lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime)
activity.Laps = [lap]
activity.GPS = False
old_location = None
in_pause = False
for pt in resp["points"]:
wp = Waypoint()
if "time" not in pt:
# Manually-entered activities with a course attached to them have date-less waypoints
# It'd be nice to transfer those courses, but it's a concept few other sites support AFAIK
# So, ignore the points entirely
continue
wp.Timestamp = self._parseDate(pt["time"])
if ("lat" in pt and "lng" in pt) or "alt" in pt:
wp.Location = Location()
if "lat" in pt and "lng" in pt:
wp.Location.Latitude = pt["lat"]
wp.Location.Longitude = pt["lng"]
activity.GPS = True
if "alt" in pt:
wp.Location.Altitude = pt["alt"]
if wp.Location == old_location:
# We have seen the point with the same coordinates
# before. This causes other services (e.g Strava) to
# interpret this as if we were standing for a while,
# which causes us having wrong activity time when
# importing. We mark the point as paused in hopes this
# fixes the issue.
in_pause = True
wp.Type = WaypointType.Pause
elif in_pause:
in_pause = False
wp.Type = WaypointType.Resume
old_location = wp.Location
if "hr" in pt:
wp.HR = pt["hr"]
if "cad" in pt:
wp.Cadence = pt["cad"]
lap.Waypoints.append(wp)
activity.Stationary = len(lap.Waypoints) == 0
return activity
def _deviceId(self, serviceRecord):
csp = hashlib.new("md5")
csp.update(str(serviceRecord.ExternalID).encode("utf-8"))
csp.update(SECRET_KEY.encode("utf-8"))
return "tap-" + csp.hexdigest()
def _getSport(self, activity):
# This is an activity type that doesn't round trip
if (activity.Type in self._activitiesThatDontRoundTrip and
# We have the original sport
"Sport" in activity.ServiceData and
# We know what this sport is
activity.ServiceData["Sport"] in self._activityMappings and
# The type didn't change (if we changed from Walking to Cycling, we'd want to let the new value through)
activity.Type == self._activityMappings[activity.ServiceData["Sport"]]):
return activity.ServiceData["Sport"]
else:
            return [k for k, v in self._reverseActivityMappings.items() if v == activity.Type][0]
def UploadActivity(self, serviceRecord, activity):
session = self._oauthSession(serviceRecord)
device_id = self._deviceId(serviceRecord)
if not serviceRecord.GetConfiguration()["DeviceRegistered"]:
device_info = {
"name": "tapiriik",
"vendor": "tapiriik",
"model": "tapiriik",
"os": "tapiriik",
"os_version": "1",
"app_variant": "tapiriik",
"app_version": "1"
}
device_add_resp = session.post("https://api.endomondo.com/api/1/device/%s" % device_id, data=json.dumps(device_info))
if device_add_resp.status_code != 200:
self._rateLimitBailout(device_add_resp)
raise APIException("Could not add device %s %s" % (device_add_resp.status_code, device_add_resp.text))
serviceRecord.SetConfiguration({"DeviceRegistered": True})
activity_id = "tap-" + activity.UID + "-" + str(os.getpid())
sport = self._getSport(activity)
upload_data = {
"device_id": device_id,
"sport": sport,
"start_time": self._formatDate(activity.StartTime),
"end_time": self._formatDate(activity.EndTime),
"points": []
}
if activity.Name:
upload_data["title"] = activity.Name
if activity.Notes:
upload_data["notes"] = activity.Notes
if activity.Stats.Distance.Value is not None:
upload_data["distance_total"] = activity.Stats.Distance.asUnits(ActivityStatisticUnit.Kilometers).Value
if activity.Stats.TimerTime.Value is not None:
upload_data["duration_total"] = activity.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value
elif activity.Stats.MovingTime.Value is not None:
upload_data["duration_total"] = activity.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value
else:
upload_data["duration_total"] = (activity.EndTime - activity.StartTime).total_seconds()
if activity.Stats.Energy.Value is not None:
upload_data["calories_total"] = activity.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value
elev_stats = activity.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters)
if elev_stats.Max is not None:
upload_data["altitude_max"] = elev_stats.Max
if elev_stats.Min is not None:
upload_data["altitude_min"] = elev_stats.Min
if elev_stats.Gain is not None:
upload_data["total_ascent"] = elev_stats.Gain
if elev_stats.Loss is not None:
upload_data["total_descent"] = elev_stats.Loss
speed_stats = activity.Stats.Speed.asUnits(ActivityStatisticUnit.KilometersPerHour)
if speed_stats.Max is not None:
upload_data["speed_max"] = speed_stats.Max
hr_stats = activity.Stats.HR.asUnits(ActivityStatisticUnit.BeatsPerMinute)
if hr_stats.Average is not None:
upload_data["heart_rate_avg"] = hr_stats.Average
if hr_stats.Max is not None:
upload_data["heart_rate_max"] = hr_stats.Max
if activity.Stats.Cadence.Average is not None:
upload_data["cadence_avg"] = activity.Stats.Cadence.asUnits(ActivityStatisticUnit.RevolutionsPerMinute).Average
elif activity.Stats.RunCadence.Average is not None:
upload_data["cadence_avg"] = activity.Stats.RunCadence.asUnits(ActivityStatisticUnit.StepsPerMinute).Average
if activity.Stats.Cadence.Max is not None:
upload_data["cadence_max"] = activity.Stats.Cadence.asUnits(ActivityStatisticUnit.RevolutionsPerMinute).Max
elif activity.Stats.RunCadence.Max is not None:
upload_data["cadence_max"] = activity.Stats.RunCadence.asUnits(ActivityStatisticUnit.StepsPerMinute).Max
for wp in activity.GetFlatWaypoints():
pt = {
"time": self._formatDate(wp.Timestamp),
}
if wp.Location:
if wp.Location.Latitude is not None and wp.Location.Longitude is not None:
pt["lat"] = wp.Location.Latitude
pt["lng"] = wp.Location.Longitude
if wp.Location.Altitude is not None:
pt["alt"] = wp.Location.Altitude
if wp.HR is not None:
pt["hr"] = round(wp.HR)
if wp.Cadence is not None:
pt["cad"] = round(wp.Cadence)
elif wp.RunCadence is not None:
pt["cad"] = round(wp.RunCadence)
if wp.Type == WaypointType.Pause:
pt["inst"] = "pause"
elif wp.Type == WaypointType.Resume:
pt["inst"] = "resume"
upload_data["points"].append(pt)
if len(upload_data["points"]):
upload_data["points"][0]["inst"] = "start"
upload_data["points"][-1]["inst"] = "stop"
upload_resp = session.post("https://api.endomondo.com/api/1/workouts/%s" % activity_id, data=json.dumps(upload_data))
if upload_resp.status_code != 200:
self._rateLimitBailout(upload_resp)
raise APIException("Could not upload activity %s %s" % (upload_resp.status_code, upload_resp.text))
return upload_resp.json()["id"]
def DeleteCachedData(self, serviceRecord):
pass
def DeleteActivity(self, serviceRecord, uploadId):
session = self._oauthSession(serviceRecord)
del_res = session.delete("https://api.endomondo.com/api/1/workouts/%s" % uploadId)
del_res.raise_for_status()
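# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original service): DownloadActivity
# above flags consecutive waypoints that share the same coordinates as
# Pause/Resume so downstream services do not misread them as standing time.
# The stand-alone helper below mirrors that logic on plain (lat, lng) tuples;
# the helper name and the "pause"/"resume"/None labels are hypothetical.
def _sketch_mark_pauses(points):
    labels = []
    old_location = None
    in_pause = False
    for location in points:
        label = None
        if location == old_location:
            # Same coordinates as the previous point: mark as paused.
            in_pause = True
            label = "pause"
        elif in_pause:
            # First point that moves again ends the pause.
            in_pause = False
            label = "resume"
        old_location = location
        labels.append(label)
    return labels

# _sketch_mark_pauses([(0, 0), (0, 0), (0, 0), (1, 1)])
# -> [None, 'pause', 'pause', 'resume']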
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
from typing import List
from tink import aead
from tink import daead
from tink import hybrid
from tink import jwt
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
'KmsAeadKey',
'KmsEnvelopeAeadKey',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey', 'HpkePrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
JWT_MAC_KEY_TYPES = [
'JwtHmacKey',
]
JWT_SIGNATURE_KEY_TYPES = [
'JwtEcdsaPrivateKey',
'JwtRsaSsaPkcs1PrivateKey',
'JwtRsaSsaPssPrivateKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES + JWT_MAC_KEY_TYPES + JWT_SIGNATURE_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'go', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'KmsAeadKey': ['cc', 'java', 'python'],
'KmsEnvelopeAeadKey': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'HpkePrivateKey': ['cc', 'java', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
'JwtHmacKey': ['cc', 'java', 'python'],
'JwtEcdsaPrivateKey': ['cc', 'java', 'python'],
'JwtRsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'JwtRsaSsaPssPrivateKey': ['cc', 'java', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of Tinkey KeyTemplate names.
# TODO(juerg): Add missing key template names, and remove deprecated names.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': [
'AES128_EAX', 'AES128_EAX_RAW', 'AES256_EAX', 'AES256_EAX_RAW'
],
'AesGcmKey': [
'AES128_GCM', 'AES128_GCM_RAW', 'AES256_GCM', 'AES256_GCM_RAW'
],
'AesGcmSivKey': [
'AES128_GCM_SIV', 'AES128_GCM_SIV_RAW', 'AES256_GCM_SIV',
'AES256_GCM_SIV_RAW'
],
'AesCtrHmacAeadKey': [
'AES128_CTR_HMAC_SHA256', 'AES128_CTR_HMAC_SHA256_RAW',
'AES256_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256_RAW'
],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305', 'CHACHA20_POLY1305_RAW'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305', 'XCHACHA20_POLY1305_RAW'],
'KmsAeadKey': [],
'KmsEnvelopeAeadKey': [],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES128_CTR_HMAC_SHA256_1MB',
'AES256_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_1MB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES128_GCM_HKDF_1MB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256',
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256',
],
'HpkePrivateKey': [
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM_RAW',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM_RAW',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305_RAW',
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P256_RAW', 'ECDSA_P384', 'ECDSA_P384_SHA384',
'ECDSA_P384_SHA512', 'ECDSA_P521', 'ECDSA_P256_IEEE_P1363',
'ECDSA_P384_IEEE_P1363', 'ECDSA_P384_SHA384_IEEE_P1363',
'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_SHA256_PRF', 'HMAC_SHA512_PRF'],
'HkdfPrfKey': ['HKDF_SHA256'],
'JwtHmacKey': [
'JWT_HS256', 'JWT_HS256_RAW', 'JWT_HS384', 'JWT_HS384_RAW', 'JWT_HS512',
'JWT_HS512_RAW'
],
'JwtEcdsaPrivateKey': [
'JWT_ES256', 'JWT_ES256_RAW', 'JWT_ES384', 'JWT_ES384_RAW', 'JWT_ES512',
'JWT_ES512_RAW'
],
'JwtRsaSsaPkcs1PrivateKey': [
'JWT_RS256_2048_F4', 'JWT_RS256_2048_F4_RAW', 'JWT_RS256_3072_F4',
'JWT_RS256_3072_F4_RAW', 'JWT_RS384_3072_F4', 'JWT_RS384_3072_F4_RAW',
'JWT_RS512_4096_F4', 'JWT_RS512_4096_F4_RAW'
],
'JwtRsaSsaPssPrivateKey': [
'JWT_PS256_2048_F4', 'JWT_PS256_2048_F4_RAW', 'JWT_PS256_3072_F4',
'JWT_PS256_3072_F4_RAW', 'JWT_PS384_3072_F4', 'JWT_PS384_3072_F4_RAW',
'JWT_PS512_4096_F4', 'JWT_PS512_4096_F4_RAW'
],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES128_EAX_RAW':
aead.aead_key_templates.AES128_EAX_RAW,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES256_EAX_RAW':
aead.aead_key_templates.AES256_EAX_RAW,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES128_GCM_RAW':
aead.aead_key_templates.AES128_GCM_RAW,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES256_GCM_RAW':
aead.aead_key_templates.AES256_GCM_RAW,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES128_GCM_SIV_RAW':
aead.aead_key_templates.AES128_GCM_SIV_RAW,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES256_GCM_SIV_RAW':
aead.aead_key_templates.AES256_GCM_SIV_RAW,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES128_CTR_HMAC_SHA256_RAW':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256_RAW,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256_RAW':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256_RAW,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'CHACHA20_POLY1305_RAW':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.RAW),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'XCHACHA20_POLY1305_RAW':
aead.aead_key_templates.XCHACHA20_POLY1305_RAW,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES128_CTR_HMAC_SHA256_1MB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_1MB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_1MB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_1MB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES128_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_1MB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates
.ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM_RAW':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM_RAW,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM_RAW':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM_RAW,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305,
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305_RAW':
hybrid.hybrid_key_templates
.DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305_RAW,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P256_RAW':
signature.signature_key_templates.ECDSA_P256_RAW,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P384_SHA512':
signature.signature_key_templates.ECDSA_P384_SHA512,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_SHA256_PRF':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_SHA512_PRF':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
'JWT_HS256':
jwt.jwt_hs256_template(),
'JWT_HS256_RAW':
jwt.raw_jwt_hs256_template(),
'JWT_HS384':
jwt.jwt_hs384_template(),
'JWT_HS384_RAW':
jwt.raw_jwt_hs384_template(),
'JWT_HS512':
jwt.jwt_hs512_template(),
'JWT_HS512_RAW':
jwt.raw_jwt_hs512_template(),
'JWT_ES256':
jwt.jwt_es256_template(),
'JWT_ES256_RAW':
jwt.raw_jwt_es256_template(),
'JWT_ES384':
jwt.jwt_es384_template(),
'JWT_ES384_RAW':
jwt.raw_jwt_es384_template(),
'JWT_ES512':
jwt.jwt_es512_template(),
'JWT_ES512_RAW':
jwt.raw_jwt_es512_template(),
'JWT_RS256_2048_F4':
jwt.jwt_rs256_2048_f4_template(),
'JWT_RS256_2048_F4_RAW':
jwt.raw_jwt_rs256_2048_f4_template(),
'JWT_RS256_3072_F4':
jwt.jwt_rs256_3072_f4_template(),
'JWT_RS256_3072_F4_RAW':
jwt.raw_jwt_rs256_3072_f4_template(),
'JWT_RS384_3072_F4':
jwt.jwt_rs384_3072_f4_template(),
'JWT_RS384_3072_F4_RAW':
jwt.raw_jwt_rs384_3072_f4_template(),
'JWT_RS512_4096_F4':
jwt.jwt_rs512_4096_f4_template(),
'JWT_RS512_4096_F4_RAW':
jwt.raw_jwt_rs512_4096_f4_template(),
'JWT_PS256_2048_F4':
jwt.jwt_ps256_2048_f4_template(),
'JWT_PS256_2048_F4_RAW':
jwt.raw_jwt_ps256_2048_f4_template(),
'JWT_PS256_3072_F4':
jwt.jwt_ps256_3072_f4_template(),
'JWT_PS256_3072_F4_RAW':
jwt.raw_jwt_ps256_3072_f4_template(),
'JWT_PS384_3072_F4':
jwt.jwt_ps384_3072_f4_template(),
'JWT_PS384_3072_F4_RAW':
jwt.raw_jwt_ps384_3072_f4_template(),
'JWT_PS512_4096_F4':
jwt.jwt_ps512_4096_f4_template(),
'JWT_PS512_4096_F4_RAW':
jwt.raw_jwt_ps512_4096_f4_template(),
}
# Key template names for which the list of supported languages is different from
# the list of supported languages of the whole key type.
_CUSTOM_SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
# currently empty.
}
def _supported_languages_by_template(
template_name: str, key_type: str) -> List[str]:
if template_name in _CUSTOM_SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME:
return _CUSTOM_SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[template_name]
return SUPPORTED_LANGUAGES[key_type]
def _all_key_template_names_with_key_type():
for key_type, template_names in KEY_TEMPLATE_NAMES.items():
for template_name in template_names:
yield (template_name, key_type)
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
    name: _supported_languages_by_template(name, key_type)
    for name, key_type in _all_key_template_names_with_key_type()
}
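# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the tables above are
# only useful if they stay in sync, so a test suite might cross-check them
# roughly as below. `_sketch_check_tables` is a hypothetical name; it only
# touches names defined in this module.
def _sketch_check_tables():
  for key_type, template_names in KEY_TEMPLATE_NAMES.items():
    # Every key type must declare its supported languages...
    assert key_type in SUPPORTED_LANGUAGES, key_type
    for template_name in template_names:
      # ...every advertised template name must have a concrete template...
      assert template_name in KEY_TEMPLATE, template_name
      # ...and resolve to a non-empty language list.
      assert SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[template_name], template_name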
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import inspect
from collections.abc import Mapping
from dataclasses import dataclass
from functools import update_wrapper
from typing import Any, Sequence, Set, Tuple, Type
from pants.base.exceptions import ResolveError
from pants.base.specs import OriginSpec
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.objects import Collection, Resolvable, Serializable
from pants.util.objects import TypeConstraintError
def assert_single_address(addresses: Sequence[Address]) -> None:
"""Assert that exactly one address must be contained in the collection."""
if len(addresses) == 0:
raise ResolveError("No targets were matched.")
if len(addresses) > 1:
output = "\n * ".join(address.spec for address in addresses)
raise ResolveError(
"Expected a single target, but was given multiple targets.\n\n"
f"Did you mean one of:\n * {output}"
)
class Addresses(Collection[Address]):
def expect_single(self) -> Address:
assert_single_address(self.dependencies)
return self.dependencies[0]
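# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `expect_single` is a
# thin wrapper over `assert_single_address`, so its failure mode is easiest to
# see on the helper itself. The function below is hypothetical and relies only
# on names already imported in this file.
def _sketch_expect_single_failure():
    try:
        assert_single_address([])  # No matching addresses at all.
    except ResolveError as e:
        return str(e)  # "No targets were matched."

# With two or more addresses, the error instead lists each `address.spec`
# under a "Did you mean one of:" suggestion, mirroring the message built in
# assert_single_address above.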
@dataclass(frozen=True)
class AddressWithOrigin:
"""A BuildFileAddress along with the cmd-line spec it was generated from."""
address: Address
origin: OriginSpec
class AddressesWithOrigins(Collection[AddressWithOrigin]):
def expect_single(self) -> AddressWithOrigin:
assert_single_address(
[address_with_origin.address for address_with_origin in self.dependencies]
)
return self.dependencies[0]
class BuildFileAddresses(Collection[BuildFileAddress]):
"""NB: V2 should generally use Addresses instead of BuildFileAddresses."""
class NotSerializableError(TypeError):
"""Indicates an addressable descriptor is illegally installed in a non-Serializable type."""
class MutationError(AttributeError):
"""Indicates an illegal attempt to mutate an addressable attribute that already has a value."""
class AddressableTypeValidationError(TypeConstraintError):
"""Indicates a value provided to an `AddressableDescriptor` failed to satisfy a type
constraint."""
class AddressableDescriptor:
"""A data descriptor for fields containing one or more addressable items.
An addressable descriptor has lifecycle expectations tightly coupled with the contract of
Serializable objects and the 2-phase hydration of AddressMap.parse, Graph.resolve.
Decorated accessors are write-once, and then read-only. They are intended to be written in a
constructor such that objects containing them have immutable semantics. In other words, the
descriptor is intended to be used like a type-checked `@property` with possibly lazily resolved
values.
The written value is type-checked against a :class:`TypeConstraint` and can only be one of 3
types:
1. An opaque string address.
2. A Resolvable for the address that, when resolved, will meet the type constraint.
3. A concrete value that meets the type constraint.
The 1st type, an opaque string address, is also the type associated with the 1st stage of the
2-stage lifecycle of Serializable objects containing addressable values. In the second and final
stage, the Serializable object is re-constructed with addressable values of the second or third
types; ie: reconstructed with either resolvables or concrete values in place of the first stage
address.
Two affordances are made in type constraint handling:
1. Either a :class:`TypeConstraint` instance can be given if the type constraint is fully known or
else a type constraint class can be given if the type constraint should apply to the type of
the enclosing class. This is useful for declaring an addressable property in a baseclass that
should be type-constrained based on the type of the derived class.
2. Decorators for addressables (see `addressable`, `addressable_sequence` and `addressable_dict`)
allow wrapping of either class functions - typical - or @property descriptors. The property
descriptor case sets up an idiom for recursive addressables. The idiom looks like:
>>> class Thing(Struct):
... def __init__(self, thing):
... super().__init__()
... self.thing = thing
... @property
... def parent(self):
... '''Return this thing's parent.
...
... :rtype: :class:`Thing`
... '''
...
>>> Thing.parent = addressable(Exactly(Thing))(Thing.parent)
Here the `Thing.parent` property is re-assigned with a type-constrained addressable descriptor
after the class is defined so the class can be referred to in the type constraint.
"""
_descriptors: Set[Tuple[Type, str]] = set()
@classmethod
def is_addressable(cls, obj: Any, key: str) -> bool:
"""Return `True` if the given attribute of `obj` is an addressable attribute.
:param obj: The object to inspect.
:param key: The name of the property on `obj` to check.
"""
return (type(obj), key) in cls._descriptors
@classmethod
def _register(cls, obj, descriptor):
cls._descriptors.add((type(obj), descriptor._name))
def __init__(self, name, type_constraint):
self._name = name
self._type_constraint = type_constraint
def __set__(self, instance, value):
if not Serializable.is_serializable(instance):
raise NotSerializableError(
"The addressable descriptor {} can only be applied to methods or "
"properties of Serializable objects, applied to method {} of "
"type {}".format(type(self).__name__, self._name, type(instance).__name__)
)
instance_dict = instance._asdict()
if self._name in instance_dict:
raise MutationError(
"Attribute {} of {} has already been set to {}, rejecting attempt to "
"re-set with {}".format(self._name, instance, instance_dict[self._name], value)
)
value = self._checked_value(instance, value)
self._register(instance, self)
# We mutate the instance dict, which is only OK if used in the conventional idiom of setting
# the value via this data descriptor in the instance's constructor.
instance_dict[self._name] = value
def __get__(self, instance, unused_owner_type=None):
# We know instance is a Serializable from the type-checking done in __set__.
value = instance._asdict()[self._name]
return self._resolve_value(instance, value)
def _get_type_constraint(self, instance):
if inspect.isclass(self._type_constraint):
return self._type_constraint(type(instance))
else:
return self._type_constraint
def _checked_value(self, instance, value):
# We allow five forms of value:
# 0. None.
# 1. An opaque (to us) address pointing to a value that can be resolved by external
# means.
# 2. A `Resolvable` value that we can lazily resolve and type-check in `__get__`.
# 3. A concrete instance that meets our type constraint.
# 4. A dict when our type constraint has exactly one Serializable subject type - we convert the
# dict into an instance of that type.
if value is None:
return None
if isinstance(value, (str, Address, Resolvable)):
return value
# Support untyped dicts that we deserialize on-demand here into the required type.
# This feature allows for more brevity in the JSON form (local type inference) and an alternate
# construction style in the python forms.
type_constraint = self._get_type_constraint(instance)
if (
isinstance(value, dict)
and len(type_constraint.types) == 1
and Serializable.is_serializable_type(type_constraint.types[0])
):
if not value:
# TODO(John Sirois): Is this the right thing to do? Or should an empty serializable_type
# be constructed?
return None # {} -> None.
else:
serializable_type = type_constraint.types[0]
return serializable_type(**value)
try:
return type_constraint.validate_satisfied_by(value)
except TypeConstraintError as e:
raise AddressableTypeValidationError(
"The value for the {} attribute of {} was invalid".format(self._name, instance), e
)
def _resolve_value(self, instance, value):
if not isinstance(value, Resolvable):
# The value is concrete, which means we type-checked on set, so there is no need to do so again; it's a
# raw address string or an instance that satisfies our type constraint.
return value
else:
resolved_value = value.resolve()
type_constraint = self._get_type_constraint(instance)
try:
return type_constraint.validate_satisfied_by(resolved_value)
except TypeConstraintError as e:
raise AddressableTypeValidationError(
"The value resolved from {} for the {} property of {} was invalid".format(
value.address, self._name, instance
),
e,
)
def _addressable_wrapper(addressable_descriptor, type_constraint):
def wrapper(func):
# We allow for wrapping property objects to support the following idiom for defining recursive
# addressables:
#
# class Thing(Struct):
# def __init__(self, thing):
# super().__init__()
# self.thing = thing
#
# @property
# def parent(self):
# """Return this thing's parent.
#
# :rtype: :class:`Thing`
#       """
#
# Thing.parent = addressable(Exactly(Thing))(Thing.parent)
func = func.fget if isinstance(func, property) else func
addressable_accessor = addressable_descriptor(func.__name__, type_constraint)
return update_wrapper(addressable_accessor, func)
return wrapper
def addressable(type_constraint):
"""Return an addressable attribute for Serializable classes.
The attribute should have no implementation (it will be ignored), but can carry a docstring.
The implementation is provided by this wrapper. Idiomatic use assigns the value, which can
either be an opaque address string or a resolved value that meets the type constraint, in the
constructor::
>>> class Employee(Serializable):
... def __init__(self, person):
... self.person = person
... @addressable(SubclassesOf(Person))
... def person(self):
... '''The person that is this employee.'''
Addressable attributes are only assignable once, so this pattern yields an immutable `Employee`
whose `person` attribute is either a `Person` instance or
:class:`pants.engine.objects.Resolvable` person or else a string address pointing to one.
See :class:`AddressableDescriptor` for more details.
:param type_constraint: The type constraint the value must satisfy.
:type type_constraint: :class:`TypeConstraint`
"""
return _addressable_wrapper(AddressableDescriptor, type_constraint)
class AddressableSequence(AddressableDescriptor):
def _checked_value(self, instance, value):
if value is None:
return None
if not isinstance(value, (list, tuple)):
raise TypeError(
"The {} property of {} must be a tuple or list, given {} of type {}".format(
self._name, instance, value, type(value).__name__
)
)
return [super(AddressableSequence, self)._checked_value(instance, v) for v in value]
def _resolve_value(self, instance, value):
return (
tuple(super(AddressableSequence, self)._resolve_value(instance, v) for v in value)
if value
else ()
)
def addressable_sequence(type_constraint):
"""Marks a sequence's values as satisfying a given type constraint.
Some (or all) elements of the sequence may be :class:`pants.engine.objects.Resolvable` elements
to resolve later.
See :class:`AddressableDescriptor` for more details.
:param type_constraint: The type constraint the list's values must all satisfy.
:type type_constraint: :class:`TypeConstraint`
"""
return _addressable_wrapper(AddressableSequence, type_constraint)
class AddressableDict(AddressableDescriptor):
def _checked_value(self, instance, value):
if value is None:
return None
if not isinstance(value, Mapping):
raise TypeError(
"The {} property of {} must be a dict, given {} of type {}".format(
self._name, instance, value, type(value).__name__
)
)
return {
k: super(AddressableDict, self)._checked_value(instance, v) for k, v in value.items()
}
def _resolve_value(self, instance, value):
return (
{k: super(AddressableDict, self)._resolve_value(instance, v) for k, v in value.items()}
if value
else {}
)
def addressable_dict(type_constraint):
"""Marks a dicts's values as satisfying a given type constraint.
Some (or all) values in the dict may be :class:`pants.engine.objects.Resolvable` values to
resolve later.
See :class:`AddressableDescriptor` for more details.
:param type_constraint: The type constraint the dict's values must all satisfy.
:type type_constraint: :class:`TypeConstraint`
"""
return _addressable_wrapper(AddressableDict, type_constraint)
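# A minimal sketch of an addressable dict field; `PythonTarget` and `Target`
# here are hypothetical stand-ins for real Serializable types:
#
#   class PythonTarget(Serializable):
#       def __init__(self, dependencies_by_name):
#           self.dependencies_by_name = dependencies_by_name
#
#       @addressable_dict(SubclassesOf(Target))
#       def dependencies_by_name(self):
#           '''Maps logical names to targets, addresses or resolvables.'''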
# TODO(John Sirois): Move variants into Address 1st class as part of merging the engine/exp
# into the mainline (if they survive).
# TODO: Variants currently require an explicit name (and thus a `:`) in order to parse correctly.
def strip_variants(address):
"""Return a copy of the given address with the variants (if any) stripped from the name.
:rtype: :class:`pants.build_graph.address.Address`
"""
address, _ = parse_variants(address)
return address
def _extract_variants(address, variants_str):
"""Return the variants (if any) represented by the given variants_str.
:returns: The variants or else `None` if there are none.
:rtype: tuple of tuples (key, value) strings
"""
def entries():
for entry in variants_str.split(","):
key, _, value = entry.partition("=")
if not key or not value:
raise ValueError("Invalid variants after the @ in: {}".format(address))
yield (key, value)
return tuple(entries())
def parse_variants(address):
target_name, at_sign, variants_str = address.target_name.partition("@")
if not at_sign:
return address, None
variants = _extract_variants(address, variants_str) if variants_str else None
if isinstance(address, BuildFileAddress):
normalized_address = BuildFileAddress(rel_path=address.rel_path, target_name=target_name)
else:
normalized_address = Address(spec_path=address.spec_path, target_name=target_name)
return normalized_address, variants
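# Illustrative behaviour of the variant helpers above (the address values are
# hypothetical); a target name like 'bin@python=2.7' splits into the bare name
# and a tuple of (key, value) pairs:
#
#   >>> address = Address(spec_path='src/example', target_name='bin@python=2.7')
#   >>> normalized, variants = parse_variants(address)
#   >>> normalized.target_name
#   'bin'
#   >>> variants
#   (('python', '2.7'),)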
| |
# ext/horizontal_shard.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Horizontal sharding support.
Defines a rudimentary 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
from .. import event
from .. import exc
from .. import inspect
from .. import util
from ..orm.query import Query
from ..orm.session import Session
__all__ = ["ShardedSession", "ShardedQuery"]
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
super(ShardedQuery, self).__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self.execute_chooser = self.session.execute_chooser
self._shard_id = None
def set_shard(self, shard_id):
"""Return a new query, limited to a single shard ID.
All subsequent operations with the returned query will
be against the single shard regardless of other state.
The shard_id can be passed for a 2.0 style execution to the
bind_arguments dictionary of :meth:`.Session.execute`::
results = session.execute(
stmt,
bind_arguments={"shard_id": "my_shard"}
)
"""
return self.execution_options(_sa_shard_id=shard_id)
class ShardedSession(Session):
def __init__(
self,
shard_chooser,
id_chooser,
execute_chooser=None,
shards=None,
query_cls=ShardedQuery,
**kwargs,
):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped
instance, and possibly a SQL clause, returns a shard ID. This ID
may be based on the attributes present within the object, or on
some round-robin scheme. If the scheme is based on a selection, it
should set whatever state is needed on the instance to mark it as
participating in that shard in the future.
:param id_chooser: A callable, passed a query and a tuple of identity
values, which should return a list of shard ids where the ID might
reside. The databases will be queried in the order of this listing.
:param execute_chooser: For a given :class:`.ORMExecuteState`,
returns the list of shard_ids
where the query should be issued. Results from all shards returned
will be combined together into a single listing.
.. versionchanged:: 1.4 The ``execute_chooser`` parameter
supersedes the ``query_chooser`` parameter.
:param shards: A dictionary of string shard names
to :class:`~sqlalchemy.engine.Engine` objects.
"""
query_chooser = kwargs.pop("query_chooser", None)
super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
event.listen(
self, "do_orm_execute", execute_and_instances, retval=True
)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
if query_chooser:
util.warn_deprecated(
"The ``query_choser`` parameter is deprecated; "
"please use ``execute_chooser``.",
"1.4",
)
if execute_chooser:
raise exc.ArgumentError(
"Can't pass query_chooser and execute_chooser "
"at the same time."
)
def execute_chooser(orm_context):
return query_chooser(orm_context.statement)
self.execute_chooser = execute_chooser
else:
self.execute_chooser = execute_chooser
self.query_chooser = query_chooser
self.__binds = {}
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def _identity_lookup(
self,
mapper,
primary_key_identity,
identity_token=None,
lazy_loaded_from=None,
**kw,
):
"""override the default :meth:`.Session._identity_lookup` method so
that we search for a given non-token primary key identity across all
possible identity tokens (e.g. shard ids).
.. versionchanged:: 1.4 Moved :meth:`.Session._identity_lookup` from
the :class:`_query.Query` object to the :class:`.Session`.
"""
if identity_token is not None:
return super(ShardedSession, self)._identity_lookup(
mapper,
primary_key_identity,
identity_token=identity_token,
**kw,
)
else:
q = self.query(mapper)
if lazy_loaded_from:
q = q._set_lazyload_from(lazy_loaded_from)
for shard_id in self.id_chooser(q, primary_key_identity):
obj = super(ShardedSession, self)._identity_lookup(
mapper,
primary_key_identity,
identity_token=shard_id,
lazy_loaded_from=lazy_loaded_from,
**kw,
)
if obj is not None:
return obj
return None
def _choose_shard_and_assign(self, mapper, instance, **kw):
if instance is not None:
state = inspect(instance)
if state.key:
token = state.key[2]
assert token is not None
return token
elif state.identity_token:
return state.identity_token
shard_id = self.shard_chooser(mapper, instance, **kw)
if instance is not None:
state.identity_token = shard_id
return shard_id
def connection_callable(
self, mapper=None, instance=None, shard_id=None, **kwargs
):
"""Provide a :class:`_engine.Connection` to use in the unit of work
flush process.
"""
if shard_id is None:
shard_id = self._choose_shard_and_assign(mapper, instance)
if self.in_transaction():
return self.get_transaction().connection(mapper, shard_id=shard_id)
else:
return self.get_bind(
mapper, shard_id=shard_id, instance=instance
).connect(**kwargs)
def get_bind(
self, mapper=None, shard_id=None, instance=None, clause=None, **kw
):
if shard_id is None:
shard_id = self._choose_shard_and_assign(
mapper, instance, clause=clause
)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
def execute_and_instances(orm_context):
if orm_context.is_select:
load_options = active_options = orm_context.load_options
update_options = None
elif orm_context.is_update or orm_context.is_delete:
load_options = None
update_options = active_options = orm_context.update_delete_options
else:
load_options = update_options = active_options = None
session = orm_context.session
def iter_for_shard(shard_id, load_options, update_options):
execution_options = dict(orm_context.local_execution_options)
bind_arguments = dict(orm_context.bind_arguments)
bind_arguments["shard_id"] = shard_id
if orm_context.is_select:
load_options += {"_refresh_identity_token": shard_id}
execution_options["_sa_orm_load_options"] = load_options
elif orm_context.is_update or orm_context.is_delete:
update_options += {"_refresh_identity_token": shard_id}
execution_options["_sa_orm_update_options"] = update_options
return orm_context.invoke_statement(
bind_arguments=bind_arguments, execution_options=execution_options
)
if active_options and active_options._refresh_identity_token is not None:
shard_id = active_options._refresh_identity_token
elif "_sa_shard_id" in orm_context.execution_options:
shard_id = orm_context.execution_options["_sa_shard_id"]
elif "shard_id" in orm_context.bind_arguments:
shard_id = orm_context.bind_arguments["shard_id"]
else:
shard_id = None
if shard_id is not None:
return iter_for_shard(shard_id, load_options, update_options)
else:
partial = []
for shard_id in session.execute_chooser(orm_context):
result_ = iter_for_shard(shard_id, load_options, update_options)
partial.append(result_)
return partial[0].merge(*partial[1:])
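# A minimal configuration sketch, assuming two SQLite engines; the URLs and
# callables below are illustrative, not part of this module. Each callable
# maps ORM state to one or more shard ids, which ShardedSession resolves to
# engines via the ``shards`` dictionary:
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.ext.horizontal_shard import ShardedSession
#
#   shards = {
#       "north": create_engine("sqlite:///north.db"),
#       "south": create_engine("sqlite:///south.db"),
#   }
#
#   def shard_chooser(mapper, instance, clause=None):
#       # Route writes by an attribute on the instance (hypothetical schema).
#       return "north" if instance.latitude >= 0 else "south"
#
#   def id_chooser(query, ident):
#       # A primary key could live on any shard; search them all.
#       return list(shards)
#
#   def execute_chooser(orm_context):
#       # Broadcast arbitrary statements to every shard.
#       return list(shards)
#
#   session = ShardedSession(
#       shard_chooser=shard_chooser,
#       id_chooser=id_chooser,
#       execute_chooser=execute_chooser,
#       shards=shards,
#   )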
| |
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
CONF = cfg.CONF
HNAS_RESULT1 = "\n\
FS ID FS Label FS Permanent ID EVS ID EVS Label\n\
----- ----------- ------------------ ------ ---------\n\
1026 gold 0xaadee0e035cfc0b7 1 EVSTest1\n\
1025 fs01-husvm 0xaada5dff78668800 1 EVSTest1\n\
1027 large-files 0xaadee0ef012a0d54 1 EVSTest1\n\
1028 platinun 0xaadee1ea49d1a32c 1 EVSTest1\n\
1029 test_hdp 0xaadee09634acfcac 1 EVSTest1\n\
1030 cinder1 0xaadfcf742fba644e 1 EVSTest1\n\
1031 cinder2 0xaadfcf7e0769a6bc 1 EVSTest1\n\
1024 fs02-husvm 0xaac8715e2e9406cd 2 EVSTest2\n\
\n"
HNAS_RESULT2 = "cluster MAC: 83-68-96-AA-DA-5D"
HNAS_RESULT3 = "\n\
Model: HNAS 4040 \n\
Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\
Hardware: NAS Platform (M2SEKW1339109) \n\
board MMB1 \n\
mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\
board MFB1 \n\
mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \
RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \
WD v00E2 DI v001A FC v0002 \n\
Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\
board MCP \n\
Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\
\n"
HNAS_RESULT4 = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\
admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\
\n"
HNAS_RESULT5 = "\n\
ID Label EVS Size Used Snapshots Deduped\
Avail Thin ThinSize ThinAvail \
FS Type \n\
---- ----------- --- ------- ------------- --------- -------\
- ------------- ---- -------- --------- ---------------------\
------------- \n\
1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA \
228 GB (91%) No 32 KB,\
WFS-2,128 DSBs\n\
1026 gold          1 19.9 GB 2.30 GB (12%) NA       0 B (0%)\
17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
dedupe enabled\n\
1027 large-files 1 19.8 GB 2.43 GB (12%) 0 B (0%) NA \
17.3 GB (88%) No 32 KB,\
WFS-2,128 DSBs\n\
1028 platinun 1 19.9 GB 2.30 GB (12%) NA 0 B (0%)\
17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
dedupe enabled\n\
1029 silver 1 19.9 GB 3.19 GB (16%) 0 B (0%) NA \
6.7 GB (84%) No 4 KB,\
WFS-2,128 DSBs\n\
1030 cinder1 1 40.8 GB 2.24 GB (5%) 0 B (0%) NA \
38.5 GB (95%) No 4 KB,\
WFS-2,128 DSBs\n\
1031 cinder2 1 39.8 GB 2.23 GB (6%) 0 B (0%) NA \
37.6 GB (94%) No 4 KB,\
WFS-2,128 DSBs\n\
1024 fs02-husvm 2 49.8 GB 3.54 GB (7%) 0 B (0%) NA \
46.2 GB (93%) No 32 KB,\
WFS-2,128 DSBs\n\
1032 test 2 3.97 GB 2.12 GB (53%) 0 B (0%) NA \
1.85 GB (47%) No 4 KB,\
WFS-2,128 DSBs\n\
1058 huge_FS 7 1.50 TB Not determined\n\
1053 fs-unmounted 4 108 GB Not mounted \
NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
WFS-2,128 DSBs,dedupe enabled\n\
\n"
HNAS_RESULT6 = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
HNAS_RESULT7 = "\n\
Export configuration: \n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: test_hdp \n\
File system size: 250 GB \n\
File system free space: 228 GB \n\
File system state: \n\
formatted = Yes \n\
mounted = Yes \n\
failed = No \n\
thin provisioned = No \n\
Access snapshots: Yes \n\
Display snapshots: Yes \n\
Read Caching: Disabled \n\
Disaster recovery setting: \n\
Recovered = No \n\
Transfer setting = Use file system default \n\
\n"
HNAS_RESULT8 = "Logical unit creation started at 2014-12-24 00:38:30+00:00."
HNAS_RESULT9 = "Logical unit deleted successfully."
HNAS_RESULT10 = ""
HNAS_RESULT11 = "Logical unit expansion started at 2014-12-24 01:25:03+00:00."
HNAS_RESULT12 = "\n\
Alias : test_iqn \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\
Comment : \n\
Secret : test_secret \n\
Authentication : Enabled \n\
Logical units : No logical units. \n\
\n"
HNAS_RESULT13 = "Logical unit added successfully."
HNAS_RESULT14 = "Logical unit removed successfully."
HNAS_RESULT15 = "Target created successfully."
HNAS_RESULT16 = ""
HNAS_RESULT17 = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 2 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
\n"
HNAS_RESULT18 = "Version: 11.1.3225.01\n\
Directory: /u/u60/_Eng_Axalon_SMU/OfficialBuilds/fish/angel/3225.01/main/bin/\
x86_64_linux-bart_libc-2.7_release\n\
Date: Feb 22 2013, 04:10:09\n\
\n"
HNAS_RESULT19 = " ID Label Size Used Snapshots \
Deduped Avail Thin ThinSize ThinAvail FS Type\n\
---- ------------- ------- ------------- --------- ------- -------------\
---- -------- --------- -------------------\n\
1025 fs01-husvm 250 GB 47.1 GB (19%) 0 B (0%) NA 203 GB (81%)\
No 4 KB,WFS-2,128 DSBs\n\
1047 manage_test02 19.9 GB 9.29 GB (47%) 0 B (0%) NA 10.6 GB (53%)\
No 4 KB,WFS-2,128 DSBs\n\
1058 huge_FS 7 1.50 TB Not determined\n\
1053 fs-unmounted 4 108 GB Not mounted \
NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
WFS-2,128 DSBs,dedupe enabled\n\
\n"
HNAS_RESULT20 = "\n\
Alias : test_iqn \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\
Comment : \n\
Secret : \n\
Authentication : Enabled \n\
Logical units : No logical units. \n\
\n"
HNAS_RESULT20 = "Target does not exist."
HNAS_RESULT21 = "Target created successfully."
HNAS_RESULT22 = "Failed to establish SSC connection"
HNAS_RESULT23 = "\n\
Alias : cinder-Gold\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-gold\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-GoldIsh\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-default\n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\
Comment :\n\
Secret : pxr6U37LZZJBoMc\n\
Authentication : Disabled\n\
Logical units : Logical units :\n\
\n\
LUN Logical Unit\n\
---- --------------------------------\n\
0 volume-8ddd1a54-9daf-4fa5-842...\n\
1 volume-99da7ae7-1e7f-4d57-8bf...\n\
\n\
Access configuration :\n\
"
HNAS_RESULT24 = "Logical unit modified successfully."
HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)."
HNAS_RESULT26 = "Name : volume-test \n\
Comment: \n\
Path : /.cinder/volume-test.iscsi \n\
Size : 2 GB \n\
File System : fs1 \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
HNAS_RESULT27 = "Connection reset"
HNAS_CMDS = {
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'):
["%s" % HNAS_RESULT1, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac',):
["%s" % HNAS_RESULT2, ""],
('ssh', '-version',): ["%s" % HNAS_RESULT18, ""],
('ssh', '-u', 'supervisor', '-p', 'supervisor', '0.0.0.0', 'ver',):
["%s" % HNAS_RESULT3, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'ver',):
["%s" % HNAS_RESULT3, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-l'):
["%s" % HNAS_RESULT4, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a'):
["%s" % HNAS_RESULT5, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-f', 'test_hdp'):
["%s" % HNAS_RESULT6, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'for-each-evs', '-q',
'nfs-export', 'list'):
["%s" % HNAS_RESULT7, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-lu', 'add', '-e', 'test_name',
'test_hdp', '/.cinder/test_name.iscsi',
'1M'):
["%s" % HNAS_RESULT8, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-lu', 'del', '-d', '-f',
'test_lun'):
["%s" % HNAS_RESULT9, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'file-clone-create', '-f', 'fs01-husvm',
'/.cinder/test_lu.iscsi', 'cloned_lu'):
["%s" % HNAS_RESULT10, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-lu', 'expand', 'expanded_lu',
'1M'):
["%s" % HNAS_RESULT11, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'list', 'test_iqn'):
["%s" % HNAS_RESULT12, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'test_iqn',
'test_lun', '0'):
["%s" % HNAS_RESULT13, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'dellu', 'test_iqn',
0):
["%s" % HNAS_RESULT14, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'add', 'myTarget',
'secret'):
["%s" % HNAS_RESULT15, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'mod', '-s',
'test_secret', '-a', 'enable', 'test_iqn'): ["%s" % HNAS_RESULT15, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-lu', 'clone', '-e', 'test_lu',
'test_clone',
'/.cinder/test_clone.iscsi'):
["%s" % HNAS_RESULT16, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-e', '1'):
["%s" % HNAS_RESULT17, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor',
'console-context', '--evs', '1', 'iscsi-target', 'list'):
["%s" % HNAS_RESULT23, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
'1', 'iscsi-target', 'addlu', 'cinder-default',
'volume-8ddd1a54-0000-0000-0000', '2'):
["%s" % HNAS_RESULT13, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
'1', 'selectfs', 'fs01-husvm'):
["%s" % HNAS_RESULT25, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
'1', 'iscsi-lu', 'list', 'test_lun'):
["%s" % HNAS_RESULT26, ""],
('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
'1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'):
["%s" % HNAS_RESULT24, ""]
}
DRV_CONF = {'ssh_enabled': 'True',
'mgmt_ip0': '0.0.0.0',
'cluster_admin_ip0': None,
'ssh_port': '22',
'ssh_private_key': 'test_key',
'username': 'supervisor',
'password': 'supervisor'}
UTILS_EXEC_OUT = ["output: test_cmd", ""]
def m_run_cmd(*args, **kargs):
return HNAS_CMDS.get(args)
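# Illustrative lookup against the canned command table above: the mock returns
# the [stdout, stderr] pair registered for an exact argument tuple, or None
# for a command that was never registered.
#
#   >>> m_run_cmd('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac')
#   ['cluster MAC: 83-68-96-AA-DA-5D', '']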
class HDSHNASBendTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(HDSHNASBendTest, self).__init__(*args, **kwargs)
@mock.patch.object(nfs, 'factory_bend')
def setUp(self, m_factory_bend):
super(HDSHNASBendTest, self).setUp()
self.hnas_bend = hnas_backend.HnasBackend(DRV_CONF)
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.isfile', return_value=True)
@mock.patch('paramiko.RSAKey.from_private_key_file')
@mock.patch('paramiko.SSHClient')
@mock.patch.object(putils, 'ssh_execute',
return_value=(HNAS_RESULT5, ''))
@mock.patch.object(utils, 'execute')
@mock.patch.object(time, 'sleep')
def test_run_cmd(self, m_sleep, m_utl, m_ssh, m_ssh_cli, m_pvt_key,
m_file, m_open):
self.flags(ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts',
state_path='/var/lib/cinder')
# Test main flow
self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
out, err = self.hnas_bend.run_cmd('ssh', '0.0.0.0',
'supervisor', 'supervisor',
'df', '-a')
self.assertIn('fs01-husvm', out)
self.assertIn('WFS-2,128 DSBs', out)
# Test exception throwing when not using SSH
m_utl.side_effect = putils.ProcessExecutionError(stdout='',
stderr=HNAS_RESULT22,
exit_code=255)
self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
'ssh', '0.0.0.0', 'supervisor', 'supervisor',
'df', '-a')
m_utl.side_effect = putils.ProcessExecutionError(stdout='',
stderr=HNAS_RESULT27,
exit_code=255)
self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
'ssh', '0.0.0.0', 'supervisor', 'supervisor',
'df', '-a')
# Test exception throwing when using SSH
m_ssh.side_effect = putils.ProcessExecutionError(stdout='',
stderr=HNAS_RESULT22,
exit_code=255)
self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
'ssh', '0.0.0.0', 'supervisor', 'supervisor',
'df', '-a')
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
@mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT)
def test_get_version(self, m_cmd, m_exec):
out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
"supervisor")
self.assertIn('11.2.3319.14', out)
self.assertIn('83-68-96-AA-DA-5D', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_get_version_ssh_cluster(self, m_cmd):
self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
self.hnas_bend.drv_configs['cluster_admin_ip0'] = '1.1.1.1'
out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
"supervisor")
self.assertIn('11.2.3319.14', out)
self.assertIn('83-68-96-AA-DA-5D', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
@mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT)
def test_get_version_ssh_disable(self, m_cmd, m_exec):
self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
"supervisor")
self.assertIn('11.2.3319.14', out)
self.assertIn('83-68-96-AA-DA-5D', out)
self.assertIn('Utility_version', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_get_iscsi_info(self, m_execute):
out = self.hnas_bend.get_iscsi_info("ssh", "0.0.0.0", "supervisor",
"supervisor")
self.assertIn('172.24.44.20', out)
self.assertIn('172.24.44.21', out)
self.assertIn('10.0.0.20', out)
self.assertEqual(4, len(out.split('\n')))
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
def test_get_hdp_info(self, m_run_cmd):
# tests when there are two or more EVSs
m_run_cmd.return_value = (HNAS_RESULT5, "")
out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
"supervisor")
self.assertEqual(10, len(out.split('\n')))
self.assertIn('gold', out)
self.assertIn('silver', out)
line1 = out.split('\n')[0]
self.assertEqual(12, len(line1.split()))
# test when there is only one EVS
m_run_cmd.return_value = (HNAS_RESULT19, "")
out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
"supervisor")
self.assertEqual(3, len(out.split('\n')))
self.assertIn('fs01-husvm', out)
self.assertIn('manage_test02', out)
line1 = out.split('\n')[0]
self.assertEqual(12, len(line1.split()))
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_get_nfs_info(self, m_run_cmd):
out = self.hnas_bend.get_nfs_info("ssh", "0.0.0.0", "supervisor",
"supervisor")
self.assertEqual(2, len(out.split('\n')))
self.assertIn('/export01-husvm', out)
self.assertIn('172.24.44.20', out)
self.assertIn('10.0.0.20', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_create_lu(self, m_cmd):
out = self.hnas_bend.create_lu("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_hdp", "1",
"test_name")
self.assertIn('successfully created', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_delete_lu(self, m_cmd):
out = self.hnas_bend.delete_lu("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_hdp", "test_lun")
self.assertIn('deleted successfully', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_create_dup(self, m_cmd):
out = self.hnas_bend.create_dup("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_lu", "test_hdp",
"1", "test_clone")
self.assertIn('successfully created', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_file_clone(self, m_cmd):
out = self.hnas_bend.file_clone("ssh", "0.0.0.0", "supervisor",
"supervisor", "fs01-husvm",
"/.cinder/test_lu.iscsi", "cloned_lu")
self.assertIn('LUN cloned_lu HDP', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_extend_vol(self, m_cmd):
out = self.hnas_bend.extend_vol("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_hdp", "test_lun",
"1", "expanded_lu")
self.assertIn('successfully extended', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_add_iscsi_conn(self, m_cmd):
out = self.hnas_bend.add_iscsi_conn("ssh", "0.0.0.0", "supervisor",
"supervisor",
"volume-8ddd1a54-0000-0000-0000",
"test_hdp", "test_port",
"cinder-default", "test_init")
self.assertIn('successfully paired', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_del_iscsi_conn(self, m_cmd):
out = self.hnas_bend.del_iscsi_conn("ssh", "0.0.0.0", "supervisor",
"supervisor", "1", "test_iqn", 0)
self.assertIn('already deleted', out)
@mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=0)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
def test_get_targetiqn(self, m_cmd, m_get_evs):
m_cmd.side_effect = [[HNAS_RESULT12, '']]
out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn",
"test_hdp", "test_secret")
self.assertEqual('test_iqn', out)
m_cmd.side_effect = [[HNAS_RESULT28, ''], [HNAS_RESULT21, '']]
out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn2",
"test_hdp", "test_secret")
self.assertEqual('test_iqn2', out)
m_cmd.side_effect = [[HNAS_RESULT28, ''], [HNAS_RESULT21, '']]
out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn3",
"test_hdp", "")
self.assertEqual('test_iqn3', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
side_effect=m_run_cmd)
def test_set_targetsecret(self, m_execute):
self.hnas_bend.set_targetsecret("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn",
"test_hdp", "test_secret")
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
def test_get_targetsecret(self, m_run_cmd):
# test when target has secret
m_run_cmd.return_value = (HNAS_RESULT12, "")
out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn",
"test_hdp")
self.assertEqual('test_secret', out)
# test when the target doesn't have a secret
m_run_cmd.return_value = (HNAS_RESULT20, "")
out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
"supervisor", "test_iqn",
"test_hdp")
self.assertEqual('', out)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
def test_get_targets(self, m_run_cmd):
# Test normal behaviour
m_run_cmd.return_value = (HNAS_RESULT23, "")
tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
"supervisor", 1)
self.assertEqual(3, len(tgt_list))
self.assertEqual(2, len(tgt_list[2]['luns']))
# Test calling with parameter
tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
"supervisor", 1,
'cinder-default')
self.assertEqual(1, len(tgt_list))
self.assertEqual(2, len(tgt_list[0]['luns']))
# Test error in BE command
m_run_cmd.side_effect = putils.ProcessExecutionError
tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
"supervisor", 1)
self.assertEqual(0, len(tgt_list))
@mock.patch.object(hnas_backend.HnasBackend,
'run_cmd', side_effect=m_run_cmd)
def test_check_targets(self, m_run_cmd):
result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
"supervisor",
"supervisor", "test_hdp",
"cinder-default")
self.assertTrue(result)
self.assertEqual('cinder-default', tgt['alias'])
result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
"supervisor",
"supervisor", "test_hdp",
"cinder-no-target")
self.assertFalse(result)
self.assertIsNone(tgt)
@mock.patch.object(hnas_backend.HnasBackend,
'run_cmd', side_effect=m_run_cmd)
def test_check_lu(self, m_run_cmd):
ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
"supervisor",
"volume-8ddd1a54-9daf-4fa5-842",
"test_hdp")
result, lunid, tgt = ret
self.assertTrue(result)
self.assertEqual('0', lunid)
ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
"supervisor",
"volume-8ddd1a54-0000-0000-000",
"test_hdp")
result, lunid, tgt = ret
self.assertFalse(result)
@mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
return_value=(HNAS_RESULT26, ""))
def test_get_existing_lu_info(self, m_run_cmd, m_get_evs):
out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0",
"supervisor",
"supervisor", "fs01-husvm",
"test_lun")
m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
'supervisor', 'fs01-husvm')
m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
'supervisor', 'console-context',
'--evs', 1, 'iscsi-lu', 'list',
'test_lun')
self.assertEqual(HNAS_RESULT26, out)
@mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
@mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
return_value=(HNAS_RESULT24, ""))
def test_rename_existing_lu(self, m_run_cmd, m_get_evs):
out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0",
"supervisor",
"supervisor", "fs01-husvm",
"vol_test",
"new_vol_test")
m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
'supervisor', 'fs01-husvm')
m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
'supervisor', 'console-context',
'--evs', 1, 'iscsi-lu', 'mod',
'-n', 'vol_test', 'new_vol_test')
self.assertEqual(HNAS_RESULT24, out)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import carrier_constant
from google.ads.googleads.v8.services.types import carrier_constant_service
from .transports.base import (
CarrierConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CarrierConstantServiceGrpcTransport
class CarrierConstantServiceClientMeta(type):
"""Metaclass for the CarrierConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CarrierConstantServiceTransport]]
_transport_registry["grpc"] = CarrierConstantServiceGrpcTransport
def get_transport_class(
cls, label: Optional[str] = None,
) -> Type[CarrierConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CarrierConstantServiceClient(metaclass=CarrierConstantServiceClientMeta):
"""Service to fetch carrier constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CarrierConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
CarrierConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def carrier_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified carrier_constant string."""
return "carrierConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_carrier_constant_path(path: str) -> Dict[str, str]:
"""Parse a carrier_constant path into its component segments."""
m = re.match(r"^carrierConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CarrierConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the carrier constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CarrierConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CarrierConstantServiceTransport):
# transport is a CarrierConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CarrierConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_carrier_constant(
self,
request: carrier_constant_service.GetCarrierConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> carrier_constant.CarrierConstant:
r"""Returns the requested carrier constant in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetCarrierConstantRequest`):
The request object. Request message for
[CarrierConstantService.GetCarrierConstant][google.ads.googleads.v8.services.CarrierConstantService.GetCarrierConstant].
resource_name (:class:`str`):
Required. Resource name of the
carrier constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.CarrierConstant:
A carrier criterion that can be used
in campaign targeting.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a carrier_constant_service.GetCarrierConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, carrier_constant_service.GetCarrierConstantRequest
):
request = carrier_constant_service.GetCarrierConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_carrier_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("CarrierConstantServiceClient",)
| |
from __future__ import division
import math
import numpy
from chainer.backends import cuda
from chainer import optimizer
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.alpha = 0.001
_default_hyperparam.beta1 = 0.9
_default_hyperparam.beta2 = 0.999
_default_hyperparam.eps = 1e-8
_default_hyperparam.eta = 1.0
_default_hyperparam.weight_decay_rate = 0
_default_hyperparam.amsgrad = False
def _learning_rate(hp, t):
if t == 0:
raise RuntimeError(
'Can\'t determine the learning rate of Adam optimizer '
'because the update steps have not been started.')
fix1 = 1. - math.pow(hp.beta1, t)
fix2 = 1. - math.pow(hp.beta2, t)
return hp.alpha * math.sqrt(fix2) / fix1
class AdamRule(optimizer.UpdateRule):
"""Update rule of Adam optimization algorithm.
See: `Adam: A Method for Stochastic Optimization \
<https://arxiv.org/abs/1412.6980v8>`_
Modified for proper weight decay.
See: `Fixing Weight Decay Regularization in Adam \
<https://openreview.net/forum?id=rk6qdGgCZ>`_
With option to use AMSGrad variant of Adam.
See: `On the Convergence of Adam and Beyond \
<https://openreview.net/forum?id=ryQu7f-RZ>`_
See :class:`~chainer.optimizers.Adam` for the default values
of the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
amsgrad (bool): Whether to use the AMSGrad variant of Adam.
"""
def __init__(self, parent_hyperparam=None,
alpha=None, beta1=None, beta2=None, eps=None,
eta=None, weight_decay_rate=None, amsgrad=None):
super(AdamRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if alpha is not None:
self.hyperparam.alpha = alpha
if beta1 is not None:
self.hyperparam.beta1 = beta1
if beta2 is not None:
self.hyperparam.beta2 = beta2
if eps is not None:
self.hyperparam.eps = eps
if eta is not None:
self.hyperparam.eta = eta
if weight_decay_rate is not None:
self.hyperparam.weight_decay_rate = weight_decay_rate
if amsgrad is not None:
self.hyperparam.amsgrad = amsgrad
def init_state(self, param):
xp = cuda.get_array_module(param.data)
with cuda.get_device_from_array(param.data):
self.state['m'] = xp.zeros_like(param.data)
self.state['v'] = xp.zeros_like(param.data)
if self.hyperparam.amsgrad:
self.state['vhat'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of Adam optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
m, v = self.state['m'], self.state['v']
m += (1 - hp.beta1) * (grad - m)
v += (1 - hp.beta2) * (grad * grad - v)
if hp.amsgrad:
vhat = self.state['vhat']
numpy.maximum(vhat, v, out=vhat)
else:
vhat = v
param.data -= hp.eta * (self.lr * m / (numpy.sqrt(vhat) + hp.eps) +
hp.weight_decay_rate * param.data)
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of Adam optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
if hp.amsgrad:
cuda.elementwise(
'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps, \
T eta, T weight_decay_rate',
'T param, T m, T v, T vhat',
'''m += one_minus_beta1 * (grad - m);
v += one_minus_beta2 * (grad * grad - v);
vhat = max(vhat, v);
param -= eta * (lr * m / (sqrt(vhat) + eps) +
weight_decay_rate * param);''',
'adam')(grad, self.lr, 1 - hp.beta1,
1 - hp.beta2, hp.eps,
hp.eta, hp.weight_decay_rate,
param.data, self.state['m'], self.state['v'],
self.state['vhat'])
else:
cuda.elementwise(
'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps, \
T eta, T weight_decay_rate',
'T param, T m, T v',
'''m += one_minus_beta1 * (grad - m);
v += one_minus_beta2 * (grad * grad - v);
param -= eta * (lr * m / (sqrt(v) + eps) +
weight_decay_rate * param);''',
'adam')(grad, self.lr, 1 - hp.beta1,
1 - hp.beta2, hp.eps,
hp.eta, hp.weight_decay_rate,
param.data, self.state['m'], self.state['v'])
@property
def lr(self):
return _learning_rate(self.hyperparam, self.t)
class Adam(optimizer.GradientMethod):
"""Adam optimizer.
See: `Adam: A Method for Stochastic Optimization \
<https://arxiv.org/abs/1412.6980v8>`_
Modified for proper weight decay (also called AdamW).
AdamW introduces the additional parameters ``eta``
and ``weight_decay_rate``, which can be used to properly scale the
learning rate, and decouple the weight decay rate from ``alpha``,
as shown in the below paper.
Note that with the default values ``eta = 1`` and
``weight_decay_rate = 0``, this implementation is identical to
the standard Adam method.
See: `Fixing Weight Decay Regularization in Adam \
<https://openreview.net/forum?id=rk6qdGgCZ>`_
The ``amsgrad`` flag enables the AMSGrad variant of Adam from
the paper: `On the Convergence of Adam and Beyond \
<https://openreview.net/forum?id=ryQu7f-RZ>`_
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first-order moment.
beta2 (float): Exponential decay rate of the second-order moment.
eps (float): Small value for numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
amsgrad (bool): Whether to use the AMSGrad variant of Adam.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta,
weight_decay_rate=_default_hyperparam.weight_decay_rate,
amsgrad=_default_hyperparam.amsgrad):
super(Adam, self).__init__()
self.hyperparam.alpha = alpha
self.hyperparam.beta1 = beta1
self.hyperparam.beta2 = beta2
self.hyperparam.eps = eps
self.hyperparam.eta = eta
self.hyperparam.weight_decay_rate = weight_decay_rate
self.hyperparam.amsgrad = amsgrad
alpha = optimizer.HyperparameterProxy('alpha')
beta1 = optimizer.HyperparameterProxy('beta1')
beta2 = optimizer.HyperparameterProxy('beta2')
eps = optimizer.HyperparameterProxy('eps')
eta = optimizer.HyperparameterProxy('eta')
weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')
amsgrad = optimizer.HyperparameterProxy('amsgrad')
def create_update_rule(self):
return AdamRule(self.hyperparam)
@property
def lr(self):
return _learning_rate(self.hyperparam, self.t)
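# A minimal usage sketch (hypothetical; assumes a Chainer Link ``model`` and
# the standard optimizer protocol from this package):
#
#     optimizer = Adam(alpha=1e-3, weight_decay_rate=1e-2, amsgrad=True)
#     optimizer.setup(model)              # attaches an AdamRule per parameter
#     optimizer.update(loss_fun, *args)   # one decoupled-weight-decay step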
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
import os
from warnings import warn
from .version import version as __version__
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then the import is within a
# source dir. .astropy-root is a file distributed with the source, but it
# should not be installed.
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'https://docs.astropy.org/en/latest/'
else:
online_docs_root = f'https://docs.astropy.org/en/{__version__}/'
from . import config as _config # noqa: E402
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
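# A short usage sketch (assumes a regular astropy install): config items can
# be read as attributes and overridden temporarily via set_temp:
#
#     from astropy import conf
#     print(conf.max_lines)              # None -> determined from terminal
#     with conf.set_temp('use_color', False):
#         ...                            # ANSI color disabled in this block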
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState # noqa: E402
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below
"""
_value = 'test'
_versions = dict(test='test')
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f'Must be one of {list(cls._versions.keys())}')
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if 'astropy.units' in sys.modules:
raise RuntimeError('astropy.units is already imported')
if 'astropy.constants' in sys.modules:
raise RuntimeError('astropy.constants is already imported')
return super().set(value)
class physical_constants(base_constants_version):
"""
The version of physical constants to use
"""
# Maintainers: update when new constants are added
_value = 'codata2018'
_versions = dict(codata2018='codata2018', codata2014='codata2014',
codata2010='codata2010', astropyconst40='codata2018',
astropyconst20='codata2014', astropyconst13='codata2010')
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use
"""
# Maintainers: update when new constants are added
_value = 'iau2015'
_versions = dict(iau2015='iau2015', iau2012='iau2012',
astropyconst40='iau2015', astropyconst20='iau2015',
astropyconst13='iau2012')
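# Hypothetical usage sketch: a constants version must be selected before
# astropy.units or astropy.constants is imported, otherwise set() raises
# RuntimeError (see base_constants_version.set above):
#
#     import astropy
#     astropy.physical_constants.set('codata2014')
#     import astropy.constants   # now resolved against CODATA 2014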
# Create the test() function
from .tests.runner import TestRunner # noqa: E402
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
try:
from .utils import _compiler # noqa: F401
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built ')
else:
# Outright broken installation, just raise standard error
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file, 'r') as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0:
return ''
bibtexreference = f'@ARTICLE{refs[0]}'
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log # noqa: E402, F401
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page # noqa: E402, F401
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = f"https://docs.astropy.org/en/{version}/search.html?{urlencode({'q': query})}"
webbrowser.open(url)
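# For example, online_help('coordinates') opens
# https://docs.astropy.org/en/<version>/search.html?q=coordinates in the
# default browser (illustrative query; <version> follows the logic above).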
__dir_inc__ = ['__version__', '__githash__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf', 'physical_constants',
'astronomical_constants']
from types import ModuleType as __module_type__ # noqa: E402
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
# The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from copy import deepcopy
from decimal import Decimal
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.script_util import (
DUMMY_P2WPKH_SCRIPT,
DUMMY_2_P2WPKH_SCRIPT,
)
from test_framework.wallet import MiniWallet
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
MAX_REPLACEMENT_LIMIT = 100
class ReplaceByFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-maxorphantx=1000",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
],
]
self.supports_cli = False
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# the pre-mined test framework chain contains coinbase outputs to the
# MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks
# 76-100 (see method BitcoinTestFramework._initialize_chain())
self.wallet.rescan_utxos()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Running test no inherited signaling...")
self.test_no_inherited_signaling()
self.log.info("Running test replacement relay fee...")
self.test_replacement_relay_fee()
self.log.info("Passed")
def make_utxo(self, node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
"""Create a txout with a given amount and scriptPubKey
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
txid, n = self.wallet.send_to(from_node=node, scriptPubKey=scriptPubKey, amount=amount)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
self.generate(node, 1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert new_size < mempool_size
mempool_size = new_size
return COutPoint(int(txid, 16), n)
def test_simple_doublespend(self):
"""Simple doublespend"""
# we use MiniWallet to create a transaction template with inputs correctly set,
# and modify the output (amount, scriptPubKey) according to our needs
tx_template = self.wallet.create_self_transfer(from_node=self.nodes[0])['tx']
tx1a = deepcopy(tx_template)
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Should fail because we haven't changed the fee
tx1b = deepcopy(tx_template)
tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)]
tx1b_hex = tx1b.serialize().hex()
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Extra 0.1 BTC fee
tx1b.vout[0].nValue -= int(0.1 * COIN)
tx1b_hex = tx1b.serialize().hex()
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
mempool = self.nodes[0].getrawmempool()
assert tx1a_txid not in mempool
assert tx1b_txid in mempool
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 5 * COIN
tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 1 * COIN:
remaining_value -= int(0.1 * COIN)
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
tx_hex = tx.serialize().hex()
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees (4 BTC in total here), so this attempt, which pays only
# a 3 BTC fee, is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 3 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = dbl_tx.serialize().hex()
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(int(0.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = dbl_tx.serialize().hex()
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert doublespent_txid not in mempool
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 5 * COIN
tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
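# branch() below recursively fans each output into tree_width children,
# yielding every transaction it broadcasts, and stops once max_txs
# transactions have been created; each tree node pays `fee` to the mempool.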
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.00001 * COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = tx.serialize().hex()
assert len(tx.serialize()) < 100000
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.00001 * COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = dbl_tx.serialize().hex()
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# 0.1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n - int(0.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = dbl_tx.serialize().hex()
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert tx.hash not in mempool
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
fee = int(0.00001 * COIN)
tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = dbl_tx.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))]
tx1b_hex = tx1b.serialize().hex()
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = self.make_utxo(self.nodes[0], int(1.2 * COIN))
utxo2 = self.make_utxo(self.nodes[0], 3 * COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = tx2.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = tx1b.serialize().hex()
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = tx2.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = self.make_utxo(self.nodes[0], int(1.1 * COIN))
unconfirmed_utxo = self.make_utxo(self.nodes[0], int(0.1 * COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1_hex = tx1.serialize().hex()
self.nodes[0].sendrawtransaction(tx1_hex, 0)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = tx2.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10 * COIN
utxo = self.make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001 * COIN)
split_value = int((initial_nValue - fee) / (MAX_REPLACEMENT_LIMIT + 1))
outputs = []
for _ in range(MAX_REPLACEMENT_LIMIT + 1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = splitting_tx.serialize().hex()
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT + 1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)]
tx_i_hex = tx_i.serialize().hex()
self.nodes[0].sendrawtransaction(tx_i_hex, 0)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT + 1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = double_tx.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = double_tx.serialize().hex()
self.nodes[0].sendrawtransaction(double_tx_hex, 0)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# This transaction isn't shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = tx1b.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = tx2a.serialize().hex()
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b_hex = tx2b.serialize().hex()
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))]
tx3a_hex = tx3a.serialize().hex()
tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0)
# This transaction is shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3b_hex = tx3b.serialize().hex()
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3c_hex = tx3c.serialize().hex()
self.nodes[0].sendrawtransaction(tx3b_hex, 0)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, 0)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))]
tx1b_hex = tx1b.serialize().hex()
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
assert tx1b_txid in self.nodes[0].getrawmempool()
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = tx2a.serialize().hex()
self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b.rehash()
tx2b_hex = tx2b.serialize().hex()
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0)
assert tx2b_txid in self.nodes[0].getrawmempool()
def test_rpc(self):
us0 = self.wallet.get_utxo()
ins = [us0]
outs = {ADDRESS_BCRT1_UNSPENDABLE: Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
if self.is_wallet_compiled():
self.init_wallet(0)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
def test_no_inherited_signaling(self):
confirmed_utxo = self.wallet.get_utxo()
# Create an explicitly opt-in parent transaction
optin_parent_tx = self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=BIP125_SEQUENCE_NUMBER,
fee_rate=Decimal('0.01'),
)
assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
replacement_parent_tx = self.wallet.create_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=BIP125_SEQUENCE_NUMBER,
fee_rate=Decimal('0.02'),
)
# Test if parent tx can be replaced.
res = self.nodes[0].testmempoolaccept(rawtxs=[replacement_parent_tx['hex']])[0]
# Parent can be replaced.
assert_equal(res['allowed'], True)
# Create an opt-out child tx spending the opt-in parent
parent_utxo = self.wallet.get_utxo(txid=optin_parent_tx['txid'])
optout_child_tx = self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=parent_utxo,
sequence=0xffffffff,
fee_rate=Decimal('0.01'),
)
# Reports true due to inheritance
assert_equal(True, self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable'])
replacement_child_tx = self.wallet.create_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=parent_utxo,
sequence=0xffffffff,
fee_rate=Decimal('0.02'),
mempool_valid=False,
)
# Broadcast replacement child tx
# BIP 125 :
# 1. The original transactions signal replaceability explicitly or through inheritance as described in the above
# Summary section.
# The original transaction (`optout_child_tx`) doesn't signal RBF but its parent (`optin_parent_tx`) does.
# The replacement transaction (`replacement_child_tx`) should be able to replace the original transaction.
# See CVE-2021-31876 for further explanations.
assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0)
self.log.info('Check that the child tx can still be replaced (via a tx that also replaces the parent)')
replacement_parent_tx = self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=0xffffffff,
fee_rate=Decimal('0.03'),
)
# Check that child is removed and update wallet utxo state
assert_raises_rpc_error(-5, 'Transaction not in mempool', self.nodes[0].getmempoolentry, optout_child_tx['txid'])
self.wallet.get_utxo(txid=optout_child_tx['txid'])
def test_replacement_relay_fee(self):
tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['tx']
# Higher fee, higher feerate, different txid, but the replacement does not provide a relay
# fee conforming to node's `incrementalrelayfee` policy of 1000 sat per KB.
tx.vout[0].nValue -= 1
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx.serialize().hex())
if __name__ == '__main__':
ReplaceByFeeTest().main()
| |
"""
Tests for the client
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
import ga4gh.client.client as client
import ga4gh.client.exceptions as exceptions
import ga4gh.schemas.protocol as protocol
class TestSearchMethodsCallRunRequest(unittest.TestCase):
"""
Test that search methods call lower-level functionality correctly
"""
def setUp(self):
self.httpClient = client.HttpClient("http://example.com")
self.httpClient._run_search_request = mock.Mock()
self.httpClient._run_get_request = mock.Mock()
self.httpClient._run_list_request = mock.Mock()
self.httpClient._run_get_request_path = mock.Mock()
self.httpClient._run_post_request = mock.Mock()
self.objectId = "SomeId"
self.objectName = "objectName"
self.datasetId = "datasetId"
self.variantSetId = "variantSetId"
self.variantAnnotationSetId = "variantAnnotationSetId"
self.featureSetId = "featureSetId"
self.continuousSetId = "continuousSetId"
self.parentId = "parentId"
self.feature = "feature"
self.referenceSetId = "referenceSetId"
self.referenceId = "referenceId"
self.readGroupIds = ["readGroupId"]
self.referenceName = "referenceName"
self.biosampleId = "biosampleId"
self.biosampleName = "biosampleName"
self.individualName = "individualName"
self.individualId = "individualId"
self.geneSymbol = "geneSymbol"
self.start = 100
self.end = 101
self.referenceName = "referenceName"
self.callSetIds = ["id1", "id2"]
self.pageSize = 1000
self.httpClient.set_page_size(self.pageSize)
self.assemblyId = "assemblyId"
self.accession = "accession"
self.md5checksum = "md5checksum"
self.phenotype_association_set_id = "phenotype_association_set_id"
self.feature_ids = ["id1", "id2"]
self.phenotype_ids = ["id3", "id4"]
self.evidence = protocol.EvidenceQuery()
self.rnaQuantificationSetId = "rnaQuantificationSetId"
self.rnaQuantificationId = "rnaQuantificationId"
self.expressionLevelId = "expressionLevelId"
self.threshold = 0.0
def testSetPageSize(self):
testClient = client.AbstractClient()
# pageSize is None by default
self.assertIsNone(testClient.get_page_size())
for pageSize in [1, 10, 100]:
testClient.set_page_size(pageSize)
self.assertEqual(testClient.get_page_size(), pageSize)
def testSearchVariants(self):
request = protocol.SearchVariantsRequest()
request.reference_name = self.referenceName
request.start = self.start
request.end = self.end
request.variant_set_id = self.variantSetId
request.call_set_ids.extend(self.callSetIds)
request.page_size = self.pageSize
self.httpClient.search_variants(
self.variantSetId, start=self.start, end=self.end,
reference_name=self.referenceName, call_set_ids=self.callSetIds)
self.httpClient._run_search_request.assert_called_once_with(
request, "variants", protocol.SearchVariantsResponse)
def testSearchDatasets(self):
request = protocol.SearchDatasetsRequest()
request.page_size = self.pageSize
self.httpClient.search_datasets()
self.httpClient._run_search_request.assert_called_once_with(
request, "datasets", protocol.SearchDatasetsResponse)
def testSearchVariantSets(self):
request = protocol.SearchVariantSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.search_variant_sets(self.datasetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "variantsets", protocol.SearchVariantSetsResponse)
def testSearchVariantAnnotationSets(self):
request = protocol.SearchVariantAnnotationSetsRequest()
request.variant_set_id = self.variantSetId
request.page_size = self.pageSize
self.httpClient.search_variant_annotation_sets(self.variantSetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "variantannotationsets",
protocol.SearchVariantAnnotationSetsResponse)
def testSearchVariantAnnotations(self):
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSetId
request.page_size = self.pageSize
request.reference_name = self.referenceName
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
self.httpClient.search_variant_annotations(
self.variantAnnotationSetId,
reference_name=self.referenceName,
start=self.start,
end=self.end,
effects=[],
reference_id=self.referenceId)
self.httpClient._run_search_request.assert_called_once_with(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse)
with self.assertRaises(exceptions.ErrantRequestException):
self.httpClient.search_variant_annotations(
self.variantAnnotationSetId,
reference_name=self.referenceName,
start=self.start,
end=self.end,
effects=[{"term": "just a term"}, {"term_id": "an id"}],
reference_id=self.referenceId)
def testSearchFeatures(self):
request = protocol.SearchFeaturesRequest()
request.feature_set_id = self.featureSetId
request.parent_id = self.parentId
request.page_size = self.pageSize
request.reference_name = self.referenceName
request.start = self.start
request.end = self.end
request.name = self.objectName
request.gene_symbol = self.geneSymbol
request.feature_types.append(self.feature)
self.httpClient.search_features(
self.featureSetId, parent_id=self.parentId,
reference_name=self.referenceName, start=self.start,
end=self.end, feature_types=[self.feature],
name=self.objectName, gene_symbol=self.geneSymbol)
self.httpClient._run_search_request.assert_called_once_with(
request, "features", protocol.SearchFeaturesResponse)
def testSearchFeatureSets(self):
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.search_feature_sets(self.datasetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "featuresets", protocol.SearchFeatureSetsResponse)
def testSearchContinuous(self):
request = protocol.SearchContinuousRequest()
request.continuous_set_id = self.continuousSetId
request.page_size = self.pageSize
request.reference_name = self.referenceName
request.start = self.start
request.end = self.end
self.httpClient.search_continuous(
self.continuousSetId, reference_name=self.referenceName,
start=self.start, end=self.end)
self.httpClient._run_search_request.assert_called_once_with(
request, "continuous", protocol.SearchContinuousResponse)
def testSearchContinuousSets(self):
request = protocol.SearchContinuousSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.search_continuous_sets(self.datasetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "continuoussets", protocol.SearchContinuousSetsResponse)
def testSearchReferenceSets(self):
request = protocol.SearchReferenceSetsRequest()
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
request.assembly_id = self.assemblyId
self.httpClient.search_reference_sets(
accession=self.accession, md5checksum=self.md5checksum,
assembly_id=self.assemblyId)
self.httpClient._run_search_request.assert_called_once_with(
request, "referencesets", protocol.SearchReferenceSetsResponse)
def testSearchReferences(self):
request = protocol.SearchReferencesRequest()
request.reference_set_id = self.referenceSetId
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
self.httpClient.search_references(
self.referenceSetId, accession=self.accession,
md5checksum=self.md5checksum)
self.httpClient._run_search_request.assert_called_once_with(
request, "references", protocol.SearchReferencesResponse)
def testSearchReadGroupSets(self):
request = protocol.SearchReadGroupSetsRequest()
request.dataset_id = self.datasetId
request.name = self.objectName
request.biosample_id = self.biosampleId
request.page_size = self.pageSize
self.httpClient.search_read_group_sets(
self.datasetId,
name=self.objectName,
biosample_id=self.biosampleId)
self.httpClient._run_search_request.assert_called_once_with(
request, "readgroupsets", protocol.SearchReadGroupSetsResponse)
def testSearchCallSets(self):
request = protocol.SearchCallSetsRequest()
request.variant_set_id = self.variantSetId
request.name = self.objectName
request.biosample_id = self.biosampleId
request.page_size = self.pageSize
self.httpClient.search_call_sets(
self.variantSetId,
name=self.objectName,
biosample_id=self.biosampleId)
self.httpClient._run_search_request.assert_called_once_with(
request, "callsets", protocol.SearchCallSetsResponse)
def testSearchReads(self):
request = protocol.SearchReadsRequest()
request.read_group_ids.extend(self.readGroupIds)
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
request.page_size = self.pageSize
self.httpClient.search_reads(
self.readGroupIds, reference_id=self.referenceId,
start=self.start, end=self.end)
self.httpClient._run_search_request.assert_called_once_with(
request, "reads", protocol.SearchReadsResponse)
def testSearchExpressionLevels(self):
request = protocol.SearchExpressionLevelsRequest()
request.rna_quantification_id = self.rnaQuantificationId
request.threshold = self.threshold
request.page_size = self.pageSize
self.httpClient.search_expression_levels(
self.rnaQuantificationId, threshold=self.threshold)
self.httpClient._run_search_request.assert_called_once_with(
request, "expressionlevels",
protocol.SearchExpressionLevelsResponse)
def testSearchRnaQuantificationSets(self):
request = protocol.SearchRnaQuantificationSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.search_rna_quantification_sets(self.datasetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "rnaquantificationsets",
protocol.SearchRnaQuantificationSetsResponse)
def testSearchRnaQuantifications(self):
request = protocol.SearchRnaQuantificationsRequest()
request.rna_quantification_set_id = self.rnaQuantificationSetId
request.page_size = self.pageSize
self.httpClient.search_rna_quantifications(
rna_quantification_set_id=self.rnaQuantificationSetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "rnaquantifications",
protocol.SearchRnaQuantificationsResponse)
def testSearchBiosamples(self):
request = protocol.SearchBiosamplesRequest()
request.dataset_id = self.datasetId
request.name = self.biosampleName
request.individual_id = self.individualId
request.page_size = self.pageSize
self.httpClient.search_biosamples(
self.datasetId, self.biosampleName, self.individualId)
self.httpClient._run_search_request.assert_called_once_with(
request, "biosamples", protocol.SearchBiosamplesResponse)
def testSearchIndividuals(self):
request = protocol.SearchIndividualsRequest()
request.dataset_id = self.datasetId
request.name = self.individualName
request.page_size = self.pageSize
self.httpClient.search_individuals(
self.datasetId, self.individualName)
self.httpClient._run_search_request.assert_called_once_with(
request, "individuals", protocol.SearchIndividualsResponse)
def testGetReferenceSet(self):
self.httpClient.get_reference_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"referencesets", protocol.ReferenceSet, self.objectId)
def testGetVariantAnnotationSet(self):
self.httpClient.get_variant_annotation_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"variantannotationsets", protocol.VariantAnnotationSet,
self.objectId)
def testGetVariantSet(self):
self.httpClient.get_variant_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"variantsets", protocol.VariantSet, self.objectId)
def testGetReference(self):
self.httpClient.get_reference(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"references", protocol.Reference, self.objectId)
def testGetReadGroupSets(self):
self.httpClient.get_read_group_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"readgroupsets", protocol.ReadGroupSet, self.objectId)
def testGetReadGroup(self):
self.httpClient.get_read_group(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"readgroups", protocol.ReadGroup, self.objectId)
def testGetCallSets(self):
self.httpClient.get_call_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"callsets", protocol.CallSet, self.objectId)
def testGetDatasets(self):
self.httpClient.get_dataset(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"datasets", protocol.Dataset, self.objectId)
def testGetVariant(self):
self.httpClient.get_variant(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"variants", protocol.Variant, self.objectId)
def testGetBiosample(self):
self.httpClient.get_biosample(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"biosamples", protocol.Biosample, self.objectId)
def testGetIndividual(self):
self.httpClient.get_individual(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"individuals", protocol.Individual, self.objectId)
def testGetRnaQuantificationSet(self):
self.httpClient.get_rna_quantification_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"rnaquantificationsets", protocol.RnaQuantificationSet,
self.objectId)
def testGetRnaQuantification(self):
self.httpClient.get_rna_quantification(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"rnaquantifications", protocol.RnaQuantification, self.objectId)
def testGetExpressionLevel(self):
self.httpClient.get_expression_level(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"expressionlevels", protocol.ExpressionLevel, self.objectId)
def testGetFeatureSet(self):
self.httpClient.get_feature_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"featuresets", protocol.FeatureSet, self.objectId)
def testGetFeature(self):
self.httpClient.get_feature(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"features", protocol.Feature, self.objectId)
def testGetContinuousSet(self):
self.httpClient.get_continuous_set(self.objectId)
self.httpClient._run_get_request.assert_called_once_with(
"continuoussets", protocol.ContinuousSet, self.objectId)
def testSearchGenotypePhenotype(self):
request = protocol.SearchGenotypePhenotypeRequest()
request.phenotype_association_set_id = \
self.phenotype_association_set_id
request.feature_ids.extend(self.feature_ids)
request.phenotype_ids.extend(self.phenotype_ids)
request.evidence.extend([self.evidence])
request.page_size = self.pageSize
self.httpClient.search_genotype_phenotype(
phenotype_association_set_id=self.phenotype_association_set_id,
feature_ids=self.feature_ids,
phenotype_ids=self.phenotype_ids,
evidence=[self.evidence])
self.httpClient._run_search_request.assert_called_once_with(
request, "featurephenotypeassociations",
protocol.SearchGenotypePhenotypeResponse)
def testSearchPhenotype(self):
request = protocol.SearchPhenotypesRequest()
request.phenotype_association_set_id = \
self.phenotype_association_set_id
request.id = self.phenotype_ids[0]
request.page_size = self.pageSize
self.httpClient.search_phenotype(
phenotype_association_set_id=self.phenotype_association_set_id,
phenotype_id=self.phenotype_ids[0])
self.httpClient._run_search_request.assert_called_once_with(
request, "phenotypes",
protocol.SearchPhenotypesResponse)
def testSearchPhenotypeAssociationSets(self):
request = protocol.SearchPhenotypeAssociationSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.search_phenotype_association_sets(
dataset_id=self.datasetId)
self.httpClient._run_search_request.assert_called_once_with(
request, "phenotypeassociationsets",
protocol.SearchPhenotypeAssociationSetsResponse)
def testListPeers(self):
request = protocol.ListPeersRequest()
self.httpClient.list_peers()
self.httpClient._run_list_request.assert_called_once_with(
request,
"peers/list",
protocol.ListPeersResponse)
def testGetInfo(self):
self.httpClient.get_info()
self.httpClient._run_get_request_path.assert_called_once_with(
"info", protocol.GetInfoResponse)
def testAnnounce(self):
url = "http://1kgenomes.ga4gh.org"
self.httpClient.announce(url)
request = protocol.AnnouncePeerRequest()
request.peer.url = url
self.httpClient._run_post_request.assert_called_once_with(
request, "announce", protocol.AnnouncePeerResponse)
| |
__author__ = 'wangfeng'
import time
import os
import shutil
from oslo.config import cfg
from libcloud.compute.types import StorageVolumeState,NodeState
from libcloud.compute.base import NodeSize, NodeImage
from libcloud.storage.types import ObjectDoesNotExistError
from nova import utils
from nova import exception as exception
from nova.i18n import _, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import imageutils
from nova.openstack.common import fileutils as fileutils
from nova.openstack.common import log as logging
from nova.compute import task_states
from nova.volume.cinder import API as cinder_api
from nova.image.api import API as glance_api
from nova.compute import power_state
from nova.virt import driver
# convert_image is used below for image format conversion; assumed to be
# provided by nova.virt.images in this codebase.
from nova.virt.images import convert_image
# from adapter import Ec2Adapter as Ec2Adapter
import adapter
import exception_ex
hybrid_cloud_opts = [
cfg.StrOpt('provide_cloud_type',
default='aws',
help='provider cloud type ')
]
ec2_opts = [
cfg.StrOpt('conversion_dir',
default='/tmp',
help='where conversion happens'),
cfg.StrOpt('access_key_id',
help='the access key id for connection to EC2 '),
cfg.StrOpt('secret_key',
help='the secret key for connection to EC2 '),
cfg.StrOpt('region',
default='us-east-1',
help='the region for connection to EC2 '),
cfg.StrOpt('availability_zone',
default='us-east-1a',
help='the availability_zone for connection to EC2 '),
cfg.StrOpt('base_linux_image',
default='ami-68d8e93a',
help='use for create a base ec2 instance'),
cfg.StrOpt('storage_tmp_dir',
default='wfbucketse',
help='a cloud storage temp directory '),
cfg.StrOpt('cascaded_node_id',
help='az31 node id in provider cloud'),
cfg.StrOpt('subnet_api',
help='api subnet'),
cfg.StrOpt('subnet_data',
help='data subnet'),
cfg.StrOpt('cgw_host_ip',
help='compute gateway ip'),
cfg.StrOpt('cgw_host_id',
help='compute gateway id in provider cloud'),
cfg.StrOpt('cgw_user_name',
help='compute gateway user name'),
cfg.StrOpt('cgw_certificate',
help='full name of compute gateway public key'),
cfg.StrOpt('rabbit_host_ip_public',
help=''),
cfg.StrOpt('rabbit_password_public',
help=''),
cfg.StrOpt('vpn_route_gateway',
help=''),
cfg.DictOpt('flavor_map',
default={'m1.tiny': 't2.micro', 'm1.small': 't2.micro', 'm1.medium': 't2.micro',
'm1.large': 't2.micro', 'm1.xlarge': 't2.micro'},
help='map nova flavor name to aws ec2 instance specification id')
]
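# A hypothetical nova.conf fragment wiring these options (values are
# placeholders, not defaults shipped by this module; hybrid_cloud_opts land
# in [DEFAULT], ec2_opts in the [provider_opts] group registered below):
#
#     [DEFAULT]
#     provide_cloud_type = aws
#
#     [provider_opts]
#     access_key_id = <AWS access key>
#     secret_key = <AWS secret key>
#     region = us-east-1
#     availability_zone = us-east-1a
#     conversion_dir = /opt/conversion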
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(hybrid_cloud_opts)
CONF.register_opts(ec2_opts, 'provider_opts')
CHUNK_SIZE = 1024*4
# EC2 = get_driver(CONF.ec2.driver_type)
class AwsEc2Driver(driver.ComputeDriver):
def __init__(self, virtapi):
# import pdb
# pdb.set_trace()
if CONF.provide_cloud_type == 'aws':
if (CONF.provider_opts.access_key_id is None or
CONF.provider_opts.secret_key is None):
raise Exception(_("Must specify access_key_id and "
"secret_key to use aws ec2"))
self.compute_adapter = adapter.Ec2Adapter(CONF.provider_opts.access_key_id,
secret=CONF.provider_opts.secret_key,
region=CONF.provider_opts.region,
secure=False)
self.storage_adapter = adapter.S3Adapter(CONF.provider_opts.access_key_id,
secret=CONF.provider_opts.secret_key,
region=CONF.provider_opts.region,
secure=False)
self.cinder_api = cinder_api()
self.glance_api = glance_api()
def init_host(self, host):
pass
def list_instances(self):
"""List VM instances from all nodes."""
# import pdb
# pdb.set_trace()
instances = []
try:
nodes = self.compute_adapter.list_nodes()
except Exception:
LOG.error('list nodes failed')
return instances
if nodes is None:
LOG.error('list nodes failed, Nodes are null!')
return instances
for node in nodes:
instance_uuid = node.extra.get('tags').get('hybrid_cloud_instance_id')
instances.append(instance_uuid)
return instances
def snapshot(self, context, instance, image_id, update_task_state):
# import pdb
# pdb.set_trace()
# self._do_snapshot_1(context, instance, image_id, update_task_state)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
self._do_snapshot_2(context, instance, image_id, update_task_state)
def _do_snapshot_1(self, context, instance, image_id, update_task_state):
# 1) get provider node
provider_node_id = self._get_provider_node_id(instance)
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance.uuid)
raise exception.InstanceNotFound
if len(provider_nodes)>1:
LOG.error('more than one provider node found for instance %s' % instance.uuid)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
# 2) get root-volume id
provider_volumes = self.compute_adapter.list_volumes(node=provider_node)
if not provider_volumes:
raise exception.VolumeNotFound
provider_volume = provider_volumes[0]
# 3) export
self.compute_adapter.export_volume(provider_volume.id,
CONF.provider_opts.conversion_dir,
image_id,
cgw_host_id=CONF.provider_opts.cgw_host_id,
cgw_host_ip=CONF.provider_opts.cgw_host_ip,
cgw_username=CONF.provider_opts.cgw_user_name,
cgw_certificate=CONF.provider_opts.cgw_certificate,
transfer_station=CONF.provider_opts.storage_tmp_dir)
# 4) upload to glance
src_file_name = '%s/%s' %(CONF.provider_opts.conversion_dir, image_id)
file_size = os.path.getsize(src_file_name)
metadata = self.glance_api.get(context, image_id)
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
src_file_handle = fileutils.file_open(src_file_name, "rb")
self.glance_api.create(context,image_metadata,src_file_handle)
src_file_handle.close()
def _do_snapshot_2(self, context, instance, image_id, update_task_state):
# xxx(wangfeng)
# import pdb
# pdb.set_trace()
# a) get provider node id
provider_node_id = self._get_provider_node_id(instance)
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance.uuid)
raise exception.InstanceNotFound
if len(provider_nodes)>1:
LOG.error('more than one provider node found for instance %s' % instance.uuid)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
# b) export-instance to s3
# self.compute_adapter.ex_stop_node(provider_node)
try:
task = self.compute_adapter.create_export_instance_task(provider_node_id,
CONF.provider_opts.storage_tmp_dir)
except Exception:
# retry the export task once on a transient failure
task = self.compute_adapter.create_export_instance_task(provider_node_id,
CONF.provider_opts.storage_tmp_dir)
while not task.is_completed():
time.sleep(10)
task = self.compute_adapter.get_task_info(task)
obj_key = task.export_to_s3_info.s3_key
obj_bucket = task.export_to_s3_info.s3_bucket
# c) download from s3
obj = self.storage_adapter.get_object(obj_bucket,obj_key)
conv_dir = '%s/%s' % (CONF.provider_opts.conversion_dir,image_id)
fileutils.ensure_tree(conv_dir)
org_full_name = '%s/%s.vmdk' % (conv_dir,image_id)
# with open(org_full_name, 'wb') as f:
# for chunk in self.storage_adapter.download_object_as_stream(obj,chunk_size=CHUNK_SIZE):
# if chunk:
# f.write(chunk)
# f.flush()
self.storage_adapter.download_object(obj,org_full_name)
# d) convert to qcow2
dest_full_name = '%s/%s.qcow2' % (conv_dir,image_id)
convert_image(org_full_name,
dest_full_name,
'qcow2')
# upload to glance
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
file_size = os.path.getsize(dest_full_name)
metadata = self.glance_api.get(context, image_id)
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
src_file_handle = fileutils.file_open(dest_full_name, "rb")
self.glance_api.create(context,image_metadata,src_file_handle)
src_file_handle.close()
def _generate_provider_node_name(self, instance):
# xxx(wangfeng): it should perhaps use the instance name (cascading instance uuid)
return instance.uuid
def _get_provider_node_size(self, flavor):
return NodeSize(id=CONF.provider_opts.flavor_map[flavor.name],
name=None, ram=None, disk=None, bandwidth=None,price=None, driver=self.compute_adapter)
def _get_image_id_from_meta(self,image_meta):
if 'id' in image_meta:
# create from image
return image_meta['id']
elif 'image_id' in image_meta:
# attach
return image_meta['image_id']
elif 'properties' in image_meta:
# create from volume
return image_meta['properties']['image_id']
else:
return None
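# Illustrative image_meta shapes handled above (inferred from the branches,
# not an exhaustive contract):
#   {'id': ...}                          boot from image
#   {'image_id': ...}                    attach
#   {'properties': {'image_id': ...}}    boot from volume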
def _spawn_from_image(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
# 0. get the provider image
retry_time = 3
provider_image_id = None
while (not provider_image_id) and retry_time>0:
provider_image_id = self._get_provider_image_id(image_meta)
retry_time = retry_time-1
if provider_image_id is not None:
provider_image = self.compute_adapter.get_image(provider_image_id)
else:
provider_image = None
# 1. if provider_image does not exist, import the image first
if not provider_image:
image_uuid = self._get_image_id_from_meta(image_meta)
container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)
try:
self.storage_adapter.get_object(container.name,image_uuid)
except ObjectDoesNotExistError:
# 1.1 download qcow2 file from glance
this_conversion_dir = '%s/%s' % (CONF.provider_opts.conversion_dir,image_uuid)
orig_file_full_name = '%s/%s.qcow2' % (this_conversion_dir,'orig_file')
fileutils.ensure_tree(this_conversion_dir)
self.glance_api.download(context,image_uuid,dest_path=orig_file_full_name)
# 1.2 convert to provider image format
converted_file_format = 'vmdk'
converted_file_name = '%s.%s' % ('converted_file', converted_file_format)
converted_file_full_name = '%s/%s' % (this_conversion_dir,converted_file_name)
convert_image(orig_file_full_name,
converted_file_full_name,
converted_file_format,
subformat='streamoptimized')
# 1.3 upload to provider_image_id
# self.storage_adapter.upload_object(converted_file_full_name,container,image_uuid)
object_name = image_uuid
extra = {'content_type': 'text/plain'}
with open(converted_file_full_name,'rb') as f:
obj = self.storage_adapter.upload_object_via_stream(container=container,
object_name=object_name,
iterator=f,
extra=extra)
# except:
# LOG.error('Connect to provider storage error')
task = self.compute_adapter.create_import_image_task(CONF.provider_opts.storage_tmp_dir,
image_uuid,
image_name=image_uuid)
while not task.is_completed():
time.sleep(5)
task = self.compute_adapter.get_task_info(task)
provider_image = self.compute_adapter.get_image(task.image_id)
            set_tag_func = getattr(self.compute_adapter, 'ex_create_tags', None)
if set_tag_func:
set_tag_func(provider_image, {'hybrid_cloud_image_id': image_uuid})
        # 2. map flavor to node size, from configuration
provider_size = self._get_provider_node_size(instance.get_flavor())
# 3. get a subnet, create_node in this subnet
# provider_subnet = self.compute_adapter.ex_list_subnets()[0]
provider_subnet_data = self.compute_adapter.ex_list_subnets(
subnet_ids=[CONF.provider_opts.subnet_data])[0]
provider_subnet_api = self.compute_adapter.ex_list_subnets(
subnet_ids=[CONF.provider_opts.subnet_api])[0]
provider_node_name = self._generate_provider_node_name(instance)
user_data = self._generate_user_data()
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
ex_subnet=provider_subnet_data,
ex_userdata=user_data)
# 4. mapping instance id to provider node, using metadata
instance.metadata['provider_node_id'] = provider_node.id
instance.save()
        set_tag_func = getattr(self.compute_adapter, 'ex_create_tags', None)
if set_tag_func:
set_tag_func(provider_node, {'hybrid_cloud_instance_id': instance.uuid})
# 5 create a network interface and attach it to node
while provider_node.state!=NodeState.RUNNING and provider_node.state!=NodeState.STOPPED:
provider_node = self.compute_adapter.list_nodes(ex_node_ids=[provider_node.id])[0]
time.sleep(10)
provider_interface = self.compute_adapter.ex_create_network_interface(
provider_subnet_api,
name='Test Interface',
description='My Test')
        try:
            self.compute_adapter.ex_attach_network_interface_to_node(provider_interface, provider_node, 1)
        except Exception:
            # retry once; the first attach can fail while the node is settling
            self.compute_adapter.ex_attach_network_interface_to_node(provider_interface, provider_node, 1)
return provider_node
def _generate_user_data(self):
return 'RABBIT_HOST_IP=%s;RABBIT_PASSWORD=%s;VPN_ROUTE_GATEWAY=%s' % (CONF.provider_opts.rabbit_host_ip_public,
CONF.provider_opts.rabbit_password_public,
CONF.provider_opts.vpn_route_gateway)
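    # Illustrative result (placeholder values, not from the source):
    #   'RABBIT_HOST_IP=1.2.3.4;RABBIT_PASSWORD=secret;VPN_ROUTE_GATEWAY=10.0.0.1'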
def _spawn_from_volume(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
self._create_node_ec2(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def _create_node_ec2(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
# 1. create a common vm
        # 1.1 map flavor to node size, from configuration
provider_size = self._get_provider_node_size(instance.get_flavor())
# 1.2 get common image
        provider_image = self.compute_adapter.get_image(CONF.provider_opts.base_linux_image)
# 1.3. create_node, and get_node_stat, waiting for node creation finish
provider_node_name = self._generate_provider_node_name(instance)
        provider_node = self.compute_adapter.create_node(name=provider_node_name, image=provider_image, size=provider_size)
# 2. power off the vm
self.compute_adapter.ex_stop_node(provider_node)
# 3. detach origin root volume
provider_volumes = self.compute_adapter.list_volumes(node=provider_node)
provider_volume = provider_volumes[0]
self.compute_adapter.detach_volume(provider_volume)
# 4. attach this volume
self.compute_adapter.attach_volume(provider_node,provider_volume, provider_volume.extra.get('device'))
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
# import pdb
# pdb.set_trace()
LOG.debug(_("image meta is:%s") % image_meta)
LOG.debug(_("instance is:%s") % instance)
        bdms = (block_device_info or {}).get('block_device_mapping', [])
if len(bdms) > 0:
volume_id = bdms[0]['connection_info']['data']['volume_id']
provider_volume_id = self._get_provider_volume_id(context,volume_id)
if provider_volume_id is not None:
provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
else:
provider_volumes = []
if not provider_volumes:
                # no provider volume yet: boot from image (import the image into the provider cloud, then boot the instance)
provider_node = self._spawn_from_image(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
provider_volume = self.compute_adapter.list_volumes(node=provider_node)
self._map_volume_to_provider(context, volume_id, provider_volume[0])
            elif len(provider_volumes) == 1:
                # exactly one provider volume exists: boot from volume
self._spawn_from_volume(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
else:
                LOG.error('create instance %s failed: multi volume confusion' % instance.uuid)
raise exception_ex.MultiVolumeConfusion
else:
# if boot from image: (import image in provider cloud, then boot instance)
self._spawn_from_image(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
LOG.debug("creating instance %s success!" % instance.uuid)
def _map_volume_to_provider(self,context, volume_id, provider_volume):
        # map the instance root volume to the cinder volume
if not provider_volume:
self.cinder_api.update_volume_metadata(context,
volume_id,
{'provider_volume_id': None})
else:
self.cinder_api.update_volume_metadata(context,
volume_id,
{'provider_volume_id': provider_volume.id})
        set_tag_func = getattr(self.compute_adapter, 'ex_create_tags', None)
if set_tag_func:
set_tag_func(provider_volume, {'hybrid_cloud_volume_id': volume_id})
def _get_provider_image_id(self, image_obj):
try:
image_uuid = self._get_image_id_from_meta(image_obj)
provider_image = self.compute_adapter.list_images(
ex_filters={'tag:hybrid_cloud_image_id':image_uuid})
if provider_image is None:
raise exception_ex.ProviderRequestTimeOut
if len(provider_image)==0:
# raise exception.ImageNotFound
LOG.warning('Image %s NOT Found at provider cloud' % image_uuid)
return None
elif len(provider_image)>1:
raise exception_ex.MultiImageConfusion
else:
return provider_image[0].id
        except Exception:
LOG.error('Can NOT get image %s from provider cloud tag' % image_uuid)
return None
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
# import pdb
# pdb.set_trace()
volume_id = connection_info['data']['volume_id']
instance_id = instance.uuid
LOG.info("attach volume")
provider_node_id = self._get_provider_node_id(instance)
provider_volume_id = self._get_provider_volume_id(context, volume_id)
# 1.get node
if not provider_node_id:
LOG.error('instance %s is not found' % instance_id)
raise exception.InstanceNotFound
else:
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance_id)
raise exception.InstanceNotFound
if len(provider_nodes)>1:
                LOG.error('more than one instance found for %s' % instance_id)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
# 2.get volume
if not provider_volume_id:
            # LOG.error('volume %s is not found' % volume_id)
            # raise exception.VolumeNotFound
            # the provider volume does not exist: import it from the volume's glance image first
volume = self.cinder_api.get(context,volume_id)
image_meta = volume.get('volume_image_metadata')
if not image_meta:
LOG.error('Provider Volume NOT Found!')
                raise exception_ex.VolumeNotFoundAtProvider
else:
# 1.1 download qcow2 file from glance
image_uuid = self._get_image_id_from_meta(image_meta)
orig_file_name = 'orig_file.qcow2'
this_conversion_dir = '%s/%s' % (CONF.provider_opts.conversion_dir,volume_id)
orig_file_full_name = '%s/%s' % (this_conversion_dir,orig_file_name)
fileutils.ensure_tree(this_conversion_dir)
self.glance_api.download(context,image_uuid,dest_path=orig_file_full_name)
# 1.2 convert to provider image format
converted_file_format = 'vmdk'
converted_file_name = '%s.%s' % ('converted_file', converted_file_format)
converted_file_path = '%s/%s' % (CONF.provider_opts.conversion_dir,volume_id)
converted_file_full_name = '%s/%s' % (converted_file_path,converted_file_name)
convert_image(orig_file_full_name,
converted_file_full_name,
converted_file_format,
subformat='streamoptimized')
# 1.3 upload to provider_image_id
container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)
# self.storage_adapter.upload_object(converted_file_full_name,container,volume_id)
object_name = volume_id
extra = {'content_type': 'text/plain'}
with open(converted_file_full_name,'rb') as f:
obj = self.storage_adapter.upload_object_via_stream(container=container,
object_name=object_name,
iterator=f,
extra=extra)
obj = self.storage_adapter.get_object(container.name,volume_id)
task = self.compute_adapter.create_import_volume_task(CONF.provider_opts.storage_tmp_dir,
volume_id,
'VMDK',
obj.size,
str(volume.get('size')),
volume_loc=provider_node.extra.get('availability'))
while not task.is_completed():
time.sleep(10)
if task.is_cancelled():
LOG.error('import volume fail!')
raise exception_ex.UploadVolumeFailure
task = self.compute_adapter.get_task_info(task)
provider_volume_id = task.volume_id
provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
if provider_volumes is None:
            LOG.error('Timed out querying provider volume %s', provider_volume_id)
raise exception_ex.ProviderRequestTimeOut
if len(provider_volumes)>1:
            LOG.error('more than one provider volume found for %s' % volume_id)
raise exception_ex.MultiVolumeConfusion
provider_volume = provider_volumes[0]
if provider_volume.state != StorageVolumeState.AVAILABLE:
LOG.error('volume %s is not available' % volume_id)
raise exception.InvalidVolume
# 3.attach
self.compute_adapter.attach_volume(provider_node,provider_volume,mountpoint)
# 4. map volume to provider volume
self._map_volume_to_provider(context, volume_id, provider_volume)
def _get_provider_volume_id(self, context, volume_id):
provider_volume_id = self.cinder_api.get_volume_metadata_value(context,volume_id,'provider_volume_id')
if not provider_volume_id:
try:
provider_volumes = self.compute_adapter.list_volumes(ex_filters={'tag:hybrid_cloud_volume_id':volume_id})
if len(provider_volumes) == 1:
provider_volume_id = provider_volumes[0].id
self.cinder_api.update_volume_metadata(context,volume_id,{'provider_volume_id':provider_volume_id})
elif len(provider_volumes)>1:
                    raise exception_ex.MultiVolumeConfusion
else:
LOG.warning('Volume %s NOT Found at provider cloud' % volume_id)
# raise exception.ImageNotFound
            except Exception:
LOG.error('Can NOT get volume %s from provider cloud tag' % volume_id)
return provider_volume_id
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
# import pdb
# pdb.set_trace()
LOG.info("detach volume")
volume_id = connection_info['data']['volume_id']
# instance_id = instance.uuid
# 1.get volume
# provider_volume_id = self._get_provider_volume_id(volume_id)
# provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
        # xxx(wangfeng): if the provider does not support tags, we need to modify this solution
provider_volumes = self.compute_adapter.list_volumes(ex_filters={'tag:hybrid_cloud_volume_id':volume_id})
if not provider_volumes:
LOG.error('volume %s is not found' % volume_id)
raise exception.VolumeNotFound
if len(provider_volumes)>1:
            LOG.error('more than one provider volume found for %s' % volume_id)
raise exception_ex.MultiVolumeConfusion
provider_volume = provider_volumes[0]
        if provider_volume.state != StorageVolumeState.ATTACHING:
            LOG.error('volume %s is not attaching' % volume_id)
        # 2. detach
self.compute_adapter.detach_volume(provider_volume)
# self._map_volume_to_provider(context, volume_id, provider_volume)
pass
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
        # xxx(wangfeng): hard-coded placeholder resource values for the provider cloud
return {'vcpus': 32,
'memory_mb': 164403,
'local_gb': 5585,
'vcpus_used': 0,
'memory_mb_used': 69005,
'local_gb_used': 3479,
'hypervisor_type': 'aws',
'hypervisor_version': 5005000,
'hypervisor_hostname': nodename,
'cpu_info': '{"model": ["Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz"], \
"vendor": ["Huawei Technologies Co., Ltd."], \
"topology": {"cores": 16, "threads": 32}}',
'supported_instances': jsonutils.dumps(
[["i686", "ec2", "hvm"], ["x86_64", "ec2", "hvm"]]),
'numa_topology': None,
}
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
# return "aws-ec2-hypervisor"
return "hybrid_%s" % CONF.provider_opts.region
def get_info(self, instance):
# import pdb
# pdb.set_trace()
state = power_state.NOSTATE
        # xxx(wangfeng): it is too slow to connect to aws to get info, so it is skipped
# provider_node_id = self._get_provider_node_id(instance)
# nodes = None
# if provider_node_id is not None:
# nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
#
# if not nodes:
# LOG.error('instance %s Not Found' % instance.uuid)
# # raise exception.InstanceNotFound
# elif len(nodes) > 1:
# LOG.error('more than one instance %s Found' % instance.uuid)
# # raise exception_ex.MultiInstanceConfusion
# else:
# node = nodes[0]
# node_status = node.get('state')
# instance_type =node.get('extra').get('instance_type')
return {'state': state,
'max_mem': 0,
'mem': 0,
'num_cpu': 1,
'cpu_time': 0}
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy VM instance."""
# import pdb
# pdb.set_trace()
        LOG.debug('begin to destroy node %s', instance.uuid)
provider_node_id = self._get_provider_node_id(instance)
if not provider_node_id:
LOG.warning('Instance %s NOT Found at provider cloud' % instance.uuid)
return
nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not nodes:
# raise exception.InstanceNotFound
LOG.warning('Instance %s NOT Found at provider cloud' % instance.uuid)
return
if len(nodes) > 1:
            LOG.error('More than one instance %s found at provider cloud' % instance.uuid)
raise exception_ex.MultiInstanceConfusion
node = nodes[0]
if not destroy_disks:
            # detach volumes
            if block_device_info:
provider_volume_ids = []
# get volume id
for device in block_device_info:
                    volume_id = device['connection_info']['data']['volume_id']
provider_volume_ids.append(self._get_provider_volume_id(context,volume_id))
                # get volumes in the provider cloud
                provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=provider_volume_ids)
# detach
for provider_volume in provider_volumes:
self.compute_adapter.detach_volume(provider_volume)
self._map_volume_to_provider(context, volume_id, None)
        # destroy node
self.compute_adapter.destroy_node(node)
    def _get_provider_node_id(self, instance_obj):
        """Map openstack instance_uuid to ec2 instance id."""
        # import pdb
        # pdb.set_trace()
        # if the instance metadata contains provider_node_id, use it directly
        provider_node_id = instance_obj.metadata.get('provider_node_id')
        # otherwise, search the provider cloud instances' tags for it
if not provider_node_id:
try:
provider_node = self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id':instance_obj.uuid})
if len(provider_node) == 1:
provider_node_id = provider_node[0].id
                    instance_obj.metadata['provider_node_id'] = provider_node_id
instance_obj.save()
elif len(provider_node)>1:
                    raise exception_ex.MultiInstanceConfusion
else:
# raise exception.ImageNotFound
LOG.warning('Instance %s NOT Found at provider cloud' % instance_obj.uuid)
            except Exception:
LOG.warning('Can NOT get instance %s from provider cloud tag' % instance_obj.uuid)
return provider_node_id
def get_volume_connector(self, instance):
pass
def power_off(self, instance, timeout=0, retry_interval=0):
# import pdb
# pdb.set_trace()
LOG.debug('Power off node %s',instance.uuid)
provider_node_id = self._get_provider_node_id(instance)
nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not nodes:
raise exception.InstanceNotFound
if len(nodes) > 1:
raise exception_ex.MultiInstanceConfusion
node = nodes[0]
self.compute_adapter.ex_stop_node(node)
def power_on(self, context, instance, network_info,
block_device_info=None):
# import pdb
# pdb.set_trace()
LOG.debug('Power on node %s',instance.uuid)
provider_node_id = self._get_provider_node_id(instance)
nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not nodes:
raise exception.InstanceNotFound
if len(nodes) > 1:
raise exception_ex.MultiInstanceConfusion
node = nodes[0]
self.compute_adapter.ex_start_node(node)
def get_instance_macs(self, instance):
LOG.debug('Start to get macs of instance %s', instance)
filters = {'tag:Name': instance['uuid']}
nodes = self.compute_adapter.list_nodes(ex_filters=filters)
instance_macs = dict()
if nodes is not None and len(nodes) == 1:
node = nodes[0]
nw_interfaces = node.extra['network_interfaces']
for nw_interface in nw_interfaces:
subnet_id = nw_interface.extra['subnet_id']
vpc_id = nw_interface.extra['vpc_id']
mac_address = nw_interface.extra['mac_address']
                # NOTE(nkapotoxin): for now we use the subnet_id as the
                # provider network id
instance_macs[subnet_id] = mac_address
return instance_macs
def qemu_img_info(path):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
msg = (_("Path does not exist %(path)s") % {'path': path})
raise exception.InvalidDiskInfo(reason=msg)
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
if not out:
msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") %
{'path': path, 'error': err})
raise exception.InvalidDiskInfo(reason=msg)
return imageutils.QemuImgInfo(out)
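# Illustrative use of qemu_img_info (the path is a placeholder):
#   info = qemu_img_info('/tmp/conversion/disk.qcow2')
#   info.file_format  # e.g. 'qcow2'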
def convert_image(source, dest, out_format, run_as_root=False, **kwargs):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd, run_as_root=run_as_root)
    if 'subformat' in kwargs:
if kwargs.get('subformat') == 'streamoptimized':
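            # Descriptive note on the steps below (added for clarity): the
            # qemu-img output is renamed to the vmdk referenced by the vmx
            # template, ovftool then repackages it as an OVF whose
            # stream-optimized '<base>-disk1.vmdk' replaces the original
            # destination file; the intermediate ovf/mf/vmdk files are removed.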
dir_name = os.path.dirname(dest)
base_name = os.path.basename(dest)
ovf_name = '%s/%s.ovf' % (dir_name,base_name)
vmx_name_temp = '%s/vmx/template.vmx' % CONF.provider_opts.conversion_dir
vmx_name = '%s/template.vmx' % dir_name
shutil.copy2(vmx_name_temp,vmx_name)
mk_ovf_cmd = ('ovftool', '-o',vmx_name, ovf_name)
convert_file = '%s/converted-file.vmdk' % dir_name
os.rename(dest, convert_file)
utils.execute(*mk_ovf_cmd, run_as_root=run_as_root)
vmdk_file_name = '%s/%s-disk1.vmdk' % (dir_name,base_name)
fileutils.delete_if_exists(dest)
os.rename(vmdk_file_name, dest)
fileutils.delete_if_exists(ovf_name)
fileutils.delete_if_exists('%s/%s.mf' % (dir_name,base_name))
fileutils.delete_if_exists(convert_file)
| |
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import UserDict
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
from nova.scheduler import weights
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.standard_filters" '
'maps to all filters included with nova.'),
cfg.ListOpt('scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'RamFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter'
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.scheduler.weights.all_weighers'],
help='Which weight class names to use for weighing hosts'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class ReadOnlyDict(UserDict.IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.data = {}
self.update(source)
def __setitem__(self, key, item):
raise TypeError
def __delitem__(self, key):
raise TypeError
def clear(self):
raise TypeError
def pop(self, key, *args):
raise TypeError
def popitem(self):
raise TypeError
def update(self, source=None):
if source is None:
return
elif isinstance(source, UserDict.UserDict):
self.data = source.data
elif isinstance(source, type({})):
self.data = source
else:
raise TypeError
class HostState(object):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def __init__(self, host, node, capabilities=None, service=None):
self.host = host
self.nodename = node
self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_disk_gb = 0
self.disk_mb_used = 0
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_total = 0
self.vcpus_used = 0
# Valid vm types on this host: 'pv', 'hvm' or 'all'
if 'allowed_vm_type' in self.capabilities:
self.allowed_vm_type = self.capabilities['allowed_vm_type']
else:
self.allowed_vm_type = 'all'
# Additional host information from the compute node stats:
self.vm_states = {}
self.task_states = {}
self.num_instances = 0
self.num_instances_by_project = {}
self.num_instances_by_os_type = {}
self.num_io_ops = 0
# Resource oversubscription values for the compute host:
self.limits = {}
self.updated = None
def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
if (self.updated and compute['updated_at']
and self.updated > compute['updated_at']):
return
all_ram_mb = compute['memory_mb']
        # Assume virtual size is all consumed by instances if qcow2 disks are used.
least = compute.get('disk_available_least')
free_disk_mb = least if least is not None else compute['free_disk_gb']
free_disk_mb *= 1024
self.disk_mb_used = compute['local_gb_used'] * 1024
#NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute['free_ram_mb']
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute['local_gb']
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute['vcpus']
self.vcpus_used = compute['vcpus_used']
self.updated = compute['updated_at']
stats = compute.get('stats', [])
statmap = self._statmap(stats)
# Track number of instances on host
self.num_instances = int(statmap.get('num_instances', 0))
# Track number of instances by project_id
project_id_keys = [k for k in statmap.keys() if
k.startswith("num_proj_")]
for key in project_id_keys:
project_id = key[9:]
self.num_instances_by_project[project_id] = int(statmap[key])
# Track number of instances in certain vm_states
vm_state_keys = [k for k in statmap.keys() if k.startswith("num_vm_")]
for key in vm_state_keys:
vm_state = key[7:]
self.vm_states[vm_state] = int(statmap[key])
# Track number of instances in certain task_states
task_state_keys = [k for k in statmap.keys() if
k.startswith("num_task_")]
for key in task_state_keys:
task_state = key[9:]
self.task_states[task_state] = int(statmap[key])
# Track number of instances by host_type
os_keys = [k for k in statmap.keys() if k.startswith("num_os_type_")]
for key in os_keys:
os = key[12:]
self.num_instances_by_os_type[os] = int(statmap[key])
self.num_io_ops = int(statmap.get('io_workload', 0))
def consume_from_instance(self, instance):
"""Incrementally update host state from an instance"""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
self.updated = timeutils.utcnow()
# Track number of instances on host
self.num_instances += 1
# Track number of instances by project_id
project_id = instance.get('project_id')
if project_id not in self.num_instances_by_project:
self.num_instances_by_project[project_id] = 0
self.num_instances_by_project[project_id] += 1
# Track number of instances in certain vm_states
vm_state = instance.get('vm_state', vm_states.BUILDING)
if vm_state not in self.vm_states:
self.vm_states[vm_state] = 0
self.vm_states[vm_state] += 1
# Track number of instances in certain task_states
task_state = instance.get('task_state')
if task_state not in self.task_states:
self.task_states[task_state] = 0
self.task_states[task_state] += 1
# Track number of instances by host_type
os_type = instance.get('os_type')
if os_type not in self.num_instances_by_os_type:
self.num_instances_by_os_type[os_type] = 0
self.num_instances_by_os_type[os_type] += 1
vm_state = instance.get('vm_state', vm_states.BUILDING)
task_state = instance.get('task_state')
if vm_state == vm_states.BUILDING or task_state in [
task_states.RESIZE_MIGRATING, task_states.REBUILDING,
task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
task_states.IMAGE_BACKUP]:
self.num_io_ops += 1
def _statmap(self, stats):
return dict((st['key'], st['value']) for st in stats)
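    # Illustrative mapping (values are made up):
    #   [{'key': 'num_instances', 'value': '2'}] -> {'num_instances': '2'}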
def __repr__(self):
return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
(self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances, self.allowed_vm_type))
class HostManager(object):
"""Base HostManager class."""
# Can be overridden in a subclass
host_state_cls = HostState
def __init__(self):
# { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
self.service_states = {}
self.host_state_map = {}
self.filter_handler = filters.HostFilterHandler()
self.filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
CONF.scheduler_weight_classes)
def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if filter_cls_names is None:
filter_cls_names = CONF.scheduler_default_filters
if not isinstance(filter_cls_names, (list, tuple)):
filter_cls_names = [filter_cls_names]
good_filters = []
bad_filters = []
for filter_name in filter_cls_names:
found_class = False
for cls in self.filter_classes:
if cls.__name__ == filter_name:
good_filters.append(cls)
found_class = True
break
if not found_class:
bad_filters.append(filter_name)
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None):
"""Filter hosts and return only ones passing all filters"""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
if host in host_map:
del host_map[host]
ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
msg = _('Host filter ignoring hosts: %(ignored_hosts_str)s')
LOG.debug(msg, locals())
def _match_forced_hosts(host_map, hosts_to_force):
for host in host_map.keys():
if host not in hosts_to_force:
del host_map[host]
if not host_map:
forced_hosts_str = ', '.join(hosts_to_force)
msg = _("No hosts matched due to not matching 'force_hosts'"
"value of '%(forced_hosts_str)s'")
LOG.debug(msg, locals())
return
forced_hosts_str = ', '.join(host_map.iterkeys())
msg = _('Host filter forcing available hosts to '
'%(forced_hosts_str)s')
LOG.debug(msg, locals())
filter_classes = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
if ignore_hosts or force_hosts:
name_to_cls_map = dict([(x.host, x) for x in hosts])
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
if not name_to_cls_map:
return []
hosts = name_to_cls_map.itervalues()
return self.filter_handler.get_filtered_objects(filter_classes,
hosts, filter_properties)
def get_weighed_hosts(self, hosts, weight_properties):
"""Weigh the hosts"""
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
if service_name != 'compute':
LOG.debug(_('Ignoring %(service_name)s service update '
'from %(host)s'), locals())
return
state_key = (host, capabilities.get('hypervisor_hostname'))
LOG.debug(_("Received %(service_name)s service update from "
"%(state_key)s.") % locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
self.service_states[state_key] = capab_copy
def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
"""
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
for compute in compute_nodes:
service = compute['service']
if not service:
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
node = compute.get('hypervisor_hostname')
state_key = (host, node)
capabilities = self.service_states.get(state_key, None)
host_state = self.host_state_map.get(state_key)
if host_state:
host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
host_state = self.host_state_cls(host, node,
capabilities=capabilities,
service=dict(service.iteritems()))
self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
return self.host_state_map.itervalues()
| |
#!/usr/bin/env python
# copyright 2013 UNL Holland Computing Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import re
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from optparse import OptionParser
from time import time
import HCCPlot,HCCRootParser, HCCTool1
import operator
import os.path
import ROOT
def main():
    '''Parse command-line options and run the plotting pipeline.'''
#command line options
parser = OptionParser()
parser.add_option('--file', '-f', help='ROOT input file',dest='file')
parser.add_option('--tree', '-t', help='Tree to parse, Default Event', dest='treeName')
    parser.add_option('--sizeCutoff', '-s', dest='read_cutoff', help='Read size cutoff, Default 50')
parser.add_option('--output_directory','-o', dest = 'output', help='The name of the output file')
parser.add_option('--regex', '-r', dest='fileNameRegex', help='Regular expression to filter files')
parser.add_option('--window_size', '-w', dest='window_size', help='Size of the displayed data graphic in mb Default:20')
    parser.add_option('--begin_read', '-b', dest='beginning', help='Beginning read to display in the file graphic')
parser.add_option('--raw_only', dest='raw_only', help='Create only the raw read graphics and not overlayed file usage', action='store_true')
parser.add_option('--even_check', '-c', dest='check_fun_file', help='File to import that contains checkEvent method to validate any event')
parser.add_option('--top_n','-n', dest = 'topN' , help='Color only the top N branches in the file')
parser.add_option('--prefix','-p', dest = 'prefix' , help='Prefix to append to files to search for file on system')
(options, args)= parser.parse_args()
#
# Handle command line options
#
#
    # If no file was given we have an error
    if options.file is None:
        print "A ROOT input file is required"
parser.print_help()
exit(0)
if not options.file.upper().endswith('.ROOT'):
print 'Root file required'
exit()
try:
with open(options.file) as f: pass
except IOError as e:
        print 'file does not exist'
exit()
#require the file name and tree
if options.treeName is None:
print "Using default tree of Events"
treeName = 'Events'
else:
treeName = options.treeName
#output file name
if options.output != None:
outDir = options.output + '/'
else:
outDir = './'
#regular expression
if options.fileNameRegex != None:
frex = re.compile(options.fileNameRegex)
else:
frex = None
#size expression
if options.window_size != None:
window_size = int(options.window_size)
else:
window_size = 20
if options.read_cutoff != None:
size_cutoff = int(options.read_cutoff)
else:
size_cutoff = 50
if options.beginning:
begin = int(options.beginning)
else:
begin = 0
if options.raw_only:
raw_only = True
else:
raw_only = False
if options.check_fun_file:
check_fun_file = __import__(options.check_fun_file)
else:
check_fun_file = None
topN = None
#color only top ten files
if options.topN:
topN = int(options.topN)
if topN > 30:
topN = 30
else:
topN = 30
#prefix for the file name
prefix = '/mnt/hadoop/user/uscms01/pnfs/unl.edu/data4/cms'
if options.prefix:
prefix = options.prefix
#
# End Command Line Stuff
#
#Load the needed c++ classes into here
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = 2500;")
if os.path.exists("XrdFar"):
ROOT.gSystem.Load("XrdFar/XrdFar")
tf = ROOT.TFile(options.file)
if not os.path.exists("XrdFar"):
tf.MakeProject("XrdFar", "*", "new++")
#Loop through all the events within the file
for event in tf.XrdFar:
if fileShouldBeRead(event, frex, size_cutoff, check_fun_file):
print '\n\n'
print 'Starting on File: %s' % event.F.mName
#get a bunch of needed info now that it has passed
offsets = list(event.I.mOffsetVec)
lengths = list(event.I.mLengthVec)
name = event.F.mName
uname = event.U.mServerUsername
server = event.S.mHost
readSize = event.F.mRTotalMB
data = []
idx = name.rfind('/')
shortName = name[idx+1:]
idx = name.rfind('/',0,idx)
            # Also removing /store from the front of them
title = name[7:idx]
file = prefix + name
            # transform the data
for i in range(len(offsets)):
val = transformData(offsets[i], lengths[i], i)
data = data + val
arr = createArray(data)
points = markPoints(data)
usage, limits = fixFileUsage(arr,size = window_size, start = begin)
try:
print '\tCreating Usage Graph'
HCCPlot.plotUsage(arr, name = title, outname = outDir + shortName+'_rawreads', points = points, subtitle = uname )
except Exception, e:
print "Error Plotting Layout: %s" % str(e)
if not raw_only:
t1 = time()
print '\tParsing layout'
fileLayout = HCCRootParser.parseFileTool2(file, treeName, None, False, topN)
                t2 = time()
print '\t\tParse Time: ' + str(t2-t1)
tenBig= HCCTool1.getTenBig(fileLayout, topN)
fileLayout = HCCPlot.transformByteToMB(fileLayout)
fileLayout = cutToRange(fileLayout, limits)
colorMap = HCCTool1.createColorMap(fileLayout, False, tenBig)
try:
print "\tPlotting layout overlay"
HCCPlot.plotFileLayoutTool2(fileLayout, False,outDir + shortName + '.png', colorMap, tenBig,limits = limits, title = title, fileUsage = usage)
except Exception, e:
print "Error Plotting Layout: %s" % str(e)
def fileShouldBeRead(event, frex, min_size, check_fun_file):
'''Functions to check if the file should be read and parsed by the program'''
its_a_go = True
#If they have a module with desired import and stuff then run the check_event method
if check_fun_file != None:
its_a_go = its_a_go and check_fun_file.check_event(event)
#check regex
if frex != None:
res = frex.search(event.F.mName)
val = res != None
its_a_go = its_a_go and val
#check min size
its_a_go = its_a_go and event.F.mRTotalMB > min_size
#check the size
its_a_go = its_a_go and len(list(event.I.mLengthVec)) > 0
return its_a_go
def cutToRange(fileLayout, limits):
'''Cut the file usage data array to the given range '''
newData = []
for p in fileLayout:
if limits[0]/2 <= p[0] <= limits[1]/2:
newData.append(p)
return newData
def fixFileUsage(data, size = 20, start = 0):
'''Fix up the file usage by cutting info to a certain size and start read # within the file '''
count = -1
found = False
reached = False
idx = 0
prevEnc = []
#loop through until we have found enough points and reached a certain count
while not found or not reached:
if idx == len(data):
print 'Index out of bounds. Will reset to 0'
idx = 0
prevEnc = []
count = 0
start = 0
break
for j in range(len(data[idx])):
if data[idx][j] != 0:
found = True
if data[idx][j] not in prevEnc:
count += 1
prevEnc.append(data[idx][j])
if count == start:
reached = True
break
        # need to ensure we don't accidentally increment the index
        if not found or not reached:
            idx = idx + 1
    # now cut the data down to size but leave whole array intact for drawing purposes
    start = idx
offset = size * 2
end = start + offset
if end > len(data):
end = len(data)
start = end - offset
newData = np.zeros(( len(data), len(data[0])) )
for i in range(start,end):
for j in range(len(data[i])):
newData[i][j] =data[i][j]
return [ma.masked_equal(newData,0) ,[start,end]]
def markPoints(data):
'''Mark all points where a back track was made in the file read data'''
maxEncountered = 0
sortedData = sorted(data, key=lambda point: point[3])
points = []
for i in sortedData:
if i[0] < maxEncountered:
points.append([i[1],i[0]*2])
maxEncountered = i[0]
if len(points) < 1:
return None
else:
return points
def createArray(data):
'''Create the data array out of what is there now'''
sortedData = sorted(data, key=lambda tup: tup[0])
arr = np.zeros(((sortedData[-1][0]+1)*2,pow(2,10)))
for p in sortedData:
arr[p[0]*2][p[1]:p[2]] = p[3]
return arr
def transformData(offset, length, index, scale=None):
'''transform the data that is obtained parsing the root file into the
data that can be plotted using the two methods in this class'''
if scale == None:
scale = pow(2,20)
line = offset / scale
loc = offset % scale
end = loc + length
if end > scale:
values = []
values.append([line, loc/1024, scale/1024,index])
        # consume the bytes already emitted on the first line
        length = length - (scale - loc)
while length > 0:
line = line + 1
loc = 0
end = 0
            if length < scale:
end = length
else:
end = scale
length = length - scale
values.append([line,loc/1024,end/1024, index])
return values
else:
return [[line,loc/1024,end/1024,index]]
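# Illustrative call (numbers are made up): with the corrected chunking above,
# a 3 MiB read at byte offset 0 spans three 1 MiB lines of the grid:
#   transformData(0, 3 * pow(2, 20), 7)
#   -> [[0, 0, 1024, 7], [1, 0, 1024, 7], [2, 0, 1024, 7]]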
if __name__ == '__main__':
main()
| |
# vi: ts=8 sts=4 sw=4 et
#
# session.py: session management
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import sys
import datetime
import logging
from draco2.util.http import Cookie
from draco2.util.serialize import loads, dumps
from draco2.util.namespace import DictNamespace
from draco2.core.model import Session as SessionObject
from draco2.session.exception import SessionError
from draco2.session.namespace import SessionNamespace
from draco2.session.util import dump_sessionid, generate_sessionid
class SessionInterface(object):
"""The session management interface."""
@classmethod
def _create(cls, api):
"""Factory method."""
raise NotImplementedError
def load(self, sessionid):
"""Load an exisiting session."""
raise NotImplementedError
def new(self):
"""Create a new session."""
raise NotImplementedError
def destroy(self):
"""Destroy the current session."""
raise NotImplementedError
def sessionid(self):
"""Return the session ID."""
raise NotImplementedError
def set_timeout(self, timeout):
"""Set the default session timeout. """
raise NotImplementedError
def create_date(self):
"""Return the creation date of this session."""
raise NotImplementedError
def expire_date(self):
"""Return the expiration date of this session."""
raise NotImplementedError
def enter_subsession(self):
"""Push a new subsession on the subsession stack."""
raise NotImplementedError
def leave_subsession(self):
"""Pop a subsession."""
raise NotImplementedError
def clear_subsessions(self):
"""Clear the subsesion stack."""
raise NotImplementedError
def namespace(self, scope=None, subsession=True):
"""Return the session namespace with scope `scope'."""
raise NotImplementedError
ns = namespace
def commit(self):
"""Commit the session (flushes namespaces)."""
raise NotImplementedError
class Session(object):
"""Session management object.
This class represents a single HTTP session.
"""
def __init__(self, request, response, transaction,
security=None, events=None):
"""Constructor."""
self.m_session = None
self.m_sessionid = None
self.m_request = request
self.m_response = response
self.m_transaction = transaction
self.m_security = security
self.m_events = events
self.m_namespaces = {}
self.set_timeout(7200) # two hours
def load(self, sessionid):
"""Load an existing session."""
if not sessionid[0]:
return False
# Lock the session. Every session is updated at each request.
# Without locking, this would lead to serialization errors (and thus
# transaction retries) with every concurrent request. Therefore
# better prevent concurrent request (this is the hypothesis, at
# least).
result = self.m_transaction.select(SessionObject, 'id=%s',
(sessionid[0],), lock=True)
if not result:
return False
session = result[0]
if not self._check(session):
return False
self._update(session)
self.m_session = session
self.m_sessionid = sessionid
return True
def _check(self, session):
"""Check session object `session'."""
logger = logging.getLogger('draco2.session.session')
# Once a session has been logged in we don't allow it to be used
# without credentials anymore. The theory is that these sessions
        # may contain sensitive data and therefore we want at least as
# much protection as the security context provides us.
if self.m_security and session['principal'] and \
session['principal'] != self.m_security.principal():
logger.info('Session principal mismatch.')
self.m_transaction.delete(session)
return False
now = datetime.datetime.now()
if session['expire_date'] <= now:
logger.warning('Client provided expired session id.')
return False
return True
def _update(self, session):
"""Update time stamps in session object `session'."""
updates = {} # Batch updates
updates['principal'] = self.m_security.principal()
now = datetime.datetime.now()
updates['last_used'] = now
expire_date = now + datetime.timedelta(seconds=self.m_timeout)
updates['expire_date'] = expire_date
session.update(updates)
cookie = Cookie('draco-session', session['id'],
expires=expire_date, path='/')
self.m_response.set_cookie(cookie)
def new(self):
"""Create a new session."""
session = SessionObject()
sessionid = generate_sessionid()
session['id'] = sessionid[0]
session['last_subsession'] = sessionid[1]
session['principal'] = self.m_security.principal()
now = datetime.datetime.now()
session['create_date'] = now
session['last_used'] = now
expire_date = now + datetime.timedelta(seconds=self.m_timeout)
session['expire_date'] = expire_date
self.m_transaction.insert(session)
value = dump_sessionid((sessionid[0], None))
cookie = Cookie('draco-session', value, expires=expire_date, path='/')
self.m_response.set_cookie(cookie)
self.m_session = session
self.m_sessionid = sessionid
def destroy(self):
"""Destroy the current session."""
if not self.m_session:
return
self.m_transaction.delete(self.m_session)
expired = datetime.datetime(1970, 1, 1)
cookie = Cookie('draco-session', '', expires=expired, path='/')
self.m_response.set_cookie(cookie)
self.m_session = None
self.m_sessionid = None
def sessionid(self):
"""Return the session ID."""
return self.m_sessionid
def set_timeout(self, timeout):
"""Set the default session timeout. """
self.m_timeout = timeout
def create_date(self):
"""Return the creation date of this session."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
return self.m_session['create_date']
def expire_date(self):
"""Return the expiration date of this session."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
return self.m_session['expire_date']
def _next_subsession(self):
"""Allocate a new subsession."""
subsession = self.m_session['last_subsession'] + 1
subsession = subsession % sys.maxint
self.m_session['last_subsession'] = subsession
return subsession
def enter_subsession(self, name=None):
"""Push a new subsession on the subsession stack."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
parent = self.m_sessionid[1]
child = self._next_subsession()
self.m_sessionid = (self.m_sessionid[0], child)
ns = self.namespace('__draco2__')
ns['parent'] = parent
ns['name'] = name
enter = enter_subsession
def leave_subsession(self):
"""Pop a subsession."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
ns = self.namespace('__draco2__')
try:
parent = ns['parent']
except KeyError:
return
self.m_sessionid = (self.m_sessionid[0], parent)
leave = leave_subsession
def clear_subsessions(self):
"""Clear the subsesion stack."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
self.m_sessionid = (self.m_sessionid[0], 0)
clear = clear_subsessions
def namespace(self, scope=None, subsession=True):
"""Return the session namespace with scope `scope'."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
if callable(scope):
scope = self.m_response.patch_uri(scope)
elif scope is None:
scope = '__default__'
if subsession:
scope = '%s/%d' % (scope, self.m_sessionid[1])
try:
ns = self.m_namespaces[scope]
except KeyError:
ns = SessionNamespace(self.m_session['id'], scope, self.m_transaction)
self.m_namespaces[scope] = ns
return ns
ns = namespace
def globals(self, scope=None):
"""Return a global namespace with scope `scope'."""
return self.namespace(scope, False)
def locals(self, scope=None):
"""Return a local namespace with scope `scope'."""
return self.namespace(scope, True)
def commit(self):
"""Commit the session (flushes namespaces)."""
if not self.m_session:
raise SessionError, 'Session not initialized.'
for ns in self.m_namespaces.values():
ns.flush()
self.m_namespaces.clear()
class DummySession(SessionInterface):
"""A dummy session.
This session is allocated to web robots. Using a dummy session
instead of setting api.session to "None" allows for the same code
path for web robots and normal clients.
"""
def __init__(self):
"""Constructor."""
self.m_namespaces = {}
self.set_timeout(7200)
@classmethod
def _create(cls, api):
"""Factory method."""
session = cls()
session.new()
return session
def load(self, sessionid):
"""Load an exisiting session."""
return False
def new(self):
"""Create a new session."""
self.m_subsession = 0
self.m_create_date = datetime.datetime.now()
self.m_expire_date = self.m_create_date
        self.m_expire_date += datetime.timedelta(seconds=self.m_timeout)
def destroy(self):
"""Destroy the current session."""
def sessionid(self):
"""Return the session ID."""
return None
def set_timeout(self, timeout):
"""Set the default session timeout. """
self.m_timeout = timeout
def create_date(self):
"""Return the creation date of this session."""
return self.m_create_date
def expire_date(self):
"""Return the expiration date of this session."""
return self.m_expire_date
def enter_subsession(self, name=None):
"""Push a new subsession on the subsession stack."""
self.m_subsession += 1
enter = enter_subsession
def leave_subsession(self):
"""Pop a subsession."""
if self.m_subsession > 0:
self.m_subsession -= 1
leave = leave_subsession
def clear_subsessions(self):
"""Clear the subsesion stack."""
self.m_subsession = 0
clear = clear_subsessions
def namespace(self, scope=None, subsession=True):
"""Return the session namespace with scope `scope'."""
if callable(scope):
scope = self.m_response.resolve_uri(scope)
elif scope is None:
scope = '__default__'
if subsession:
scope = '%s/%d' % (scope, self.m_subsession)
try:
ns = self.m_namespaces[scope]
except KeyError:
ns = DictNamespace()
self.m_namespaces[scope] = ns
return ns
ns = namespace
def globals(self, scope=None):
"""Return a global namespace with scope `scope'."""
return self.namespace(scope, False)
def locals(self, scope=None):
"""Return a local namespace with scope `scope'."""
return self.namespace(scope, True)
def commit(self):
"""Commit the session (flushes namespaces)."""
| |
# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~
Custom types and data structures.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from collections import defaultdict
from itertools import chain
from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.limits import TokenBucket # noqa
from .utils.functional import LRUCache, first, uniq # noqa
class CycleError(Exception):
"""A cycle was detected in an acyclic graph."""
class DependencyGraph(object):
"""A directed acyclic graph of objects and their dependencies.
Supports a robust topological sort
to detect the order in which they must be handled.
Takes an optional iterator of ``(obj, dependencies)``
tuples to build the graph from.
.. warning::
Does not support cycle detection.
"""
def __init__(self, it=None):
self.adjacent = {}
if it is not None:
self.update(it)
def add_arc(self, obj):
"""Add an object to the graph."""
self.adjacent.setdefault(obj, [])
def add_edge(self, A, B):
"""Add an edge from object ``A`` to object ``B``
(``A`` depends on ``B``)."""
self[A].append(B)
def topsort(self):
"""Sort the graph topologically.
:returns: a list of objects in the order
in which they must be handled.
"""
graph = DependencyGraph()
components = self._tarjan72()
NC = dict((node, component)
for component in components
for node in component)
for component in components:
graph.add_arc(component)
for node in self:
node_c = NC[node]
for successor in self[node]:
successor_c = NC[successor]
if node_c != successor_c:
graph.add_edge(node_c, successor_c)
return [t[0] for t in graph._khan62()]
def valency_of(self, obj):
"""Returns the velency (degree) of a vertex in the graph."""
try:
l = [len(self[obj])]
except KeyError:
return 0
for node in self[obj]:
l.append(self.valency_of(node))
return sum(l)
def update(self, it):
"""Update the graph with data from a list
of ``(obj, dependencies)`` tuples."""
tups = list(it)
for obj, _ in tups:
self.add_arc(obj)
for obj, deps in tups:
for dep in deps:
self.add_edge(obj, dep)
def edges(self):
"""Returns generator that yields for all edges in the graph."""
return (obj for obj, adj in self.iteritems() if adj)
def _khan62(self):
"""Khans simple topological sort algorithm from '62
See http://en.wikipedia.org/wiki/Topological_sorting
"""
count = defaultdict(lambda: 0)
result = []
for node in self:
for successor in self[node]:
count[successor] += 1
ready = [node for node in self if not count[node]]
while ready:
node = ready.pop()
result.append(node)
for successor in self[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
result.reverse()
return result
def _tarjan72(self):
"""Tarjan's algorithm to find strongly connected components.
See http://bit.ly/vIMv3h.
"""
result, stack, low = [], [], {}
def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in self[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
stack[stack_pos:] = []
result.append(component)
for item in component:
low[item] = len(self)
for node in self:
visit(node)
return result
def to_dot(self, fh, ws=" " * 4):
"""Convert the graph to DOT format.
:param fh: A file, or a file-like object to write the graph to.
"""
fh.write("digraph dependencies {\n")
for obj, adjacent in self.iteritems():
if not adjacent:
fh.write(ws + '"%s"\n' % (obj, ))
for req in adjacent:
fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
fh.write("}\n")
def __iter__(self):
return self.adjacent.iterkeys()
def __getitem__(self, node):
return self.adjacent[node]
def __len__(self):
return len(self.adjacent)
def __contains__(self, obj):
return obj in self.adjacent
def _iterate_items(self):
return self.adjacent.iteritems()
items = iteritems = _iterate_items
def __repr__(self):
return '\n'.join(self.repr_node(N) for N in self)
def repr_node(self, obj, level=1):
output = ["%s(%s)" % (obj, self.valency_of(obj))]
if obj in self:
for other in self[obj]:
d = "%s(%s)" % (other, self.valency_of(other))
output.append(' ' * level + d)
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
return '\n'.join(output)
class AttributeDictMixin(object):
"""Adds attribute access to mappings.
`d.key -> d[key]`
"""
def __getattr__(self, k):
"""`d.key -> d[key]`"""
try:
return self[k]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (type(self).__name__, k))
def __setattr__(self, key, value):
"""`d[key] = value -> d.key = value`"""
self[key] = value
class AttributeDict(dict, AttributeDictMixin):
"""Dict subclass with attribute access."""
pass
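# Illustrative use:
#   d = AttributeDict(foo=1)
#   d.foo       # -> 1, same as d['foo']
#   d.bar = 2   # stored as d['bar']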
class DictAttribute(object):
"""Dict interface to attributes.
`obj[k] -> obj.k`
"""
def __init__(self, obj):
self.obj = obj
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
setattr(self.obj, key, value)
def __contains__(self, key):
return hasattr(self.obj, key)
def _iterate_keys(self):
return vars(self.obj).iterkeys()
iterkeys = _iterate_keys
def __iter__(self):
return self.iterkeys()
def _iterate_items(self):
return vars(self.obj).iteritems()
iteritems = _iterate_items
if sys.version_info[0] == 3: # pragma: no cover
items = _iterate_items
keys = _iterate_keys
else:
def keys(self):
return list(self._iterate_keys())
def items(self):
return list(self._iterate_items())
class ConfigurationView(AttributeDictMixin):
"""A view over an applications configuration dicts.
If the key does not exist in ``changes``, the ``defaults`` dict
is consulted.
:param changes: Dict containing changes to the configuration.
    :param defaults: List of dicts containing the default configuration.
"""
changes = None
defaults = None
_order = None
def __init__(self, changes, defaults):
self.__dict__.update(changes=changes, defaults=defaults,
_order=[changes] + defaults)
def __getitem__(self, key):
for d in self._order:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.changes[key] = value
def first(self, *keys):
return first(None, (self.get(key) for key in keys))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, *args, **kwargs):
return self.changes.update(*args, **kwargs)
def __contains__(self, key):
for d in self._order:
if key in d:
return True
return False
def __repr__(self):
return repr(dict(self.iteritems()))
def __iter__(self):
return self.iterkeys()
def _iter(self, op):
        # defaults must be first in the stream, so values in
        # changes take precedence.
return chain(*[op(d) for d in reversed(self._order)])
def _iterate_keys(self):
return uniq(self._iter(lambda d: d.iterkeys()))
iterkeys = _iterate_keys
def _iterate_items(self):
return ((key, self[key]) for key in self)
iteritems = _iterate_items
def _iterate_values(self):
return (self[key] for key in self)
itervalues = _iterate_values
def keys(self):
return list(self._iterate_keys())
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
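# Usage sketch (illustrative): lookups fall back from ``changes`` to each dict
# in ``defaults``, while writes always land in ``changes``:
#
#     >>> view = ConfigurationView({'a': 1}, [{'a': 0, 'b': 2}])
#     >>> view.a, view.b
#     (1, 2)
#     >>> view['c'] = 3        # stored in changes only
#     >>> sorted(view.keys())
#     ['a', 'b', 'c']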
class LimitedSet(object):
"""Kind-of Set with limitations.
    Good for when you need to test for membership (`a in set`),
    but the list might become too big, so you want to limit it so it
    doesn't consume too many resources.
:keyword maxlen: Maximum number of members before we start
evicting expired members.
:keyword expires: Time in seconds, before a membership expires.
"""
__slots__ = ("maxlen", "expires", "_data", "__len__")
def __init__(self, maxlen=None, expires=None):
self.maxlen = maxlen
self.expires = expires
self._data = {}
self.__len__ = self._data.__len__
def add(self, value):
"""Add a new member."""
self._expire_item()
self._data[value] = time.time()
def clear(self):
"""Remove all members"""
self._data.clear()
def pop_value(self, value):
"""Remove membership by finding value."""
self._data.pop(value, None)
def _expire_item(self):
"""Hunt down and remove an expired item."""
while 1:
if self.maxlen and len(self) >= self.maxlen:
value, when = self.first
if not self.expires or time.time() > when + self.expires:
try:
self.pop_value(value)
except TypeError: # pragma: no cover
continue
break
def __contains__(self, value):
return value in self._data
def update(self, other):
if isinstance(other, self.__class__):
self._data.update(other._data)
else:
for obj in other:
self.add(obj)
def as_dict(self):
return self._data
def __iter__(self):
return iter(self._data)
def __repr__(self):
return "LimitedSet(%r)" % (self._data.keys(), )
@property
def chronologically(self):
        return sorted(self._data.items(), key=lambda item: item[1])  # sort by insertion timestamp
@property
def first(self):
"""Get the oldest member."""
return self.chronologically[0]
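# Usage sketch (illustrative): with ``maxlen`` set, adding beyond the limit
# evicts the oldest member once it has also expired (or immediately when no
# ``expires`` is given):
#
#     >>> s = LimitedSet(maxlen=2)
#     >>> for x in ('a', 'b', 'c'):
#     ...     s.add(x)
#     >>> 'a' in s
#     False
#     >>> sorted(s)
#     ['b', 'c']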
| |
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import io
import operator
import pkgutil
import traceback
import types
from docutils import nodes
from docutils.parsers import rst
from docutils import utils
TAG = ':yaql:'
def _get_modules_names(package):
"""Get names of modules in package"""
return sorted(
map(operator.itemgetter(1),
pkgutil.walk_packages(package.__path__,
'{0}.'.format(package.__name__))))
def _get_functions_names(module):
"""Get names of the functions in the current module"""
return [name for name in dir(module) if
isinstance(getattr(module, name, None), types.FunctionType)]
def write_method_doc(method, output):
"""Construct method documentation from a docstring.
1) Strip TAG
2) Embolden function name
3) Add :callAs: after :signature:
"""
msg = "Error: function {0} has no valid YAQL documentation."
if method.__doc__:
doc = method.__doc__
try:
# strip TAG
doc = doc[doc.index(TAG) + len(TAG):]
# embolden function name
line_break = doc.index('\n')
yaql_name = doc[:line_break]
(emit_header, is_overload) = yield yaql_name
if emit_header:
output.write(yaql_name)
output.write('\n')
output.write('~' * len(yaql_name))
output.write('\n')
doc = doc[line_break:]
# add :callAs: parameter
try:
signature_index = doc.index(':signature:')
position = doc.index(' :', signature_index +
len(':signature:'))
if hasattr(method, '__yaql_function__'):
if (method.__yaql_function__.name and
'operator' in method.__yaql_function__.name):
call_as = 'operator'
elif (method.__yaql_function__.is_function and
method.__yaql_function__.is_method):
call_as = 'function or method'
elif method.__yaql_function__.is_method:
call_as = 'method'
else:
call_as = 'function'
else:
call_as = 'function'
call_as_str = ' :callAs: {0}\n'.format(call_as)
text = doc[:position] + call_as_str + doc[position:]
except ValueError:
text = doc
if is_overload:
text = '* ' + '\n '.join(text.split('\n'))
output.write(text)
else:
output.write(text)
except ValueError:
            yield method.__name__
            output.write(msg.format(method.__name__))
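# Protocol sketch (illustrative): write_method_doc is a coroutine-style
# generator. The caller first calls next(it) to receive the yaql function
# name (used for sorting and overload grouping), then sends a pair back to
# let the generator finish writing, mirroring write_module_doc below:
#
#     it = write_method_doc(method, output)
#     name = next(it)               # name parsed from the docstring
#     try:
#         it.send((True, False))    # emit header, not an overload
#     except StopIteration:
#         pass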
def write_module_doc(module, output):
"""Generate and write rst document for module.
Generate and write rst document for the single module.
:parameter module: takes a Python module which should be documented.
:type module: Python module
:parameter output: takes file to which generated document will be written.
:type output: file
"""
functions_names = _get_functions_names(module)
if module.__doc__:
output.write(module.__doc__)
output.write('\n')
seq = []
for name in functions_names:
method = getattr(module, name)
it = write_method_doc(method, output)
try:
name = next(it)
seq.append((name, it))
except StopIteration:
pass
seq.sort(key=operator.itemgetter(0))
prev_name = None
for i, item in enumerate(seq):
name = item[0]
emit_header = name != prev_name
prev_name = name
if emit_header:
overload = i < len(seq) - 1 and seq[i + 1][0] == name
else:
overload = True
try:
item[1].send((emit_header, overload))
except StopIteration:
pass
output.write('\n\n')
output.write('\n')
def write_package_doc(package, output):
"""Writes rst document for the package.
Generate and write rst document for the modules in the given package.
:parameter package: takes a Python package which should be documented
:type package: Python module
:parameter output: takes file to which generated document will be written.
:type output: file
"""
modules = _get_modules_names(package)
for module_name in modules:
module = importlib.import_module(module_name)
write_module_doc(module, output)
def generate_doc(source):
try:
package = importlib.import_module(source)
except ImportError:
return 'Error: No such module {0}'.format(source)
out = io.StringIO()
try:
if hasattr(package, '__path__'):
write_package_doc(package, out)
else:
write_module_doc(package, out)
res = out.getvalue()
return res
except Exception as e:
return '.. code-block:: python\n\n Error: {0}\n {1}\n\n'.format(
str(e), '\n '.join([''] + traceback.format_exc().split('\n')))
class YaqlDocNode(nodes.General, nodes.Element):
source = None
def __init__(self, source):
self.source = source
super(YaqlDocNode, self).__init__()
class YaqlDocDirective(rst.Directive):
has_content = False
required_arguments = 1
def run(self):
return [YaqlDocNode(self.arguments[0])]
def render(app, doctree, fromdocname):
for node in doctree.traverse(YaqlDocNode):
new_doc = utils.new_document('YAQL', doctree.settings)
content = generate_doc(node.source)
rst.Parser().parse(content, new_doc)
node.replace_self(new_doc.children)
def setup(app):
app.add_node(YaqlDocNode)
app.add_directive('yaqldoc', YaqlDocDirective)
app.connect('doctree-resolved', render)
return {'version': '0.1'}
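# Usage sketch (illustrative, assuming a standard Sphinx project): enable the
# extension in conf.py and point the directive at a package or module whose
# docstrings carry the ':yaql:' tag. The extension module name below is
# hypothetical:
#
#     # conf.py
#     extensions = ['yaql_doc_extension']
#
#     # somewhere.rst
#     .. yaqldoc:: yaql.standard_library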
| |
import json
import os
import sys
import ConfigParser
import collections
import occi
import render
# keep in sync with pOCCI/pOCCI.cfg
occi_defaults = {
'authtype': 'basic',
'ignoressl': False,
'mimetype': 'text/plain',
'outputformat': 'json',
'curlverbose': False,
'connectiontimeout': 60,
'timeout': 120,
'tests.category': 'Category:compute;class=kind;scheme="http://schemas.ogf.org/occi/infrastructure#"',
}
occi_config = {}
renderers = {}
renderer = None
renderer_big = None
renderer_httpheaders = None
def occi_format(results):
count_f = 0
count_o = 0
for r in results:
if 'status' in r:
if r['status']:
count_o += 1
else:
count_f += 1
r['status'] = result2str(r['status'])
if 'running_time' in r:
r['running_time'] = round(r['running_time'], 3)
out = {}
out['tests'] = results
out['passed'] = count_o
out['failed'] = count_f
return out
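# Illustrative sketch: occi_format() turns raw per-test dicts into the summary
# structure consumed by occi_print(); with a hypothetical input:
#
#     results = [{'name': 'OCCI/CORE/READ', 'status': True,
#                 'running_time': 0.12345}]
#     occi_format(results)
#     # -> {'tests': [{'name': 'OCCI/CORE/READ', 'status': 'OK',
#     #                'running_time': 0.123}], 'passed': 1, 'failed': 0}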
def html_escape(s):
s = str(s)
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace("\n", "<br>")
return s
def occi_print(results, outputformat):
if outputformat == 'plain':
for r in results['tests']:
print '%s %s' % (r['name'], r['status'])
if 'reason' in r:
print >> sys.stderr, r['reason']
elif outputformat == 'json':
print json.dumps(results, indent=4)
elif outputformat in ['html', 'htmltable']:
if outputformat == 'html':
print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n\
<html>\n\
\n\
<head>\n\
<title>OCCI Compliance Tests Results</title>\n\
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">\n\
<style type="text/css">\n\
th {text-align:left}\n\
td.ok {color:green}\n\
td.fail {color:red}\n\
td.skipped {color:orange}\n\
</style>\n\
</head>\n\
\n\
<body>\n\
\n\
<table>\n\
<tr>\n\
<th>Test</th>\n\
<th>Running Time</th>\n\
<th>Status</th>\n\
<th>Reason</th>\n\
</tr>'
for r in results['tests']:
css = 'skipped'
if r['status'] == 'OK':
css = 'ok'
elif r['status'] == 'FAIL':
css = 'fail'
print ' <tr class="%s">' % css
print ' <td class="name">%s</td>' % html_escape(r['name'])
if 'objective' in r:
print ' <td class="objective">%s</td>' % html_escape(r['objective'])
print ' <td class="time">%s</td>' % html_escape(r['running_time'])
print ' <td class="%s">%s</td>' % (css, html_escape(r['status']))
if 'reason' in r:
print ' <td class="reason">%s</td>' % html_escape('\n'.join(r['reason']))
print ' </tr>'
if outputformat == 'html':
print '</table>\n\
\n\
</body>\n\
\n\
</html>'
else:
        print >> sys.stderr, 'Only "plain", "json", "html" and "htmltable" output types are possible'
def occi_test(name, objective, status, err_msg, running_time=None):
test = collections.OrderedDict()
test['name'] = name
if objective is not None:
test['objective'] = objective
test['status'] = status
if running_time is not None:
test['running_time'] = running_time
if err_msg:
test['reason'] = err_msg
return test
def result2str(result):
return 'OK' if result else 'FAIL'
def occi_init():
"""Initialize pOCCI.
"""
# bigger data requires anything except HTTP Headers renderer
if occi_config['mimetype'] == 'text/occi':
occi_config['mimetype.big'] = 'text/plain'
else:
occi_config['mimetype.big'] = occi_config['mimetype']
occi_render_init()
def occi_config_init():
"""Initialize pOCCI configuration.
Reads the configuration file: /etc/pOCCI.cfg, ~/.pOCCI.cfg.
"""
global occi_config
config = ConfigParser.ConfigParser()
config.read(['/etc/pOCCI.cfg', os.path.expanduser('~/.pOCCI.cfg')])
if config.has_section('main'):
for key, value in config.items('main'):
#print 'config: %s = %s (%s)' % (key, value, type(eval(value)))
occi_config[key] = eval(value)
for key, value in occi_defaults.iteritems():
if key not in occi_config:
occi_config[key] = value
return True
def occi_render_init():
"""Initialize pOCCI renderers.
Limitations:
- For HTTP GET requests 'text/occi' is always needed
- For bigger data 'text/occi' should not be used (using 'text/plain')
"""
mimetypes = ['text/plain', 'text/occi']
self = sys.modules[__name__]
# renderers always needed
for mime in mimetypes:
renderers[mime] = render.create_renderer(mime)
# user configurable renderer
if occi_config['mimetype'] in mimetypes:
renderer = renderers[occi_config['mimetype']]
else:
renderer = render.create_renderer(occi_config['mimetype'])
# big data requires anything except HTTP Headers renderer
renderer_big = renderer
if occi_config['mimetype'] != occi_config['mimetype.big']:
if occi_config['mimetype.big'] in mimetypes:
renderer_big = renderers[occi_config['mimetype.big']]
else:
renderer_big = render.create_renderer(occi_config['mimetype.big'])
# HTTP GET requests needs HTTP Headers renderer
renderer_httpheaders = renderer
if occi_config['mimetype'] != 'text/occi':
renderer_httpheaders = renderers['text/occi']
# configurable filters
for f in ['tests.category', 'tests.entity']:
if f in occi_config:
try:
categories = renderers['text/plain'].parse_categories([occi_config[f]], None)
except occi.ParseError as pe:
print >> sys.stderr, ("Can't parse '%s' config option: " % f) + str(pe)
sys.exit(2)
if categories:
occi_config['occi.%s' % f] = categories[0]
if occi_config['curlverbose']:
print ("[config] '%s'=" % f) + str(categories[0])
self.renderer = renderer
self.renderer_big = renderer_big
self.renderer_httpheaders = renderer_httpheaders
if not occi_config:
occi_config_init()
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
from tempest_lib import decorators
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import test
class BasicOperationsImagesTest(base.BaseV2ImageTest):
"""
Here we test the basic operations of images
"""
@decorators.skip_because(bug="1452987")
@test.attr(type='smoke')
@test.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
def test_register_upload_get_image_file(self):
"""
Here we test these functionalities - Register image,
upload the image file, get image and get image file api's
"""
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
body = self.create_image(name=image_name,
container_format='bare',
disk_format='raw',
visibility='private',
ramdisk_id=uuid)
self.assertIn('id', body)
image_id = body.get('id')
self.assertIn('name', body)
self.assertEqual(image_name, body['name'])
self.assertIn('visibility', body)
self.assertEqual('private', body['visibility'])
self.assertIn('status', body)
self.assertEqual('queued', body['status'])
# Now try uploading an image file
file_content = data_utils.random_bytes()
image_file = moves.cStringIO(file_content)
self.client.store_image_file(image_id, image_file)
# Now try to get image details
body = self.client.show_image(image_id)
self.assertEqual(image_id, body['id'])
self.assertEqual(image_name, body['name'])
self.assertEqual(uuid, body['ramdisk_id'])
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
# Now try get image file
body = self.client.load_image_file(image_id)
self.assertEqual(file_content, body.data)
@test.attr(type='smoke')
@test.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
def test_delete_image(self):
# Deletes an image by image_id
# Create image
image_name = data_utils.rand_name('image')
body = self.client.create_image(name=image_name,
container_format='bare',
disk_format='raw',
visibility='private')
image_id = body['id']
# Delete Image
self.client.delete_image(image_id)
self.client.wait_for_resource_deletion(image_id)
# Verifying deletion
images = self.client.list_images()
images_id = [item['id'] for item in images]
self.assertNotIn(image_id, images_id)
@decorators.skip_because(bug="1452987")
@test.attr(type='smoke')
@test.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
def test_update_image(self):
# Updates an image by image_id
# Create image
image_name = data_utils.rand_name('image')
body = self.client.create_image(name=image_name,
container_format='bare',
disk_format='iso',
visibility='private')
self.addCleanup(self.client.delete_image, body['id'])
self.assertEqual('queued', body['status'])
image_id = body['id']
# Now try uploading an image file
image_file = moves.cStringIO(data_utils.random_bytes())
self.client.store_image_file(image_id, image_file)
# Update Image
new_image_name = data_utils.rand_name('new-image')
body = self.client.update_image(image_id, [
dict(replace='/name', value=new_image_name)])
# Verifying updating
body = self.client.show_image(image_id)
self.assertEqual(image_id, body['id'])
self.assertEqual(new_image_name, body['name'])
class ListImagesTest(base.BaseV2ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
cls._create_standard_image('bare', 'raw')
cls._create_standard_image('bare', 'raw')
cls._create_standard_image('ami', 'raw')
# Add some more for listing
cls._create_standard_image('ami', 'ami')
cls._create_standard_image('ari', 'ari')
cls._create_standard_image('aki', 'aki')
@classmethod
def _create_standard_image(cls, container_format, disk_format):
"""
Create a new standard image and return the ID of the newly-registered
image. Note that the size of the new image is a random number between
1024 and 4096
"""
size = random.randint(1024, 4096)
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = data_utils.rand_name('image')
body = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
image_id = body['id']
# cls.client.store_image(image_id, data=image_file)
return image_id
def _list_by_param_value_and_assert(self, params):
"""
Perform list action with given params and validates result.
"""
images_list = self.client.list_images(params=params)
# Validating params of fetched images
for image in images_list:
for key in params:
msg = "Failed to list images by %s" % key
self.assertEqual(params[key], image[key], msg)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_index_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.list_images()
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
self.assertIn(image, image_list)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
# Test to get all images with container_format='bare'
params = {"container_format": "bare"}
self._list_by_param_value_and_assert(params)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
# Test to get all images with disk_format = raw
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
# Test to get all images with visibility = private
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
# Test to get all images by size
image_id = self.created_images[1]
# Get image metadata
image = self.client.show_image(image_id)
params = {"size": image['size']}
self._list_by_param_value_and_assert(params)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
# Test to get all images with size between 2000 to 3000
image_id = self.created_images[1]
# Get image metadata
image = self.client.show_image(image_id)
size = image['size']
params = {"size_min": size - 500, "size_max": size + 500}
images_list = self.client.list_images(params=params)
image_size_list = map(lambda x: x['size'], images_list)
for image_size in image_size_list:
self.assertTrue(image_size >= params['size_min'] and
image_size <= params['size_max'],
"Failed to get images by size_min and size_max")
@decorators.skip_because(bug="1452987")
@test.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
# Test to get all active images
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@decorators.skip_because(bug="1452987")
@test.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
# Test to get images by limit
params = {"limit": 2}
images_list = self.client.list_images(params=params)
self.assertEqual(len(images_list), params['limit'],
"Failed to get images by limit")
@decorators.skip_because(bug="1452987")
@test.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
# Test to get image schema
schema = "image"
body = self.client.show_schema(schema)
self.assertEqual("image", body['name'])
@decorators.skip_because(bug="1452987")
@test.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
# Test to get images schema
schema = "images"
body = self.client.show_schema(schema)
self.assertEqual("images", body['name'])
| |
import datetime
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from .chimpy.chimpy import Connection as BaseConnection, ChimpyException
from .utils import wrap, build_dict, Cache, WarningLogger
from .exceptions import (
MCCampaignDoesNotExist,
MCListDoesNotExist,
MCConnectionFailed,
MCTemplateDoesNotExist,
MCFolderDoesNotExist,
)
from .constants import *
from .settings import WEBHOOK_KEY
class SegmentCondition(object):
OPERATORS = {
'eq': lambda a,b: a == b,
'ne': lambda a,b: a != b,
'gt': lambda a,b: a > b,
'lt': lambda a,b: a < b,
'like': lambda a,b: a in b,
'nlike': lambda a,b: a not in b,
'starts': lambda a,b: str(a).startswith(str(b)),
'ends': lambda a,b: str(a).endswith(str(b))
}
def __init__(self, field, op, value):
self.field = field
self.op = op
self.value = value
check_function_name = 'check_%s' % self.field
if not hasattr(self, check_function_name):
check_function_name = 'merge_check'
self.checker = getattr(self, check_function_name)
def check(self, member):
return self.checker(member)
def check_interests(self, member):
interests = self.value.split(',')
if self.op == 'all':
for interest in interests:
if interest not in member.interests:
return False
return True
elif self.op == 'one':
for interest in interests:
if interest in member.interests:
return True
return False
else:
for interest in interests:
if interest in member.interests:
return False
return True
def merge_check(self, member):
return self.OPERATORS[self.op](member.merges[self.field.upper()], self.value)
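# Usage sketch (illustrative): conditions on ordinary merge fields dispatch to
# merge_check() through OPERATORS, while a field with its own 'check_<field>'
# method (such as 'interests') gets that checker instead:
#
#     cond = SegmentCondition('fname', 'eq', 'Alice')
#     cond.check(member)   # -> member.merges['FNAME'] == 'Alice'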
class BaseChimpObject(object):
_attrs = ()
_methods = ()
verbose_attr = 'id'
cache_key = 'id'
def __init__(self, master, info):
self.master = master
for attr in self._attrs:
setattr(self, attr, info[attr])
base = self.__class__.__name__.lower()
self.cache = master.cache.get_child_cache(getattr(self, self.cache_key))
self.con = master.con
for method in self._methods:
setattr(self, method, wrap(base, self.master.con, method, self.id))
def __repr__(self):
return '<%s object: %s>' % (self.__class__.__name__, getattr(self, self.verbose_attr))
def __str__(self):
return unicode(self).encode('utf-8')
class Campaign(BaseChimpObject):
_attrs = ('archive_url', 'create_time', 'emails_sent', 'folder_id',
'from_email', 'from_name', 'id', 'inline_css', 'list_id',
'send_time', 'status', 'subject', 'title', 'to_name', 'type',
'web_id')
_methods = ('delete', 'pause', 'replicate', 'resume', 'schedule',
'send_now', 'send_test', 'unschedule')
verbose_attr = 'subject'
def __init__(self, master, info):
super(Campaign, self).__init__(master, info)
try:
self.list = self.master.get_list_by_id(self.list_id)
except MCListDoesNotExist:
self.list = None
self._content = None
self.frozen_info = info
def __unicode__(self):
return self.subject
@property
def content(self):
return self.get_content()
def get_content(self):
if self._content is None:
self._content = self.con.campaign_content(self.id)
return self._content
def send_now_async(self):
now = datetime.datetime.utcnow()
soon = now + datetime.timedelta(minutes=1)
return self.schedule(soon)
def delete(self):
return self.con.campaign_delete(self.id)
def pause(self):
return self.con.campaign_pause(self.id)
def update(self):
status = []
for key, value in self._get_diff():
status.append(self.con.campaign_update(self.id, key, value))
return all(status)
def _get_diff(self):
diff = []
new_frozen = {}
for key in self._attrs:
current = getattr(self, key)
if self.frozen_info[key] != current:
diff.append((key, current))
new_frozen[key] = current
self.frozen_info = new_frozen
return diff
@property
def is_sent(self):
return self.status == 'sent'
class Member(BaseChimpObject):
_attrs = ('email', 'timestamp')
_extended_attrs = ('id', 'ip_opt', 'ip_signup', 'merges', 'status')
verbose_attr = 'email'
cache_key = 'email'
def __init__(self, master, info):
super(Member, self).__init__(master, info)
def __unicode__(self):
return self.email
def __getattr__(self, attr):
if attr in self._extended_attrs:
return self.info[attr]
raise AttributeError, attr
@property
def interests(self):
return [i.strip() for i in self.merges['INTERESTS'].split(',')]
@property
def info(self):
return self.get_info()
def get_info(self):
return self.cache.get('list_member_info', self.con.list_member_info, self.master.id, self.email)
def update(self):
return self.con.list_update_member(self.master.id, self.email, self.merges)
class LazyMemberDict(dict):
def __init__(self, master):
super(LazyMemberDict, self).__init__()
self._list = master
def __getitem__(self, key):
if key in self:
return super(LazyMemberDict, self).__getitem__(key)
value = self._list.get_member(key)
self[key] = value
return value
class List(BaseChimpObject):
'''
This represents a mailing list. Most of the methods (defined in _methods) are wrappers of the flat
API found in chimpy.chimpy. As such, signatures are the same.
'''
_methods = ('batch_subscribe',
'batch_unsubscribe',
'subscribe', # Sig: (email_address,merge_vars{},email_type='text',double_optin=True)
'unsubscribe')
_attrs = ('id', 'date_created', 'name', 'web_id', 'stats')
verbose_attr = 'name'
def __init__(self, *args, **kwargs):
super(List, self).__init__(*args, **kwargs)
self.members = LazyMemberDict(self)
def segment_test(self, match, conditions):
return self.master.con.campaign_segment_test(self.id, {'match': match, 'conditions': conditions})
def list_interest_groupings(self):
return self.master.con.list_interest_groupings(self.id)
def list_interest_groups(self, grouping_id=None, full=False):
grouping_id = int(grouping_id or self._default_grouping())
groupings = self.list_interest_groupings()
grouping = None
for g in groupings:
if int(g['id']) == grouping_id:
grouping = g
break
if not grouping:
return []
if not full:
return [group['name'] for group in grouping['groups']]
return grouping
def add_interest_group(self, groupname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_add(self.id, groupname, grouping_id)
def remove_interest_group(self, groupname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_del(self.id, groupname, grouping_id)
def update_interest_group(self, oldname, newname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_update(self.id, oldname, newname, grouping_id)
def add_interests_if_not_exist(self, *interests):
self.cache.flush('interest_groups')
interest_groups = self.interest_groups['groups']
names = set(g['name'] for g in interest_groups)
for interest in set(interests):
if interest not in names:
self.add_interest_group(interest)
interest_groups.append(interest)
def _default_grouping(self):
if not hasattr(self, '_default_grouping_id'):
groupings = self.list_interest_groupings()
if len(groupings):
self._default_grouping_id = groupings[0]['id']
else:
self._default_grouping_id = None
return self._default_grouping_id
@property
def webhooks(self):
return self.get_webhooks()
def get_webhooks(self):
return self.cache.get('webhooks', self.master.con.list_webhooks, self.id)
def add_webhook(self, url, actions, sources):
return self.master.con.list_webhook_add(self.id, url, actions, sources)
def remove_webhook(self, url):
return self.master.con.list_webhook_del(self.id, url)
def add_webhook_if_not_exists(self, url, actions, sources):
for webhook in self.webhooks:
if webhook['url'] == url:
return True
return self.add_webhook(url, actions, sources)
def install_webhook(self):
domain = Site.objects.get_current().domain
if not (domain.startswith('http://') or domain.startswith('https://')):
domain = 'http://%s' % domain
if domain.endswith('/'):
domain = domain[:-1]
url = domain + reverse('mailchimp_webhook', kwargs={'key': WEBHOOK_KEY})
actions = {'subscribe': True,
'unsubscribe': True,
'profile': True,
'cleaned': True,
'upemail': True,}
sources = {'user': True,
'admin': True,
'api': False}
return self.add_webhook_if_not_exists(url, actions, sources)
@property
def interest_groups(self):
return self.get_interest_groups()
def get_interest_groups(self):
return self.cache.get('interest_groups', self.list_interest_groups, full=True)
def add_merge(self, key, desc, req=None):
req = req or {}
return self.master.con.list_merge_var_add(self.id, key, desc, req if req else False)
def remove_merge(self, key):
return self.master.con.list_merge_var_del(self.id, key)
def add_merges_if_not_exists(self, *new_merges):
self.cache.flush('merges')
merges = [m['tag'].upper() for m in self.merges]
for merge in set(new_merges):
if merge.upper() not in merges:
self.add_merge(merge, merge, False)
merges.append(merge.upper())
@property
def merges(self):
return self.get_merges()
def get_merges(self):
return self.cache.get('merges', self.master.con.list_merge_vars, self.id)
def __unicode__(self):
return self.name
def get_member(self, email):
try:
data = self.master.con.list_member_info(self.id, email)
except ChimpyException:
return None
        # actually it would make more sense to give the member everything
memberdata = {}
memberdata['timestamp'] = data['timestamp']
memberdata['email'] = data['email']
return Member(self, memberdata)
def filter_members(self, segment_opts):
"""
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': json.loads(self.segment_options_conditions)}
"""
mode = all if segment_opts['match'] == 'all' else any
conditions = [SegmentCondition(**dict((str(k), v) for k,v in c.items())) for c in segment_opts['conditions']]
for email, member in self.members.items():
if mode([condition.check(member) for condition in conditions]):
yield member
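# Usage sketch (illustrative): yield only members whose merge var FNAME equals
# 'Alice' or whose LNAME starts with 'B':
#
#     opts = {'match': 'any',
#             'conditions': [{'field': 'fname', 'op': 'eq', 'value': 'Alice'},
#                            {'field': 'lname', 'op': 'starts', 'value': 'B'}]}
#     for member in mailing_list.filter_members(opts):
#         print member.email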
class Template(BaseChimpObject):
_attrs = ('id', 'layout', 'name', 'preview_image', 'sections', 'default_content', 'source', 'preview')
verbose_attr = 'name'
def build(self, **kwargs):
class BuiltTemplate(object):
def __init__(self, template, data):
self.template = template
self.data = data
self.id = self.template.id
def __iter__(self):
return iter(self.data.items())
data = {}
for key, value in kwargs.items():
if key in self.sections:
data['html_%s' % key] = value
return BuiltTemplate(self, data)
class Folder(BaseChimpObject):
_attrs = ('id', 'name', 'type', 'date_created')
def __init__(self, master, info):
info['id'] = info['folder_id']
del info['folder_id']
super(Folder, self).__init__(master, info)
class Connection(object):
REGULAR = REGULAR_CAMPAIGN
PLAINTEXT = PLAINTEXT_CAMPAIGN
ABSPLIT = ABSPLIT_CAMPAIGN
RSS = RSS_CAMPAIGN
TRANS = TRANS_CAMPAIGN
AUTO = AUTO_CAMPAIGN
DOES_NOT_EXIST = {
'templates': MCTemplateDoesNotExist,
'campaigns': MCCampaignDoesNotExist,
'lists': MCListDoesNotExist,
'folders': MCFolderDoesNotExist,
}
def __init__(self, api_key=None, secure=False, check=True):
self._secure = secure
self._check = check
self._api_key = None
self.con = None
self.is_connected = False
if api_key is not None:
self.connect(api_key)
def connect(self, api_key):
self._api_key = api_key
self.cache = Cache(api_key)
self.warnings = WarningLogger()
self.con = self.warnings.proxy(BaseConnection(self._api_key, self._secure))
if self._check:
status = self.ping()
if status != STATUS_OK:
raise MCConnectionFailed(status)
self.is_connected = True
def ping(self):
return self.con.ping()
@property
def campaigns(self):
return self.get_campaigns()
def get_campaigns(self):
return self.cache.get('campaigns', self._get_categories)
@property
def lists(self):
return self.get_lists()
def get_lists(self):
return self.cache.get('lists', self._get_lists)
@property
def templates(self):
return self.get_templates()
def get_templates(self):
return self.cache.get('templates', self._get_templates)
def _get_categories(self):
return build_dict(self, Campaign, self.con.campaigns()['data'])
def _get_lists(self):
return build_dict(self, List, self.con.lists())
def _get_templates(self):
templates = self.con.campaign_templates()
for t in templates:
t.update(self.con.template_info(template_id=t['id']))
return build_dict(self, Template, templates)
@property
def folders(self):
return self.get_folders()
def get_folders(self):
return self.cache.get('folders', self._get_folders)
def _get_folders(self):
return build_dict(self, Folder, self.con.folders(), key='folder_id')
def get_list_by_id(self, id):
return self._get_by_id('lists', id)
def get_campaign_by_id(self, id):
return self._get_by_id('campaigns', id)
def get_template_by_id(self, id):
return self._get_by_id('templates', id)
def get_template_by_name(self, name):
return self._get_by_key('templates', 'name', name)
def get_folder_by_id(self, id):
return self._get_by_id('folders', id)
def get_folder_by_name(self, name):
return self._get_by_key('folders', 'name', name)
def _get_by_id(self, thing, id):
try:
return getattr(self, thing)[id]
except KeyError:
self.cache.flush(thing)
try:
return getattr(self, thing)[id]
except KeyError:
raise self.DOES_NOT_EXIST[thing](id)
def _get_by_key(self, thing, name, key):
for id, obj in getattr(self, thing).items():
if getattr(obj, name) == key:
return obj
raise self.DOES_NOT_EXIST[thing]('%s=%s' % (name, key))
def create_campaign(self, campaign_type, campaign_list, template, subject,
from_email, from_name, to_name, folder_id=None,
tracking=None, title='',
authenticate=False, analytics=None, auto_footer=False,
generate_text=False, auto_tweet=False, segment_opts=None,
type_opts=None):
"""
Creates a new campaign and returns it for the arguments given.
"""
tracking = tracking or {'opens':True, 'html_clicks': True}
type_opts = type_opts or {}
segment_opts = segment_opts or {}
analytics = analytics or {}
options = {}
if title:
options['title'] = title
else:
options['title'] = subject
options['list_id'] = campaign_list.id
options['template_id'] = template.id
options['subject'] = subject
options['from_email'] = from_email
options['from_name'] = from_name
options['to_name'] = to_name
if folder_id:
options['folder_id'] = folder_id
options['tracking'] = tracking
options['authenticate'] = bool(authenticate)
if analytics:
options['analytics'] = analytics
options['auto_footer'] = bool(auto_footer)
options['generate_text'] = bool(generate_text)
options['auto_tweet'] = bool(auto_tweet)
content = dict(template)
kwargs = {}
if segment_opts.get('conditions', None):
kwargs['segment_opts'] = segment_opts
if type_opts:
kwargs['type_opts'] = type_opts
cid = self.con.campaign_create(campaign_type, options, content,
**kwargs)
camp = self.get_campaign_by_id(cid)
camp.template_object = template
return camp
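    # Usage sketch (illustrative, names are hypothetical): build a campaign
    # from previously fetched objects and send it:
    #
    #     con = Connection(api_key='...')
    #     camp = con.create_campaign(Connection.REGULAR,
    #                                con.lists.values()[0],
    #                                con.get_template_by_name('Default'),
    #                                'Subject', 'from@example.com', 'Sender',
    #                                'Recipient')
    #     camp.send_now_async()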
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_name, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=None, type_opts=None, obj=None):
from mailchimp.models import Queue
segment_options_conditions = segment_options_conditions or []
type_opts = type_opts or {}
kwargs = locals().copy()
del kwargs['Queue']
del kwargs['self']
return Queue.objects.queue(**kwargs)
| |
#!/usr/bin/env python
#
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Convert output of ovs-ofctl/dpctl dump-flows into pipeline JSON which
would support those flows on an HPE Aruba switch. If the set of flows
cannot fit into a possible pipeline within the HPE Aruba switch, one
or more errors will be generated to indicate the reason. """
import sys
import re
import os
import json
# =========================== Match Field Data ================================
# This tool is coded according to the "Flow Syntax" section of the following
# OVS documentation:
# http://openvswitch.org/support/dist-docs/ovs-ofctl.8.txt
# Match fields are keyed to the group they correspond with.
# Those fields which are their own group are given high numbers.
OVS_MATCH_FIELDS = {
# Basic OF match fields
'in_port':4, 'dl_vlan':4, 'dl_vlan_pcp':4,
'dl_src':1, 'dl_dst':1, 'dl_type':1,
'nw_src':2, 'nw_dst':3,
'nw_proto':4, 'ip_proto':4, 'ip_dscp':4,
'tcp_src':4, 'tcp_dst':4, 'udp_src':4, 'udp_dst':4,
'tcp_flags':899, 'icmp_type':900, 'icmp_code':901,
# Nicira extensions
'arp_op':902, 'arp_spa':903, 'arp_tpa':904, 'arp_sha':905, 'arp_tha':906,
'ipv6_src':2, 'ipv6_dst':3, 'ipv6_label':907, 'nd_target':908}
# Match fields which are not supported by HPE Aruba switches,
# regardless of the pipeline configuration.
OVS_UNSUPPORTED_MATCH = [
# Basic OF match fields
'nw_tos', 'nw_ttl', 'nw_ecn', 'ip_ecn',
'sctp_src', 'sctp_dst', 'metadata',
# Nicira extensions
'vlan_tci', 'ip_frag', 'nd_sll', 'nd_tll',
'mpls_bos', 'mpls_label', 'mpls_tc',
'tun_id', 'tunnel_id', 'tun_flags', 'tun_src', 'tun_dst',
'tun_ipv6_src', 'tun_ipv6_dst', 'tun_gbp_id', 'tun_gbp_flags',
'tun_metadataidx', 'regidx', 'xregidx', 'pkt_mark', 'actset_output',
'conj_id', 'ct_state', 'ct_zone', 'ct_mark', 'ct_label']
OVS_KNOWN_MATCH = set(OVS_MATCH_FIELDS.keys()).union(set(OVS_UNSUPPORTED_MATCH))
# OVS match abbreviations, keyed to what they abbreviate
OVS_MATCH_ABBREV = {
'ip': 'dl_type=0x0800',
'ipv6': 'dl_type=0x86dd',
'icmp': 'dl_type=0x0800,nw_proto=1',
'icmp6': 'dl_type=0x86dd,nw_proto=58',
'tcp': 'dl_type=0x0800,nw_proto=6',
'tcp6': 'dl_type=0x86dd,nw_proto=6',
'udp': 'dl_type=0x0800,nw_proto=17',
'udp6': 'dl_type=0x86dd,nw_proto=17',
'sctp': 'dl_type=0x0800,nw_proto=132',
'sctp6': 'dl_type=0x86dd,nw_proto=132',
'arp': 'dl_type=0x0806',
'rarp': 'dl_type=0x8035',
'mpls': 'dl_type=0x8847',
'mplsm': 'dl_type=0x8848',
# Assume deprecated tp_src/dst are UDP (to avoid complicated parsing)
'tp_src': 'udp_src',
'tp_dst': 'udp_dst',
# Special case use of vlan_tci to match packets without a VLAN tag,
# to dl_vlan=0. Per OF1.3.3 spec p.117:
    # - Testing for an exact match with 0x0 matches only packets without
    #   an 802.1Q header.
'vlan_tci=0x0000/0x1fff': 'dl_vlan=0'}
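# Illustrative example: with these abbreviations, a flow match such as
#   "priority=100,tcp,tp_dst=80"
# is rewritten field-by-field in the input-processing loop below into
#   "priority=100,dl_type=0x0800,nw_proto=6,tcp_dst=80"
# (tp_dst first becomes tcp_dst via the deprecated tp_src/dst handling)
# before the match keys are extracted.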
# Conversion from OVS field name to RYU field name. Keys should be identical
# to the keys in OVS_MATCH_FIELDS.
JSON_FIELDS = {
# Basic OF match fields
'in_port':'in_port', 'dl_vlan':'vlan_vid', 'dl_vlan_pcp':'vlan_pcp',
'dl_src':'eth_src', 'dl_dst':'eth_dst', 'dl_type':'eth_type',
'nw_src':'ipv4_src', 'nw_dst':'ipv4_dst',
'nw_proto':'ip_proto', 'ip_proto':'ip_proto', 'ip_dscp':'ip_dscp',
'tcp_src':'tcp_src', 'tcp_dst':'tcp_dst', 'udp_src':'udp_src', 'udp_dst':'udp_dst',
'tcp_flags':'tcp_flags', 'icmp_type':'icmpv4_type', 'icmp_code':'icmpv4_code',
# Nicira extensions
'arp_op':'arp_op', 'arp_spa':'arp_spa', 'arp_tpa':'arp_tpa', 'arp_sha':'arp_sha', 'arp_tha':'arp_tha',
'ipv6_src':'ipv6_src', 'ipv6_dst':'ipv6_dst', 'ipv6_label':'ipv6_flabel', 'nd_target':'ipv6_nd_target'}
if sorted(JSON_FIELDS.keys()) != sorted(OVS_MATCH_FIELDS.keys()):
print('ERROR: Key mismatch between JSON_FIELDS and OVS_MATCH_FIELDS:\n')
print(set(JSON_FIELDS.keys()).symmetric_difference(set(OVS_MATCH_FIELDS.keys())))
exit(2)
# Fields which HPE Aruba supports as setfield in any pipeline (keyed with RYU field names, not OVS)
ARUBA_SETFIELDS = [
'eth_dst', 'eth_src', 'vlan_vid', 'vlan_pcp', 'ip_dscp',
'ipv4_src', 'ipv4_dst', 'tcp_src', 'tcp_dst', 'udp_src', 'udp_dst']
DEBUG = ('DEBUG' in os.environ)
GENERATE_JSON = True
# =========================== Utility Functions ================================
def debug(arg):
if DEBUG:
print(arg)
def error(arg):
print(arg)
global GENERATE_JSON
GENERATE_JSON = False
# =========================== Input Processing ================================
# Check for the input data file
if len(sys.argv) < 2:
print("Please specify a filename which contains the output of 'ovs-ofctl dump-flows'")
exit(1)
# Allocate variables which will hold data extracted from OVS output
TABLE_MATCH = {} # Table ID key, value is a list of exact match keys
TABLE_WILDCARDS = {} # Table ID key, value is a list of wildcardable match keys
TABLE_MASKS = {} # Table ID key, value is a list of maskable match keys
TABLE_SIZE = {} # Table ID key, value is number of flows in table
# Iterate over all lines of the file, gathering data
debug("=== Per-flow pipeline analysis ===")
for line in open(sys.argv[1]):
# Skip empty lines
line = line.rstrip().lstrip()
if not line:
continue
# debug("FLOW: "+line)
line_data = re.split(r'\s+', line)
errors = 0
table = None
match = None
# Identify the table
for data in line_data:
if "table=" not in data:
continue
# Store the numeric table ID
table = data.split('=')[1]
table = re.sub(r'\D', '', table)
# Identify match criteria
for data in line_data:
if "priority=" not in data:
continue
match = data
# Verify that we found both table ID and match data
if table is None:
debug("Failed to identify table ID in line:\n "+line)
continue
if match is None:
debug("Failed to identify match data in line:\n "+line)
continue
# Handle deprecated tp_src/dst interpretation, which is context-dependent
if re.match('.*[^c]tp_(src|dst).*', match) != None:
if "udp" in match or "proto=17" in match:
match = re.sub(r'([^c]*)tp_(dst|src)', r'\1udp_\2', match)
else:
match = re.sub(r'([^c]*)tp_(dst|src)', r'\1tcp_\2', match)
# Replace abbreviations
for abbrev in OVS_MATCH_ABBREV:
fields = match.split(',')
fields = [re.sub("^"+abbrev+"$", OVS_MATCH_ABBREV[abbrev], f) for f in fields]
match = ','.join(fields)
# Get the list of matched and masked fields
match_keys = set([])
masks = set([])
for m in match.split(','):
# Skip empty match field
if m == "":
continue
# Parse the match key
mp = m.split('=')
key = mp[0]
match_keys.add(key)
# Parse the match value
if len(mp) >= 2:
value = m.split('=')[1]
if "/" in value:
masks.add(key)
else:
error("Failed to parse special key-value abbreviation: "+str(m))
match_keys.remove('priority') # Ignore priority, not a match field
# Verify our hard-coded OVS match fields are complete
if not OVS_KNOWN_MATCH.issuperset(match_keys):
error("Attempted to match unknown field(s) "+str(list(match_keys.difference(OVS_KNOWN_MATCH)))+" in flow:\n "+line)
errors += 1
# Check unsupported matches
for unsupp in OVS_UNSUPPORTED_MATCH:
if unsupp in match_keys:
error("Match field '"+unsupp+"' is not supported, but was used in:\n "+line)
errors += 1
# If we've hit errors, skip this flow because it will complicate the
# global validation done later.
if errors > 0:
continue
# Increment table size
if table in TABLE_SIZE:
TABLE_SIZE[table] += 1
else:
TABLE_SIZE[table] = 1
TABLE_MATCH[table] = set(match_keys)
TABLE_WILDCARDS[table] = set([])
TABLE_MASKS[table] = set([])
# Record match fields
tm = TABLE_MATCH[table]
exact = tm.intersection(match_keys)
wildcard = tm.symmetric_difference(match_keys)
TABLE_MATCH[table] = set(exact)
TABLE_WILDCARDS[table] = set(wildcard).union(TABLE_WILDCARDS[table])
TABLE_MASKS[table] = TABLE_MASKS[table].union(masks)
# Globals used in validation
debug("\n=== Global and per-table pipeline analysis ===")
MAX_SUPPORTED_TABLES = 12
MIN_TCAM_SIZE = 2
MIN_HASH_SIZE = 16
MAX_TCAM_TILES = 8 * 1024
MAX_HASH_TILES = 64 * 1024
# Check if too many tables were used
if len(TABLE_SIZE.keys()) > MAX_SUPPORTED_TABLES:
error("HPE Aruba switches support a maximum of "+str(MAX_SUPPORTED_TABLES)+" tables, but "+str(len(TABLE_SIZE.keys()))+" were used:\n "+str(TABLE_SIZE.keys()))
# Check if table 0 (required) was used
if '0' not in TABLE_SIZE:
error("Table 0 was not used, but is required by the OpenFlow specification")
# Get a numerically-sorted list of table IDs
tables = TABLE_SIZE.keys()
tables.sort(key=int)
# Display and analyze gathered data to check for unsupported conditions
tcam_tiles = 0
hash_tiles = 0
for table in tables:
# Get all fields being matched
size = TABLE_SIZE[table]
exact = TABLE_MATCH[table]
wildcard = TABLE_WILDCARDS[table]
mask = TABLE_MASKS[table]
all_matches = exact.union(wildcard).union(mask)
# Special-case: If a table had flows but none of the flows specified
# match criteria, we'll specify at least one wildcard (ETH_TYPE) so that
# the table is considered a TCAM. A hash must have all match fields specified,
# but since no flows specified match criteria we know a TCAM is expected.
# Wildcard ETH_TYPE since it is a dependency of many other fields.
if len(all_matches) == 0:
TABLE_WILDCARDS[table].add('dl_type')
wildcard = TABLE_WILDCARDS[table]
all_matches = exact.union(wildcard).union(mask)
# Display table data
debug("TABLE #"+table+" has "+str(size)+" entries")
if len(exact) > 0:
debug(" exact-match: "+str(sorted(list(exact))))
if len(wildcard) > 0:
debug(" wildcards: "+str(sorted(list(wildcard))))
if len(mask) > 0:
debug(" maskable: "+str(sorted(list(mask))))
# Determine number of match groups
groups = [OVS_MATCH_FIELDS[m] for m in all_matches]
groups = set(groups)
gc = len(groups)
# Automatically upconvert Hash->TCAM if attempting to match 4 groups in hash
if len(wildcard) == 0 and len(mask) == 0 and gc == 4:
wildcard = exact.copy()
exact.clear()
debug(" ** Table #"+table+" has been converted from Hash to TCAM, due to matching 4 groups")
# Calculate resource usage, based on table type
if len(wildcard) > 0 or len(mask) > 0:
table_type = "TCAM"
if gc > 4:
error("Table #"+table+" attempts to match fields from "+str(gc)+" groups. Maximum of 4 match groups supported in "+table_type)
if size < MIN_TCAM_SIZE:
debug(" ** Table #"+table+" has been auto-resized to minimum size of "+str(MIN_TCAM_SIZE))
size = MIN_TCAM_SIZE
mult = gc if gc != 3 else 4 # TCAM: 3 groups use same as 4 groups
tiles = size * mult
tcam_tiles += tiles
else:
table_type = "Hash"
if gc > 3:
error("Table #"+table+" attempts to match fields from "+str(gc)+" groups. Maximum of 3 match groups supported in "+table_type)
if size < MIN_HASH_SIZE:
debug(" ** Table #"+table+" has been auto-resized to minimum size of "+str(MIN_HASH_SIZE))
size = MIN_HASH_SIZE
mult = gc if gc != 3 else 2 # Hash: 3 groups use same as 2 groups
tiles = size * mult
hash_tiles += tiles
# Store any adjusted values
TABLE_SIZE[table] = size
TABLE_MATCH[table] = exact
TABLE_WILDCARDS[table] = wildcard
TABLE_MASKS[table] = mask
# Calculate resource allocation for this table
debug(" allocation: "+str(len(groups))+" groups "+str(list(groups))+" using "+str(tiles)+" "+table_type+" resources")
# Verify that tables will fit into available hardware resources
debug("Total resources: TCAM={} ({:.2f}%) Hash={} ({:.2f}%)".format(tcam_tiles, 100*float(tcam_tiles)/MAX_TCAM_TILES,
hash_tiles, 100*float(hash_tiles)/MAX_HASH_TILES))
if tcam_tiles > MAX_TCAM_TILES:
error("Pipeline uses "+str(tcam_tiles)+" TCAM resources. Maximum of "+str(MAX_TCAM_TILES)+" available.")
if hash_tiles > MAX_HASH_TILES:
error("Pipeline uses "+str(hash_tiles)+" Hash resources. Maximum of "+str(MAX_HASH_TILES)+" available.")
# Exit now if not generating JSON
if not GENERATE_JSON:
exit(1)
# Generate JSON for RYU pipeline format
debug("\n=== Auto-generated pipeline JSON ===")
prev_tables = set([])
JSON = '['
for table in tables:
# Get all fields being matched
size = TABLE_SIZE[table]
exact = TABLE_MATCH[table]
wildcard = TABLE_WILDCARDS[table]
mask = TABLE_MASKS[table]
all_matches = exact.union(wildcard).union(mask)
# Table header information
JSON += '{"max_entries": '+str(size)+','
JSON += '"name": "Table '+table+'",'
JSON += '"table_id": '+table+','
JSON += '"metadata_match": 0,'
JSON += '"metadata_write": 0,'
JSON += '"config": 3,'
JSON += '"properties": ['
# Matches
JSON += '{"type":8, "name":"OFPTFPT_MATCH", "oxm_ids": ['
for m in all_matches:
hasmask = ""
if m in mask:
hasmask = ' , "hasmask": true'
JSON += '{ "type": "'+JSON_FIELDS[m]+'", "name": "'+JSON_FIELDS[m]+'"'+hasmask+' },'
    # Trim trailing comma from last match
if len(all_matches) > 0:
JSON = JSON.rstrip(',')
JSON += ']},'
# Wildcards
JSON += '{"type":10, "name": "OFPTFPT_WILDCARDS", "oxm_ids": ['
for w in wildcard:
JSON += '{ "type": "'+JSON_FIELDS[w]+'", "name":"'+JSON_FIELDS[w]+'" },'
    # Trim trailing comma from last wildcard
if len(wildcard) > 0:
JSON = JSON.rstrip(',')
JSON += ']},'
# Now that we've generated the match+wildcard criteria, we can assume that
# all other tables will support all actions, so generate the same action
# criteria, regardless of what the flows actually tried to use.
genericSetfields = ['{"type":"'+f+'","name":"'+f+'"}' for f in ARUBA_SETFIELDS]
genericActions = [
'{"type":0,"name":"OFPAT_OUTPUT"}',
'{"type":17,"name":"OFPAT_PUSH_VLAN"}',
'{"type":18,"name":"OFPAT_POP_VLAN"}',
'{"type":22,"name":"OFPAT_GROUP"}',
'{"type":23,"name":"OFPAT_SET_NW_TTL"}',
'{"type":25,"name":"OFPAT_SET_FIELD"}']
genericInstructions = [
'{"type":1,"name":"OFPIT_GOTO_TABLE"}',
'{"type":3,"name":"OFPIT_WRITE_ACTIONS"}',
'{"type":4,"name":"OFPIT_APPLY_ACTIONS"}',
'{"type":5,"name":"OFPIT_CLEAR_ACTIONS"}',
'{"type":6,"name":"OFPIT_METER"}']
genericProps = ','.join(
['{ "type":0, "name":"OFPTFPT_INSTRUCTIONS", "instruction_ids": [ '+','.join(genericInstructions)+' ] }',
'{ "type":1, "name":"OFPTFPT_INSTRUCTIONS_MISS", "instruction_ids": [ '+','.join(genericInstructions)+' ] }',
'{ "type":4, "name":"OFPTFPT_WRITE_ACTIONS", "action_ids": [ '+','.join(genericActions)+' ] }',
'{ "type":5, "name":"OFPTFPT_WRITE_ACTIONS_MISS", "action_ids": [ '+','.join(genericActions)+' ] }',
'{ "type":6, "name":"OFPTFPT_APPLY_ACTIONS", "action_ids": [ '+','.join(genericActions)+' ] }',
'{ "type":7, "name":"OFPTFPT_APPLY_ACTIONS_MISS", "action_ids": [ '+','.join(genericActions)+' ] }',
'{ "type":12, "name":"OFPTFPT_WRITE_SETFIELD", "oxm_ids": [ '+','.join(genericSetfields)+' ] }',
'{ "type":13, "name":"OFPTFPT_WRITE_SETFIELD_MISS", "oxm_ids": [ '+','.join(genericSetfields)+' ] }',
'{ "type":14, "name":"OFPTFPT_APPLY_SETFIELD", "oxm_ids": [ '+','.join(genericSetfields)+' ] }',
'{ "type":15, "name":"OFPTFPT_APPLY_SETFIELD_MISS", "oxm_ids": [ '+','.join(genericSetfields)+' ] }'])
# Remove GOTO from last table
prev_tables.add(table)
remaining_tables = set(TABLE_SIZE.keys()).difference(prev_tables)
lastTable = (len(remaining_tables) == 0)
if lastTable:
        genericProps = ''.join(genericProps.rsplit('{"type":1,"name":"OFPIT_GOTO_TABLE"},', 1))  # joined without a space after the comma
JSON += genericProps+','
JSON += '{ "type":2, "name": "OFPTFPT_NEXT_TABLES", "table_ids": [ '+','.join(remaining_tables)+' ] },'
JSON += '{ "type":3, "name": "OFPTFPT_NEXT_TABLES_MISS", "table_ids": [ '+','.join(remaining_tables)+' ] }'
JSON += ']}'
if not lastTable:
JSON += ','
# Wrap things up and print ...
JSON += ']'
# Pretty-print the condensed JSON string, for easier diffs
jsonobj = json.loads(JSON)
json.dump(jsonobj, sys.stdout, sort_keys=True, indent=4, separators=(',', ': '))
| |
import random
import time
from twisted.test import proto_helpers
from twisted.trial import unittest
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.message import Message
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
def setUp(self):
self.proto = CoAPServer("127.0.0.1", 5683)
self.tr = proto_helpers.FakeDatagramTransport()
self.proto.makeConnection(self.tr)
self.current_mid = random.randint(1, 1000)
def _test(self, message, expected):
serializer = Serializer()
datagram = serializer.serialize(message)
self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
datagram, source = self.tr.written[-1]
host, port = source
message = serializer.deserialize(datagram, host, port)
self.assertEqual(message.type, expected.type)
self.assertEqual(message.mid, expected.mid)
self.assertEqual(message.code, expected.code)
self.assertEqual(message.source, source)
self.assertEqual(message.token, expected.token)
self.assertEqual(message.payload, expected.payload)
self.assertEqual(message.options, expected.options)
self.tr.written = []
def _test_modular(self, lst):
serializer = Serializer()
for t in lst:
message, expected = t
send_ack = False
if message is not None:
datagram = serializer.serialize(message)
self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
else:
send_ack = True
datagram, source = self.tr.written.pop(0)
host, port = source
message = serializer.deserialize(datagram, host, port)
self.assertEqual(message.type, expected.type)
if not send_ack:
self.assertEqual(message.mid, expected.mid)
self.assertEqual(message.code, expected.code)
self.assertEqual(message.source, source)
self.assertEqual(message.token, expected.token)
self.assertEqual(message.payload, expected.payload)
self.assertEqual(message.options, expected.options)
if send_ack:
message = Message.new_ack(message)
datagram = serializer.serialize(message)
self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
self.tr.written = []
def _test_separate(self, message, notification):
serializer = Serializer()
datagram = serializer.serialize(message)
self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
datagram, source = self.tr.written[0]
host, port = source
message = serializer.deserialize(datagram, host, port)
self.assertEqual(message.type, defines.inv_types["ACK"])
self.assertEqual(message.code, None)
self.assertEqual(message.mid, self.current_mid + 4)
self.assertEqual(message.source, source)
datagram, source = self.tr.written[1]
host, port = source
message = serializer.deserialize(datagram, host, port)
self.assertEqual(message.type, notification.type)
self.assertEqual(message.code, notification.code)
self.assertEqual(message.source, source)
self.assertEqual(message.token, notification.token)
self.assertEqual(message.payload, notification.payload)
self.assertEqual(message.options, notification.options)
self.tr.written = []
message = Message.new_ack(message)
datagram = serializer.serialize(message)
self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
self.tr.written = []
def tearDown(self):
self.proto.stopProtocol()
del self.proto
del self.tr
def test_get_storage(self):
args = ("/storage",)
kwargs = {}
path = args[0]
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid
expected = Response()
expected.type = defines.inv_types["ACK"]
expected._mid = self.current_mid
expected.code = defines.responses["CONTENT"]
expected.token = None
expected.payload = "Storage Resource for PUT, POST and DELETE"
self._test(req, expected)
def test_get_not_found(self):
args = ("/not_found",)
kwargs = {}
path = args[0]
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid + 1
expected = Response()
expected.type = defines.inv_types["NON"]
expected._mid = self.current_mid + 1
expected.code = defines.responses["NOT_FOUND"]
expected.token = None
expected.payload = None
self._test(req, expected)
def test_post_and_get_storage(self):
args = ("/storage/data1",)
kwargs = {}
path = args[0]
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['POST']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid + 2
req.payload = "Created"
expected = Response()
expected.type = defines.inv_types["ACK"]
expected._mid = self.current_mid + 2
expected.code = defines.responses["CREATED"]
expected.token = None
expected.payload = None
option = Option()
option.number = defines.inv_options["Location-Path"]
option.value = "storage/data1"
expected.add_option(option)
self._test(req, expected)
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid + 3
expected = Response()
expected.type = defines.inv_types["ACK"]
expected._mid = self.current_mid + 3
expected.code = defines.responses["CONTENT"]
expected.token = None
expected.payload = "Created"
def test_long(self):
args = ("/long",)
kwargs = {}
path = args[0]
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid
expected = Response()
expected.type = defines.inv_types["ACK"]
expected._mid = self.current_mid
expected.code = None
expected.token = None
expected.payload = None
expected2 = Response()
expected2.type = defines.inv_types["CON"]
expected2.code = defines.responses["CONTENT"]
expected2.token = None
expected2.payload = "Long Time"
self._test_modular([(req, expected), (None, expected2)])
def test_big(self):
args = ("/big",)
path = args[0]
req = Request()
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid
expected = Response()
expected.type = defines.inv_types["ACK"]
expected._mid = self.current_mid
expected.code = defines.responses["CONTENT"]
expected.token = None
expected.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare." \
" Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. Etiam " \
"at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus nec " \
"leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. Aenean " \
"ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit urna. " \
"Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa. " \
"Praesent tristique turpis dui, at ultri"
option = Option()
option.number = defines.inv_options["Block2"]
option.value = 14
expected.add_option(option)
req2 = Request()
req2.code = defines.inv_codes['GET']
req2.uri_path = path
req2.type = defines.inv_types["CON"]
req2._mid = self.current_mid + 1
option = Option()
option.number = defines.inv_options["Block2"]
option.value = 22
req2.add_option(option)
expected2 = Response()
expected2.type = defines.inv_types["ACK"]
expected2.code = defines.responses["CONTENT"]
expected2._mid = self.current_mid + 1
expected2.token = None
expected2.payload = "cies lorem fermentum at. Vivamus sit amet ornare neque, a imperdiet nisl. Quisque a " \
"iaculis libero, id tempus lacus. Aenean convallis est non justo consectetur, a hendrerit " \
"enim consequat. In accumsan ante a egestas luctus. Etiam quis neque nec eros vestibulum " \
"faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci enim, ultrices " \
"a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum.Vivamus ut " \
"odio ac odio malesuada accumsan. Aenean vehicula diam at tempus ornare. Phasellus dictum " \
"mauris a mi consequat, vitae mattis nulla fringilla. Ut laoreet tellus in nisl efficitur, " \
"a luctus justo tempus. Fusce finibus libero eget velit finibus iaculis. Morbi rhoncus " \
"purus vel vestibulum ullamcorper. Sed ac metus in urna fermentum feugiat. Nulla nunc " \
"diam, sodales aliquam mi id, varius porta nisl. Praesent vel nibh ac turpis rutrum " \
"laoreet at non odio. Phasellus ut posuere mi. Suspendisse malesuada velit nec mauris " \
"convallis porta. Vivamus sed ultrices sapien, at cras amet."
option = Option()
option.number = defines.inv_options["Block2"]
option.value = 22
expected2.add_option(option)
self._test_modular([(req, expected), (req2, expected2)])
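# Editorial note: a Block2 option value packs the NUM/M/SZX bit fields of
# RFC 7959: value 14 = 0b0001110 decodes to block NUM=0, M=1 (more blocks
# follow), SZX=6 (2**(6+4) = 1024-byte blocks); value 22 = 0b0010110 decodes
# to block NUM=1, M=0 (last block), same 1024-byte block size.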
def test_get_separate(self):
args = ("/separate",)
kwargs = {}
path = args[0]
req = Request()
for key in kwargs:
o = Option()
o.number = defines.inv_options[key]
o.value = kwargs[key]
req.add_option(o)
req.code = defines.inv_codes['GET']
req.uri_path = path
req.type = defines.inv_types["CON"]
req._mid = self.current_mid + 4
expected = Response()
expected.type = defines.inv_types["CON"]
expected.code = defines.responses["CONTENT"]
expected.token = None
expected.payload = "Separate"
self._test_separate(req, expected)
# def _test_notification(self, lst):
# serializer = Serializer()
# for t in lst:
# message, expected = t
# send_ack = False
# if message is not None:
# datagram = serializer.serialize(message)
# if message.source is not None:
# host, port = message.source
# else:
# host, port = ("127.0.0.1", 5600)
#
# self.proto.datagramReceived(datagram, (host, port))
# else:
# send_ack = True
# while True:
# try:
# datagram, source = self.tr.written.pop(0)
# break
# except IndexError:
# continue
# host, port = source
# message = serializer.deserialize(datagram, host, port)
# self.assertEqual(message.type, expected.type)
# if not send_ack:
# self.assertEqual(message.mid, expected.mid)
# self.assertEqual(message.code, expected.code)
# self.assertEqual(message.source, source)
# self.assertEqual(message.token, expected.token)
# self.assertEqual(message.payload, expected.payload)
# self.assertEqual(message.options, expected.options)
# if send_ack:
# message = Message.new_ack(message)
# datagram = serializer.serialize(message)
# self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
#
# self.tr.written = []
#
# message = Message.new_ack(message)
# datagram = serializer.serialize(message)
# self.proto.datagramReceived(datagram, ("127.0.0.1", 5600))
#
# self.tr.written = []
#
# def test_observing(self):
# args = ("/basic",)
# path = args[0]
#
# req = Request()
# req.source = ("127.0.0.1", 5600)
# req.code = defines.inv_codes['GET']
# req.uri_path = path
# req.type = defines.inv_types["CON"]
# req.mid = self.current_mid + 5
# o = Option()
# o.number = defines.inv_options["Observe"]
# o.value = 0
# req.add_option(o)
#
# expected = Response()
# expected.type = defines.inv_types["ACK"]
# expected.mid = self.current_mid + 5
# expected.code = defines.responses["CONTENT"]
# expected.token = None
# expected.payload = "Basic Resource"
# option = Option()
# option.number = defines.inv_options["Observe"]
# option.value = 1
# expected.add_option(option)
#
# req_put = Request()
# req_put.source = ("127.0.0.1", 5601)
# req_put.code = defines.inv_codes['PUT']
# req_put.uri_path = path
# req_put.type = defines.inv_types["CON"]
# req_put.mid = self.current_mid + 6
# req_put.payload = "Edited"
#
# expected_put = Response()
# expected_put.type = defines.inv_types["ACK"]
# expected_put.mid = self.current_mid + 6
# expected_put.code = defines.responses["CHANGED"]
# expected_put.token = None
# expected_put.payload = None
#
# notification = Response()
# notification.type = defines.inv_types["CON"]
# notification.code = defines.responses["CONTENT"]
# notification.token = None
# notification.payload = "Edited"
# option = Option()
# option.number = defines.inv_options["Observe"]
# option.value = 2
# notification.add_option(option)
#
# self._test_notification([(req, expected), (req_put, expected_put), (None, notification)])
#
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Eval preprocessing functions."""
import tensorflow as tf
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.05, 1.0),
max_attempts=100):
"""Generates cropped_image using one of the bboxes randomly distorted."""
shape = tf.image.extract_jpeg_shape(image_bytes)
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least x of a and b Tensors are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
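# Editorial note: `_decode_and_random_crop` below uses this helper to detect
# the fallback case of `tf.image.sample_distorted_bounding_box`: when no valid
# random crop is found, the op returns the whole image, so the cropped shape
# equals the original shape in all three components and a deterministic
# center crop is used instead.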
def _decode_and_center_crop(image_bytes, image_size):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height, offset_width, padded_center_crop_size,
padded_center_crop_size
])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize([image], [image_size, image_size],
method='bicubic')[0]
return image
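# Worked example (illustrative): for image_size=224 and CROP_PADDING=32, the
# crop window covers 224 / (224 + 32) = 0.875 of the shorter image side, the
# conventional ImageNet eval center-crop ratio.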
def _decode_and_random_crop(image_bytes, image_size):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(bad, lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: tf.image.resize([image], [image_size, image_size], # pylint: disable=g-long-lambda
method='bicubic')[0]) # pylint: disable=g-long-lambda
return image
def crop_image(x, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes."""
original_shape = tf.shape(x)
rank_assertion = tf.Assert(
tf.equal(tf.rank(x), 3), ['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
with tf.control_dependencies([size_assertion]):
x = tf.slice(x, offsets, cropped_shape)
x = tf.reshape(x, cropped_shape)
return x
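# Editorial note: in TF1 graph mode an assert op that nothing depends on is
# pruned from the graph and never runs; nesting the slice inside the
# tf.control_dependencies blocks above is what forces the rank and size
# assertions to execute before the crop.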
def rescale_image(x, size):
"""Rescale the image by scaling the smaller spatial dimension to `size`."""
shape = tf.cast(tf.shape(x), tf.float32)
h_greater = tf.greater(shape[0], shape[1])
shape = tf.cond(h_greater,
lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32),
lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32))
return tf.image.resize_bicubic([x], shape)[0]
def center_crop(image, size):
"""Crops to center of image with specified `size`."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Use floor division so the offsets stay integer tensors; `/` is true
# division under Python 3 and would yield float tensors here.
offset_height = ((image_height - size) + 1) // 2
offset_width = ((image_width - size) + 1) // 2
image = crop_image(image, offset_height, offset_width, size, size)
return image
def rescale_input(x):
"""Rescales image input to be in range [0,1]."""
current_min = tf.reduce_min(x)
current_max = tf.reduce_max(x)
# Add an epsilon to the denominator to avoid division by zero on constant inputs.
epsilon = 1e-5
rescaled_x = tf.div(
tf.subtract(x, current_min),
tf.maximum(tf.subtract(current_max, current_min), epsilon))
return rescaled_x
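# Worked example (illustrative): for an input with min=-3 and max=5,
# rescale_input maps -3 -> 0.0 and 5 -> 1.0; for a constant input
# (max == min) the epsilon keeps the denominator at 1e-5, so the result is
# all zeros rather than NaN.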
def preprocess_for_eval(image_bytes, image_size):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(image, tf.float32)
return image
def preprocess_for_train(image_bytes, image_size):
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size)
image = tf.image.convert_image_dtype(image, tf.float32)
return image
def preprocess_image(image_bytes, image_size, is_training=False):
"""Preprocesses the given image."""
if is_training:
return preprocess_for_train(image_bytes, image_size)
else:
return preprocess_for_eval(image_bytes, image_size)
def preprocess_for_eval_non_bytes(image, image_size):
"""Preprocesses the given image."""
image = rescale_image(image, image_size + 32)
image = center_crop(image, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
return image
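# Minimal usage sketch (illustrative addition, not part of the original
# module): running one JPEG file through the eval path. The file path and
# image size are hypothetical placeholders, and `tf.io.read_file` assumes a
# TF release new enough to expose the tf.io namespace (older 1.x releases
# used `tf.read_file`).
def _example_eval_preprocess(jpeg_path='/tmp/example.jpg', image_size=224):
  """Reads encoded JPEG bytes from disk and applies eval preprocessing."""
  image_bytes = tf.io.read_file(jpeg_path)  # raw encoded bytes, as expected
  return preprocess_image(image_bytes, image_size, is_training=False)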
| |
import pytest
from unittest.mock import Mock
from collections import OrderedDict
from nbformat.v4 import new_code_cell
from .. import translators
from ..exceptions import PapermillException
from ..models import Parameter
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, '{"foo": "bar"}'),
({"foo": '"bar"'}, '{"foo": "\\"bar\\""}'),
({"foo": ["bar"]}, '{"foo": ["bar"]}'),
({"foo": {"bar": "baz"}}, '{"foo": {"bar": "baz"}}'),
({"foo": {"bar": '"baz"'}}, '{"foo": {"bar": "\\"baz\\""}}'),
(["foo"], '["foo"]'),
(["foo", '"bar"'], '["foo", "\\"bar\\""]'),
([{"foo": "bar"}], '[{"foo": "bar"}]'),
([{"foo": '"bar"'}], '[{"foo": "\\"bar\\""}]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(float('nan'), "float('nan')"),
(float('-inf'), "float('-inf')"),
(float('inf'), "float('inf')"),
(True, 'True'),
(False, 'False'),
(None, 'None'),
],
)
def test_translate_type_python(test_input, expected):
assert translators.PythonTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = True\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = {"bar": "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
),
],
)
def test_translate_codify_python(parameters, expected):
assert translators.PythonTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_python(test_input, expected):
assert translators.PythonTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("a = 2", [Parameter("a", "None", "2", "")]),
("a: int = 2", [Parameter("a", "int", "2", "")]),
("a = 2 # type:int", [Parameter("a", "int", "2", "")]),
("a = False # Nice variable a", [Parameter("a", "None", "False", "Nice variable a")]),
(
"a: float = 2.258 # type: int Nice variable a",
[Parameter("a", "float", "2.258", "Nice variable a")],
), # noqa
(
"a = 'this is a string' # type: int Nice variable a",
[Parameter("a", "int", "'this is a string'", "Nice variable a")],
),
(
"a: List[str] = ['this', 'is', 'a', 'string', 'list'] # Nice variable a",
[
Parameter(
"a", "List[str]", "['this', 'is', 'a', 'string', 'list']", "Nice variable a"
)
],
),
(
"a: List[str] = [\n 'this', # First\n 'is',\n 'a',\n 'string',\n 'list' # Last\n] # Nice variable a", # noqa
[Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")],
),
(
"a: List[str] = [\n 'this',\n 'is',\n 'a',\n 'string',\n 'list'\n] # Nice variable a", # noqa
[Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")],
),
(
"""a: List[str] = [
'this', # First
'is',
'a',
'string',
'list' # Last
] # Nice variable a
b: float = -2.3432 # My b variable
""",
[
Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a"),
Parameter("b", "float", "-2.3432", "My b variable"),
],
),
],
)
def test_inspect_python(test_input, expected):
cell = new_code_cell(source=test_input)
assert translators.PythonTranslator.inspect(cell) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'list("foo" = "bar")'),
({"foo": '"bar"'}, 'list("foo" = "\\"bar\\"")'),
({"foo": ["bar"]}, 'list("foo" = list("bar"))'),
({"foo": {"bar": "baz"}}, 'list("foo" = list("bar" = "baz"))'),
({"foo": {"bar": '"baz"'}}, 'list("foo" = list("bar" = "\\"baz\\""))'),
(["foo"], 'list("foo")'),
(["foo", '"bar"'], 'list("foo", "\\"bar\\"")'),
([{"foo": "bar"}], 'list(list("foo" = "bar"))'),
([{"foo": '"bar"'}], 'list(list("foo" = "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'TRUE'),
(False, 'FALSE'),
(None, 'NULL'),
],
)
def test_translate_type_r(test_input, expected):
assert translators.RTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_r(test_input, expected):
assert translators.RTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = TRUE\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = list("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = list("bar" = "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = list("buz")\n',
),
# Leading underscores are stripped from parameter names
({"___foo": 5}, '# Parameters\nfoo = 5\n'),
],
)
def test_translate_codify_r(parameters, expected):
assert translators.RTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'Map("foo" -> "bar")'),
({"foo": '"bar"'}, 'Map("foo" -> "\\"bar\\"")'),
({"foo": ["bar"]}, 'Map("foo" -> Seq("bar"))'),
({"foo": {"bar": "baz"}}, 'Map("foo" -> Map("bar" -> "baz"))'),
({"foo": {"bar": '"baz"'}}, 'Map("foo" -> Map("bar" -> "\\"baz\\""))'),
(["foo"], 'Seq("foo")'),
(["foo", '"bar"'], 'Seq("foo", "\\"bar\\"")'),
([{"foo": "bar"}], 'Seq(Map("foo" -> "bar"))'),
([{"foo": '"bar"'}], 'Seq(Map("foo" -> "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
(None, 'None'),
],
)
def test_translate_type_scala(test_input, expected):
assert translators.ScalaTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_scala(test_input, expected):
assert translators.ScalaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[
("foo", '""', 'val foo = ""'),
("foo", '"bar"', 'val foo = "bar"'),
("foo", 'Map("foo" -> "bar")', 'val foo = Map("foo" -> "bar")'),
],
)
def test_translate_assign_scala(input_name, input_value, expected):
assert translators.ScalaTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '// Parameters\nval foo = "bar"\n'),
({"foo": True}, '// Parameters\nval foo = true\n'),
({"foo": 5}, '// Parameters\nval foo = 5\n'),
({"foo": 1.1}, '// Parameters\nval foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '// Parameters\nval foo = Seq("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '// Parameters\nval foo = Map("bar" -> "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'// Parameters\nval foo = "bar"\nval baz = Seq("buz")\n',
),
],
)
def test_translate_codify_scala(parameters, expected):
assert translators.ScalaTranslator.codify(parameters) == expected
# C# section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'new Dictionary<string,Object>{ { "foo" , "bar" } }'),
({"foo": '"bar"'}, 'new Dictionary<string,Object>{ { "foo" , "\\"bar\\"" } }'),
(["foo"], 'new [] { "foo" }'),
(["foo", '"bar"'], 'new [] { "foo", "\\"bar\\"" }'),
([{"foo": "bar"}], 'new [] { new Dictionary<string,Object>{ { "foo" , "bar" } } }'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
],
)
def test_translate_type_csharp(test_input, expected):
assert translators.CSharpTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_csharp(test_input, expected):
assert translators.CSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', 'var foo = "";'), ("foo", '"bar"', 'var foo = "bar";')],
)
def test_translate_assign_csharp(input_name, input_value, expected):
assert translators.CSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '// Parameters\nvar foo = "bar";\n'),
({"foo": True}, '// Parameters\nvar foo = true;\n'),
({"foo": 5}, '// Parameters\nvar foo = 5;\n'),
({"foo": 1.1}, '// Parameters\nvar foo = 1.1;\n'),
({"foo": ['bar', 'baz']}, '// Parameters\nvar foo = new [] { "bar", "baz" };\n'),
(
{"foo": {'bar': 'baz'}},
'// Parameters\nvar foo = new Dictionary<string,Object>{ { "bar" , "baz" } };\n',
),
],
)
def test_translate_codify_csharp(parameters, expected):
assert translators.CSharpTranslator.codify(parameters) == expected
# Powershell section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{`"foo`": `"bar`"}"'),
({"foo": "bar"}, '@{"foo" = "bar"}'),
({"foo": '"bar"'}, '@{"foo" = "`"bar`""}'),
({"foo": ["bar"]}, '@{"foo" = @("bar")}'),
({"foo": {"bar": "baz"}}, '@{"foo" = @{"bar" = "baz"}}'),
({"foo": {"bar": '"baz"'}}, '@{"foo" = @{"bar" = "`"baz`""}}'),
(["foo"], '@("foo")'),
(["foo", '"bar"'], '@("foo", "`"bar`"")'),
([{"foo": "bar"}], '@(@{"foo" = "bar"})'),
([{"foo": '"bar"'}], '@(@{"foo" = "`"bar`""})'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(float('nan'), "[double]::NaN"),
(float('-inf'), "[double]::NegativeInfinity"),
(float('inf'), "[double]::PositiveInfinity"),
(True, '$True'),
(False, '$False'),
(None, '$Null'),
],
)
def test_translate_type_powershell(test_input, expected):
assert translators.PowershellTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\n$foo = "bar"\n'),
({"foo": True}, '# Parameters\n$foo = $True\n'),
({"foo": 5}, '# Parameters\n$foo = 5\n'),
({"foo": 1.1}, '# Parameters\n$foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\n$foo = @("bar", "baz")\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\n$foo = @{"bar" = "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\n$foo = "bar"\n$baz = @("buz")\n',
),
],
)
def test_translate_codify_powershell(parameters, expected):
assert translators.PowershellTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', '$foo = ""'), ("foo", '"bar"', '$foo = "bar"')],
)
def test_translate_assign_powershell(input_name, input_value, expected):
assert translators.PowershellTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
)
def test_translate_comment_powershell(test_input, expected):
assert translators.PowershellTranslator.comment(test_input) == expected
# F# section
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, '[ ("foo", "bar" :> IComparable) ] |> Map.ofList'),
({"foo": '"bar"'}, '[ ("foo", "\\"bar\\"" :> IComparable) ] |> Map.ofList'),
(["foo"], '[ "foo" ]'),
(["foo", '"bar"'], '[ "foo"; "\\"bar\\"" ]'),
([{"foo": "bar"}], '[ [ ("foo", "bar" :> IComparable) ] |> Map.ofList ]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(2147483648, '2147483648L'),
(-2147483649, '-2147483649L'),
(True, 'true'),
(False, 'false'),
],
)
def test_translate_type_fsharp(test_input, expected):
assert translators.FSharpTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[("", '(* *)'), ("foo", '(* foo *)'), ("['best effort']", "(* ['best effort'] *)")],
)
def test_translate_comment_fsharp(test_input, expected):
assert translators.FSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"input_name,input_value,expected",
[("foo", '""', 'let foo = ""'), ("foo", '"bar"', 'let foo = "bar"')],
)
def test_translate_assign_fsharp(input_name, input_value, expected):
assert translators.FSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '(* Parameters *)\nlet foo = "bar"\n'),
({"foo": True}, '(* Parameters *)\nlet foo = true\n'),
({"foo": 5}, '(* Parameters *)\nlet foo = 5\n'),
({"foo": 1.1}, '(* Parameters *)\nlet foo = 1.1\n'),
({"foo": ['bar', 'baz']}, '(* Parameters *)\nlet foo = [ "bar"; "baz" ]\n'),
(
{"foo": {'bar': 'baz'}},
'(* Parameters *)\nlet foo = [ ("bar", "baz" :> IComparable) ] |> Map.ofList\n',
),
],
)
def test_translate_codify_fsharp(parameters, expected):
assert translators.FSharpTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
({"foo": "bar"}, 'Dict("foo" => "bar")'),
({"foo": '"bar"'}, 'Dict("foo" => "\\"bar\\"")'),
({"foo": ["bar"]}, 'Dict("foo" => ["bar"])'),
({"foo": {"bar": "baz"}}, 'Dict("foo" => Dict("bar" => "baz"))'),
({"foo": {"bar": '"baz"'}}, 'Dict("foo" => Dict("bar" => "\\"baz\\""))'),
(["foo"], '["foo"]'),
(["foo", '"bar"'], '["foo", "\\"bar\\""]'),
([{"foo": "bar"}], '[Dict("foo" => "bar")]'),
([{"foo": '"bar"'}], '[Dict("foo" => "\\"bar\\"")]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'true'),
(False, 'false'),
(None, 'nothing'),
],
)
def test_translate_type_julia(test_input, expected):
assert translators.JuliaTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
({"foo": True}, '# Parameters\nfoo = true\n'),
({"foo": 5}, '# Parameters\nfoo = 5\n'),
({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = Dict("bar" => "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
),
],
)
def test_translate_codify_julia(parameters, expected):
assert translators.JuliaTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '#'), ("foo", '# foo'), ('["best effort"]', '# ["best effort"]')]
)
def test_translate_comment_julia(test_input, expected):
assert translators.JuliaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("foo", '"foo"'),
('{"foo": "bar"}', '"{""foo"": ""bar""}"'),
({1: "foo"}, 'containers.Map({\'1\'}, {"foo"})'),
({1.0: "foo"}, 'containers.Map({\'1.0\'}, {"foo"})'),
({None: "foo"}, 'containers.Map({\'None\'}, {"foo"})'),
({True: "foo"}, 'containers.Map({\'True\'}, {"foo"})'),
({"foo": "bar"}, 'containers.Map({\'foo\'}, {"bar"})'),
({"foo": '"bar"'}, 'containers.Map({\'foo\'}, {"""bar"""})'),
({"foo": ["bar"]}, 'containers.Map({\'foo\'}, {{"bar"}})'),
(
{"foo": {"bar": "baz"}},
'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"baz"})})',
),
(
{"foo": {"bar": '"baz"'}},
'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"""baz"""})})',
),
(["foo"], '{"foo"}'),
(["foo", '"bar"'], '{"foo", """bar"""}'),
([{"foo": "bar"}], '{containers.Map({\'foo\'}, {"bar"})}'),
([{"foo": '"bar"'}], '{containers.Map({\'foo\'}, {"""bar"""})}'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
(True, 'true'),
(False, 'false'),
(None, 'NaN'),
],
)
def test_translate_type_matlab(test_input, expected):
assert translators.MatlabTranslator.translate(test_input) == expected
@pytest.mark.parametrize(
"parameters,expected",
[
({"foo": "bar"}, '% Parameters\nfoo = "bar";\n'),
({"foo": True}, '% Parameters\nfoo = true;\n'),
({"foo": 5}, '% Parameters\nfoo = 5;\n'),
({"foo": 1.1}, '% Parameters\nfoo = 1.1;\n'),
({"foo": ['bar', 'baz']}, '% Parameters\nfoo = {"bar", "baz"};\n'),
({"foo": {'bar': 'baz'}}, '% Parameters\nfoo = containers.Map({\'bar\'}, {"baz"});\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'% Parameters\nfoo = "bar";\nbaz = {"buz"};\n',
),
],
)
def test_translate_codify_matlab(parameters, expected):
assert translators.MatlabTranslator.codify(parameters) == expected
@pytest.mark.parametrize(
"test_input,expected", [("", '%'), ("foo", '% foo'), ("['best effort']", "% ['best effort']")]
)
def test_translate_comment_matlab(test_input, expected):
assert translators.MatlabTranslator.comment(test_input) == expected
def test_find_translator_with_exact_kernel_name():
my_new_kernel_translator = Mock()
my_new_language_translator = Mock()
translators.papermill_translators.register("my_new_kernel", my_new_kernel_translator)
translators.papermill_translators.register("my_new_language", my_new_language_translator)
assert (
translators.papermill_translators.find_translator("my_new_kernel", "my_new_language")
is my_new_kernel_translator
)
def test_find_translator_with_exact_language():
my_new_language_translator = Mock()
translators.papermill_translators.register("my_new_language", my_new_language_translator)
assert (
translators.papermill_translators.find_translator("unregistered_kernel", "my_new_language")
is my_new_language_translator
)
def test_find_translator_with_no_such_kernel_or_language():
with pytest.raises(PapermillException):
translators.papermill_translators.find_translator(
"unregistered_kernel", "unregistered_language"
)
def test_translate_uses_str_representation_of_unknown_types():
class FooClass:
def __str__(self):
return "foo"
obj = FooClass()
assert translators.Translator.translate(obj) == '"foo"'
def test_translator_must_implement_translate_dict():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.translate_dict({"foo": "bar"})
def test_translator_must_implement_translate_list():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.translate_list(["foo", "bar"])
def test_translator_must_implement_comment():
class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
MyNewTranslator.comment("foo")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: caktux
# @Date: 2015-02-23 13:42:34
# @Last Modified by: caktux
# @Last Modified time: 2015-04-22 05:43:54
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes.
from buildbot.schedulers.basic import AnyBranchScheduler, SingleBranchScheduler
from buildbot.schedulers.forcesched import FixedParameter, ForceScheduler, StringParameter
from buildbot.schedulers.timed import Nightly
from buildbot.schedulers.triggerable import Triggerable
from buildbot.changes import filter
distributions = ['trusty', 'utopic', 'vivid']
schedulers = []
self_codebases={
'ethereum-buildbot': {
'repository': 'https://github.com/ethereum/ethereum-buildbot.git',
'branch': 'master',
'revision': None
}
}
dockers_codebases={
'ethereum-dockers': {
'repository': 'https://github.com/ethereum/ethereum-dockers.git',
'branch': 'master',
'revision': None
}
}
cpp_ethereum_codebases={
'cpp-ethereum': {
'repository': 'https://github.com/ethereum/cpp-ethereum.git',
'branch': None,
'revision': None
},
'tests': {
'repository': 'https://github.com/ethereum/tests.git',
'branch': None,
'revision': None
}
}
go_ethereum_codebases={
'go-ethereum': {
'repository': 'https://github.com/ethereum/go-ethereum.git',
'branch': None,
'revision': None
},
'go-build': {
'repository': 'https://github.com/ethereum/go-build.git',
'branch': None,
'revision': None
}
}
ethereumj_codebases={
'ethereumj': {
'repository': 'https://github.com/ethereum/ethereumj.git',
'branch': 'master',
'revision': None
}
}
pyethereum_codebases={
'pyethereum': {
'repository': 'https://github.com/ethereum/pyethereum.git',
'branch': None,
'revision': None
}
}
pyethapp_codebases={
'pyethapp': {
'repository': 'https://github.com/ethereum/pyethapp.git',
'branch': None,
'revision': None
}
}
serpent_codebases={
'serpent': {
'repository': 'https://github.com/ethereum/serpent.git',
'branch': None,
'revision': None
}
}
brew_codebases={
'homebrew-ethereum': {
'repository': 'https://github.com/ethereum/homebrew-ethereum.git',
'branch': 'master',
'revision': None
}
}
ethereumjs_codebases={
'ethereumjs': {
'repository': 'https://github.com/ethereum/ethereum.js.git',
'branch': 'master',
'revision': None
}
}
integration_codebases={
'integration': {
'repository': 'https://github.com/etherex/etherex.git',
'branch': 'master',
'revision': None
}
}
all_cpp_ethereum_codebases=cpp_ethereum_codebases.copy()
all_cpp_ethereum_codebases.update(brew_codebases)
all_go_ethereum_codebases=go_ethereum_codebases.copy()
all_go_ethereum_codebases.update(brew_codebases)
all_ethereumj_codebases=ethereumj_codebases.copy()
all_pyethereum_codebases=pyethereum_codebases.copy()
all_pyethapp_codebases=pyethapp_codebases.copy()
all_serpent_codebases=serpent_codebases.copy()
all_serpent_codebases.update(pyethereum_codebases)
all_brew_cpp_codebases=cpp_ethereum_codebases.copy()
all_brew_cpp_codebases.update(brew_codebases)
all_brew_go_codebases=go_ethereum_codebases.copy()
all_brew_go_codebases.update(brew_codebases)
all_integration_codebases=cpp_ethereum_codebases.copy()
all_integration_codebases.update(ethereumjs_codebases)
all_integration_codebases.update(integration_codebases)
for scheduler in [
SingleBranchScheduler(
name="ethereum-buildbot-git",
change_filter=filter.ChangeFilter(project='ethereum-buildbot', branch='master'),
codebases=self_codebases,
treeStableTimer=60,
builderNames=["buildbot"]),
]: schedulers.append(scheduler)
for branch in ['master', 'develop']:
for scheduler in [
SingleBranchScheduler(
name="cpp-ethereum-%s-git" % branch,
change_filter=filter.ChangeFilter(project='cpp-ethereum', branch=branch),
codebases=all_cpp_ethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux C++ %s branch" % branch,
"Linux C++ GUI %s branch" % branch,
"Linux C++ %s evmjit" % branch,
"OSX C++ %s branch" % branch,
"OSX C++ GUI %s branch" % branch,
"OSX C++ %s evmjit" % branch,
"Windows C++ %s branch" % branch]),
SingleBranchScheduler(
name="go-ethereum-%s-git" % branch,
change_filter=filter.ChangeFilter(project='go-ethereum', branch=branch),
codebases=all_go_ethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux Go %s branch" % branch,
"Linux Go GUI %s branch" % branch,
"ARM Go %s branch" % branch,
"OSX Go %s branch" % branch,
"OSX Go GUI %s branch" % branch,
"Windows Go %s branch" % branch,
# "Windows Go GUI %s branch" % branch
]),
SingleBranchScheduler(
name="pyethereum-%s-git" % branch,
change_filter=filter.ChangeFilter(project='pyethereum', branch=branch),
codebases=all_pyethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux PyEthereum %s" % branch,
"OSX PyEthereum %s" % branch]),
SingleBranchScheduler(
name="serpent-%s-git" % branch,
change_filter=filter.ChangeFilter(project='serpent', branch=branch),
codebases=all_serpent_codebases,
treeStableTimer=60,
builderNames=[
"Linux Serpent %s" % branch,
"OSX Serpent %s" % branch]),
# Brew triggerables
Triggerable(
name="cpp-ethereum-%s-brew" % branch,
builderNames=[
"OSX C++ %s brew" % branch,
"OSX C++ GUI %s brew" % branch],
codebases=all_cpp_ethereum_codebases),
Triggerable(
name="go-ethereum-%s-brew" % branch,
builderNames=[
"OSX Go %s brew" % branch,
"OSX Go GUI %s brew" % branch],
codebases=all_go_ethereum_codebases),
# Extra triggerable checks
Triggerable(
name="cpp-ethereum-%s-check" % branch,
builderNames=["Linux C++ %s check" % branch],
codebases=all_cpp_ethereum_codebases),
Triggerable(
name="cpp-ethereum-%s-osx-check" % branch,
builderNames=["OSX C++ %s check" % branch],
codebases=all_cpp_ethereum_codebases),
# PoC node servers
Triggerable(
name="cpp-ethereum-%s-server" % branch,
builderNames=["Linux C++ %s server" % branch],
codebases=all_cpp_ethereum_codebases)
]: schedulers.append(scheduler)
for architecture in ['i386', 'amd64']:
for distribution in distributions:
for scheduler in [
Triggerable(
name="cpp-ethereum-%s-%s-%s" % (branch, architecture, distribution),
builderNames=["Linux C++ %s deb %s-%s" % (branch, architecture, distribution)]),
Triggerable(
name="go-ethereum-%s-%s-%s" % (branch, architecture, distribution),
builderNames=["Linux Go %s deb %s-%s" % (branch, architecture, distribution)])
]: schedulers.append(scheduler)
for scheduler in [
SingleBranchScheduler(
name="pyethapp-git",
change_filter=filter.ChangeFilter(project='pyethapp', branch='master'),
codebases=all_pyethapp_codebases,
treeStableTimer=60,
builderNames=[
"Linux PyEthApp",
"OSX PyEthApp"]),
SingleBranchScheduler(
name="ethereumj-git",
change_filter=filter.ChangeFilter(project='ethereumj', branch='master'),
codebases=all_ethereumj_codebases,
treeStableTimer=300,
builderNames=["Linux EthereumJ"]),
# Brew
# SingleBranchScheduler(
# name="brew-cpp-git",
# change_filter=filter.ChangeFilter(project='brew', branch='master'),
# codebases=all_brew_cpp_codebases,
# treeStableTimer=300,
# builderNames=["OSX C++ master brew", "OSX C++ develop brew"]),
# SingleBranchScheduler(
# name="brew-go-git",
# change_filter=filter.ChangeFilter(project='brew', branch='master'),
# codebases=all_brew_go_codebases,
# treeStableTimer=300,
# builderNames=["OSX Go master brew", "OSX Go develop brew"]),
# Pull requests
AnyBranchScheduler(
name="cpp-ethereum-develop-pr-git",
change_filter=filter.ChangeFilter(project='cpp-ethereum', category='pull-request'),
codebases=all_cpp_ethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux C++ pull requests",
"Linux C++ evmjit pull requests",
"OSX C++ pull requests",
"OSX C++ evmjit pull requests",
"Windows C++ pull requests"
]),
AnyBranchScheduler(
name="go-ethereum-develop-pr-git",
change_filter=filter.ChangeFilter(project='go-ethereum', category='pull-request'),
codebases=all_go_ethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux Go pull requests",
"ARM Go pull requests",
"OSX Go pull requests",
"Windows Go pull requests"
]),
AnyBranchScheduler(
name="pyethereum-pr-git",
change_filter=filter.ChangeFilter(project='pyethereum', category='pull-request'),
codebases=all_pyethereum_codebases,
treeStableTimer=60,
builderNames=[
"Linux PyEthereum PRs",
"OSX PyEthereum PRs"
]),
AnyBranchScheduler(
name="pyethapp-pr-git",
change_filter=filter.ChangeFilter(project='pyethapp', category='pull-request'),
codebases=all_pyethapp_codebases,
treeStableTimer=60,
builderNames=[
"Linux PyEthApp PRs",
"OSX PyEthApp PRs"
]),
AnyBranchScheduler(
name="serpent-pr-git",
change_filter=filter.ChangeFilter(project='serpent', category='pull-request'),
codebases=all_serpent_codebases,
treeStableTimer=60,
builderNames=[
"Linux Serpent PRs",
"OSX Serpent PRs"
]),
AnyBranchScheduler(
name="ethereumj-pr-git",
change_filter=filter.ChangeFilter(project='ethereumj', category='pull-request'),
codebases=all_ethereumj_codebases,
treeStableTimer=300,
builderNames=[
"Linux EthereumJ PRs"
]),
# Integration tests
Triggerable(
name="cpp-ethereum-integration",
builderNames=["Linux C++ integration"],
codebases=all_integration_codebases)
]: schedulers.append(scheduler)
#
# Forced schedulers
#
for scheduler in [
ForceScheduler(
name="force-self-update",
builderNames=["buildbot"],
codebases=["ethereum-buildbot"])
]: schedulers.append(scheduler)
for buildslave in ["one", "two", "three", "four"]:
for scheduler in [
ForceScheduler(
name="force-buildslave-cpp-%s" % buildslave,
builderNames=["buildslave-cpp-%s" % buildslave],
codebases=["ethereum-dockers"]),
ForceScheduler(
name="force-buildslave-go-%s" % buildslave,
builderNames=["buildslave-go-%s" % buildslave],
codebases=["ethereum-dockers"])
]: schedulers.append(scheduler)
for buildslave in ["one", "two"]:
for scheduler in [
ForceScheduler(
name="force-buildslave-python-%s" % buildslave,
builderNames=["buildslave-python-%s" % buildslave],
codebases=["ethereum-dockers"]),
ForceScheduler(
name="force-buildslave-java-%s" % buildslave,
builderNames=["buildslave-java-%s" % buildslave],
codebases=["ethereum-dockers"])
]: schedulers.append(scheduler)
for branch in ['master', 'develop']:
for scheduler in [
# Linux C++/Go
ForceScheduler(
name="force-cpp-ethereum-%s" % branch,
builderNames=["Linux C++ %s branch" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-gui-%s" % branch,
builderNames=["Linux C++ GUI %s branch" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-%s-evmjit" % branch,
builderNames=["Linux C++ %s evmjit" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-%s" % branch,
builderNames=["Linux Go %s branch" % branch],
codebases=["go-ethereum"]),
ForceScheduler(
name="force-go-ethereum-gui-%s" % branch,
builderNames=["Linux Go GUI %s branch" % branch],
codebases=["go-ethereum"]),
ForceScheduler(
name="force-go-ethereum-arm-%s" % branch,
builderNames=["ARM Go %s branch" % branch],
codebases=["go-ethereum"]),
# OSX C++/Go
ForceScheduler(
name="force-cpp-ethereum-%s-osx" % branch,
builderNames=["OSX C++ %s branch" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-gui-%s-osx" % branch,
builderNames=["OSX C++ GUI %s branch" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-%s-osx-evmjit" % branch,
builderNames=["OSX C++ %s evmjit" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-%s-osx" % branch,
builderNames=["OSX Go %s branch" % branch],
codebases=["go-ethereum", "go-build"]),
ForceScheduler(
name="force-go-ethereum-gui-%s-osx" % branch,
builderNames=["OSX Go GUI %s branch" % branch],
codebases=["go-ethereum", "go-build"]),
ForceScheduler(
name="force-cpp-ethereum-%s-brew" % branch,
builderNames=["OSX C++ %s brew" % branch],
codebases=["homebrew-ethereum", "cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-gui-%s-brew" % branch,
builderNames=["OSX C++ GUI %s brew" % branch],
codebases=["homebrew-ethereum", "cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-%s-brew" % branch,
builderNames=["OSX Go %s brew" % branch],
codebases=["homebrew-ethereum", "go-ethereum"]),
ForceScheduler(
name="force-go-ethereum-gui-%s-brew" % branch,
builderNames=["OSX Go GUI %s brew" % branch],
codebases=["homebrew-ethereum", "go-ethereum"]),
# Windows C++/Go
ForceScheduler(
name="force-cpp-ethereum-%s-win" % branch,
builderNames=["Windows C++ %s branch" % branch],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-%s-win" % branch,
builderNames=["Windows Go %s branch" % branch],
codebases=["go-ethereum", "go-build"]),
ForceScheduler(
name="force-go-ethereum-gui-%s-win" % branch,
builderNames=["Windows Go GUI %s branch" % branch],
codebases=["go-ethereum", "go-build"]),
# Other schedulers
ForceScheduler(
name="force-pyethereum-%s" % branch,
builderNames=["Linux PyEthereum %s" % branch],
codebases=["pyethereum"]),
ForceScheduler(
name="force-serpent-%s" % branch,
builderNames=["Linux Serpent %s" % branch],
codebases=["serpent", "pyethereum"]),
ForceScheduler(
name="force-pyethereum-osx-%s" % branch,
builderNames=["OSX PyEthereum %s" % branch],
codebases=["pyethereum"]),
ForceScheduler(
name="force-serpent-osx-%s" % branch,
builderNames=["OSX Serpent %s" % branch],
codebases=["serpent", "pyethereum"])
]: schedulers.append(scheduler)
for scheduler in [
ForceScheduler(
name="force-pyethapp",
builderNames=["Linux PyEthApp"],
codebases=["pyethapp"]),
ForceScheduler(
name="force-pyethapp-osx",
builderNames=["OSX PyEthApp"],
codebases=["pyethapp"]),
ForceScheduler(
name="force-ethereumj",
builderNames=["Linux EthereumJ"],
codebases=["ethereumj"]),
# Pull requests
# Linux
ForceScheduler(
name="force-cpp-ethereum-pr",
builderNames=["Linux C++ pull requests"],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-evmjit-pr",
builderNames=["Linux C++ evmjit pull requests"],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-pr",
builderNames=["Linux Go pull requests"],
codebases=["go-ethereum"]),
ForceScheduler(
name="force-go-ethereum-arm-pr",
builderNames=["ARM Go pull requests"],
codebases=["go-ethereum"]),
ForceScheduler(
name="force-pyethereum-pr",
builderNames=["Linux PyEthereum PRs"],
codebases=["pyethereum"]),
ForceScheduler(
name="force-pyethapp-pr",
builderNames=["Linux PyEthApp PRs"],
codebases=["pyethapp"]),
ForceScheduler(
name="force-serpent-pr",
builderNames=["Linux Serpent PRs"],
codebases=["serpent", "pyethereum"]),
ForceScheduler(
name="force-ethereumj-pr",
builderNames=["Linux EthereumJ PRs"],
codebases=["ethereumj"]),
# OSX
ForceScheduler(
name="force-cpp-ethereum-osx-pr",
builderNames=["OSX C++ pull requests"],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-cpp-ethereum-osx-evmjit-pr",
builderNames=["OSX C++ evmjit pull requests"],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-osx-pr",
builderNames=["OSX Go pull requests"],
codebases=["go-ethereum", "go-build"]),
ForceScheduler(
name="force-pyethereum-osx-pr",
builderNames=["OSX PyEthereum PRs"],
codebases=["pyethereum"]),
ForceScheduler(
name="force-pyethapp-osx-pr",
builderNames=["OSX PyEthApp PRs"],
codebases=["pyethapp"]),
ForceScheduler(
name="force-serpent-osx-pr",
builderNames=["OSX Serpent PRs"],
codebases=["serpent", "pyethereum"]),
# Windows
ForceScheduler(
name="force-cpp-ethereum-win-pr",
builderNames=["Windows C++ pull requests"],
codebases=["cpp-ethereum", "tests"]),
ForceScheduler(
name="force-go-ethereum-win-pr",
builderNames=["Windows Go pull requests"],
codebases=["go-ethereum"]),
# Integration
ForceScheduler(
name="force-cpp-ethereum-integration",
builderNames=["Linux C++ integration"],
codebases=["cpp-ethereum", "ethereumjs", "integration"])
]: schedulers.append(scheduler)
for buildslave in ["one", "two", "three", "four"]:
for scheduler in [
Nightly(
name="nightly-buildslave-cpp-%s" % buildslave,
builderNames=["buildslave-cpp-%s" % buildslave],
codebases=dockers_codebases,
branch=None,
hour=3,
minute=0),
Nightly(
name="nightly-buildslave-go-%s" % buildslave,
builderNames=["buildslave-go-%s" % buildslave],
codebases=dockers_codebases,
branch=None,
hour=3,
minute=0)
]: schedulers.append(scheduler)
for buildslave in ["one", "two"]:
for scheduler in [
Nightly(
name="nightly-buildslave-python-%s" % buildslave,
builderNames=["buildslave-python-%s" % buildslave],
codebases=dockers_codebases,
branch=None,
hour=3,
minute=30),
Nightly(
name="nightly-buildslave-java-%s" % buildslave,
builderNames=["buildslave-java-%s" % buildslave],
codebases=dockers_codebases,
branch=None,
hour=3,
minute=30)
]: schedulers.append(scheduler)
# for architecture in ['i386', 'amd64']:
for distribution in distributions:
for scheduler in [
# Triggerable(
# name="libcryptopp-%s-%s" % (architecture, distribution),
# builderNames=["libcryptopp %s-%s" % (architecture, distribution)]),
# Triggerable(
# name="libjson-rpc-cpp-%s-%s" % (architecture, distribution),
# builderNames=["libjson-rpc-cpp %s-%s" % (architecture, distribution)]),
ForceScheduler(
name="force-libcryptopp-%s-%s" % ("amd64", distribution),
builderNames=["libcryptopp %s-%s" % ("amd64", distribution)],
# codebases=["cryptopp"],
repository=FixedParameter(name="repository", default=""),
project=FixedParameter(name="project", default=""),
branch=FixedParameter(name="branch", default="master"),
revision=StringParameter(
name="revision",
label="Revision:<br>",
default="81fd1114fa64ee680ad642063aa29c3f62a44cdd",
required=True,
size=40),
properties=[
StringParameter(
name="version",
label="Version:<br>",
default="5.6.2",
required=True,
size=20)
]),
ForceScheduler(
name="force-libjson-rpc-cpp-%s-%s" % ("amd64", distribution),
builderNames=["libjson-rpc-cpp %s-%s" % ("amd64", distribution)],
# codebases=["json-rpc-cpp"],
repository=FixedParameter(name="repository", default=""),
project=FixedParameter(name="project", default=""),
branch=FixedParameter(name="branch", default="master"),
revision=StringParameter(
name="revision",
label="Revision:<br>",
default="5dce039508d17ed1717eacf46be34d1a1eea1c87",
required=True,
size=40),
properties=[
StringParameter(
name="version",
label="Version:<br>",
default="0.4.2",
required=True,
size=10)
]),
ForceScheduler(
name="force-qtwebengine-%s-%s" % ("amd64", distribution),
builderNames=["qtwebengine %s-%s" % ("amd64", distribution)],
repository=FixedParameter(name="repository", default=""),
project=FixedParameter(name="project", default=""),
branch=StringParameter(name="branch", default="5.4.1"),
revision=StringParameter(
name="revision",
label="Revision:<br>",
default="72ff0b7d9600db642e2d2e95c78c70454bbdb5e7",
required=True,
size=40),
properties=[
StringParameter(
name="version",
label="Version:<br>",
default="v5.4.1",
required=True,
size=10)
]),
ForceScheduler(
name="force-golang-%s-%s" % ("amd64", distribution),
builderNames=["golang %s-%s" % ("amd64", distribution)],
repository=FixedParameter(name="repository", default=""),
project=FixedParameter(name="project", default=""),
branch=StringParameter(name="branch", default="release-branch.go1.4"),
revision=StringParameter(
name="revision",
label="Revision:<br>",
default="883bc6ed0ea815293fe6309d66f967ea60630e87",
required=True,
size=40),
properties=[
StringParameter(
name="version",
label="Version:<br>",
default="2:1.4.2",
required=True,
size=10)
])
]: schedulers.append(scheduler)
if distribution in ['trusty', 'utopic']:
for scheduler in [
ForceScheduler(
name="force-qt5-%s" % distribution,
builderNames=["qt5 %s" % distribution],
properties=[
StringParameter(
name="version",
label="Version:<br>",
default="5.4.1",
required=True,
size=10)
])
]: schedulers.append(scheduler)
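# Editorial note: this module only assembles the `schedulers` list; a buildbot
# master configuration (conventionally master.cfg) is expected to import this
# module and wire the list in, e.g. c['schedulers'] = schedulers.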
| |
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import astropy.units as u
import h5py
import numpy as np
import os
import unittest as ut
from typing import Tuple
from bapsflib._hdf.maps import FauxHDFBuilder
from ..templates import HDFMapDigiTemplate
def method_overridden(cls, obj, method: str) -> bool:
"""check if obj's class over-road base class method"""
obj_method = method in obj.__class__.__dict__.keys()
base_method = method in cls.__dict__.keys()
return obj_method and base_method
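# Editorial note: both conditions must hold; the method name must appear in
# the object's own class __dict__ (not merely be inherited) and also in the
# base class __dict__, so only genuine overrides of template methods count.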
class DigitizerTestCase(ut.TestCase):
"""Base TestCase for testing digitizer mapping classes."""
# TODO: DESIGN A FAILURES TEST 'test_map_failures'
# - These are required scenarios where the mapping class should
# raise an HDFMappingError
f = NotImplemented # type: FauxHDFBuilder
DEVICE_NAME = NotImplemented # type: str
DEVICE_PATH = NotImplemented # type: str
MAP_CLASS = NotImplemented
@classmethod
def setUpClass(cls):
# skip tests if in the DigitizerTestCase base class itself
if cls is DigitizerTestCase:
raise ut.SkipTest("In DigitizerTestCase, skipping base tests")
super().setUpClass()
# create HDF5 file
cls.f = FauxHDFBuilder()
def setUp(self):
# setup HDF5 file
if not (self.DEVICE_NAME in self.f.modules and len(self.f.modules) == 1):
# clear HDF5 file and add module
self.f.remove_all_modules()
self.f.add_module(self.DEVICE_NAME)
# define `mod` attribute
self.mod = self.f.modules[self.DEVICE_NAME]
def tearDown(self):
# reset module
self.mod.knobs.reset()
@classmethod
def tearDownClass(cls):
# cleanup and close HDF5 file
super().tearDownClass()
cls.f.cleanup()
@property
def map(self) -> HDFMapDigiTemplate:
"""Map object of device"""
return self.map_device(self.dgroup)
@property
def dgroup(self) -> h5py.Group:
"""Device HDF5 group"""
return self.f[self.DEVICE_PATH]
def map_device(self, group: h5py.Group) -> HDFMapDigiTemplate:
"""Mapping function"""
return self.MAP_CLASS(group)
def test_map_basics(self):
"""Test all required basic map features."""
self.assertDigitizerMapBasics(self.map, self.dgroup)
def test_not_h5py_group(self):
"""Test error if object to map is not h5py.Group"""
with self.assertRaises(TypeError):
self.map_device(None)
def assertDigitizerMapBasics(self, _map: HDFMapDigiTemplate, _group: h5py.Group):
# check instance
self.assertIsInstance(_map, HDFMapDigiTemplate)
# assert attribute existence
self.assertTrue(hasattr(_map, "_build_configs"))
self.assertTrue(hasattr(_map, "active_configs"))
self.assertTrue(hasattr(_map, "configs"))
self.assertTrue(hasattr(_map, "construct_dataset_name"))
self.assertTrue(hasattr(_map, "construct_header_dataset_name"))
self.assertTrue(hasattr(_map, "deduce_config_active_status"))
self.assertTrue(hasattr(_map, "device_adcs"))
self.assertTrue(hasattr(_map, "device_name"))
self.assertTrue(hasattr(_map, "get_adc_info"))
self.assertTrue(hasattr(_map, "group"))
self.assertTrue(hasattr(_map, "info"))
# ---- test general attributes (part 1 of 2) ----
# 'device_adcs'
# 'device_name'
# 'group'
#
# check `device_adcs`
self.assertIsInstance(_map.device_adcs, tuple)
self.assertTrue(bool(_map.device_adcs))
self.assertTrue(all(isinstance(adc, str) for adc in _map.device_adcs))
# check `device_name`
self.assertEqual(_map.device_name, _map.info["group name"])
# check `group`
self.assertIsInstance(_map.group, h5py.Group)
self.assertEqual(_map.group, _group)
# ---- test map.info ----
# type
self.assertIsInstance(_map.info, dict)
# key existence
self.assertIn("group name", _map.info)
self.assertIn("group path", _map.info)
# values
self.assertEqual(_map.info["group name"], os.path.basename(_group.name))
self.assertEqual(_map.info["group path"], _group.name)
# ---- test map.configs ----
#
# - The `configs` dictionary contains the translation info
# needed to convert the data stored in the HDF5 datasets
# into the structured numpy array constructed by HDFReadData
#
# - Each item in `configs` must be structured as:
# Key == name of configuration
# Value == configuration dictionary (config_dict)
#
# - The keys in the config_dict break down into 2 categories:
# 1. Required, which break down into 2 sub-categories
#
# A. non-polymorphic keys
# ('active', 'adc', 'config group path', and 'shotnum')
# B. polymorphic keys
# ~ these keys are the adc names listed in the 'adc' key
#
# ~ these keys are used by HDFReadData to translate data
# from the HDF5 file into a structured numpy array
#
# 2. Optional meta-info keys
#
# ~ not used in the translation; considered meta-info
# for the Digitizer
# ~ meta-info keys are added to the `info` dictionary
# attribute that is bound to the numpy array data object
# constructed by HDFReadControls
#
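        # For orientation, a hypothetical `configs` entry could look like
        # the sketch below (all names and values are illustrative only,
        # not taken from a real module):
        #
        #   configs['config01'] = {
        #       'active': True,
        #       'adc': ('SIS 3301',),
        #       'config group path': '/Raw data + config/SIS 3301/config01',
        #       'shotnum': {'dset field': ('Shot number',),
        #                   'shape': (),
        #                   'dtype': <numpy integer type>},
        #       'SIS 3301': ((brd, (ch, ...), setup_dict), ...),
        #   }
        #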
self.assertIsInstance(_map.configs, dict)
for cname, config in _map.configs.items():
# must be a dict
self.assertIsInstance(config, dict)
# look for required keys
# - polymorphic keys are examined below in the section
# "examine polymorphic "adc" keys"
#
self.assertIn("active", config)
self.assertIn("adc", config)
self.assertIn("config group path", config)
self.assertIn("shotnum", config)
# examine 'active' key
self.assertIsInstance(config["active"], bool)
# examine 'adc' key
self.assertIsInstance(config["adc"], tuple)
for adc in config["adc"]:
self.assertIsInstance(adc, str)
self.assertIn(adc, _map.device_adcs)
# examine 'config group path' key
self.assertIsInstance(config["config group path"], str)
self.assertIsNotNone(_group.get(config["config group path"]))
# -- examine 'shotnum' key --
# required keys
self.assertIsInstance(config["shotnum"], dict)
self.assertIn("dset field", config["shotnum"])
self.assertIn("shape", config["shotnum"])
self.assertIn("dtype", config["shotnum"])
# ['shotnum']['dset field']
self.assertIsInstance(config["shotnum"]["dset field"], tuple)
self.assertEqual(len(config["shotnum"]["dset field"]), 1)
self.assertIsInstance(config["shotnum"]["dset field"][0], str)
# ['shotnum']['shape']
self.assertEqual(config["shotnum"]["shape"], ())
# ['shotnum']['dtype']
self.assertTrue(np.issubdtype(config["shotnum"]["dtype"], np.integer))
# -- examine polymorphic "adc" keys --
for adc in config["adc"]:
# is a tuple of 3-element tuples
self.assertIsInstance(config[adc], tuple)
self.assertTrue(bool(config[adc]))
for conn in config[adc]:
self.assertIsInstance(conn, tuple)
self.assertEqual(len(conn), 3)
# 1st element is board number
self.assertIsInstance(conn[0], (int, np.integer))
# 2nd element is tuple of active channels on board
self.assertIsInstance(conn[1], tuple)
self.assertTrue(bool(conn[1]))
self.assertTrue(
all(isinstance(ch, (int, np.integer)) for ch in conn[1])
)
# 3rd element is dict of setup parameters
self.assertIsInstance(conn[2], dict)
self.assertIn("bit", conn[2])
self.assertIn("clock rate", conn[2])
self.assertIn("nshotnum", conn[2])
self.assertIn("nt", conn[2])
self.assertIn("shot average (software)", conn[2])
self.assertIn("sample average (hardware)", conn[2])
# check 'bit'
self.assertIsInstance(conn[2]["bit"], (int, np.integer))
self.assertTrue(conn[2]["bit"] > 0)
# check 'clock rate'
self.assertIsInstance(conn[2]["clock rate"], u.Quantity)
# noinspection PyUnresolvedReferences
self.assertTrue(conn[2]["clock rate"].unit.is_equivalent(u.Hertz))
# check 'nshotnum' and 'nt'
for key in ("nshotnum", "nt"):
self.assertIsInstance(conn[2][key], (int, np.integer))
self.assertTrue(conn[2][key] > 0 or conn[2][key] == -1)
# check 'shot average' and 'sample average'
for key in ("shot average (software)", "sample average (hardware)"):
self.assertIsInstance(conn[2][key], (type(None), int, np.integer))
if conn[2][key] is not None:
self.assertFalse(conn[2][key] <= 1)
"""
# ------ Basic construct_dataset_name() Behavior ------
#
# 1. board is invalid (board = -1)
# 2. channel is invalid (channel = -1)
# 3. config_name is invalid (config_name='')
# 4. adc is invalid (adc='')
        # 5. return_info=False returns string
# 6. return_info=True returns 2-element tuple
#
# gather active map info
config = _map.active_configs[0]
adc = _map.configs[config]['adc'][0]
brd = _map.configs[config][adc][0][0]
ch = _map.configs[config][adc][0][1][0]
# (1) invalid board number
self.assertRaises(ValueError,
_map.construct_dataset_name, -1, 1)
# (2) invalid channel number
self.assertRaises(ValueError,
_map.construct_dataset_name, brd, -1)
# (3) invalid config_name
self.assertRaises(ValueError,
_map.construct_dataset_name,
brd, ch, config_name='')
# (4) invalid adc
self.assertRaises(ValueError,
_map.construct_dataset_name,
brd, ch, adc='')
# (5) returned object must be string
dname = _map.construct_dataset_name(brd, ch)
self.assertIsInstance(dname, str)
# (6) returned object must be 2-element tuple
# 0 = is a string
# 1 = is a dict
#
dname = _map.construct_dataset_name(brd, ch, return_info=True)
self.assertIsInstance(dname, tuple)
self.assertEqual(len(dname), 2)
self.assertIsInstance(dname[0], str)
self.assertIsInstance(dname[1], dict)
self.assertIn('bit', dname[1])
self.assertIn('clock rate', dname[1])
self.assertIn('shot average (software)', dname[1])
self.assertIn('sample average (hardware)', dname[1])
self.assertIn('adc', dname[1])
self.assertIn('configuration name', dname[1])
self.assertIn('digitizer', dname[1])
"""
# ---- test general attributes (part 2 of 2) ----
# 'active_configs'
# 'construct_dataset_name'
# 'construct_header_dataset_name'
# 'deduce_config_active_status'
# 'get_adc_info'
#
# check `active_configs`
self.assertIsInstance(_map.active_configs, list)
self.assertTrue(bool(_map.active_configs))
for active in _map.active_configs:
self.assertIsInstance(active, str)
self.assertTrue(_map.configs[active]["active"])
# check `construct_dataset_name`
self.assertConstructDatasetName(_map, _group)
# check `construct_header_dataset_name`
self.assertConstructHeaderDatasetName(_map, _group)
# check `deduce_config_active_status`
for cname in _map.configs:
active_status = _map.deduce_config_active_status(cname)
self.assertIsInstance(active_status, bool)
self.assertEqual(active_status, _map.configs[cname]["active"])
# check `get_adc_info`
self.assertFalse(
method_overridden(HDFMapDigiTemplate, _map, "get_adc_info"),
msg="Overriding HDFMapDigiTemplate method 'get_adc_info' is NOT allowed",
)
def assertConstructDatasetName(self, _map: HDFMapDigiTemplate, _group: h5py.Group):
"""Assert all expected datasets exist"""
# build kwargs groupings
kwargs_list = []
for cname, config in _map.configs.items():
for adc in config["adc"]:
for conn in config[adc]:
brd = conn[0]
chs = conn[1]
for ch in chs:
kwargs_list.append(
{
"board": brd,
"channel": ch,
"config_name": cname,
"adc": adc,
"return_info": False,
}
)
for kwargs in kwargs_list:
if kwargs["config_name"] not in _map.active_configs:
with self.assertRaises(ValueError):
_map.construct_dataset_name(**kwargs)
else:
# -- usage without setup info dict --
dset_name = _map.construct_dataset_name(**kwargs)
self.assertIsInstance(dset_name, str)
self.assertIsNotNone(_group.get(dset_name))
# -- usage with setup info dict --
kwargs["return_info"] = True
stuff = _map.construct_dataset_name(**kwargs)
self.assertIsInstance(stuff, tuple)
self.assertEqual(len(stuff), 2)
self.assertEqual(stuff[0], dset_name)
self.assertIsInstance(stuff[1], dict)
def assertConstructHeaderDatasetName(
self, _map: HDFMapDigiTemplate, _group: h5py.Group
):
"""Assert all expected header datasets exist"""
# build kwargs groupings
kwargs_list = []
for cname, config in _map.configs.items():
for adc in config["adc"]:
for conn in config[adc]:
brd = conn[0]
chs = conn[1]
for ch in chs:
kwargs_list.append(
{
"board": brd,
"channel": ch,
"config_name": cname,
"adc": adc,
}
)
for kwargs in kwargs_list:
if kwargs["config_name"] not in _map.active_configs:
with self.assertRaises(ValueError):
                    _map.construct_header_dataset_name(**kwargs)
else:
                # construct and verify the header dataset name
                dset_name = _map.construct_header_dataset_name(**kwargs)
                self.assertIsInstance(dset_name, str)
                self.assertIsNotNone(_group.get(dset_name))
def assertConnectionsEqual(
self,
_map: HDFMapDigiTemplate,
connections: Tuple[Tuple[int, Tuple[int, ...]], ...],
adc: str,
config_name: str,
):
"""
Test equality of mapped adc connections and expected
adc connections.
"""
map_conns = _map.configs[config_name][adc]
filter_conns = []
for conn in map_conns:
filter_conns.append((conn[0], conn[1]))
map_conns = tuple(filter_conns)
self.assertEqual(map_conns, connections)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
import socket
import itertools
import logging
from eventlet import GreenPile, GreenPool, Timeout
from swift.common import constraints
from swift.common.daemon import Daemon
from swift.common.direct_client import (
direct_head_container, direct_delete_container_object,
direct_put_container_object, ClientException)
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, split_path, majority_size, \
FileLikeIter, Timestamp, last_modified_date_to_timestamp, \
LRUCache, decode_timestamps
MISPLACED_OBJECTS_ACCOUNT = '.misplaced_objects'
MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600 # 1 hour
CONTAINER_POLICY_TTL = 30
def cmp_policy_info(info, remote_info):
"""
You have to squint to see it, but the general strategy is just:
    if either has been recreated:
        return the newest (of the recreated)
    else:
        return the oldest
    I tried cleaning it up for a while, but settled on just writing a bunch
    of tests instead. Once you get an intuitive sense for the nuance here
    you can see there's a better way to spell the boolean logic, but it all
    ends up looking sorta hairy.
:returns: -1 if info is correct, 1 if remote_info is better
"""
def is_deleted(info):
return (info['delete_timestamp'] > info['put_timestamp'] and
info.get('count', info.get('object_count', 0)) == 0)
deleted = is_deleted(info)
remote_deleted = is_deleted(remote_info)
if any([deleted, remote_deleted]):
if not deleted:
return -1
elif not remote_deleted:
return 1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
def has_been_recreated(info):
return (info['put_timestamp'] > info['delete_timestamp'] >
Timestamp(0))
remote_recreated = has_been_recreated(remote_info)
recreated = has_been_recreated(info)
if any([remote_recreated, recreated]):
if not recreated:
return 1
elif not remote_recreated:
return -1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
return cmp(info['status_changed_at'], remote_info['status_changed_at'])
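# A minimal sketch of the ordering (the timestamp strings are hypothetical;
# -1 means the first argument's info is the correct one):
#
#   >>> live = {'put_timestamp': '2', 'delete_timestamp': '1',
#   ...         'count': 1, 'status_changed_at': '2'}
#   >>> dead = {'put_timestamp': '1', 'delete_timestamp': '2',
#   ...         'count': 0, 'status_changed_at': '3'}
#   >>> cmp_policy_info(live, dead)
#   -1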
def incorrect_policy_index(info, remote_info):
"""
Compare remote_info to info and decide if the remote storage policy index
should be used instead of ours.
"""
if 'storage_policy_index' not in remote_info:
return False
if remote_info['storage_policy_index'] == \
info['storage_policy_index']:
return False
return info['storage_policy_index'] != sorted(
[info, remote_info], cmp=cmp_policy_info)[0]['storage_policy_index']
def translate_container_headers_to_info(headers):
default_timestamp = Timestamp(0).internal
return {
'storage_policy_index': int(headers['X-Backend-Storage-Policy-Index']),
'put_timestamp': headers.get('x-backend-put-timestamp',
default_timestamp),
'delete_timestamp': headers.get('x-backend-delete-timestamp',
default_timestamp),
'status_changed_at': headers.get('x-backend-status-changed-at',
default_timestamp),
}
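# With a plain dict carrying only the policy index (in real use the headers
# come from a case-insensitive header dict), the timestamps fall back to
# Timestamp(0).internal:
#
#   >>> translate_container_headers_to_info(
#   ...     {'X-Backend-Storage-Policy-Index': '1'})['storage_policy_index']
#   1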
def best_policy_index(headers):
container_info = map(translate_container_headers_to_info, headers)
container_info.sort(cmp=cmp_policy_info)
return container_info[0]['storage_policy_index']
def get_reconciler_container_name(obj_timestamp):
"""
Get the name of a container into which a misplaced object should be
enqueued. The name is the object's last modified time rounded down to the
nearest hour.
    :param obj_timestamp: a string representation of the object's
        'created_at' time from its container db row.
:return: a container name
"""
# Use last modified time of object to determine reconciler container name
_junk, _junk, ts_meta = decode_timestamps(obj_timestamp)
return str(int(ts_meta) //
MISPLACED_OBJECTS_CONTAINER_DIVISOR *
MISPLACED_OBJECTS_CONTAINER_DIVISOR)
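# For example, an entry whose meta timestamp is 1500000123.00000 is enqueued
# in the container named str(1500000123 // 3600 * 3600), i.e. '1499997600'.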
def get_reconciler_obj_name(policy_index, account, container, obj):
return "%(policy_index)d:/%(acc)s/%(con)s/%(obj)s" % {
'policy_index': policy_index, 'acc': account,
'con': container, 'obj': obj}
def get_reconciler_content_type(op):
try:
return {
'put': 'application/x-put',
'delete': 'application/x-delete',
}[op.lower()]
except KeyError:
raise ValueError('invalid operation type %r' % op)
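# Illustrative queue-entry naming (the account/container/object names are
# hypothetical):
#
#   >>> get_reconciler_obj_name(1, 'AUTH_a', 'c', 'o')
#   '1:/AUTH_a/c/o'
#   >>> get_reconciler_content_type('put')
#   'application/x-put'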
def get_row_to_q_entry_translator(broker):
account = broker.account
container = broker.container
op_type = {
0: get_reconciler_content_type('put'),
1: get_reconciler_content_type('delete'),
}
def translator(obj_info):
name = get_reconciler_obj_name(obj_info['storage_policy_index'],
account, container,
obj_info['name'])
return {
'name': name,
'deleted': 0,
'created_at': obj_info['created_at'],
'etag': obj_info['created_at'],
'content_type': op_type[obj_info['deleted']],
'size': 0,
}
return translator
def add_to_reconciler_queue(container_ring, account, container, obj,
obj_policy_index, obj_timestamp, op,
force=False, conn_timeout=5, response_timeout=15):
"""
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
    :returns: the .misplaced_objects container name on success, or False on
        failure. "Success" means a majority of container servers got the
        update.
"""
container_name = get_reconciler_container_name(obj_timestamp)
object_name = get_reconciler_obj_name(obj_policy_index, account,
container, obj)
if force:
# this allows an operator to re-enqueue an object that has
# already been popped from the queue to be reprocessed, but
# could potentially prevent out of order updates from making it
# into the queue
x_timestamp = Timestamp.now().internal
else:
x_timestamp = obj_timestamp
q_op_type = get_reconciler_content_type(op)
headers = {
'X-Size': 0,
'X-Etag': obj_timestamp,
'X-Timestamp': x_timestamp,
'X-Content-Type': q_op_type,
}
def _check_success(*args, **kwargs):
try:
direct_put_container_object(*args, **kwargs)
return 1
except (ClientException, Timeout, socket.error):
return 0
pile = GreenPile()
part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
container_name)
for node in nodes:
pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
container_name, object_name, headers=headers,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
successes = sum(pile)
if successes >= majority_size(len(nodes)):
return container_name
else:
return False
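# A hypothetical call (the ring object and names are illustrative); on
# success this returns the queue container name, e.g. '1499997600',
# otherwise False:
#
#   add_to_reconciler_queue(container_ring, 'AUTH_a', 'c', 'o',
#                           obj_policy_index=1,
#                           obj_timestamp=Timestamp.now().internal,
#                           op='PUT')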
def slightly_later_timestamp(ts, offset=1):
return Timestamp(ts, offset=offset).internal
def parse_raw_obj(obj_info):
"""
Translate a reconciler container listing entry to a dictionary
containing the parts of the misplaced object queue entry.
    :param obj_info: an entry in a container listing with the
        required keys: name, content_type, and hash
:returns: a queue entry dict with the keys: q_policy_index, account,
container, obj, q_op, q_ts, q_record, and path
"""
raw_obj_name = obj_info['name'].encode('utf-8')
policy_index, obj_name = raw_obj_name.split(':', 1)
q_policy_index = int(policy_index)
account, container, obj = split_path(obj_name, 3, 3, rest_with_last=True)
try:
q_op = {
'application/x-put': 'PUT',
'application/x-delete': 'DELETE',
}[obj_info['content_type']]
except KeyError:
raise ValueError('invalid operation type %r' %
obj_info.get('content_type', None))
return {
'q_policy_index': q_policy_index,
'account': account,
'container': container,
'obj': obj,
'q_op': q_op,
        'q_ts': decode_timestamps(obj_info['hash'])[0],
'q_record': last_modified_date_to_timestamp(
obj_info['last_modified']),
'path': '/%s/%s/%s' % (account, container, obj)
}
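# An illustrative listing entry and the fields it parses to (all values are
# hypothetical):
#
#   >>> entry = parse_raw_obj({
#   ...     'name': u'1:/AUTH_a/c/o',
#   ...     'content_type': 'application/x-put',
#   ...     'hash': '0000001500.00000',
#   ...     'last_modified': '2016-01-01T01:02:03.000000'})
#   >>> entry['q_policy_index'], entry['q_op'], entry['path']
#   (1, 'PUT', '/AUTH_a/c/o')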
@LRUCache(maxtime=CONTAINER_POLICY_TTL)
def direct_get_container_policy_index(container_ring, account_name,
container_name):
"""
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a majority
"""
def _eat_client_exception(*args):
try:
return direct_head_container(*args)
except ClientException as err:
if err.http_status == 404:
return err.http_headers
except (Timeout, socket.error):
pass
pile = GreenPile()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pile.spawn(_eat_client_exception, node, part, account_name,
container_name)
headers = [x for x in pile if x is not None]
if len(headers) < majority_size(len(nodes)):
return
return best_policy_index(headers)
def direct_delete_container_entry(container_ring, account_name, container_name,
object_name, headers=None):
"""
Talk directly to the primary container servers to delete a particular
object listing. Does not talk to object servers; use this only when a
container entry does not actually have a corresponding object.
"""
pool = GreenPool()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pool.spawn_n(direct_delete_container_object, node, part, account_name,
container_name, object_name, headers=headers)
# This either worked or it didn't; if it didn't, we'll retry on the next
# reconciler loop when we see the queue entry again.
pool.waitall()
class ContainerReconciler(Daemon):
"""
Move objects that are in the wrong storage policy.
"""
def __init__(self, conf):
self.conf = conf
# This option defines how long an un-processable misplaced object
# marker will be retried before it is abandoned. It is not coupled
# with the tombstone reclaim age in the consistency engine.
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.interval = int(conf.get('interval', 30))
conf_path = conf.get('__file__') or \
'/etc/swift/container-reconciler.conf'
self.logger = get_logger(conf, log_route='container-reconciler')
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Container Reconciler',
request_tries)
self.stats = defaultdict(int)
self.last_stat_time = time.time()
def stats_log(self, metric, msg, *args, **kwargs):
"""
Update stats tracking for metric and emit log message.
"""
level = kwargs.pop('level', logging.DEBUG)
log_message = '%s: ' % metric + msg
self.logger.log(level, log_message, *args, **kwargs)
self.stats[metric] += 1
def log_stats(self, force=False):
"""
        Dump stats to the logger; a no-op when stats have already been
        logged within the last minute.
"""
now = time.time()
should_log = force or (now - self.last_stat_time > 60)
if should_log:
self.last_stat_time = now
self.logger.info('Reconciler Stats: %r', dict(**self.stats))
def pop_queue(self, container, obj, q_ts, q_record):
"""
Issue a delete object request to the container for the misplaced
object queue entry.
:param container: the misplaced objects container
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param q_record: the timestamp of the queue entry
        N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
"""
q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
q_path, q_ts, x_timestamp)
headers = {'X-Timestamp': x_timestamp}
direct_delete_container_entry(
self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
container, obj, headers=headers)
def throw_tombstones(self, account, container, obj, timestamp,
policy_index, path):
"""
Issue a delete object request to the given storage_policy.
:param account: the account name
:param container: the container name
:param obj: the object name
:param timestamp: the timestamp of the object to delete
:param policy_index: the policy index to direct the request
:param path: the path to be used for logging
"""
x_timestamp = slightly_later_timestamp(timestamp)
self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
'%s (%s) will be deleted',
path, timestamp, policy_index, x_timestamp)
headers = {
'X-Timestamp': x_timestamp,
'X-Backend-Storage-Policy-Index': policy_index,
}
success = False
try:
self.swift.delete_object(account, container, obj,
acceptable_statuses=(2, 404),
headers=headers)
except UnexpectedResponse as err:
self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
'in storage_policy %s (%s)', path, timestamp,
policy_index, err)
else:
success = True
self.stats_log('cleanup_success', '%r (%f) was successfully '
'removed from policy_index %s', path, timestamp,
policy_index)
return success
def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
q_op, path, **kwargs):
"""
Perform object reconciliation.
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the object name
:param q_policy_index: the policy index of the source indicated by the
queue entry.
:param q_ts: the timestamp of the misplaced object
:param q_op: the operation of the misplaced request
:param path: the full path of the misplaced object for logging
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
container_policy_index = direct_get_container_policy_index(
self.swift.container_ring, account, container)
if container_policy_index is None:
self.stats_log('unavailable_container', '%r (%f) unable to '
'determine the destination policy_index',
path, q_ts)
return False
if container_policy_index == q_policy_index:
self.stats_log('noop_object', '%r (%f) container policy_index '
'%s matches queue policy index %s', path, q_ts,
container_policy_index, q_policy_index)
return True
# check if object exists in the destination already
self.logger.debug('checking for %r (%f) in destination '
'policy_index %s', path, q_ts,
container_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index}
dest_obj = self.swift.get_object_metadata(account, container, obj,
headers=headers,
acceptable_statuses=(2, 4))
dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
if dest_ts >= q_ts:
self.stats_log('found_object', '%r (%f) in policy_index %s '
'is newer than queue (%f)', path, dest_ts,
container_policy_index, q_ts)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
# object is misplaced
self.stats_log('misplaced_object', '%r (%f) in policy_index %s '
'should be in policy_index %s', path, q_ts,
q_policy_index, container_policy_index)
# fetch object from the source location
self.logger.debug('fetching %r (%f) from storage policy %s', path,
q_ts, q_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': q_policy_index}
try:
source_obj_status, source_obj_info, source_obj_iter = \
self.swift.get_object(account, container, obj,
headers=headers,
acceptable_statuses=(2, 4))
except UnexpectedResponse as err:
source_obj_status = err.resp.status_int
source_obj_info = {}
source_obj_iter = None
source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
if source_obj_status == 404 and q_op == 'DELETE':
return self.ensure_tombstone_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts)
else:
return self.ensure_object_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts, source_obj_status,
source_obj_info, source_obj_iter)
def ensure_object_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
source_obj_status, source_obj_info,
source_obj_iter, **kwargs):
"""
Validate source object will satisfy the misplaced object queue entry
and move to destination.
:param q_policy_index: the policy_index for the source object
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param path: the full path of the misplaced object for logging
:param container_policy_index: the policy_index of the destination
:param source_ts: the timestamp of the source object
        :param source_obj_status: the HTTP status of the source object request
:param source_obj_info: the HTTP headers of the source object request
:param source_obj_iter: the body iter of the source object request
"""
if source_obj_status // 100 != 2 or source_ts < q_ts:
if q_ts < time.time() - self.reclaim_age:
# it's old and there are no tombstones or anything; give up
self.stats_log('lost_source', '%r (%s) was not available in '
'policy_index %s and has expired', path,
q_ts.internal, q_policy_index,
level=logging.CRITICAL)
return True
# the source object is unavailable or older than the queue
# entry; a version that will satisfy the queue entry hopefully
# exists somewhere in the cluster, so wait and try again
self.stats_log('unavailable_source', '%r (%s) in '
'policy_index %s responded %s (%s)', path,
q_ts.internal, q_policy_index, source_obj_status,
source_ts.internal, level=logging.WARNING)
return False
# optimistically move any source with a timestamp >= q_ts
ts = max(Timestamp(source_ts), q_ts)
# move the object
put_timestamp = slightly_later_timestamp(ts, offset=2)
self.stats_log('copy_attempt', '%r (%f) in policy_index %s will be '
'moved to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
headers = source_obj_info.copy()
headers['X-Backend-Storage-Policy-Index'] = container_policy_index
headers['X-Timestamp'] = put_timestamp
try:
self.swift.upload_object(
FileLikeIter(source_obj_iter), account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('copy_failed', 'upload %r (%f) from '
'policy_index %s to policy_index %s '
'returned %s', path, source_ts, q_policy_index,
container_policy_index, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to upload %r (%f) '
'from policy_index %s to policy_index %s ', path,
source_ts, q_policy_index, container_policy_index,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('copy_success', '%r (%f) moved from policy_index %s '
'to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def ensure_tombstone_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
**kwargs):
"""
Issue a DELETE request against the destination to match the
misplaced DELETE against the source.
"""
delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
self.stats_log('delete_attempt', '%r (%f) in policy_index %s '
'will be deleted from policy_index %s (%s)', path,
source_ts, q_policy_index, container_policy_index,
delete_timestamp)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index,
'X-Timestamp': delete_timestamp,
}
try:
self.swift.delete_object(account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('delete_failed', 'delete %r (%f) from '
'policy_index %s (%s) returned %s', path,
source_ts, container_policy_index,
delete_timestamp, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to delete %r (%f) '
'from policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('delete_success', '%r (%f) deleted from '
'policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.INFO)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def reconcile_object(self, info):
"""
Process a possibly misplaced object write request. Determine correct
destination storage policy by checking with primary containers. Check
source and destination, copying or deleting into destination and
cleaning up the source as needed.
This method wraps _reconcile_object for exception handling.
:param info: a queue entry dict
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
self.logger.debug('checking placement for %r (%f) '
'in policy_index %s', info['path'],
info['q_ts'], info['q_policy_index'])
success = False
try:
success = self._reconcile_object(**info)
except: # noqa
self.logger.exception('Unhandled Exception trying to '
'reconcile %r (%f) in policy_index %s',
info['path'], info['q_ts'],
info['q_policy_index'])
if success:
metric = 'success'
msg = 'was handled successfully'
else:
metric = 'retry'
msg = 'must be retried'
msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
self.stats_log(metric, msg, info, level=logging.INFO)
self.log_stats()
return success
def _iter_containers(self):
"""
Generate a list of containers to process.
"""
# hit most recent container first instead of waiting on the updaters
current_container = get_reconciler_container_name(time.time())
yield current_container
container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
self.logger.debug('looking for containers in %s',
MISPLACED_OBJECTS_ACCOUNT)
while True:
one_page = None
try:
one_page = list(itertools.islice(
container_gen, constraints.CONTAINER_LISTING_LIMIT))
except UnexpectedResponse as err:
self.logger.error('Error listing containers in '
'account %s (%s)',
MISPLACED_OBJECTS_ACCOUNT, err)
if not one_page:
# don't generally expect more than one page
break
# reversed order since we expect older containers to be empty
for c in reversed(one_page):
# encoding here is defensive
container = c['name'].encode('utf8')
if container == current_container:
continue # we've already hit this one this pass
yield container
def _iter_objects(self, container):
"""
Generate a list of objects to process.
:param container: the name of the container to process
If the given container is empty and older than reclaim_age this
processor will attempt to reap it.
"""
self.logger.debug('looking for objects in %s', container)
found_obj = False
try:
for raw_obj in self.swift.iter_objects(
MISPLACED_OBJECTS_ACCOUNT, container):
found_obj = True
yield raw_obj
except UnexpectedResponse as err:
self.logger.error('Error listing objects in container %s (%s)',
container, err)
if float(container) < time.time() - self.reclaim_age and \
not found_obj:
# Try to delete old empty containers so the queue doesn't
# grow without bound. It's ok if there's a conflict.
self.swift.delete_container(
MISPLACED_OBJECTS_ACCOUNT, container,
acceptable_statuses=(2, 404, 409, 412))
def reconcile(self):
"""
Main entry point for processing misplaced objects.
Iterate over all queue entries and delegate to reconcile_object.
"""
self.logger.debug('pulling items from the queue')
for container in self._iter_containers():
for raw_obj in self._iter_objects(container):
try:
obj_info = parse_raw_obj(raw_obj)
except Exception:
self.stats_log('invalid_record',
'invalid queue record: %r', raw_obj,
level=logging.ERROR, exc_info=True)
continue
finished = self.reconcile_object(obj_info)
if finished:
self.pop_queue(container, raw_obj['name'],
obj_info['q_ts'],
obj_info['q_record'])
self.log_stats()
self.logger.debug('finished container %s', container)
def run_once(self, *args, **kwargs):
"""
Process every entry in the queue.
"""
try:
self.reconcile()
except: # noqa
self.logger.exception('Unhandled Exception trying to reconcile')
self.log_stats(force=True)
def run_forever(self, *args, **kwargs):
while True:
self.run_once(*args, **kwargs)
self.stats = defaultdict(int)
self.logger.info('sleeping between intervals (%ss)', self.interval)
time.sleep(self.interval)
| |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
PyAMF SQLAlchemy adapter tests.
@since 0.4
"""
import unittest
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey, \
create_engine
from sqlalchemy.orm import mapper, relation, sessionmaker, clear_mappers
import pyamf.flex
from pyamf.tests.util import Spam
from pyamf.adapters import _sqlalchemy_orm as adapter
class BaseObject(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class User(BaseObject):
def __init__(self, **kwargs):
BaseObject.__init__(self, **kwargs)
self.lazy_loaded = [LazyLoaded()]
class Address(BaseObject):
pass
class LazyLoaded(BaseObject):
pass
class AnotherLazyLoaded(BaseObject):
pass
class BaseTestCase(unittest.TestCase):
"""
    Initialise all tables and mappers.
"""
def setUp(self):
# Create DB and map objects
self.metadata = MetaData()
self.engine = create_engine('sqlite:///:memory:', echo=False)
Session = sessionmaker(bind=self.engine)
self.session = Session()
self.tables = {}
self.tables['users'] = Table('users', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(64)))
self.tables['addresses'] = Table('addresses', self.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('users.id')),
Column('email_address', String(128)))
self.tables['lazy_loaded'] = Table('lazy_loaded', self.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('users.id')))
self.tables['another_lazy_loaded'] = Table('another_lazy_loaded', self.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('users.id')))
self.mappers = {}
self.mappers['user'] = mapper(User, self.tables['users'], properties={
'addresses': relation(Address, backref='user', lazy=False),
'lazy_loaded': relation(LazyLoaded, lazy=True),
'another_lazy_loaded': relation(AnotherLazyLoaded, lazy=True)
})
self.mappers['addresses'] = mapper(Address, self.tables['addresses'])
self.mappers['lazy_loaded'] = mapper(LazyLoaded,
self.tables['lazy_loaded'])
self.mappers['another_lazy_loaded'] = mapper(AnotherLazyLoaded,
self.tables['another_lazy_loaded'])
self.metadata.create_all(self.engine)
pyamf.register_class(User, 'server.User')
pyamf.register_class(Address, 'server.Address')
pyamf.register_class(LazyLoaded, 'server.LazyLoaded')
def tearDown(self):
clear_mappers()
pyamf.unregister_class(User)
pyamf.unregister_class(Address)
pyamf.unregister_class(LazyLoaded)
def _build_obj(self):
user = User()
user.name = "test_user"
user.addresses.append(Address(email_address="test@example.org"))
return user
def _save(self, obj):
# this covers deprecation warnings etc.
if hasattr(self.session, 'add'):
self.session.add(obj)
elif hasattr(self.session, 'save'):
self.session.save(obj)
else:
raise AttributeError('Don\'t know how to save an object')
def _clear(self):
# this covers deprecation warnings etc.
if hasattr(self.session, 'expunge_all'):
self.session.expunge_all()
elif hasattr(self.session, 'clear'):
self.session.clear()
else:
raise AttributeError('Don\'t know how to clear session')
class SATestCase(BaseTestCase):
def _test_obj(self, encoded, decoded):
self.assertEquals(User, decoded.__class__)
self.assertEquals(encoded.name, decoded.name)
self.assertEquals(encoded.addresses[0].email_address, decoded.addresses[0].email_address)
def test_encode_decode_transient(self):
user = self._build_obj()
encoder = pyamf.get_encoder(pyamf.AMF3)
encoder.writeElement(user)
encoded = encoder.stream.getvalue()
decoded = pyamf.get_decoder(pyamf.AMF3, encoded).readElement()
self._test_obj(user, decoded)
def test_encode_decode_persistent(self):
user = self._build_obj()
self._save(user)
self.session.commit()
self.session.refresh(user)
encoder = pyamf.get_encoder(pyamf.AMF3)
encoder.writeElement(user)
encoded = encoder.stream.getvalue()
decoded = pyamf.get_decoder(pyamf.AMF3, encoded).readElement()
self._test_obj(user, decoded)
def test_encode_decode_list(self):
max = 5
for i in range(0, max):
user = self._build_obj()
user.name = "%s" % i
self._save(user)
self.session.commit()
users = self.session.query(User).all()
encoder = pyamf.get_encoder(pyamf.AMF3)
encoder.writeElement(users)
encoded = encoder.stream.getvalue()
decoded = pyamf.get_decoder(pyamf.AMF3, encoded).readElement()
self.assertEquals([].__class__, decoded.__class__)
for i in range(0, max):
self._test_obj(users[i], decoded[i])
def test_sa_merge(self):
user = self._build_obj()
for i, string in enumerate(['one', 'two', 'three']):
addr = Address(email_address="%s@example.org" % string)
user.addresses.append(addr)
self._save(user)
self.session.commit()
self.session.refresh(user)
encoder = pyamf.get_encoder(pyamf.AMF3)
encoder.writeElement(user)
encoded = encoder.stream.getvalue()
decoded = pyamf.get_decoder(pyamf.AMF3, encoded).readElement()
del decoded.addresses[0]
del decoded.addresses[1]
merged_user = self.session.merge(decoded)
self.assertEqual(len(merged_user.addresses), 2)
def test_encode_decode_with_references(self):
user = self._build_obj()
self._save(user)
self.session.commit()
self.session.refresh(user)
max = 5
users = []
for i in range(0, max):
users.append(user)
encoder = pyamf.get_encoder(pyamf.AMF3)
encoder.writeElement(users)
encoded = encoder.stream.getvalue()
decoded = pyamf.get_decoder(pyamf.AMF3, encoded).readElement()
for i in range(0, max):
self.assertEquals(id(decoded[0]), id(decoded[i]))
class BaseClassAliasTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.alias = pyamf.get_class_alias(User)
class ClassAliasTestCase(BaseClassAliasTestCase):
def test_type(self):
self.assertEquals(self.alias.__class__, adapter.SaMappedClassAlias)
def test_get_mapper(self):
self.assertFalse(hasattr(self.alias, 'mapper'))
self.alias.compile()
mapper = adapter.class_mapper(User)
self.assertTrue(hasattr(self.alias, 'mapper'))
self.assertEquals(id(mapper), id(self.alias.mapper))
def test_get_attrs(self):
u = self._build_obj()
static, dynamic = self.alias.getEncodableAttributes(u)
self.assertEquals(static.keys(), [
'id',
'lazy_loaded',
'addresses',
'name',
'another_lazy_loaded'
])
self.assertEquals(dynamic, {'sa_key': [None], 'sa_lazy': []})
def test_get_attributes(self):
u = self._build_obj()
self.assertFalse(u in self.session)
self.assertEquals([None], self.mappers['user'].primary_key_from_instance(u))
static, dynamic = self.alias.getEncodableAttributes(u)
self.assertEquals(static, {
'addresses': u.addresses,
'lazy_loaded': u.lazy_loaded,
'another_lazy_loaded': [],
'id': None,
'name': 'test_user'
})
self.assertEquals(dynamic, {
'sa_lazy': [],
'sa_key': [None]
})
def test_property(self):
class Person(object):
foo = 'bar'
baz = 'gak'
def _get_rw_property(self):
return self.foo
def _set_rw_property(self, val):
self.foo = val
def _get_ro_property(self):
return self.baz
rw = property(_get_rw_property, _set_rw_property)
ro = property(_get_ro_property)
self.mappers['person'] = mapper(Person, self.tables['users'])
alias = adapter.SaMappedClassAlias(Person, 'person')
obj = Person()
sa, da = alias.getEncodableAttributes(obj)
self.assertEquals(sa, {
'id': None,
'name': None})
self.assertEquals(da, {
'sa_key': [None],
'sa_lazy': [],
'rw': 'bar',
'ro': 'gak'})
self.assertEquals(obj.ro, 'gak')
alias.applyAttributes(obj, {
'sa_key': [None],
'sa_lazy': [],
'id': None,
'name': None,
'rw': 'bar',
'ro': 'baz'})
self.assertEquals(obj.ro, 'gak')
class ApplyAttributesTestCase(BaseClassAliasTestCase):
def test_undefined(self):
u = self.alias.createInstance()
attrs = {
'sa_lazy': ['another_lazy_loaded'],
'sa_key': [None],
'addresses': [],
'lazy_loaded': [],
'another_lazy_loaded': pyamf.Undefined, # <-- the important bit
'id': None,
'name': 'test_user'
}
self.alias.applyAttributes(u, attrs)
d = u.__dict__.copy()
if sqlalchemy.__version__.startswith('0.4'):
self.assertTrue('_state' in d)
del d['_state']
elif sqlalchemy.__version__.startswith('0.5'):
self.assertTrue('_sa_instance_state' in d)
del d['_sa_instance_state']
self.assertEquals(d, {
'lazy_loaded': [],
'addresses': [],
'name': 'test_user',
'id': None
})
def test_decode_unaliased(self):
u = self.alias.createInstance()
attrs = {
'sa_lazy': [],
'sa_key': [None],
'addresses': [],
'lazy_loaded': [],
# this is important because we haven't registered AnotherLazyLoaded
# as an alias and the decoded object for an untyped object is an
# instance of pyamf.ASObject
'another_lazy_loaded': [pyamf.ASObject({'id': 1, 'user_id': None})],
'id': None,
'name': 'test_user'
}
# sqlalchemy can't find any state to work with
self.assertRaises(AttributeError, self.alias.applyAttributes, u, attrs)
class AdapterTestCase(BaseTestCase):
"""
Checks to see if the adapter will actually intercept a class correctly.
"""
def test_mapped(self):
self.assertNotEquals(None, adapter.class_mapper(User))
self.assertTrue(adapter.is_class_sa_mapped(User))
def test_instance(self):
u = User()
self.assertTrue(adapter.is_class_sa_mapped(u))
def test_not_mapped(self):
self.assertRaises(adapter.UnmappedInstanceError, adapter.class_mapper, Spam)
self.assertFalse(adapter.is_class_sa_mapped(Spam))
def suite():
suite = unittest.TestSuite()
try:
import pysqlite2
except ImportError:
return suite
classes = [
SATestCase,
AdapterTestCase,
ClassAliasTestCase,
ApplyAttributesTestCase
]
for x in classes:
suite.addTest(unittest.makeSuite(x))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import re
import hmac
import version
from util import print_error, InvalidPassword
import ecdsa
import aes
################################## transactions
RECOMMENDED_FEE = 10000
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transaction outputs
TYPE_ADDRESS = 53
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
EncodeAES = lambda secret, s: base64.b64encode(aes.encryptData(secret,s))
DecodeAES = lambda secret, e: aes.decryptData(secret, base64.b64decode(e))
def strip_PKCS7_padding(s):
"""return s stripped of PKCS7 padding"""
if len(s)%16 or not s:
raise ValueError("String of len %d can't be PCKS7-padded" % len(s))
numpads = ord(s[-1])
if numpads > 16:
raise ValueError("String ending with %r can't be PCKS7-padded" % s[-1])
if s[-numpads:] != numpads*chr(numpads):
raise ValueError("Invalid PKCS7 padding")
return s[:-numpads]
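# For example, a 16-byte message padded with a full block of chr(16) strips
# back to the original bytes:
#
#   >>> strip_PKCS7_padding('data to encrypt!' + chr(16) * 16)
#   'data to encrypt!'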
# backport padding fix to AES module
aes.strip_PKCS7_padding = strip_PKCS7_padding
def aes_encrypt_with_iv(key, iv, data):
mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
key = map(ord, key)
iv = map(ord, iv)
data = aes.append_PKCS7_padding(data)
keysize = len(key)
assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
moo = aes.AESModeOfOperation()
(mode, length, ciph) = moo.encrypt(data, mode, key, keysize, iv)
return ''.join(map(chr, ciph))
def aes_decrypt_with_iv(key, iv, data):
mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
key = map(ord, key)
iv = map(ord, iv)
keysize = len(key)
assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
data = map(ord, data)
moo = aes.AESModeOfOperation()
decr = moo.decrypt(data, None, mode, key, keysize, iv)
decr = strip_PKCS7_padding(decr)
return decr
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, s.encode("utf8"))
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = DecodeAES(secret, s).decode("utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
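# pw_encode/pw_decode form a round trip (password and plaintext here are
# illustrative):
#
#   >>> pw_decode(pw_encode(u'secret', 'pw'), 'pw')
#   u'secret'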
def rev_hex(s):
return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
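# For example, pushes of up to 75 bytes encode the length directly, while
# 76 bytes and above use OP_PUSHDATA1 ('4c'):
#
#   >>> op_push(75), op_push(76)
#   ('4b', '4c4c')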
def sha256(x):
return hashlib.sha256(x).digest()
def Hash(x):
if type(x) is unicode: x=x.encode('utf-8')
return sha256(sha256(x))
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
import mnemonic
x = mnemonic.prepare_seed(x)
s = hmac_sha_512("Seed version", x.encode('utf8')).encode('hex')
return s.startswith(prefix)
def is_old_seed(seed):
import old_mnemonic
words = seed.strip().split()
try:
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed.decode('hex')
is_hex = (len(seed) == 32 or len(seed) == 64)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return key.decode('hex')
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
md = hashlib.new('ripemd160')
md.update(sha256(public_key))
return md.digest()
def public_key_to_bc_address(public_key):
h160 = hash_160(public_key)
return hash_160_to_bc_address(h160)
def hash_160_to_bc_address(h160, addrtype = 53):
vh160 = chr(addrtype) + h160
h = Hash(vh160)
addr = vh160 + h[0:4]
return base_encode(addr, base=58)
def bc_address_to_hash_160(addr):
bytes = base_decode(addr, 25, base=58)
return ord(bytes[0]), bytes[1:21]
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
    """Encode v, a string of bytes, to the given base (58 or 43)."""
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        raise ValueError('unsupported base: %r' % base)
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= base:
div, mod = divmod(long_value, base)
result = chars[mod] + result
long_value = div
result = chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (chars[0]*nPad) + result
def base_decode(v, length, base):
    """Decode v into a string of `length` bytes (any length if None)."""
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        raise ValueError('unsupported base: %r' % base)
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(c) * (base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
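# EncodeBase58Check/DecodeBase58Check round-trip on arbitrary bytes (the
# payload is illustrative); DecodeBase58Check returns None on a bad
# checksum:
#
#   >>> DecodeBase58Check(EncodeBase58Check('\x00payload')) == '\x00payload'
#   True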
def PrivKeyToSecret(privkey):
return privkey[9:9+32]
def SecretToASecret(secret, compressed=False, addrtype=53):
vchIn = chr((addrtype+128)&255) + secret
if compressed: vchIn += '\01'
return EncodeBase58Check(vchIn)
def ASecretToSecret(key, addrtype=53):
vch = DecodeBase58Check(key)
if vch and vch[0] == chr((addrtype+128)&255):
return vch[1:]
elif is_minikey(key):
return minikey_to_private_key(key)
else:
return False
def regenerate_key(sec):
b = ASecretToSecret(sec)
if not b:
return False
b = b[0:32]
return EC_KEY(b)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return ('%064x' % pkey.secret).decode('hex')
def is_compressed(sec):
b = ASecretToSecret(sec)
return len(b) == 33
def public_key_from_private_key(sec):
# rebuild public key from private key, compressed or uncompressed
pkey = regenerate_key(sec)
assert pkey
compressed = is_compressed(sec)
public_key = GetPubKey(pkey.pubkey, compressed)
return public_key.encode('hex')
def address_from_private_key(sec):
public_key = public_key_from_private_key(sec)
address = public_key_to_bc_address(public_key.decode('hex'))
return address
def is_valid(addr):
return is_address(addr)
def is_address(addr):
ADDRESS_RE = re.compile('[1-9A-HJ-NP-Za-km-z]{26,}\\Z')
if not ADDRESS_RE.match(addr):
return False
try:
addrtype, h = bc_address_to_hash_160(addr)
except Exception:
return False
if addrtype not in [53, 85]:
return False
return addr == hash_160_to_bc_address(h, addrtype)
def is_private_key(key):
try:
k = ASecretToSecret(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(c in __b58chars for c in text)
and ord(sha256(text + '?')[0]) == 0)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
varint = var_int(len(message))
encoded_varint = "".join([chr(int(varint[i:i+2], 16)) for i in xrange(0, len(varint), 2)])
return "\x18Bitcoin Signed Message:\n" + encoded_varint + message
def verify_message(address, signature, message):
try:
EC_KEY.verify_message(address, signature, message)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey):
return EC_KEY.encrypt_message(message, pubkey.decode('hex'))
def chunks(l, n):
return [l[i:i+n] for i in xrange(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)/4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return ( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) ).decode('hex')
return ( '04'+('%064x'%P.x())+('%064x'%P.y()) ).decode('hex')
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in ['\x02','\x03','\x04']
if Aser[0] == '\x04':
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0]=='\x03')[0], _r )
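# point_to_ser/ser_to_point form a round trip; using the curve generator as
# an example point (compressed form shown):
#
#   >>> G = generator_secp256k1
#   >>> P = ser_to_point(point_to_ser(G, comp=True))
#   >>> (P.x(), P.y()) == (G.x(), G.y())
#   True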
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid/2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order/2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return point_to_ser(self.pubkey.point, compressed).encode('hex')
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, compressed, address):
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = chr(27 + i + (4 if compressed else 0)) + signature
try:
self.verify_message(address, sig, message)
return sig
except Exception:
continue
else:
raise Exception("error: cannot sign message")
@classmethod
def verify_message(self, address, sig, message):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = ord(sig[0])
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
h = Hash(msg_magic(message))
public_key = MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1)
# check public key
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
pubkey = point_to_ser(public_key.pubkey.point, compressed)
# check that we get the original signing address
addr = public_key_to_bc_address(pubkey)
if address != addr:
raise Exception("Bad signature")
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True).decode('hex')
encrypted = 'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != 'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise Exception('invalid ciphertext: invalid mac')
return aes_decrypt_with_iv(key_e, iv, ciphertext)
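# Illustrative note: before base64 encoding, the ECIES payload built by
# encrypt_message() is
#   'BIE1' || ephemeral_pubkey (33 bytes) || ciphertext || hmac-sha256 (32 bytes)
# which is why decrypt_message() rejects anything shorter than 85 bytes
# (4 + 33 + one 16-byte AES block + 32).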
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
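# Note (sketch): K is the raw 64-byte X||Y point from to_string(), while
# K_compressed is the 33-byte SEC1 form (0x02/0x03 prefix + X coordinate)
# produced by GetPubKey(..., True).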
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, rev_hex(int_to_hex(n,4)).decode('hex'), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = chr(0) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
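# Minimal usage sketch (hypothetical values, Python 2 style as in this module):
#   k, c = master_k, master_c                  # 32-byte strings, e.g. from bip32_root()
#   k0, c0 = CKD_priv(k, c, 0)                 # non-hardened child m/0
#   kh, ch = CKD_priv(k, c, 0 | BIP32_PRIME)   # hardened child m/0'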
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, rev_hex(int_to_hex(n,4)).decode('hex'))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
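# Sketch: for a non-hardened index n, public derivation stays in sync with
# private derivation; assuming cK is the compressed public key of k:
#   cK_n, c_n = CKD_pub(cK, c, n)
#   k_n, _ = CKD_priv(k, c, n)   # GetPubKey(EC_KEY(k_n).pubkey, True) == cK_n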
BITCOIN_HEADER_PRIV = "0488ade4"
BITCOIN_HEADER_PUB = "0488b21e"
TESTNET_HEADER_PRIV = "04358394"
TESTNET_HEADER_PUB = "043587cf"
BITCOIN_HEADERS = (BITCOIN_HEADER_PUB, BITCOIN_HEADER_PRIV)
TESTNET_HEADERS = (TESTNET_HEADER_PUB, TESTNET_HEADER_PRIV)
def _get_headers(testnet):
"""Returns the correct headers for either testnet or navcoin, in the form
of a 2-tuple, like (public, private)."""
if testnet:
return TESTNET_HEADERS
else:
return BITCOIN_HEADERS
def deserialize_xkey(xkey):
xkey = DecodeBase58Check(xkey)
assert len(xkey) == 78
xkey_header = xkey[0:4].encode('hex')
# Determine if the key is a navcoin key or a testnet key.
if xkey_header in TESTNET_HEADERS:
head = TESTNET_HEADER_PRIV
elif xkey_header in BITCOIN_HEADERS:
head = BITCOIN_HEADER_PRIV
else:
raise Exception("Unknown xkey header: '%s'" % xkey_header)
depth = ord(xkey[4])
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
if xkey[0:4].encode('hex') == head:
K_or_k = xkey[13+33:]
else:
K_or_k = xkey[13+32:]
return depth, fingerprint, child_number, c, K_or_k
def get_xkey_name(xkey, testnet=False):
depth, fingerprint, child_number, c, K = deserialize_xkey(xkey)
n = int(child_number.encode('hex'), 16)
if n & BIP32_PRIME:
child_id = "%d'"%(n - BIP32_PRIME)
else:
child_id = "%d"%n
if depth == 0:
return ''
elif depth == 1:
return child_id
else:
raise BaseException("xpub depth error")
def xpub_from_xprv(xprv, testnet=False):
depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
K, cK = get_pubkeys_from_secret(k)
header_pub, _ = _get_headers(testnet)
xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def bip32_root(seed, testnet=False):
header_pub, header_priv = _get_headers(testnet)
I = hmac.new("Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = (header_priv + "00" + "00000000" + "00000000").decode("hex") + master_c + chr(0) + master_k
xpub = (header_pub + "00" + "00000000" + "00000000").decode("hex") + master_c + cK
return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def xpub_from_pubkey(cK, testnet=False):
header_pub, header_priv = _get_headers(testnet)
assert cK[0] in ['\x02','\x03']
master_c = chr(0)*32
xpub = (header_pub + "00" + "00000000" + "00000000").decode("hex") + master_c + cK
return EncodeBase58Check(xpub)
def bip32_private_derivation(xprv, branch, sequence, testnet=False):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv, testnet)
header_pub, header_priv = _get_headers(testnet)
depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
K, cK = get_pubkeys_from_secret(k)
xprv = header_priv.decode('hex') + chr(depth) + fingerprint + child_number + c + chr(0) + k
xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def bip32_public_derivation(xpub, branch, sequence, testnet=False):
header_pub, _ = _get_headers(testnet)
depth, fingerprint, child_number, c, cK = deserialize_xkey(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return SecretToASecret(k, True)
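# Example (hypothetical): the WIF key for path m/0'/1 under (k, chain) would be
#   bip32_private_key([0 | BIP32_PRIME, 1], k, chain)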
| |
#$Id$
from projects.model.Bug import Bug
from projects.model.Project import Project
from projects.model.Defaultfield import Defaultfield
from projects.model.Customfield import Customfield
class BugsParser:
"""This class is used to parse the json response for Bugs."""
def to_json(self, bug):
"""This method is used to create json object for bug object.
Args:
bug(instance): Bug object.
Returns:
dict: Json object for bugs object.
"""
data = {}
if bug.get_title() != "":
data['title'] = bug.get_title()
if bug.get_description() != "":
data['description'] = bug.get_description()
if bug.get_assignee_id() != 0:
data['assignee'] = bug.get_assignee_id()
if bug.get_flag() != "":
data['flag'] = bug.get_flag()
if bug.get_classification_id() != 0:
data['classification_id'] = bug.get_classification_id()
if bug.get_milestone_id() != 0:
data['milestone_id'] = bug.get_milestone_id()
if bug.get_due_date() != "":
data['due_date'] = bug.get_due_date()
if bug.get_module_id() != 0:
data['module_id'] = bug.get_module_id()
if bug.get_severity_id() != 0:
data['severity_id'] = bug.get_severity_id()
if bug.get_reproducible_id() != 0:
data['reproducible_id'] = bug.get_reproducible_id()
return data
def get_bug(self, resp):
"""This method parses the given response and returns bug object.
Args:
resp(dict): Response containing json object for bug.
Returns:
instance: Bug object.
"""
bug = Bug()
if 'id' in resp:
bug.set_id(resp['id'])
if 'key' in resp:
bug.set_key(resp['key'])
if 'project' in resp:
if 'id' in resp['project']:
project = Project()
project.set_id(resp['project']['id'])
bug.set_project(project)
if 'flag' in resp:
bug.set_flag(resp['flag'])
if 'title' in resp:
bug.set_title(resp['title'])
if 'reporter_id' in resp:
bug.set_reporter_id(resp['reporter_id'])
if 'reported_person' in resp:
bug.set_reported_person(resp['reported_person'])
if 'created_time' in resp:
bug.set_created_time(resp['created_time'])
if 'created_time_format' in resp:
bug.set_created_time_format(resp['created_time_format'])
if 'created_time_long' in resp:
bug.set_created_time_long(resp['created_time_long'])
if 'assignee_name' in resp:
bug.set_assignee_name(resp['assignee_name'])
if 'classification' in resp:
if 'id' in resp['classification']:
bug.set_classification_id(resp['classification']['id'])
if 'type' in resp['classification']:
bug.set_classification_type(resp['classification']['type'])
if 'severity' in resp:
if 'id' in resp['severity']:
bug.set_severity_id(resp['severity']['id'])
if 'type' in resp['severity']:
bug.set_severity_type(resp['severity']['type'])
if 'status' in resp:
if 'id' in resp['status']:
bug.set_status_id(resp['status']['id'])
if 'type' in resp['status']:
bug.set_status_type(resp['status']['type'])
if 'closed' in resp:
bug.set_closed(resp['closed'])
if 'reproducible' in resp:
if 'id' in resp['reproducible']:
bug.set_reproducible_id(resp['reproducible']['id'])
if 'type' in resp['reproducible']:
bug.set_reproducible_type(resp['reproducible']['type'])
if 'module' in resp:
if 'id' in resp['module']:
bug.set_module_id(resp['module']['id'])
if 'name' in resp['module']:
bug.set_module_name(resp['module']['name'])
if 'link' in resp:
link = resp['link']
if 'self' in link:
if 'url' in link['self']:
bug.set_url(link['self']['url'])
if 'timesheet' in link:
if 'url' in link['timesheet']:
bug.set_timesheet_url(link['timesheet']['url'])
return bug
def get_bugs(self, resp):
"""This method parses the given response and returns list of bugs object.
Args:
resp(dict): Dictionary containing json response for bugs.
Returns:
list of instance: List of bugs object.
"""
bugs = []
for value in resp['bugs']:
bug = self.get_bug(value)
bugs.append(bug)
return bugs
def get_message(self, resp):
"""This method is used to parse the given response and returns string message.
Args:
resp(dict): Response containing json object for message.
Returns:
str: Success message.
"""
return resp['response']
def get_default_fields(self, resp):
"""
Parse the JSON response and make it into the Default field object.
Args:
resp(dict): Response cotains the details of the default fields.
Returns:
instance: Defaultfield object.
"""
defaultfield = Defaultfield();
if 'defaultfields' in resp:
defaultfields = resp['defaultfields'];
if 'severity_details' in defaultfields:
severity_details = defaultfields['severity_details'];
severitydetails = [];
for json in severity_details:
severitydetails.append(self.json_to_dict(json));
defaultfield.set_severity_details(severitydetails);
if 'status_deatils' in defaultfields:
status_deatils = defaultfields['status_deatils'];
statusdeatils = [];
for json in status_deatils:
statusdeatils.append(self.json_to_dict(json));
defaultfield.set_status_deatils(statusdeatils);
if 'module_details' in defaultfields:
module_details = defaultfields['module_details'];
moduledetails = [];
for json in module_details:
moduledetails.append(self.json_to_dict(json));
defaultfield.set_module_details(moduledetails);
if 'priority_details' in defaultfields:
priority_details = defaultfields['priority_details'];
prioritydetails = [];
for json in priority_details:
prioritydetails.append(self.json_to_dict(json));
defaultfield.set_priority_details(prioritydetails);
if 'classification_details' in defaultfields:
classification_details = defaultfields['classification_details'];
classificationdetails = [];
for json in classification_details:
classificationdetails.append(self.json_to_dict(json));
defaultfield.set_classification_details(classificationdetails);
return defaultfield;
def json_to_dict(self, json):
'''
Parse the JSON response into dict object.
Args:
json(dict): Dictionary object.
Returns:
dict : Returns the dictionary object.
'''
details = {}
for key, value in json.items():
details[key] = value
return details
def get_custom_fields(self, resp):
'''
Parse the JSON response and make it into the list of Customfield objects.
Args:
resp(dict): Response contains the details of the custom fields.
Returns:
list of instance: Returns list of Customfield objects.
'''
customfields_list = []
if 'customfields' in resp:
customfields = resp['customfields']
for json_obj in customfields:
customfields_list.append(self.json_to_customfield(json_obj))
return customfields_list
def json_to_customfield(self, json_obj):
'''
Parse the JSON object into Customfield object.
Args:
json_obj(dict): JSON response contains the details of the custom field.
Returns:
instance: Returns the Customfield object.
'''
customfield = Customfield()
if 'label_name' in json_obj:
customfield.set_label_name(json_obj['label_name'])
if 'column_name' in json_obj:
customfield.set_column_name(json_obj['column_name'])
# note: the 'default_Value' capitalization matches the upstream API key
if 'default_Value' in json_obj:
customfield.set_default_value(json_obj['default_Value'])
if 'picklist_values' in json_obj:
picklist_values = json_obj['picklist_values']
picklistvalues = list(picklist_values)
customfield.set_picklist_values(picklistvalues)
return customfield
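# Minimal usage sketch (hypothetical response shape, assuming the Bug model
# defaults unset fields to "" or 0):
#   parser = BugsParser()
#   bug = parser.get_bug({'id': 1, 'title': 'Crash on save'})
#   payload = parser.to_json(bug)   # {'title': 'Crash on save'}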
| |
"""Test the Smappee component config flow module."""
from http import HTTPStatus
from unittest.mock import patch
from homeassistant import data_entry_flow, setup
from homeassistant.components.smappee.const import (
CONF_HOSTNAME,
CONF_SERIALNUMBER,
DOMAIN,
ENV_CLOUD,
ENV_LOCAL,
TOKEN_URL,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
async def test_show_user_form(hass):
"""Test that the user set up form is served."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_show_user_host_form(hass):
"""Test that the host form is served after choosing the local option."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_LOCAL}
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_show_zeroconf_connection_error_form(hass):
"""Test that the zeroconf confirmation form is served."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee1006000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee1006000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["description_placeholders"] == {CONF_SERIALNUMBER: "1006000212"}
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
assert len(hass.config_entries.async_entries(DOMAIN)) == 0
async def test_show_zeroconf_connection_error_form_next_generation(hass):
"""Test that the zeroconf confirmation form is served."""
with patch("pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=False):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee5001000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee5001000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["description_placeholders"] == {CONF_SERIALNUMBER: "5001000212"}
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
assert len(hass.config_entries.async_entries(DOMAIN)) == 0
async def test_connection_error(hass):
"""Test we show user form on Smappee connection error."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None), patch(
"pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=None
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_LOCAL}
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["reason"] == "cannot_connect"
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_user_local_connection_error(hass):
"""Test we show user form on Smappee connection error in local next generation option."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value=None), patch(
"pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=True
), patch("pysmappee.mqtt.SmappeeLocalMqtt.start", return_value=True), patch(
"pysmappee.mqtt.SmappeeLocalMqtt.stop", return_value=True
), patch(
"pysmappee.mqtt.SmappeeLocalMqtt.is_config_ready", return_value=None
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_LOCAL}
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["reason"] == "cannot_connect"
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_wrong_mdns(hass):
"""Test we abort if unsupported mDNS name is discovered."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "example.local.",
"type": "_ssh._tcp.local.",
"name": "example._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["reason"] == "invalid_mdns"
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_full_user_wrong_mdns(hass):
"""Test we abort user flow if unsupported mDNS name got resolved."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee5100000001"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_LOCAL}
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "invalid_mdns"
async def test_user_device_exists_abort(hass):
"""Test we abort user flow if Smappee device already configured."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
):
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "1.2.3.4"},
unique_id="1006000212",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_LOCAL}
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_device_exists_abort(hass):
"""Test we abort zeroconf flow if Smappee device already configured."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
):
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "1.2.3.4"},
unique_id="1006000212",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee1006000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee1006000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_cloud_device_exists_abort(hass):
"""Test we abort cloud flow if Smappee Cloud device already configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="smappeeCloud",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured_device"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_abort_if_cloud_device_exists(hass):
"""Test we abort zeroconf flow if Smappee Cloud device already configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="smappeeCloud",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee1006000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee1006000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured_device"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_zeroconf_confirm_abort_if_cloud_device_exists(hass):
"""Test we abort zeroconf confirm flow if Smappee Cloud device already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee1006000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee1006000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="smappeeCloud",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured_device"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_abort_cloud_flow_if_local_device_exists(hass):
"""Test we abort the cloud flow if a Smappee local device already configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "1.2.3.4"},
unique_id="1006000212",
source=SOURCE_USER,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_CLOUD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured_local_device"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_full_user_flow(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: CLIENT_ID, CONF_CLIENT_SECRET: CLIENT_SECRET},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"environment": ENV_CLOUD}
)
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await hass_client_no_auth()
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == HTTPStatus.OK
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
TOKEN_URL["PRODUCTION"],
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.smappee.async_setup_entry", return_value=True
) as mock_setup:
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
async def test_full_zeroconf_flow(hass):
"""Test the full zeroconf flow."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
), patch(
"homeassistant.components.smappee.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee1006000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee1006000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
assert result["description_placeholders"] == {CONF_SERIALNUMBER: "1006000212"}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "smappee1006000212"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.unique_id == "1006000212"
async def test_full_user_local_flow(hass):
"""Test the full zeroconf flow."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
), patch(
"homeassistant.components.smappee.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "environment"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["description_placeholders"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"environment": ENV_LOCAL},
)
assert result["step_id"] == ENV_LOCAL
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "smappee1006000212"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.unique_id == "1006000212"
async def test_full_zeroconf_flow_next_generation(hass):
"""Test the full zeroconf flow."""
with patch(
"pysmappee.mqtt.SmappeeLocalMqtt.start_attempt", return_value=True
), patch("pysmappee.mqtt.SmappeeLocalMqtt.start", return_value=None,), patch(
"pysmappee.mqtt.SmappeeLocalMqtt.is_config_ready",
return_value=None,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={
"host": "1.2.3.4",
"port": 22,
CONF_HOSTNAME: "Smappee5001000212.local.",
"type": "_ssh._tcp.local.",
"name": "Smappee5001000212._ssh._tcp.local.",
"properties": {"_raw": {}},
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
assert result["description_placeholders"] == {CONF_SERIALNUMBER: "5001000212"}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "smappee5001000212"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.unique_id == "5001000212"
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from typing import Union, TypeVar, Generic
from pyflink import add_version_doc
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, _to_java_data_type
from pyflink.util.java_utils import to_jarray
__all__ = ['Expression', 'TimeIntervalUnit', 'TimePointUnit']
_aggregation_doc = """
{op_desc}
Example:
::
>>> tab \\
>>> .group_by(col("a")) \\
>>> .select(col("a"),
>>> col("b").sum.alias("d"),
>>> col("b").sum0.alias("e"),
>>> col("b").min.alias("f"),
>>> col("b").max.alias("g"),
>>> col("b").count.alias("h"),
>>> col("b").avg.alias("i"),
>>> col("b").stddev_pop.alias("j"),
>>> col("b").stddev_samp.alias("k"),
>>> col("b").var_pop.alias("l"),
>>> col("b").var_samp.alias("m"),
>>> col("b").collect.alias("n"))
.. seealso:: :py:attr:`~Expression.sum`, :py:attr:`~Expression.sum0`, :py:attr:`~Expression.min`,
:py:attr:`~Expression.max`, :py:attr:`~Expression.count`, :py:attr:`~Expression.avg`,
:py:attr:`~Expression.stddev_pop`, :py:attr:`~Expression.stddev_samp`,
:py:attr:`~Expression.var_pop`, :py:attr:`~Expression.var_samp`,
:py:attr:`~Expression.collect`
"""
_math_log_doc = """
{op_desc}
.. seealso:: :py:attr:`~Expression.log10`, :py:attr:`~Expression.log2`, :py:attr:`~Expression.ln`,
:func:`~Expression.log`
"""
_math_trigonometric_doc = """
Calculates the {op_desc} of a given number.
.. seealso:: :py:attr:`~Expression.sin`, :py:attr:`~Expression.cos`, :py:attr:`~Expression.sinh`,
:py:attr:`~Expression.cosh`, :py:attr:`~Expression.tan`, :py:attr:`~Expression.cot`,
:py:attr:`~Expression.asin`, :py:attr:`~Expression.acos`, :py:attr:`~Expression.atan`,
:py:attr:`~Expression.tanh`
"""
_string_doc_seealso = """
.. seealso:: :func:`~Expression.trim_leading`, :func:`~Expression.trim_trailing`,
:func:`~Expression.trim`, :func:`~Expression.replace`,
:py:attr:`~Expression.char_length`, :py:attr:`~Expression.upper_case`,
:py:attr:`~Expression.lower_case`, :py:attr:`~Expression.init_cap`,
:func:`~Expression.like`, :func:`~Expression.similar`,
:func:`~Expression.position`, :func:`~Expression.lpad`, :func:`~Expression.rpad`,
:func:`~Expression.overlay`, :func:`~Expression.regexp_replace`,
:func:`~Expression.regexp_extract`, :func:`~Expression.substring`,
:py:attr:`~Expression.from_base64`, :py:attr:`~Expression.to_base64`,
:py:attr:`~Expression.ltrim`, :py:attr:`~Expression.rtrim`, :func:`~Expression.repeat`
"""
_temporal_doc_seealso = """
.. seealso:: :py:attr:`~Expression.to_date`, :py:attr:`~Expression.to_time`,
:py:attr:`~Expression.to_timestamp`, :func:`~Expression.extract`,
:func:`~Expression.floor`, :func:`~Expression.ceil`
"""
_time_doc = """
Creates an interval of the given number of {op_desc}.
The produced expression is of type :func:`~DataTypes.INTERVAL`.
.. seealso:: :py:attr:`~Expression.year`, :py:attr:`~Expression.years`,
:py:attr:`~Expression.quarter`, :py:attr:`~Expression.quarters`,
:py:attr:`~Expression.month`, :py:attr:`~Expression.months`,
:py:attr:`~Expression.week`, :py:attr:`~Expression.weeks`, :py:attr:`~Expression.day`,
:py:attr:`~Expression.days`, :py:attr:`~Expression.hour`, :py:attr:`~Expression.hours`,
:py:attr:`~Expression.minute`, :py:attr:`~Expression.minutes`,
:py:attr:`~Expression.second`, :py:attr:`~Expression.seconds`,
:py:attr:`~Expression.milli`, :py:attr:`~Expression.millis`
"""
_hash_doc = """
Returns the {op_desc} hash of the string argument; null if string is null.
:return: string of {bit} hexadecimal digits or null.
.. seealso:: :py:attr:`~Expression.md5`, :py:attr:`~Expression.sha1`, :py:attr:`~Expression.sha224`,
:py:attr:`~Expression.sha256`, :py:attr:`~Expression.sha384`,
:py:attr:`~Expression.sha512`, :py:attr:`~Expression.sha2`
"""
def _make_math_log_doc():
math_log_funcs = {
Expression.log10: "Calculates the base 10 logarithm of the given value.",
Expression.log2: "Calculates the base 2 logarithm of the given value.",
Expression.ln: "Calculates the natural logarithm of the given value.",
Expression.log: "Calculates the natural logarithm of the given value if base is not "
"specified. Otherwise, calculates the logarithm of the given value to the "
"given base.",
}
for func, op_desc in math_log_funcs.items():
func.__doc__ = _math_log_doc.format(op_desc=op_desc)
def _make_math_trigonometric_doc():
math_trigonometric_funcs = {
Expression.cosh: "hyperbolic cosine",
Expression.sinh: "hyperbolic sine",
Expression.sin: "sine",
Expression.cos: "cosine",
Expression.tan: "tangent",
Expression.cot: "cotangent",
Expression.asin: "arc sine",
Expression.acos: "arc cosine",
Expression.atan: "arc tangent",
Expression.tanh: "hyperbolic tangent",
}
for func, op_desc in math_trigonometric_funcs.items():
func.__doc__ = _math_trigonometric_doc.format(op_desc=op_desc)
def _make_aggregation_doc():
aggregation_funcs = {
Expression.sum: "Returns the sum of the numeric field across all input values. "
"If all values are null, null is returned.",
Expression.sum0: "Returns the sum of the numeric field across all input values. "
"If all values are null, 0 is returned.",
Expression.min: "Returns the minimum value of field across all input values.",
Expression.max: "Returns the maximum value of field across all input values.",
Expression.count: "Returns the number of input rows for which the field is not null.",
Expression.avg: "Returns the average (arithmetic mean) of the numeric field across all "
"input values.",
Expression.stddev_pop: "Returns the population standard deviation of an expression(the "
"square root of var_pop).",
Expression.stddev_samp: "Returns the sample standard deviation of an expression(the square "
"root of var_samp).",
Expression.var_pop: "Returns the population standard variance of an expression.",
Expression.var_samp: "Returns the sample variance of a given expression.",
Expression.collect: "Returns multiset aggregate of a given expression.",
}
for func, op_desc in aggregation_funcs.items():
func.__doc__ = _aggregation_doc.format(op_desc=op_desc)
def _make_string_doc():
string_funcs = [
Expression.substring, Expression.trim_leading, Expression.trim_trailing, Expression.trim,
Expression.replace, Expression.char_length, Expression.upper_case, Expression.lower_case,
Expression.init_cap, Expression.like, Expression.similar, Expression.position,
Expression.lpad, Expression.rpad, Expression.overlay, Expression.regexp_replace,
Expression.regexp_extract, Expression.from_base64, Expression.to_base64,
Expression.ltrim, Expression.rtrim, Expression.repeat
]
for func in string_funcs:
# strip the docstring indentation before appending the see-also block
func.__doc__ = func.__doc__.replace('    ', '') + _string_doc_seealso
def _make_temporal_doc():
temporal_funcs = [
Expression.to_date, Expression.to_time, Expression.to_timestamp, Expression.extract,
Expression.floor, Expression.ceil
]
for func in temporal_funcs:
# strip the docstring indentation before appending the see-also block
func.__doc__ = func.__doc__.replace('    ', '') + _temporal_doc_seealso
def _make_time_doc():
time_funcs = {
Expression.year: "years",
Expression.years: "years",
Expression.quarter: "quarters",
Expression.quarters: "quarters",
Expression.month: "months",
Expression.months: "months",
Expression.week: "weeks",
Expression.weeks: "weeks",
Expression.day: "days",
Expression.days: "days",
Expression.hour: "hours",
Expression.hours: "hours",
Expression.minute: "minutes",
Expression.minutes: "minutes",
Expression.second: "seconds",
Expression.seconds: "seconds",
Expression.milli: "millis",
Expression.millis: "millis"
}
for func, op_desc in time_funcs.items():
func.__doc__ = _time_doc.format(op_desc=op_desc)
def _make_hash_doc():
hash_funcs = {
Expression.md5: ("MD5", 32),
Expression.sha1: ("SHA-1", 40),
Expression.sha224: ("SHA-224", 56),
Expression.sha256: ("SHA-256", 64),
Expression.sha384: ("SHA-384", 96),
Expression.sha512: ("SHA-512", 128)
}
for func, (op_desc, bit) in hash_funcs.items():
func.__doc__ = _hash_doc.format(op_desc=op_desc, bit=bit)
def _add_version_doc():
for func_name in dir(Expression):
if not func_name.startswith("_"):
add_version_doc(getattr(Expression, func_name), "1.12.0")
def _get_java_expression(expr, to_expr: bool = False):
"""
Returns the Java expression for the given expr. If expr is a Python Expression, returns the
underlying Java expression; otherwise, converts it to a Java expression if to_expr is true.
"""
if isinstance(expr, Expression):
return expr._j_expr
elif to_expr:
gateway = get_gateway()
return gateway.jvm.Expressions.lit(expr)
else:
return expr
def _get_or_create_java_expression(expr: Union["Expression", str]):
if isinstance(expr, Expression):
return expr._j_expr
elif isinstance(expr, str):
from pyflink.table.expressions import col
return col(expr)._j_expr
else:
raise TypeError(
"Invalid argument: expected Expression or string, got {0}.".format(type(expr)))
def _unary_op(op_name: str):
def _(self) -> 'Expression':
return Expression(getattr(self._j_expr, op_name)())
return _
def _binary_op(op_name: str, reverse: bool = False):
def _(self, other) -> 'Expression':
if reverse:
return Expression(getattr(_get_java_expression(other, True), op_name)(self._j_expr))
else:
return Expression(getattr(self._j_expr, op_name)(_get_java_expression(other)))
return _
def _ternary_op(op_name: str):
def _(self, first, second) -> 'Expression':
return Expression(getattr(self._j_expr, op_name)(
_get_java_expression(first), _get_java_expression(second)))
return _
def _expressions_op(op_name: str):
def _(self, *args) -> 'Expression':
from pyflink.table import expressions
return getattr(expressions, op_name)(self, *[_get_java_expression(arg) for arg in args])
return _
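# Illustrative note: each factory closes over a Java method name and returns a
# plain function, so a single assignment such as
#   __add__ = _binary_op("plus")
# makes col("a") + 1 delegate to the underlying Java expression's plus(...)
# method through Py4J, with _get_java_expression() unwrapping Expression
# operands or lifting Python literals.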
class TimeIntervalUnit(Enum):
"""
Units for working with time intervals.
.. versionadded:: 1.12.0
"""
YEAR = 0,
YEAR_TO_MONTH = 1,
QUARTER = 2,
MONTH = 3,
WEEK = 4,
DAY = 5,
DAY_TO_HOUR = 6,
DAY_TO_MINUTE = 7,
DAY_TO_SECOND = 8,
HOUR = 9,
SECOND = 10,
HOUR_TO_MINUTE = 11,
HOUR_TO_SECOND = 12,
MINUTE = 13,
MINUTE_TO_SECOND = 14
def _to_j_time_interval_unit(self):
gateway = get_gateway()
JTimeIntervalUnit = gateway.jvm.org.apache.flink.table.expressions.TimeIntervalUnit
return getattr(JTimeIntervalUnit, self.name)
class TimePointUnit(Enum):
"""
Units for working with points in time.
.. versionadded:: 1.12.0
"""
YEAR = 0,
MONTH = 1,
DAY = 2,
HOUR = 3,
MINUTE = 4,
SECOND = 5,
QUARTER = 6,
WEEK = 7,
MILLISECOND = 8,
MICROSECOND = 9
def _to_j_time_point_unit(self):
gateway = get_gateway()
JTimePointUnit = gateway.jvm.org.apache.flink.table.expressions.TimePointUnit
return getattr(JTimePointUnit, self.name)
T = TypeVar('T')
class Expression(Generic[T]):
"""
Expressions represent a logical tree for producing a computation result.
Expressions might be literal values, function calls, or field references.
.. versionadded:: 1.12.0
"""
def __init__(self, j_expr_or_property_name):
self._j_expr_or_property_name = j_expr_or_property_name
__abs__ = _unary_op("abs")
# comparison functions
__eq__ = _binary_op("isEqual")
__ne__ = _binary_op("isNotEqual")
__lt__ = _binary_op("isLess")
__gt__ = _binary_op("isGreater")
__le__ = _binary_op("isLessOrEqual")
__ge__ = _binary_op("isGreaterOrEqual")
# logic functions
__and__ = _binary_op("and")
__or__ = _binary_op("or")
__invert__ = _unary_op('isNotTrue')
__rand__ = _binary_op("and")
__ror__ = _binary_op("or")
# arithmetic functions
__add__ = _binary_op("plus")
__sub__ = _binary_op("minus")
__mul__ = _binary_op("times")
__truediv__ = _binary_op("dividedBy")
__mod__ = _binary_op("mod")
__pow__ = _binary_op("power")
__neg__ = _expressions_op("negative")
__radd__ = _binary_op("plus", True)
__rsub__ = _binary_op("minus", True)
__rmul__ = _binary_op("times")
__rtruediv__ = _binary_op("dividedBy", True)
__rmod__ = _binary_op("mod", True)
__rpow__ = _binary_op("power", True)
def __str__(self):
return self._j_expr.asSummaryString()
def __getattr__(self, name):
if name == '_j_expr':
if isinstance(self._j_expr_or_property_name, str):
gateway = get_gateway()
return getattr(gateway.jvm.Expressions, self._j_expr_or_property_name)
else:
return self._j_expr_or_property_name
return self.get(name)
def __getitem__(self, index):
return self.at(index)
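# Sketch: attribute and index access map onto field accessors, e.g.
#   col("row_col").a     resolves the nested field "a" via get()
#   col("array_col")[1]  reads an array element via at() (1-based for arrays
#                        in the Table API)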
# ---------------------------- arithmetic functions ----------------------------------
@property
def exp(self) -> 'Expression[float]':
"""
Calculates Euler's number raised to the given power.
"""
return _unary_op("exp")(self)
@property
def log10(self) -> 'Expression[float]':
return _unary_op("log10")(self)
@property
def log2(self) -> 'Expression[float]':
return _unary_op("log2")(self)
@property
def ln(self) -> 'Expression[float]':
return _unary_op("ln")(self)
def log(self, base=None) -> 'Expression[float]':
if base is None:
return _unary_op("log")(self)
else:
return _binary_op("log")(self, base)
@property
def cosh(self) -> 'Expression[float]':
return _unary_op("cosh")(self)
@property
def sinh(self) -> 'Expression[float]':
return _unary_op("sinh")(self)
@property
def sin(self) -> 'Expression[float]':
return _unary_op("sin")(self)
@property
def cos(self) -> 'Expression[float]':
return _unary_op("cos")(self)
@property
def tan(self) -> 'Expression[float]':
return _unary_op("tan")(self)
@property
def cot(self) -> 'Expression[float]':
return _unary_op("cot")(self)
@property
def asin(self) -> 'Expression[float]':
return _unary_op("asin")(self)
@property
def acos(self) -> 'Expression[float]':
return _unary_op("acos")(self)
@property
def atan(self) -> 'Expression[float]':
return _unary_op("atan")(self)
@property
def tanh(self) -> 'Expression[float]':
return _unary_op("tanh")(self)
@property
def degrees(self) -> 'Expression[float]':
"""
Converts numeric from radians to degrees.
.. seealso:: :py:attr:`~Expression.radians`
"""
return _unary_op("degrees")(self)
@property
def radians(self) -> 'Expression[float]':
"""
Converts numeric from degrees to radians.
.. seealso:: :py:attr:`~Expression.degrees`
"""
return _unary_op("radians")(self)
@property
def sqrt(self) -> 'Expression[float]':
"""
Calculates the square root of a given value.
"""
return _unary_op("sqrt")(self)
@property
def abs(self) -> 'Expression[T]':
"""
Calculates the absolute value of a given value.
"""
return _unary_op("abs")(self)
@property
def sign(self) -> 'Expression[T]':
"""
Calculates the signum of a given number.
e.g. `lit(1.23).sign` leads to `1.00`, `lit(-1.23).sign` leads to `-1.00`.
"""
return _unary_op("sign")(self)
def round(self, places: Union[int, 'Expression[int]']):
"""
Rounds the given number to the given number of places right of the decimal point.
e.g. `lit(646.646).round(2)` leads to `646.65`, `lit(646.646).round(3)` leads to `646.646`,
`lit(646.646).round(0)` leads to `647`, `lit(646.646).round(-2)` leads to `600`.
"""
return _binary_op("round")(self, places)
def between(self, lower_bound, upper_bound) -> 'Expression[bool]':
"""
Returns true if the given expression is between lower_bound and upper_bound
(both inclusive). False otherwise. The parameters must be numeric types or identical
comparable types.
e.g. `lit(2.1).between(2.1, 2.1)` leads to `true`,
`lit("2018-05-05").to_date.between(lit("2018-05-01").to_date, lit("2018-05-10").to_date)`
leads to `true`.
:param lower_bound: numeric or comparable expression
:param upper_bound: numeric or comparable expression
.. seealso:: :func:`~Expression.not_between`
"""
return _ternary_op("between")(self, lower_bound, upper_bound)
def not_between(self, lower_bound, upper_bound) -> 'Expression[bool]':
"""
Returns true if the given expression is not between lower_bound and upper_bound
(both inclusive). False otherwise. The parameters must be numeric types or identical
comparable types.
e.g. `lit(2.1).not_between(2.1, 2.1)` leads to `false`,
`lit("2018-05-05").to_date.not_between(lit("2018-05-01").to_date,
lit("2018-05-10").to_date)` leads to `false`.
:param lower_bound: numeric or comparable expression
:param upper_bound: numeric or comparable expression
.. seealso:: :func:`~Expression.between`
"""
return _ternary_op("notBetween")(self, lower_bound, upper_bound)
def then(self, if_true, if_false) -> 'Expression':
"""
Ternary conditional operator that decides which of two other expressions should be evaluated
based on an evaluated boolean condition.
e.g. lit(42).is_greater(5).then("A", "B") leads to "A"
:param if_true: expression to be evaluated if condition holds
:param if_false: expression to be evaluated if condition does not hold
"""
return _ternary_op("then")(self, if_true, if_false)
def if_null(self, null_replacement) -> 'Expression':
"""
Returns null_replacement if the given expression is null; otherwise the expression is
returned.
This function returns a data type that is very specific in terms of nullability. The
returned type is the common type of both arguments but only nullable if the
null_replacement is nullable.
The function allows passing nullable columns into a function or table that is declared
with a NOT NULL constraint.
e.g. col("nullable_column").if_null(5) returns never null.
"""
return _binary_op("ifNull")(self, null_replacement)
@property
def is_null(self) -> 'Expression[bool]':
"""
Returns true if the given expression is null.
.. seealso:: :py:attr:`~Expression.is_not_null`
"""
return _unary_op("isNull")(self)
@property
def is_not_null(self) -> 'Expression[bool]':
"""
Returns true if the given expression is not null.
.. seealso:: :py:attr:`~Expression.is_null`
"""
return _unary_op("isNotNull")(self)
@property
def is_true(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is true. False otherwise (for null and false).
.. seealso:: :py:attr:`~Expression.is_false`, :py:attr:`~Expression.is_not_true`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isTrue")(self)
@property
def is_false(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is false. False otherwise (for null and true).
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_not_true`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isFalse")(self)
@property
def is_not_true(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is not true (for null and false). False otherwise.
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_false`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isNotTrue")(self)
@property
def is_not_false(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is not false (for null and true). False otherwise.
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_false`,
:py:attr:`~Expression.is_not_true`
"""
return _unary_op("isNotFalse")(self)
@property
def distinct(self) -> 'Expression':
"""
Similar to a SQL distinct aggregation clause such as COUNT(DISTINCT a), declares that an
aggregation function is only applied on distinct input values.
Example:
::
>>> tab \\
>>> .group_by(col("a")) \\
>>> .select(col("a"), col("b").sum.distinct.alias("d"))
"""
return _unary_op("distinct")(self)
@property
def sum(self) -> 'Expression':
return _unary_op("sum")(self)
@property
def sum0(self) -> 'Expression':
return _unary_op("sum0")(self)
@property
def min(self) -> 'Expression':
return _unary_op("min")(self)
@property
def max(self) -> 'Expression':
return _unary_op("max")(self)
@property
def count(self) -> 'Expression':
return _unary_op("count")(self)
@property
def avg(self) -> 'Expression':
return _unary_op("avg")(self)
@property
def stddev_pop(self) -> 'Expression':
return _unary_op("stddevPop")(self)
@property
def stddev_samp(self) -> 'Expression':
return _unary_op("stddevSamp")(self)
@property
def var_pop(self) -> 'Expression':
return _unary_op("varPop")(self)
@property
def var_samp(self) -> 'Expression':
return _unary_op("varSamp")(self)
@property
def collect(self) -> 'Expression':
return _unary_op("collect")(self)
def alias(self, name: str, *extra_names: str) -> 'Expression[T]':
"""
Specifies a name for an expression i.e. a field.
Example:
::
>>> tab.select(col('a').alias('b'))
:param name: name for one field.
:param extra_names: additional names if the expression expands to multiple fields
"""
gateway = get_gateway()
return _ternary_op("as")(self, name, to_jarray(gateway.jvm.String, extra_names))
def cast(self, data_type: DataType) -> 'Expression':
"""
Converts a value to a given data type.
e.g. lit("42").cast(DataTypes.INT()) leads to 42.
"""
return _binary_op("cast")(self, _to_java_data_type(data_type))
@property
def asc(self) -> 'Expression':
"""
Specifies ascending order of an expression i.e. a field for order_by.
Example:
::
>>> tab.order_by(col('a').asc)
.. seealso:: :py:attr:`~Expression.desc`
"""
return _unary_op("asc")(self)
@property
def desc(self) -> 'Expression':
"""
Specifies descending order of an expression i.e. a field for order_by.
Example:
::
>>> tab.order_by(col('a').desc)
.. seealso:: :py:attr:`~Expression.asc`
"""
return _unary_op("desc")(self)
def in_(self, first_element_or_table, *remaining_elements) -> 'Expression':
"""
If first_element_or_table is a Table, returns true if an expression exists in a given table
sub-query. The sub-query table must consist of one column. This column must have the same
data type as the expression.
.. note::
This operation is not supported in a streaming environment yet if
first_element_or_table is a Table.
Otherwise, returns true if an expression exists in a given list of expressions. This is a
shorthand for multiple OR conditions.
If the testing set contains null, the result will be null if the element can not be found
and true if it can be found. If the element is null, the result is always null.
e.g. lit("42").in(1, 2, 3) leads to false.
Example:
::
>>> tab.where(col("a").in_(1, 2, 3))
>>> table_a.where(col("x").in_(table_b.select("y")))
"""
from pyflink.table import Table
if isinstance(first_element_or_table, Table):
assert len(remaining_elements) == 0
return _binary_op("in")(self, first_element_or_table._j_table)
else:
gateway = get_gateway()
ApiExpressionUtils = gateway.jvm.org.apache.flink.table.expressions.ApiExpressionUtils
remaining_elements = (first_element_or_table, *remaining_elements)
exprs = [ApiExpressionUtils.objectToExpression(_get_java_expression(e))
for e in remaining_elements]
return _binary_op("in")(self, to_jarray(gateway.jvm.Object, exprs))
@property
def start(self) -> 'Expression':
"""
Returns the start time (inclusive) of a window when applied on a window reference.
Example:
::
>>> tab.window(Tumble
>>> .over(row_interval(2))
>>> .on(col("a"))
>>> .alias("w")) \\
>>> .group_by(col("c"), col("w")) \\
>>> .select(col("c"), col("w").start, col("w").end, col("w").proctime)
.. seealso:: :py:attr:`~Expression.end`
"""
return _unary_op("start")(self)
@property
def end(self) -> 'Expression':
"""
Returns the end time (exclusive) of a window when applied on a window reference.
e.g. if a window ends at 10:59:59.999 this property will return 11:00:00.000.
Example:
::
>>> orders.window(Tumble
>>> .over(row_interval(2))
>>> .on(col("a"))
>>> .alias("w")) \\
>>> .group_by(col("c"), col("w")) \\
>>> .select(col("c"), col("w").start, col("w").end, col("w").proctime)
.. seealso:: :py:attr:`~Expression.start`
"""
return _unary_op("end")(self)
@property
def bin(self) -> 'Expression[str]':
"""
Returns a string representation of an integer numeric value in binary format. Returns null
if numeric is null. E.g. "4" leads to "100", "12" leads to "1100".
.. seealso:: :py:attr:`~Expression.hex`
"""
return _unary_op("bin")(self)
@property
def hex(self) -> 'Expression[str]':
"""
Returns a string representation of an integer numeric value or a string in hex format.
Returns null if numeric or string is null.
E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", and a string "hello,world"
leads to "68656c6c6f2c776f726c64".
.. seealso:: :py:attr:`~Expression.bin`
"""
return _unary_op("hex")(self)
def truncate(self, n: Union[int, 'Expression[int]'] = 0) -> 'Expression[T]':
"""
Returns the number truncated to n decimal places.
If n is 0, the result has no decimal point or fractional part.
n can be negative to cause n digits left of the decimal point of the value to become zero.
e.g. `lit(42.345).truncate(2)` leads to `42.34`, `lit(42).truncate(-1)` leads to `40`.
"""
return _binary_op("truncate")(self, n)
# ---------------------------- string functions ----------------------------------
def substring(self,
begin_index: Union[int, 'Expression[int]'],
length: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Creates a substring of the given string at given index for a given length.
:param begin_index: first character of the substring (starting at 1, inclusive)
:param length: number of characters of the substring
"""
if length is None:
return _binary_op("substring")(self, begin_index)
else:
return _ternary_op("substring")(self, begin_index, length)
def trim_leading(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes leading space characters from the given string if character is None.
Otherwise, removes leading specified characters from the given string.
"""
if character is None:
return _unary_op("trimLeading")(self)
else:
return _binary_op("trimLeading")(self, character)
def trim_trailing(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes trailing space characters from the given string if character is None.
Otherwise, removes trailing specified characters from the given string.
"""
if character is None:
return _unary_op("trimTrailing")(self)
else:
return _binary_op("trimTrailing")(self, character)
def trim(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes leading and trailing space characters from the given string if character
is None. Otherwise, removes leading and trailing specified characters from the given string.
"""
if character is None:
return _unary_op("trim")(self)
else:
return _binary_op("trim")(self, character)
def replace(self,
search: Union[str, 'Expression[str]'] = None,
replacement: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Returns a new string which replaces all the occurrences of the search target
with the replacement string (non-overlapping).
e.g. `lit('This is a test String.').replace(' ', '_')` leads to `This_is_a_test_String.`
"""
return _ternary_op("replace")(self, search, replacement)
@property
def char_length(self) -> 'Expression[int]':
"""
Returns the length of a string.
"""
return _unary_op("charLength")(self)
@property
def upper_case(self) -> 'Expression[str]':
"""
Returns all of the characters in a string in upper case using the rules of the default
locale.
"""
return _unary_op("upperCase")(self)
@property
def lower_case(self) -> 'Expression[str]':
"""
Returns all of the characters in a string in lower case using the rules of the default
locale.
"""
return _unary_op("lowerCase")(self)
@property
def init_cap(self) -> 'Expression[str]':
"""
Converts the initial letter of each word in a string to uppercase. Assumes a
string containing only [A-Za-z0-9], everything else is treated as whitespace.
"""
return _unary_op("initCap")(self)
def like(self, pattern: Union[str, 'Expression[str]'] = None) -> 'Expression[bool]':
"""
Returns true, if a string matches the specified LIKE pattern.
e.g. 'Jo_n%' matches all strings that start with 'Jo(arbitrary letter)n'
"""
return _binary_op("like")(self, pattern)
def similar(self, pattern: Union[str, 'Expression[str]'] = None) -> 'Expression[bool]':
"""
Returns true, if a string matches the specified SQL regex pattern.
e.g. 'A+' matches all strings that consist of at least one A
"""
return _binary_op("similar")(self, pattern)
def position(self, haystack: Union[str, 'Expression[str]'] = None) -> 'Expression[int]':
"""
Returns the position of a string in another string, starting at 1.
Returns 0 if string could not be found. e.g. lit('a').position('bbbbba') leads to 6.
"""
return _binary_op("position")(self, haystack)
def lpad(self,
length: Union[int, 'Expression[int]'],
pad: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string left-padded with the given pad string to a length of len characters.
If the string is longer than len, the return value is shortened to len characters.
e.g. lit('hi').lpad(4, '??') returns '??hi', lit('hi').lpad(1, '??') returns 'h'
"""
return _ternary_op("lpad")(self, length, pad)
def rpad(self,
length: Union[int, 'Expression[int]'],
pad: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string right-padded with the given pad string to a length of len characters.
If the string is longer than len, the return value is shortened to len characters.
e.g. lit('hi').rpad(4, '??') returns 'hi??', lit('hi').rpad(1, '??') returns 'h'
"""
return _ternary_op("rpad")(self, length, pad)
def overlay(self,
new_string: Union[str, 'Expression[str]'],
starting: Union[int, 'Expression[int]'],
length: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Replaces a substring of string with a string starting at a position
(starting at 1). e.g. lit('xxxxxtest').overlay('xxxx', 6) leads to 'xxxxxxxxx'
lit('xxxxxtest').overlay('xxxx', 6, 2) leads to 'xxxxxxxxxst'
"""
if length is None:
return _ternary_op("overlay")(self, new_string, starting)
else:
j_expr_new_string = new_string._j_expr \
if isinstance(new_string, Expression) else new_string
j_expr_starting = starting._j_expr \
if isinstance(starting, Expression) else starting
j_expr_length = length._j_expr \
if isinstance(length, Expression) else length
return Expression(getattr(self._j_expr, "overlay")(
j_expr_new_string, j_expr_starting, j_expr_length))
def regexp_replace(self,
regex: Union[str, 'Expression[str]'],
replacement: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string with all substrings that match the regular expression
consecutively being replaced.
"""
return _ternary_op("regexpReplace")(self, regex, replacement)
def regexp_extract(
self,
regex: Union[str, 'Expression[str]'],
extract_index: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Returns a string extracted with a specified regular expression and a regex match
group index.
"""
if extract_index is None:
return _ternary_op("regexpExtract")(self, regex)
else:
return _ternary_op("regexpExtract")(self, regex, extract_index)
@property
def from_base64(self) -> 'Expression[str]':
"""
Returns the base string decoded with base64.
"""
return _unary_op("fromBase64")(self)
@property
def to_base64(self) -> 'Expression[str]':
"""
Returns the base64-encoded result of the input string.
"""
return _unary_op("toBase64")(self)
@property
def ltrim(self) -> 'Expression[str]':
"""
Returns a string that removes the left whitespaces from the given string.
"""
return _unary_op("ltrim")(self)
@property
def rtrim(self) -> 'Expression[str]':
"""
Returns a string that removes the right whitespaces from the given string.
"""
return _unary_op("rtrim")(self)
def repeat(self, n: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns a string that repeats the base string n times.
"""
return _binary_op("repeat")(self, n)
def over(self, alias) -> 'Expression':
"""
Defines an aggregation to be used for a previously specified over window.
Example:
::
>>> tab.window(Over
>>> .partition_by(col('c'))
>>> .order_by(col('rowtime'))
>>> .preceding(row_interval(2))
>>> .following(CURRENT_ROW)
>>> .alias("w")) \\
>>> .select(col('c'), col('a'), col('a').count.over(col('w')))
"""
return _binary_op("over")(self, alias)
# ---------------------------- temporal functions ----------------------------------
@property
def to_date(self) -> 'Expression':
"""
Parses a date string in the form "yyyy-MM-dd" to a SQL Date. It's equivalent to
`col.cast(DataTypes.DATE())`.
Example:
::
>>> lit("2016-06-15").to_date
"""
return _unary_op("toDate")(self)
@property
def to_time(self) -> 'Expression':
"""
Parses a time string in the form "HH:mm:ss" to a SQL Time. It's equivalent to
`col.cast(DataTypes.TIME())`.
Example:
::
>>> lit("3:30:00").to_time
"""
return _unary_op("toTime")(self)
@property
def to_timestamp(self) -> 'Expression':
"""
Parses a timestamp string in the form "yyyy-MM-dd HH:mm:ss[.SSS]" to a SQL Timestamp.
It's equivalent to `col.cast(DataTypes.TIMESTAMP(3))`.
Example:
::
>>> lit('2016-06-15 3:30:00.001').to_timestamp
"""
return _unary_op("toTimestamp")(self)
def extract(self, time_interval_unit: TimeIntervalUnit) -> 'Expression':
"""
Extracts parts of a time point or time interval. Returns the part as a long value.
e.g. `lit("2006-06-05").to_date.extract(TimeIntervalUnit.DAY)` leads to `5`.
"""
return _binary_op("extract")(
self, time_interval_unit._to_j_time_interval_unit())
def floor(self, time_interval_unit: TimeIntervalUnit = None) -> 'Expression':
"""
If time_interval_unit is specified, it rounds down a time point to the given
unit, e.g. `lit("12:44:31").to_date.floor(TimeIntervalUnit.MINUTE)` leads to
`12:44:00`. Otherwise, it calculates the largest integer less than or equal to a
given number.
"""
if time_interval_unit is None:
return _unary_op("floor")(self)
else:
return _binary_op("floor")(
self, time_interval_unit._to_j_time_interval_unit())
def ceil(self, time_interval_unit: TimeIntervalUnit = None) -> 'Expression':
"""
If time_interval_unit is specified, it rounds up a time point to the given unit,
e.g. `lit("12:44:31").to_date.floor(TimeIntervalUnit.MINUTE)` leads to 12:45:00.
Otherwise, it calculates the smallest integer greater than or equal to a given number.
"""
if time_interval_unit is None:
return _unary_op("ceil")(self)
else:
return _binary_op("ceil")(
self, time_interval_unit._to_j_time_interval_unit())
# ---------------------------- advanced type helper functions -----------------------------
def get(self, name_or_index: Union[str, int]) -> 'Expression':
"""
Accesses the field of a Flink composite type (such as Tuple, POJO, etc.) by name or index
and returns its value.
:param name_or_index: name or index of the field (similar to Flink's field expressions)
.. seealso:: :py:attr:`~Expression.flatten`
"""
return _binary_op("get")(self, name_or_index)
@property
def flatten(self) -> 'Expression':
"""
Converts a Flink composite type (such as Tuple, POJO, etc.) and all of its direct subtypes
into a flat representation where every subtype is a separate field.
.. seealso:: :func:`~Expression.get`
"""
return _unary_op("flatten")(self)
def at(self, index) -> 'Expression':
"""
Accesses the element of an array or map based on a key or an index (starting at 1).
:param index: index key or position of the element (array index starting at 1)
.. seealso:: :py:attr:`~Expression.cardinality`, :py:attr:`~Expression.element`
"""
return _binary_op("at")(self, index)
@property
def cardinality(self) -> 'Expression':
"""
Returns the number of elements of an array or number of entries of a map.
.. seealso:: :func:`~Expression.at`, :py:attr:`~Expression.element`
"""
return _unary_op("cardinality")(self)
@property
def element(self) -> 'Expression':
"""
Returns the sole element of an array with a single element. Returns null if the array is
empty. Throws an exception if the array has more than one element.
.. seealso:: :func:`~Expression.at`, :py:attr:`~Expression.cardinality`
"""
return _unary_op("element")(self)
# ---------------------------- time definition functions -----------------------------
@property
def rowtime(self) -> 'Expression':
"""
Declares a field as the rowtime attribute for indicating, accessing, and working in
Flink's event time.
.. seealso:: :py:attr:`~Expression.proctime`
"""
return _unary_op("rowtime")(self)
@property
def proctime(self) -> 'Expression':
"""
Declares a field as the proctime attribute for indicating, accessing, and working in
Flink's processing time.
.. seealso:: :py:attr:`~Expression.rowtime`
"""
return _unary_op("proctime")(self)
@property
def year(self) -> 'Expression':
return _unary_op("year")(self)
@property
def years(self) -> 'Expression':
return _unary_op("years")(self)
@property
def quarter(self) -> 'Expression':
return _unary_op("quarter")(self)
@property
def quarters(self) -> 'Expression':
return _unary_op("quarters")(self)
@property
def month(self) -> 'Expression':
return _unary_op("month")(self)
@property
def months(self) -> 'Expression':
return _unary_op("months")(self)
@property
def week(self) -> 'Expression':
return _unary_op("week")(self)
@property
def weeks(self) -> 'Expression':
return _unary_op("weeks")(self)
@property
def day(self) -> 'Expression':
return _unary_op("day")(self)
@property
def days(self) -> 'Expression':
return _unary_op("days")(self)
@property
def hour(self) -> 'Expression':
return _unary_op("hour")(self)
@property
def hours(self) -> 'Expression':
return _unary_op("hours")(self)
@property
def minute(self) -> 'Expression':
return _unary_op("minute")(self)
@property
def minutes(self) -> 'Expression':
return _unary_op("minutes")(self)
@property
def second(self) -> 'Expression':
return _unary_op("second")(self)
@property
def seconds(self) -> 'Expression':
return _unary_op("seconds")(self)
@property
def milli(self) -> 'Expression':
return _unary_op("milli")(self)
@property
def millis(self) -> 'Expression':
return _unary_op("millis")(self)
# ---------------------------- hash functions -----------------------------
@property
def md5(self) -> 'Expression[str]':
return _unary_op("md5")(self)
@property
def sha1(self) -> 'Expression[str]':
return _unary_op("sha1")(self)
@property
def sha224(self) -> 'Expression[str]':
return _unary_op("sha224")(self)
@property
def sha256(self) -> 'Expression[str]':
return _unary_op("sha256")(self)
@property
def sha384(self) -> 'Expression[str]':
return _unary_op("sha384")(self)
@property
def sha512(self) -> 'Expression[str]':
return _unary_op("sha512")(self)
def sha2(self, hash_length: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns the hash for the given string expression using the SHA-2 family of hash
functions (SHA-224, SHA-256, SHA-384, or SHA-512).
:param hash_length: bit length of the result (either 224, 256, 384, or 512)
:return: string or null if one of the arguments is null.
.. seealso:: :py:attr:`~Expression.md5`, :py:attr:`~Expression.sha1`,
:py:attr:`~Expression.sha224`, :py:attr:`~Expression.sha256`,
:py:attr:`~Expression.sha384`, :py:attr:`~Expression.sha512`
"""
return _binary_op("sha2")(self, hash_length)
# add the docs
_make_math_log_doc()
_make_math_trigonometric_doc()
_make_aggregation_doc()
_make_string_doc()
_make_temporal_doc()
_make_time_doc()
_make_hash_doc()
# add the version docs
_add_version_doc()
'''
utilities (:mod:`calour.util`)
==============================
.. currentmodule:: calour.util
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated
join_fields
compute_prevalence
register_functions
set_log_level
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import hashlib
import inspect
import re
import configparser
import warnings
from types import FunctionType
from functools import wraps, update_wrapper
from importlib import import_module
from collections.abc import Sequence
from logging import getLogger
from numbers import Real
from pkg_resources import resource_filename
import numpy as np
import scipy
logger = getLogger(__name__)
def join_fields(df, field1, field2, new_field=None, sep='_', pad=None):
'''Join two fields into a single new field
Parameters
----------
df : pandas.DataFrame
field1 : str
Name of the first field to join. The value in this column can be any data type.
field2 : str
Name of the second field to join. The value in this column can be any data type.
new_field : str, default=None
name of the new (joined) field. Default to name it as field1 + sep + field2
sep : str, optional
The separator between the values of the two fields when joining
pad : str, default=None
Padding char. Align and pad the text in field1 and field2 before joining. Default to join without padding.
Returns
-------
pandas.DataFrame
the original data frame with new joined field.
Examples
--------
>>> import pandas as pd
>>> pd.set_option('display.max_colwidth', None)
>>> df = pd.DataFrame([['dog', 'bone'], ['monkey', 'banana']], columns=['animal', 'food'])
>>> # pandas display on Mac is problematic with ellipsis, skip it for now.
>>> join_fields(df, 'animal', 'food') #doctest: +SKIP
animal food animal_food
0 dog bone dog_bone
1 monkey banana monkey_banana
>>> join_fields(df, 'animal', 'food', new_field='new', pad='-') #doctest: +SKIP
animal food animal_food new
0 dog bone dog_bone dog---_--bone
1 monkey banana monkey_banana monkey_banana
'''
logger.debug('joining fields %s and %s into %s' % (field1, field2, new_field))
# validate the data
if field1 not in df.columns:
raise ValueError('field %s not in the data frame' % field1)
if field2 not in df.columns:
raise ValueError('field %s not in the data frame' % field2)
# get the new column name
if new_field is None:
new_field = field1 + sep + field2
if new_field in df.columns:
raise ValueError('new field name %s already exists in df. Please use different new_field value' % new_field)
col1 = df[field1].astype(str)
max1 = col1.str.len().max()
col2 = df[field2].astype(str)
max2 = col2.str.len().max()
if pad is not None:
col1 = col1.str.pad(width=max1, side='right', fillchar=pad)
col2 = col2.str.pad(width=max2, side='left', fillchar=pad)
df[new_field] = col1 + sep + col2
return df
def compute_prevalence(abundance):
'''Return the prevalence at each abundance cutoff.
Each sample that has the feature above the cutoff (exclusive) will
be counted.
Parameters
----------
abundance : 1d array-like of numeric
The abundance of a feature across samples.
Returns
-------
np.ndarray
1d sorted array that contains the unique abundance values in the input array.
np.ndarray
same size as the 1st array. Each value is the feature prevalence,
defined as the fraction of samples whose abundance is greater than the
corresponding unique value in the 1st array.
Examples
--------
>>> abund = [0, 1, 0, 2, 4]
>>> x, y = compute_prevalence(abund)
>>> x #doctest: +SKIP
array([0, 1, 2, 4])
>>> y #doctest: +SKIP
array([0.6, 0.4, 0.2, 0.])
'''
# unique values are sorted
cutoffs, counts = np.unique(abundance, return_counts=True)
cum_counts = np.cumsum(counts)
prevalences = 1 - cum_counts / counts.sum()
return cutoffs, prevalences
def _transition_index(obj):
'''Return the transition index and current value of the list.
Examples
-------
>>> obj = ['a', 'a', 'b']
>>> list(_transition_index(obj))
[(2, 'a'), (3, 'b')]
>>> obj = ['a', 'a', 'b', 1, 2, None, None]
>>> list(_transition_index(obj))
[(2, 'a'), (3, 'b'), (4, 1), (5, 2), (7, None)]
Parameters
----------
obj : Iterable of arbitrary objects
Yields
------
tuple of (int, arbitrary)
the transition index, the item value
'''
it = enumerate(obj)
i, item = next(it)
item = str(type(item)), item
for i, current in it:
current = str(type(current)), current
if item != current:
yield i, item[1]
item = current
yield i + 1, item[1]
def _convert_axis_name(func):
'''Convert str value of axis to 0/1.
This allows the decorated function with ``axis`` parameter to
accept "sample"/"s" and "feature"/"f" as value for ``axis``
parameter.
This should be always the closest decorator to the function if
you have multiple decorators for this function.
'''
conversion = {'sample': 0,
's': 0,
'samples': 0,
'feature': 1,
'f': 1,
'features': 1}
@wraps(func)
def inner(*args, **kwargs):
sig = inspect.signature(func)
ba = sig.bind(*args, **kwargs)
param = ba.arguments
v = param.get('axis', None)
if v is None:
return func(*args, **kwargs)
if isinstance(v, str):
param['axis'] = conversion[v.lower()]
elif v not in {0, 1}:
raise ValueError('unknown axis `%r`' % v)
return func(*ba.args, **ba.kwargs)
return inner
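# A minimal usage sketch (`_axis_demo` is a hypothetical function, not part
# of calour): the decorator rewrites the bound `axis` argument before the
# wrapped function runs.
@_convert_axis_name
def _axis_demo(data=None, axis=0):
    return axis

assert _axis_demo(axis='s') == 0
assert _axis_demo(axis='features') == 1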
def _get_taxonomy_string(exp, sep=';', remove_underscore=True, to_lower=False):
'''Get a nice taxonomy string.
Convert the taxonomy list stored (from biom.read_table) to a single string per feature.
Parameters
----------
exp : Experiment
with the taxonomy entry in the feature_metadata
sep : str, optional
the output separator to use between the taxonomic levels
remove_underscore : bool, optional
True (default) to remove the entries like 'g__' and missing values
False to keep them
to_lower : bool, optional
False (default) to keep case
True to convert to lowercase
Returns
-------
taxonomy : list of str
list of taxonomy string per feature
'''
# test if we have taxonomy in the feature metadata
logger.debug('getting taxonomy string')
if 'taxonomy' not in exp.feature_metadata.columns:
raise ValueError('No taxonomy field in experiment')
# if it is not a list - just return it
if not isinstance(exp.feature_metadata['taxonomy'][0], list):
return list(exp.feature_metadata['taxonomy'].values)
if not remove_underscore:
taxonomy = [sep.join(x) for x in exp.feature_metadata['taxonomy']]
else:
taxonomy = []
for ctax in exp.feature_metadata['taxonomy']:
taxstr = ''
for clevel in ctax:
clevel = clevel.strip()
if len(clevel) > 3:
if clevel[1:3] == '__':
clevel = clevel[3:]
taxstr += clevel + sep
if len(taxstr) == 0:
taxstr = 'na'
taxonomy.append(taxstr)
if to_lower:
taxonomy = [x.lower() for x in taxonomy]
return taxonomy
def get_file_md5(f, encoding='utf-8'):
'''get the md5 of the text file.
Parameters
----------
f : str
name of the file to calculate md5 on
encoding : str or None, optional
encoding of the text file (see python str.encode() ). None to use 'utf-8'
Returns
-------
flmd5: str
the md5 of the file f
'''
logger.debug('getting file md5 for file %s' % f)
if f is None:
return None
with open(f, 'rb') as fl:
flmd5 = hashlib.md5()
chunk_size = 4096
for chunk in iter(lambda: fl.read(chunk_size), b""):
flmd5.update(chunk)
flmd5 = flmd5.hexdigest()
logger.debug('md5 of %s: %s' % (f, flmd5))
return flmd5
def get_data_md5(data):
'''Calculate the md5 of a dense/sparse matrix
Calculate the matrix md5 based on row-by-row order
Parameters
----------
data : dense or sparse matrix
Returns
-------
datmd5 : str
the md5 of the data
'''
logger.debug('calculating data md5')
if scipy.sparse.issparse(data):
# if sparse need to convert to numpy array
data = data.toarray()
# convert to string of raw data since hashlib.md5 does not take numpy array as input
datmd5 = hashlib.md5(data.tobytes())
datmd5 = datmd5.hexdigest()
logger.debug('data md5 is: %s' % datmd5)
return datmd5
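# Quick illustration (a sketch, not part of the original module): sparse and
# dense representations of the same matrix hash identically, since sparse
# input is densified before hashing.
_m = np.array([[1, 2], [3, 4]])
assert get_data_md5(_m) == get_data_md5(scipy.sparse.csr_matrix(_m))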
def get_config_file():
'''Get the calour config file location
If the environment CALOUR_CONFIG_FILE is set, take the config file from it
otherwise return CALOUR_PACKAGE_LOCATION/calour/calour.config
Returns
-------
config_file_name : str
the full path to the calour config file
'''
if 'CALOUR_CONFIG_FILE' in os.environ:
config_file_name = os.environ['CALOUR_CONFIG_FILE']
logger.debug('Using calour config file %s from CALOUR_CONFIG_FILE variable' % config_file_name)
else:
config_file_name = resource_filename(__package__, 'calour.config')
return config_file_name
def set_config_value(key, value, section='DEFAULT', config_file_name=None):
'''Set the value in the calour config file
Parameters
----------
key : str
the key to get the value for
value : str
the value to store
section : str, optional
the section to get the value from
config_file_name : str, optional
the full path to the config file or None to use default config file
'''
if config_file_name is None:
config_file_name = get_config_file()
config = configparser.ConfigParser()
config.read(config_file_name)
if section not in config:
config.add_section(section)
config.set(section, key, value)
with open(config_file_name, 'w') as config_file:
config.write(config_file)
logger.debug('wrote key %s value %s to config file' % (key, value))
def get_config_sections(config_file_name=None):
'''Get a list of the sections in the config file
Parameters
----------
config_file_name : str, optional
the full path to the config file or None to use default config file
Returns
-------
list of str
List of the sections in the config file
'''
if config_file_name is None:
config_file_name = get_config_file()
logger.debug('getting sections from config file %s' % config_file_name)
config = configparser.ConfigParser()
config.read(config_file_name)
return config.sections()
def get_config_value(key, fallback=None, section='DEFAULT', config_file_name=None):
'''Get the value from the calour config file
Parameters
----------
key : str
the key to get the value for
fallback : str, optional
the fallback value if the key/section/file does not exist
section : str, optional
the section to get the value from
config_file_name : str, optional
the full path to the config file or None to use default config file
Returns
-------
value : str
value of the key or fallback if file/section/key does not exist
'''
if config_file_name is None:
config_file_name = get_config_file()
config = configparser.ConfigParser()
config.read(config_file_name)
if section not in config:
logger.debug('section %s not in config file %s' % (section, config_file_name))
return fallback
if key not in config[section]:
logger.debug('key %s not in config file %s section %s' % (key, config_file_name, section))
return fallback
value = config[section][key]
return value
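# Round-trip sketch (hypothetical key/section; not executed here because it
# writes to the config file):
#   >>> set_config_value('username', 'alice', section='demo')  # doctest: +SKIP
#   >>> get_config_value('username', section='demo')  # doctest: +SKIP
#   'alice'
#   >>> get_config_value('no-such-key', fallback='none')  # doctest: +SKIP
#   'none'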
def set_log_level(level):
'''Set the debug level for calour
You can see the logging levels at:
https://docs.python.org/3.5/library/logging.html#levels
Parameters
----------
level : int or str
10 for debug, 20 for info, 30 for warn, etc.
It is passing to :func:`logging.Logger.setLevel`
'''
clog = getLogger('calour')
clog.setLevel(level)
def _to_list(x):
'''if x is non iterable or string, convert to iterable.
See the expected behavior in the examples below.
Examples
--------
>>> _to_list('a')
['a']
>>> _to_list({})
[{}]
>>> _to_list(['a'])
['a']
>>> _to_list(set(['a']))
[{'a'}]
'''
if isinstance(x, str):
return [x]
if isinstance(x, Sequence):
return x
return [x]
def _argsort(values, reverse=False):
'''Sort a sequence of values of heterogeneous variable types.
This is useful to overcome the problem when using numpy.argsort on a pandas
series values with missing values or different data types.
Examples
--------
>>> l = [10, 'b', np.nan, 2.5, 'a']
>>> idx = _argsort(l)
>>> idx
[3, 0, 2, 4, 1]
>>> l_sorted = [l[i] for i in idx]
>>> l_sorted
[2.5, 10, nan, 'a', 'b']
>>> l_sorted_reverse = [l[i] for i in _argsort(l, True)]
>>> l_sorted_reverse
['b', 'a', nan, 10, 2.5]
Parameters
----------
values : iterable
the values to sort
reverse : bool, optional
False (default) to sort in ascending order; True to sort in descending order
Returns
-------
list of ints
the positions of the sorted values
'''
pairs = []
for cval in values:
if isinstance(cval, Real):
if np.isnan(cval):
cval = np.inf
else:
cval = float(cval)
pairs.append((str(type(cval)), cval))
# # convert all numbers to float otherwise int will be sorted different place
# values = [float(x) if isinstance(x, Real) else x for x in values]
# # make values ordered by type and sort inside each var type
# values = [(str(type(x)), x) if not np.isnan(x) else (str(type(x)), np.inf) for x in values]
# return sorted(range(len(values)), key=values.__getitem__)
return sorted(range(len(pairs)), key=pairs.__getitem__, reverse=reverse)
def _clone_function(f):
'''Make a copy of a function'''
# based on http://stackoverflow.com/a/13503277/2289509
new_f = FunctionType(f.__code__, f.__globals__,
name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__)
new_f = update_wrapper(new_f, f)
new_f.__kwdefaults__ = f.__kwdefaults__
return new_f
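# Why clone? The copy gets an independent __doc__, so register_functions()
# below can rewrite a module-level function's docstring without touching the
# version registered on a class. A sketch with a hypothetical function:
def _clone_demo():
    '''original'''

_clone_copy = _clone_function(_clone_demo)
_clone_copy.__doc__ = 'patched'
assert _clone_demo.__doc__ == 'original'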
def register_functions(clss, modules=None):
'''Search and modify functions in the modules.
This searches all the functions defined in the given
``modules`` and modify functions as following:
1. for each public function with ``axis`` parameter, decorate it with
:func:`._convert_axis_name` to convert "s" and "f" to 0 or
1 for the ``axis`` parameter.
2. for each public function that accepts its 1st argument of type
defined in ``clss``, register it as a class method to the class
type of its 1st argument.
3. for each public function that accepts its 1st argument of type
defined in ``clss`` **and** returns value of the same type,
also decorate it with :meth:`.Experiment._record_sig`.
Parameters
----------
clss : tuple of ``class`` objects
The classes that qualifying public functions will be registered to as methods.
modules : iterable of str, default=None
The module names where functions are defined. ``None`` means all public
modules in `calour`.
'''
# pattern to recognize the Parameters section
p = re.compile(r"(\n +Parameters\n +-+ *)")
if modules is None:
modules = ['calour.' + i for i in
['io', 'sorting', 'filtering', 'analysis', 'training', 'transforming',
'heatmap.heatmap', 'plotting', 'manipulation', 'database', 'export_html']]
for module_name in modules:
module = import_module(module_name)
functions = inspect.getmembers(module, inspect.isfunction)
for fn, f in functions:
sig = inspect.signature(f)
params = sig.parameters
# skip private functions
if fn.startswith('_'):
continue
if 'axis' in params.keys():
f = _convert_axis_name(f)
for _, param in params.items():
cls = param.annotation
if cls in clss:
# make a copy of the function because we want
# to update the docstring of the original
# function but not that of the registered
# version
if hasattr(cls, fn):
# python can't distinguish defined and
# imported functions. If a function is defined
# in a module and imported in another, without
# this check, it will get processed twice.
continue
if sig.return_annotation is cls:
setattr(cls, fn, cls._record_sig(_clone_function(f)))
else:
setattr(cls, fn, _clone_function(f))
updated = ('\n .. note:: This function is also available as a class method :meth:`.{0}.{1}`\n'
'\\1'
'\n exp : {0}'
'\n Input {0} object.'
'\n')
# use `or` in case f.__doc__ is None
f.__doc__ = p.sub(updated.format(cls.__name__, fn), f.__doc__ or '')
# only check the first func parameter
break
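# A minimal sketch of the registration mechanics above (`_FakeExp` and
# `_normalize` are illustrative stand-ins, not calour classes or functions):
# a function whose 1st parameter is annotated with a registered class becomes
# a method of that class, wrapped with _record_sig when it also returns it.
class _FakeExp:
    @classmethod
    def _record_sig(cls, f):  # stand-in for Experiment._record_sig
        return f

def _normalize(exp: _FakeExp) -> _FakeExp:
    return exp

setattr(_FakeExp, '_normalize', _FakeExp._record_sig(_clone_function(_normalize)))
assert hasattr(_FakeExp, '_normalize')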
def deprecated(message):
'''Deprecation decorator.
Parameters
----------
message : str
the message to print together with deprecation warning.
'''
def deprecated_decorator(func):
@wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return deprecated_func
return deprecated_decorator
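# Usage sketch (hypothetical function): calling `_old_func` will emit a
# DeprecationWarning that points callers at the replacement.
@deprecated('Use _new_func() instead.')
def _old_func():
    return 1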
def format_docstring(*args, **kwargs):
'''Format the docstring of the decorated function.'''
def dec(obj):
obj.__doc__ = obj.__doc__.format(*args, **kwargs)
return obj
return dec
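# Usage sketch (hypothetical function `_fmt_demo`, not part of calour):
@format_docstring('calour')
def _fmt_demo():
    '''Part of {}.'''

assert _fmt_demo.__doc__ == 'Part of calour.'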
# mammon - utility/third-party stuff, each thing has it's own header and provenance
# information.
# CaseInsensitiveDict from requests.
#
# Copyright 2015 Kenneth Reitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import collections
import collections.abc
class CaseInsensitiveDict(collections.abc.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.casefold()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.casefold()] = (key, value)
def __getitem__(self, key):
return self._store[key.casefold()][1]
def __delitem__(self, key):
del self._store[key.casefold()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.abc.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
# a modified ExpiringDict implementation
#
# Copyright 2013-2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ExpiringDict(collections.OrderedDict):
def __init__(self, max_len, max_age_seconds):
collections.OrderedDict.__init__(self)
self.max_len = max_len
self.max_age = max_age_seconds
def __contains__(self, key):
try:
item = collections.OrderedDict.__getitem__(self, key.casefold())
if time.time() - item[1] < self.max_age:
return True
else:
del self[key.casefold()]
except KeyError:
pass
return False
def __getitem__(self, key, with_age=False, max_age=None):
item = collections.OrderedDict.__getitem__(self, key.casefold())
item_age = time.time() - item[1]
if not max_age:
max_age = self.max_age
if item_age < max_age:
if with_age:
return item[0], item_age
else:
return item[0]
else:
del self[key.casefold()]
raise KeyError(key.casefold())
def __setitem__(self, key, value):
if len(self) == self.max_len:
self.popitem(last=False)
collections.OrderedDict.__setitem__(self, key.casefold(), (value, time.time()))
def pop(self, key, default=None):
try:
item = collections.OrderedDict.__getitem__(self, key.casefold())
del self[key.casefold()]
return item[0]
except KeyError:
return default
def get(self, key, default=None, with_age=False, max_age=None):
try:
return self.__getitem__(key.casefold(), with_age, max_age)
except KeyError:
if with_age:
return default, None
else:
return default
def put(self, key, value, ts=None):
if len(self) == self.max_len:
self.popitem(last=False)
if not ts:
ts = time.time()
collections.OrderedDict.__setitem__(self, key.casefold(), (value, ts))
def items(self):
r = []
for key in self:
try:
r.append((key, self[key]))
except KeyError:
pass
return r
def values(self):
r = []
for key in self:
try:
r.append(self[key])
except KeyError:
pass
return r
def fromkeys(self):
raise NotImplementedError()
def iteritems(self):
raise NotImplementedError()
def itervalues(self):
raise NotImplementedError()
def viewitems(self):
raise NotImplementedError()
def viewkeys(self):
raise NotImplementedError()
def viewvalues(self):
raise NotImplementedError()
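# Usage sketch (illustrative values, not part of the original module): keys
# are casefolded on the way in, entries expire after max_age seconds, and the
# oldest entry is evicted once max_len is reached.
_demo_cache = ExpiringDict(max_len=2, max_age_seconds=30)
_demo_cache['Nick'] = 'data'
assert 'NICK' in _demo_cache and _demo_cache.get('nick') == 'data'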
# fast irc casemapping validation
# part of mammon, under mammon license.
import string
special = '_-|^{}[]`'
nick_allowed_chars = string.ascii_letters + string.digits + special
nick_allowed_chars_tbl = str.maketrans('', '', nick_allowed_chars)
first_nick_allowed_chars = string.ascii_letters + special
def validate_nick(nick):
if nick[0] not in first_nick_allowed_chars:
return False
remainder = nick[1:]
badchars = remainder.translate(nick_allowed_chars_tbl)
return badchars == ''
chan_allowed_chars = string.ascii_letters + string.digits + special + '`~!@#$%^&*()+=|\\<>/?'
chan_allowed_chars_tbl = str.maketrans('', '', chan_allowed_chars)
def validate_chan(chan_name):
if chan_name[0] != '#':
return False
badchars = chan_name[1:].translate(chan_allowed_chars_tbl)
return badchars == ''
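# Quick self-check of the table-based validation above (a sketch, not part of
# the original module): str.translate() deletes every allowed character, so a
# valid name is one that translates down to the empty string.
assert validate_nick('guest|42')
assert not validate_nick('9starts-with-digit')
assert validate_chan('#mammon')
assert not validate_chan('lobby')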
def uniq(input):
output = []
for x in input:
if x not in output:
output.append(x)
return output
class UserHost:
def __init__(self, nuh):
self.nuh = nuh
# XXX - put try:except on these just in case doesn't exist
@property
def nickname(self):
return self.nuh.split('!')[0]
@property
def username(self):
return self.nuh.split('!')[1].split('@')[0]
@property
def hostname(self):
return self.nuh.split('@')[1]
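# Example (a sketch): parsing a full nick!user@host mask.
_uh = UserHost('kaniini!~kaniini@localhost')
assert (_uh.nickname, _uh.username, _uh.hostname) == \
    ('kaniini', '~kaniini', 'localhost')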
"""
mainwindow.py
Python programming for the primary MUCK window.
"""
import json
try:
import pygtk
pygtk.require("2.0")
except ImportError:
pass
import gobject
from gi.repository import Gtk, GLib
from aboutwindow import AboutWindow
from newaliaswindow import NewAliasWindow
from newlogwindow import NewLogWindow
from connection import Connection
from savetextbufferwindow import SaveTextBufferWindow
class MainWindow(object):
"""
Main window of the application.
"""
application = None
"""
Primary application state.
"""
output_buffer = None
"""
The text buffer used for the output.
"""
has_aliases = False
"""
Whether or not we currently have aliases.
"""
def __init__(self, application):
builder = Gtk.Builder()
builder.add_from_file("ui/Main.glade")
self.window = builder.get_object("MainWindow")
self.label_output = builder.get_object("LabelOutput")
self.label_output.set_use_markup(True)
self.alias_list = builder.get_object("ListBoxAliases")
self.scroll_window = builder.get_object("ViewPortContent")
self.logging_enable = builder.get_object("CheckItemLoggingEnable")
self.view_port_aliases = builder.get_object("ViewPortAliases")
self.entry_input = builder.get_object("EntryInput")
self.application = application
self.add_alias("No Aliases.")
builder.connect_signals(self)
self.window.set_visible(True)
def add_alias(self, name):
"""
Adds an alias to be displayed on the alias list window.
"""
# Build the new row
row = Gtk.ListBoxRow()
row.set_visible(True)
text = Gtk.Label(name)
icon = None
# If it's just the "No Aliases" text, don't use a grid.
if name == "No Aliases.":
row.add(text)
else:
icon = Gtk.Image()
icon.set_from_stock("gtk-dialog-error", 2)
icon.set_visible(True)
grid = Gtk.Grid()
grid.insert_row(0)
grid.insert_column(0)
grid.add(icon)
grid.insert_column(1)
grid.add(text)
grid.set_visible(True)
row.add(grid)
# If we just added a fresh character, clear the list box
if not self.has_aliases:
self.has_aliases = True
self.view_port_aliases.remove(self.alias_list)
self.alias_list = Gtk.ListBox()
self.alias_list.set_visible(True)
self.view_port_aliases.add(self.alias_list)
# Rebind the select event
self.alias_list.connect("row-selected", self.alias_selected)
text.set_visible(True)
self.alias_list.add(row)
return icon
def toggle_logging(self, element):
"""
Signal that's called when the user clicks the Logging->Logging Enable option.
"""
if self.application.selected_alias is not None:
activated = element.get_active()
self.application.alias_states[self.application.selected_alias]["logging"] = activated
if activated is True:
window = NewLogWindow(self.application, self.application.selected_alias)
else:
self.application.alias_states[self.application.selected_alias]["logfile"] = None
# FIXME: Write the unmodified buffer
def save_text_buffer(self, element):
if self.application.selected_alias is not None:
window = SaveTextBufferWindow(self.application, self.application.selected_alias)
def key_pressed(self, element, event):
"""
Signal that's called when the user presses any key on the input text box.
"""
state, code = event.get_keycode()
# We only want to send the current input when enter is struck
if code == 36:
self.send_text(self.entry_input)
def show_about_window(self, element):
"""
Signal that's called when the user clicks the Help->About.
"""
window = AboutWindow(self.application)
def show_new_alias_window(self, element):
"""
Signal that's called when the user clicks the File->New Alias.
"""
window = NewAliasWindow(self.application)
def alias_selected(self, element, row):
"""
Signal that's called when an alias is selected.
"""
# FIXME: Why is this signal raised at application exit?
if row is None:
return
children = row.get_children()
if type(children[0]) is Gtk.Grid:
name = children[0].get_child_at(1, 0).get_text()
icon = children[0].get_child_at(0, 0)
icon.set_from_stock("gtk-yes", 2)
self.application.selected_alias = name
if self.application.alias_states[name]["logfile"] is None:
self.logging_enable.set_active(False)
else:
self.logging_enable.set_active(True)
if self.application.alias_states[name]["connection"] is None or self.application.alias_states[name]["connection"].is_connected() is False:
self.application.alias_states[name]["connection"] = Connection(self.application.config["aliases"][name]["address"])
self.label_output.set_markup(self.application.alias_states[name]["connection"].buffer)
self.scroll_window.get_vadjustment().set_value(1.0)
def send_text(self, element):
"""
Signal that's called when the user presses "Send" or presses "Enter" when entering text.
"""
text = self.entry_input.get_text()
self.entry_input.set_text("")
if self.application.selected_alias is not None:
self.application.alias_states[self.application.selected_alias]["connection"].send(text)
def close(self, element):
"""
Signal that's called when the user clicks the X for the window or uses File->Quit.
"""
config_string = json.dumps(self.application.config, sort_keys=True, indent=4, separators=(',', ': '))
with open("config.txt", "w") as handle:
handle.write(config_string)
Gtk.main_quit()
def window_focused(self, element, event):
"""
Called when the window is focused to force focusing on the text input for ease of access.
"""
self.entry_input.grab_focus()
return False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Checker for file headers
~~~~~~~~~~~~~~~~~~~~~~~~
Make sure each Python file has a correct file header
including copyright and license information.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import io
import os
import re
import sys
import getopt
from os.path import join, splitext, abspath
checkers = {}
def checker(*suffixes, **kwds):
only_pkg = kwds.pop('only_pkg', False)
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
func.only_pkg = only_pkg
return func
return deco
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^    :copyright: Copyright 2006-2015 by '
r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
"informations", "unlexer"] # ALLOW-MISSPELLING
@checker('.py')
def check_syntax(fn, lines):
if '#!/' in lines[0]:
lines = lines[1:]
if 'coding:' in lines[0]:
lines = lines[1:]
try:
compile('\n'.join(lines), fn, "exec")
except SyntaxError as err:
yield 0, "not compilable: %s" % err
@checker('.py')
def check_style_and_encoding(fn, lines):
for lno, line in enumerate(lines):
if len(line) > 110:
yield lno+1, "line too long"
if is_const_re.search(line):
yield lno+1, 'using == None/True/False'
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == ['#!/usr/bin/env python']:
lines = lines[1:]
c = 2
llist = []
docopen = False
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l != '# -*- coding: utf-8 -*-':
yield 1, "missing coding declaration"
elif lno == 1:
if l != '"""' and l != 'r"""':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == '"""':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != "" and l[:4] != ' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:] == modname:
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * "~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
yield 0, "missing end and/or start of docstring..."
# check for copyright and license fields
license = llist[-2:-1]
if license != [" :license: BSD, see LICENSE for details."]:
yield 0, "no correct license info"
ci = -3
copyright = llist[ci:ci+1]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
opts = {}
for opt, val in gopts:
if opt == '-i':
val = abspath(val)
opts.setdefault(opt, []).append(val)
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
verbose = '-v' in opts
num = 0
out = io.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
if '.hg' in dirs:
dirs.remove('.hg')
if 'examplefiles' in dirs:
dirs.remove('examplefiles')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
# XXX: awkward: for the Makefile call: don't check non-package
# files for file headers
in_pygments_pkg = root.startswith('./pygments')
for fn in files:
fn = join(root, fn)
if fn[:2] == './':
fn = fn[2:]
if '-i' in opts and abspath(fn) in opts['-i']:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print("Checking %s..." % fn)
try:
lines = open(fn, 'rb').read().decode('utf-8').splitlines()
except (IOError, OSError) as err:
print("%s: cannot open: %s" % (fn, err))
num += 1
continue
for checker in checkerlist:
if not in_pygments_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
print(u"%s:%d: %s" % (fn, lno, msg), file=out)
num += 1
if verbose:
print()
if num == 0:
print("No errors found.")
else:
print(out.getvalue().rstrip('\n'))
print("%d error%s found." % (num, num > 1 and "s" or ""))
return int(num > 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
#!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
def get_num_of_cpu():
''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
'''
try:
platform = sys.platform
if platform == 'win32':
if 'NUMBER_OF_PROCESSORS' in os.environ:
return int(os.environ['NUMBER_OF_PROCESSORS'])
else:
return 1
else:
from numpy.distutils import cpuinfo
return cpuinfo.cpu._getNCPUs()
except Exception:
print "Can't know cpuinfo, use default 1 cpu"
return 1
def check_environment_variables():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
return NDK_ROOT
def check_environment_variables_sdk():
''' Checking the environment ANDROID_SDK_ROOT, which will be used for building
'''
try:
SDK_ROOT = os.environ['ANDROID_SDK_ROOT']
except Exception:
print "ANDROID_SDK_ROOT not defined. Please define ANDROID_SDK_ROOT in your environment"
sys.exit(1)
return SDK_ROOT
def select_toolchain_version():
pass
def calculate_built_samples(args):
''' Compute the samples to be built
'cpp' for short of all cpp tests
'lua' for short of all lua tests
'''
if 'all' in args:
return ALL_SAMPLES
targets = []
if 'cpp' in args:
targets += CPP_SAMPLES
args.remove('cpp')
if 'lua' in args:
targets += LUA_SAMPLES
args.remove('lua')
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
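# e.g. calculate_built_samples(['cpp', 'lua-tests']) expands 'cpp' into
# CPP_SAMPLES and keeps 'lua-tests'; duplicates are dropped via set(), so the
# order of the returned list is not guaranteed.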
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode):
ndk_path = os.path.join(ndk_root, "ndk-build")
# windows should use ";" to seperate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s;%s/external;%s/cocos' % (cocos_root, cocos_root, cocos_root)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s:%s/external:%s/cocos' % (cocos_root, cocos_root, cocos_root)
num_of_cpu = get_num_of_cpu()
if ndk_build_param is None:
command = '%s -j%d -C %s NDK_DEBUG=%d %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_module_path)
else:
command = '%s -j%d -C %s NDK_DEBUG=%d %s %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_build_param, ndk_module_path)
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
elif android_platform is not None:
sdk_tool_path = os.path.join(sdk_root, "tools/android")
cocoslib_path = os.path.join(cocos_root, "cocos/platform/android/java")
command = '%s update lib-project -t %s -p %s' % (sdk_tool_path,android_platform,cocoslib_path)
if os.system(command) != 0:
raise Exception("update cocos lib-project [ " + cocoslib_path + " ] fails!")
command = '%s update project -t %s -p %s -s' % (sdk_tool_path,android_platform,app_android_root)
if os.system(command) != 0:
raise Exception("update project [ " + app_android_root + " ] fails!")
buildfile_path = os.path.join(app_android_root, "build.xml")
command = 'ant clean %s -f %s -Dsdk.dir=%s' % (build_mode,buildfile_path,sdk_root)
os.system(command)
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
# Android can not package the file that ends with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_file(src_file, dst):
if not src_file.startswith('.') and not src_file.endswith('.gz') and os.path.isfile(src_file):
shutil.copy(src_file, dst)
def copy_resources(target, app_android_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
os.mkdir(assets_dir)
# copy resources(cpp samples)
if target in CPP_SAMPLES:
resources_dir = os.path.join(app_android_root, "../Resources")
if os.path.isdir(resources_dir):
copy_files(resources_dir, assets_dir)
# lua samples should copy lua script
if target in LUA_SAMPLES:
resources_dir = os.path.join(app_android_root, "../../res")
assets_res_dir = os.path.join(assets_dir, "res")
os.mkdir(assets_res_dir)
if target != "lua-tests":
copy_files(resources_dir, assets_res_dir)
src_dir = os.path.join(app_android_root, "../../src")
assets_src_dir = os.path.join(assets_dir, "src")
os.mkdir(assets_src_dir)
copy_files(src_dir, assets_src_dir)
common_script_dir = os.path.join(app_android_root, "../../../../cocos/scripting/lua-bindings/script/")
cocos_src_dir = os.path.join(assets_src_dir,"cocos")
if os.path.exists(cocos_src_dir):
shutil.rmtree(cocos_src_dir)
os.mkdir(cocos_src_dir)
copy_files(common_script_dir, cocos_src_dir)
luasocket_script_dir = os.path.join(app_android_root, "../../../../external/lua/luasocket")
for root, dirs, files in os.walk(luasocket_script_dir):
for f in files:
if os.path.splitext(f)[1] == '.lua':
fall = os.path.join(root, f)
shutil.copy(fall, assets_dir)
# lua-tests shared resources with cpp-tests
if target == "lua-tests":
resources_cocosbuilder_res_dir = os.path.join(resources_dir, "cocosbuilderRes")
assets_cocosbuilder_res_dir = os.path.join(assets_res_dir, "cocosbuilderRes")
os.mkdir(assets_cocosbuilder_res_dir)
copy_files(resources_cocosbuilder_res_dir, assets_cocosbuilder_res_dir)
resources_dir = os.path.join(app_android_root, "../../../cpp-tests/Resources")
copy_files(resources_dir, assets_res_dir)
if target == "lua-game-controller-test":
print("coming generator game controller")
resources_dir = os.path.join(app_android_root, "../../../game-controller-test/Resources")
copy_files(resources_dir, assets_res_dir)
def build_samples(target,ndk_build_param,android_platform,build_mode):
ndk_root = check_environment_variables()
sdk_root = None
select_toolchain_version()
build_targets = calculate_built_samples(target)
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "..")
if android_platform is not None:
sdk_root = check_environment_variables_sdk()
if android_platform.isdigit():
android_platform = 'android-'+android_platform
else:
print 'please use a valid android platform'
exit(1)
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test/proj.android",
"game-controller-test": "tests/game-controller-test/proj.android",
"cpp-tests": "tests/cpp-tests/proj.android",
"lua-empty-test": "tests/lua-empty-test/project/proj.android",
"lua-tests": "tests/lua-tests/project/proj.android",
"lua-game-controller-test": "tests/lua-game-controller-test/project/proj.android"
}
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
copy_resources(target, app_android_root)
do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
This script is mainly used for building tests built-in with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py -p 10 cpp-empty-test lua-empty-test
Note: You should install ant to generate an apk while building the android tests. But it is optional. You can generate the apk with eclipse.
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='Parameter for ndk-build')
parser.add_option("-p", "--platform", dest="android_platform",
help='Parameter for android-update. Without the parameter,the script just build dynamic library for the projects. Valid android-platform are:[10|11|12|13|14|15|16|17|18|19]')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for java project,debug[default] or release. Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.ndk_build_param,opts.android_platform,opts.build_mode)
except Exception as e:
print e
sys.exit(1)
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessor applying tf.transform to the chicago_taxi data."""
# pytype: skip-file
from __future__ import absolute_import, division, print_function
import argparse
import os
import tensorflow as tf
import tensorflow_transform as transform
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema
import apache_beam as beam
from apache_beam.io.gcp.bigquery import ReadFromBigQuery
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.testing.load_tests.load_test_metrics_utils import (
MeasureTime, MetricsReader)
from trainer import taxi
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
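# An illustrative note (not part of the original module): given a rank-2
# SparseTensor such as
#   tf.SparseTensor(indices=[[0, 0], [2, 0]], values=['a', 'b'], dense_shape=[3, 1])
# _fill_in_missing densifies it to ['a', '', 'b'] -- rows with no value are
# filled with '' (or 0 for numeric dtypes) and the size-1 second dimension is
# squeezed away, leaving a rank-1 tensor.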
def transform_data(
input_handle,
outfile_prefix,
working_dir,
schema_file,
transform_dir=None,
max_rows=None,
pipeline_args=None,
publish_to_bq=False,
project=None,
metrics_table=None,
metrics_dataset=None):
"""The main tf.transform method which analyzes and transforms data.
Args:
input_handle: BigQuery table name to process specified as DATASET.TABLE or
path to csv file with input data.
outfile_prefix: Filename prefix for emitted transformed examples
working_dir: Directory in which transformed examples and transform function
will be emitted.
    schema_file: A file path that contains a text-serialized TensorFlow
      metadata schema of the input data.
transform_dir: Directory in which the transform output is located. If
provided, this will load the transform_fn from disk instead of computing
it over the data. Hint: this is useful for transforming eval data.
max_rows: Number of rows to query from BigQuery
pipeline_args: additional DataflowRunner or DirectRunner args passed to the
beam pipeline.
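    publish_to_bq: Whether to publish pipeline metrics to BigQuery.
    project: GCP project used for the BigQuery read and for metrics publishing.
    metrics_table: BigQuery table the metrics are published to.
    metrics_dataset: BigQuery dataset the metrics are published to.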
"""
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in taxi.VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[taxi.transformed_name(
key)] = transform.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=taxi.VOCAB_SIZE,
num_oov_buckets=taxi.OOV_SIZE)
for key in taxi.BUCKET_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = transform.bucketize(
_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)
for key in taxi.CATEGORICAL_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
tf.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
tf.int64))
return outputs
namespace = metrics_table
metrics_monitor = None
if publish_to_bq:
metrics_monitor = MetricsReader(
publish_to_bq=publish_to_bq,
project_name=project,
bq_table=metrics_table,
bq_dataset=metrics_dataset,
namespace=namespace,
filters=MetricsFilter().with_namespace(namespace))
schema = taxi.read_schema(schema_file)
raw_feature_spec = taxi.get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)
pipeline = beam.Pipeline(argv=pipeline_args)
with tft_beam.Context(temp_dir=working_dir):
query = taxi.make_sql(input_handle, max_rows, for_eval=False)
raw_data = (
pipeline
| 'ReadBigQuery' >> ReadFromBigQuery(
query=query, project=project, use_standard_sql=True)
| 'Measure time: start' >> beam.ParDo(MeasureTime(namespace)))
decode_transform = beam.Map(
taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)
if transform_dir is None:
decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
transform_fn = ((decoded_data, raw_data_metadata) |
('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))
_ = (
transform_fn |
('WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir)))
else:
transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)
    # Shuffling the data before materialization will improve training
    # effectiveness downstream. Here we shuffle the raw_data (as opposed to
    # decoded data) since it has a compact representation.
shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()
decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
(transformed_data,
transformed_metadata) = (((decoded_data, raw_data_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
_ = (
transformed_data
| 'SerializeExamples' >> beam.Map(coder.encode)
| 'Measure time: end' >> beam.ParDo(MeasureTime(namespace))
| 'WriteExamples' >> beam.io.WriteToTFRecord(
os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz'))
result = pipeline.run()
result.wait_until_finish()
if metrics_monitor:
metrics_monitor.publish_metrics(result)
def main():
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help=('Input BigQuery table to process specified as: '
'DATASET.TABLE'))
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
parser.add_argument(
'--output_dir',
help=(
'Directory in which transformed examples and function '
'will be emitted.'))
parser.add_argument(
'--outfile_prefix',
help='Filename prefix for emitted transformed examples')
parser.add_argument(
'--transform_dir',
required=False,
default=None,
help='Directory in which the transform output is located')
parser.add_argument(
'--max_rows',
help='Number of rows to query from BigQuery',
default=None,
type=int)
  parser.add_argument(
      '--publish_to_big_query',
      help='Whether to publish to BQ',
      default=None,
      # argparse's `type=bool` would treat any non-empty string (including
      # 'False') as True, so parse the flag value explicitly instead.
      type=lambda v: v.lower() in ('true', '1', 'yes'))
parser.add_argument(
'--metrics_dataset', help='BQ dataset', default=None, type=str)
parser.add_argument(
'--metrics_table', help='BQ table', default=None, type=str)
parser.add_argument(
'--metric_reporting_project',
help='BQ table project',
default=None,
type=str)
known_args, pipeline_args = parser.parse_known_args()
transform_data(
input_handle=known_args.input,
outfile_prefix=known_args.outfile_prefix,
working_dir=known_args.output_dir,
schema_file=known_args.schema_file,
transform_dir=known_args.transform_dir,
max_rows=known_args.max_rows,
pipeline_args=pipeline_args,
publish_to_bq=known_args.publish_to_big_query,
metrics_dataset=known_args.metrics_dataset,
metrics_table=known_args.metrics_table,
project=known_args.metric_reporting_project)
if __name__ == '__main__':
main()
| |
''' This module computes similarities between samples.
To avoid O(N**2) running time we're using a banded
    min hash technique to reduce the number of comparisons when
a new sample is encountered.
* http://en.wikipedia.org/wiki/Jaccard_index
* http://en.wikipedia.org/wiki/MinHash
* http://infolab.stanford.edu/~ullman/mmds/ch3.pdf
'''
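# A rough intuition for the banding parameters (an editorial note, not part of
# the original module): with b bands of r rows each, two items whose minhash
# signatures agree on a fraction s of positions (s approximates their Jaccard
# similarity) become candidates with probability 1 - (1 - s**r)**b. With the
# defaults below (b=10, r=4) that is roughly 0.75 at s=0.6 and above 0.93 at
# s=0.7, so fairly similar items almost always collide in at least one band.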
import os, sys
import pickle
import random
import hashlib
import collections
from struct import unpack
class MinHash():
''' This class implements MinHash (en.wikipedia.org/wiki/Minhash)
for sparse datasets. It also does banded LSH 'Locality Sensitive Hashing'
so that only candidates with a high probability of being similar are
returned by getCandidatePairs().
'''
def __init__(self, num_hashes=40, lsh_bands=10, lsh_rows=4, bin_limit=1000, load_models=None, drop_duplicates=False, verbose=False):
''' Init for MinHash '''
# Minhash signatures, hashing and banding parameters
self._minhash_sigs = {}
self._num_hashes = num_hashes
self._lsh_bands = lsh_bands
self._lsh_rows = lsh_rows
self._hash_salt = []
self._bin_limit = bin_limit
for i in xrange(num_hashes):
self._hash_salt.append(str(int(random.random()*100)))
# Storage for candidate buckets
def _min_hash_hash_bucket():
''' Defining a hash bucket 'callable' for the candidate buckets '''
return collections.defaultdict(list)
self._candidate_buckets = collections.defaultdict(_min_hash_hash_bucket)
# Set of All 2 All, Candidate Pairs
self._all_candidate_pairs = set()
# Hash storage for instances (used for duplicate detection)
self._instances_hashes = set()
self._drop_duplicates = drop_duplicates
# Verbose flag
self.verbose = verbose
# Existing model load?
if (load_models):
# Salt has to be reloaded, everything else is optional
self._hash_salt = self._load_model_from_disk('min_hash_salt', 'models')
if ('buckets' in load_models):
self._candidate_buckets = self._load_model_from_disk('min_hash_candidate_buckets', 'models')
if ('pairs' in load_models):
self._all_candidate_pairs = self._load_model_from_disk('min_hash_all_candidate_pairs', 'models')
if ('minhash' in load_models):
self._minhash_sigs = self._load_model_from_disk('min_hash_minhash_sigs', 'models')
    def vprint(self, *args):
        ''' Print the given arguments, but only when verbose mode is on '''
        if self.verbose:
            for a in args:
                sys.stdout.write(str(a))
            sys.stdout.write('\n')
    def reset(self):
        ''' Reset for MinHash '''
        # Reset Minhash signatures
        self._minhash_sigs = {}
        # Reset storage for candidate buckets (the factory must be redefined
        # here because the one in __init__ was local to that method)
        def _min_hash_hash_bucket():
            ''' Defining a hash bucket 'callable' for the candidate buckets '''
            return collections.defaultdict(list)
        self._candidate_buckets = collections.defaultdict(_min_hash_hash_bucket)
        # Reset All 2 All, Candidate Pairs
        self._all_candidate_pairs = set()
        # Reset hash storage for instances (used for duplicate detection)
        self._instances_hashes = set()
def add_instance(self, name, attribute_list):
''' Add an instance to the min hash model '''
# Make sure the attributes are coming in the right way
if not isinstance(attribute_list, list):
print 'Min_hash.addinstance() : Attributes must be in a list!'
print type(attribute_list)
print 'Ignoring...'
return
if not all(isinstance(x,str) or isinstance(x,unicode) for x in attribute_list):
print 'Min_hash.addinstance() : All attributes must be of str or unicode type!'
print attribute_list
print 'Ignoring...'
return
# Drop duplicates?
if (self._drop_duplicates):
instance_hash = self._hash_list_as_string(attribute_list)
if (instance_hash in self._instances_hashes):
return
else:
self._instances_hashes.add(instance_hash)
# Compute the min hash signature and add to candidate buckets
self._minhash_sigs[name] = self.compute_minhash_sig(attribute_list)
self._add_to_candidate_buckets(name, self._minhash_sigs[name])
def compute_minhash_sig(self, attribute_list):
''' Compute the min hash signature '''
minhash_sig = []
for salt in self._hash_salt:
minhash_sig.append(self._minhash_hash(salt, attribute_list))
return minhash_sig
def candidate_query(self, attribute_list):
# Compute the min hash signature and build a candidate match list
minhash_sig = self.compute_minhash_sig(attribute_list)
# Signature width
bands = self._lsh_bands
rows = self._lsh_rows
sig_width = bands*rows
# Getting matches from Hash Buckets
_candidate_matches = set()
for y_index in xrange(0, sig_width, rows):
candidate_list = self._candidate_buckets[y_index][self._hash_list_as_string(minhash_sig[y_index:y_index+rows])]
for match in candidate_list:
_candidate_matches.add(match)
# Return just the matches
return _candidate_matches
def compute_all_candidate_matches(self):
''' Compute band based candidate list for all instances in the model '''
self.vprint('\tComputing All to All Candidates Matches...')
self._all_to_all_matches()
def get_candidate_pairs(self):
''' Get the candidate pairs for all instances in the model '''
return self._all_candidate_pairs
def save_model_to_disk(self):
''' Save all the minhash internal models to disk '''
self._save_model_to_disk('min_hash_salt', self._hash_salt, 'models')
self._save_model_to_disk('min_hash_candidate_buckets', self._candidate_buckets, 'models')
self._save_model_to_disk('min_hash_all_candidate_pairs', self._all_candidate_pairs, 'models')
self._save_model_to_disk('min_hash_minhash_sigs', self._minhash_sigs, 'models')
# This function needs to be highly optimized
# Compute min hash on a list of items
def _minhash_slow(self, salt, v_list):
''' Compute a hash value for the list of values, the 'salt' is a random permutation factor '''
minhash = 'ffffffffffffffffffffffffffffffff'
for value in v_list:
h_value = hashlib.md5(value+salt).hexdigest()
if (h_value < minhash):
minhash = h_value
return minhash
def _minhash_hash(self, salt, v_list):
''' Compute a hash value for the list of values, the 'salt' is a random permutation factor '''
minhash = sys.maxint
for value in v_list:
h_value = unpack('<IIII', hashlib.md5(value+salt).digest())[0]
if (h_value < minhash):
minhash = h_value
return minhash
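    # An editorial note (not in the original): unpack('<IIII', ...)[0] reads the
    # 16-byte MD5 digest as four little-endian unsigned 32-bit integers and
    # keeps only the first, which is much faster to compare than the full hex
    # digest used by _minhash_slow above.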
# Hash a list of items
def _hash_list_as_string(self, x_list):
''' Compute a hash value for the list of values by turning the list into a string first '''
return hashlib.md5(str(x_list)).hexdigest()
def _add_to_candidate_buckets(self, name, minhash_sig):
''' Add this minhash signature to the candidate buckets '''
# Signature width
bands = self._lsh_bands
rows = self._lsh_rows
sig_width = bands*rows
for y_index in xrange(0, sig_width, rows):
# Fixme: not totally sure what to do as these buckets get really big
hash_key = self._hash_list_as_string(minhash_sig[y_index:y_index+rows])
self._candidate_buckets[y_index][hash_key].append(name)
def _all_to_all_matches(self):
''' Getting the candidate matches for all instances in the model '''
# Linear pass to collapse candidate pairs (the buckets will have repeats)
self.vprint('\t\tCollapsing Candidate Pairs...')
for _key, subdict in self._candidate_buckets.iteritems():
for __key, candidate_list in subdict.iteritems():
# Sanity check
if (len(candidate_list) > self._bin_limit):
print 'Hashing function issue, key: (%s,%s) has %d items in it out of %s slots' % (_key, __key, len(candidate_list), self._bin_limit)
candidate_list = candidate_list[:self._bin_limit]
for source in candidate_list:
for target in candidate_list:
if (source != target):
if (source < target):
self._all_candidate_pairs.add((source, target))
else:
self._all_candidate_pairs.add((target, source))
def _save_model_to_disk(self, name, model, model_dir):
''' Save a particular model to disk '''
# First serialized the model
serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
# Model directory + model name
model_path = os.path.join(model_dir, name+'.model')
# Now store it to disk
print 'Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model)/1024.0/1024.0)
open(model_path,'wb').write(serialized_model)
def _load_model_from_disk(self, name, model_dir):
''' Load a particular model from disk '''
# Model directory is relative to this file
model_path = os.path.join(model_dir, name+'.model')
# Put a try/except around the model load in case it fails
        try:
            model = pickle.loads(open(model_path,'rb').read())
        except Exception:
            print 'Could not load model %s from %s!' % (name, model_path)
            sys.exit(1)
return model
# Simple test of the min_hash functionality
def _test():
import pprint
my_min = MinHash(num_hashes=40, lsh_bands=20, lsh_rows=2, drop_duplicates=True)
my_min.add_instance(1, ['a','b','c','d'])
my_min.add_instance(2, ['a','b','d'])
my_min.add_instance(3, ['a','b','e','d'])
my_min.add_instance(4, ['w','x','y','z'])
my_min.add_instance(5, ['x','y','z'])
my_min.add_instance(6, ['w','x','q','z','y'])
my_min.add_instance(7, ['r','s','t'])
my_min.add_instance(8, ['u','s','t'])
my_min.compute_all_candidate_matches()
pairs = my_min.get_candidate_pairs()
print 'All candidate pairs'
pprint.pprint(pairs)
print 'Query on [x,y,z,h]'
matches = my_min.candidate_query(['x','y','z','h'])
pprint.pprint(matches)
if __name__ == '__main__':
_test()
| |
#!/usr/bin/python
import numpy
from random import randint
# Theano Packages
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from theano.ifelse import ifelse
"""
# For 3X faster Convolutions
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.pool import MaxPool
from theano.sandbox.cuda.basic_ops import gpu_contiguous
"""
#### rectified linear unit
def ReLU(x):
y = T.maximum(0.0, x)
return(y)
#### sigmoid
def Sigmoid(x):
y = T.nnet.sigmoid(x)
return(y)
#### tanh
def Tanh(x):
y = T.tanh(x)
return(y)
#### softmax
def Softmax(x):
return T.nnet.softmax(x)
# SVM layer from the discussions in this group
# https://groups.google.com/forum/#!msg/theano-users/on4D16jqRX8/IWGa-Gl07g0J
class SVMLayer(object):
def __init__(self, input, n_in, n_out, W=None, b=None):
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
self.W = theano.shared(value=numpy.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
else:
self.W = W
# initialize the baises b as a vector of n_out 0s
if b is None:
self.b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
else:
            self.b = b
# parameters of the model
self.params = [self.W, self.b]
self.output = T.dot(input, self.W) + self.b
self.y_pred = T.argmax(self.output, axis=1)
def hinge(self, u):
return T.maximum(0, 1 - u)
def ova_svm_cost(self, y1):
""" return the one-vs-all svm cost
given ground-truth y in one-hot {-1, 1} form """
y1_printed = theano.printing.Print('this is important')(T.max(y1))
margin = y1 * self.output
cost = self.hinge(margin).mean(axis=0).sum()
return cost
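    # An editorial sketch (not in the original): y1 is expected as a
    # (batch, n_out) matrix of {-1, +1} one-hot labels, so margin[i, j] is
    # positive when class j is scored on the correct side of the decision
    # boundary; the hinge penalizes margins below 1, averaged over the batch
    # and summed over the classes.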
def errors(self, y):
""" compute zero-one loss
note, y is in integer form, not one-hot
"""
# check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.sum(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
# Modified From https://github.com/mdenil/dropout/blob/master/mlp.py
class LogisticRegression(object):
def __init__(self, input, n_in, n_out, W=None, b=None ):
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
self.W = theano.shared(
value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
name='W')
else:
self.W = W
# initialize the baises b as a vector of n_out 0s
if b is None:
self.b = theano.shared(
value=numpy.zeros((n_out,), dtype=theano.config.floatX),
name='b')
else:
self.b = b
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
self.probabilities = T.log(self.p_y_given_x)
def negative_log_likelihood(self, y ):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def categorical_cross_entropy( self, y ):
return T.mean(T.nnet.categorical_crossentropy(self.p_y_given_x,y))
def binary_cross_entropy ( self, y ):
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x,y))
def errors(self, y):
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
return T.sum(T.neq(self.y_pred, y)) # L1 norm of the error.
else:
raise NotImplementedError()
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out,
activation, W=None, b=None,
use_bias=False):
self.input = input
self.activation = activation
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),dtype=theano.config.floatX)
if activation == Sigmoid or activation == T.nnet.sigmoid:
W_values*=4
W = theano.shared(value=W_values, name='W')
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W = W
self.b = b
if use_bias:
lin_output = T.dot(input, self.W) + self.b
else:
lin_output = T.dot(input, self.W)
self.output = (lin_output if activation is None else activation(lin_output))
# parameters of the model
if use_bias:
self.params = [self.W, self.b]
else:
self.params = [self.W]
# dropout thanks to misha denil
# https://github.com/mdenil/dropout
def _dropout_from_layer(rng, layer, p):
srng = theano.tensor.shared_randomstreams.RandomStreams(
rng.randint(999999))
# p=1-p because 1's indicate keep and p is prob of dropping
mask = srng.binomial(n=1, p=1-p, size=layer.shape)
# The cast is important because
# int * float32 = float64 which pulls things off the gpu
output = layer * T.cast(mask, theano.config.floatX)
return output
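# An editorial note (not in the original): dropping units with probability p
# at training time scales the expected activation by (1 - p). The MLP class
# below therefore multiplies the shared weights by (1 - p) in the non-dropout
# path, so train-time and test-time activations match in expectation.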
class DropoutHiddenLayer(HiddenLayer):
def __init__(self, rng, input, n_in, n_out,
activation, dropout_rate, use_bias, W=None, b=None):
super(DropoutHiddenLayer, self).__init__(
rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
activation=activation, use_bias=use_bias)
self.output = _dropout_from_layer(rng, self.output, p=dropout_rate)
class MLP(object):
def __init__(self,
rng,
input,
layer_sizes,
dropout_rates,
activations,
use_bias=True,
svm_flag = True,
params = [],
verbose = True):
weight_matrix_sizes = zip(layer_sizes, layer_sizes[1:])
self.layers = []
self.dropout_layers = []
next_layer_input = input
next_dropout_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])
layer_counter = 0
self.dropout_L1 = theano.shared(0)
self.dropout_L2 = theano.shared(0)
self.L1 = theano.shared(0)
self.L2 = theano.shared(0)
count = 0
if len(dropout_rates) > 1:
for n_in, n_out in weight_matrix_sizes[:-1]:
if verbose is True:
print " --> initializing mlp Layer with " + str(n_out) + " hidden units taking in input size " + str(n_in)
if len(params) < count + 1:
next_dropout_layer = DropoutHiddenLayer(rng=rng,
input=next_dropout_layer_input,
activation=activations[layer_counter],
n_in=n_in, n_out=n_out, use_bias=use_bias,
dropout_rate=dropout_rates[layer_counter + 1])
else:
next_dropout_layer = DropoutHiddenLayer(rng=rng,
input=next_dropout_layer_input,
activation=activations[layer_counter],
n_in=n_in, n_out=n_out, use_bias=use_bias,
dropout_rate=dropout_rates[layer_counter + 1],
W = params[count],
b= params[count+1])
self.dropout_layers.append(next_dropout_layer)
next_dropout_layer_input = next_dropout_layer.output
self.dropout_L1 = self.dropout_L1 + abs(self.dropout_layers[-1].W).sum()
self.dropout_L2 = self.dropout_L2 + abs(self.dropout_layers[-1].W**2).sum()
# Reuse the paramters from the dropout layer here, in a different
# path through the graph.
next_layer = HiddenLayer(rng=rng,
input=next_layer_input,
activation=activations[layer_counter],
# scale the weight matrix W with (1-p)
W=next_dropout_layer.W * (1 - dropout_rates[layer_counter]),
b=next_dropout_layer.b,
n_in=n_in, n_out=n_out,
use_bias=use_bias)
self.layers.append(next_layer)
next_layer_input = next_layer.output
#first_layer = False
self.L1 = self.L1 + abs(self.layers[-1].W).sum()
self.L2 = self.L2 + abs(self.layers[-1].W**2).sum()
layer_counter += 1
count = count + 2
# Set up the output layer
n_in, n_out = weight_matrix_sizes[-1]
else:
next_layer_input = input
n_in, n_out = weight_matrix_sizes[-1]
# Again, reuse paramters in the dropout output.
if svm_flag is False:
if verbose is True:
print " --> initializing regression layer with " + str(n_out) + " output units and " + str(n_in) + " input units"
if not len(params) < count + 1:
dropout_output_layer = LogisticRegression(
input=next_dropout_layer_input,
n_in=n_in, n_out=n_out,
W = params[count], b = params[count+1])
output_layer = LogisticRegression(
input=next_layer_input,
# scale the weight matrix W with (1-p)
W=dropout_output_layer.W * (1 - dropout_rates[-1]),
b=dropout_output_layer.b,
n_in=n_in, n_out=n_out)
else:
dropout_output_layer = LogisticRegression(
input=next_dropout_layer_input,
n_in=n_in, n_out=n_out
)
output_layer = LogisticRegression(
input=next_layer_input,
# scale the weight matrix W with (1-p)
n_in=n_in, n_out=n_out,
W=dropout_output_layer.W * (1 - dropout_rates[-1]),
b=dropout_output_layer.b
)
self.layers.append(output_layer)
self.dropout_layers.append(dropout_output_layer)
self.dropout_negative_log_likelihood = self.dropout_layers[-1].negative_log_likelihood
self.negative_log_likelihood = self.layers[-1].negative_log_likelihood
self.dropout_cross_entropy = self.dropout_layers[-1].categorical_cross_entropy
self.cross_entropy = self.layers[-1].categorical_cross_entropy
self.dropout_binary_entropy = self.dropout_layers[-1].binary_cross_entropy
self.binary_entropy = self.layers[-1].binary_cross_entropy
self.dropout_L1 = self.dropout_L1 + abs(self.dropout_layers[-1].W).sum()
self.dropout_L2 = self.dropout_L2 + abs(self.dropout_layers[-1].W**2).sum()
self.L1 = self.L1 + abs(self.layers[-1].W).sum()
self.L2 = self.L2 + abs(self.layers[-1].W**2).sum()
else:
if verbose is True:
print " --> iunitializing max-margin layer with " + str(n_out) + " class predictors and " + str(n_in) + " input units."
if len(params) < count + 1:
dropout_output_layer = SVMLayer(
input=next_dropout_layer_input,
n_in=n_in, n_out=n_out )
output_layer = SVMLayer(input = next_layer_input,
W=dropout_output_layer.W * (1 - dropout_rates[-1]),
b=dropout_output_layer.b,
n_in = n_in,
n_out = n_out)
else:
dropout_output_layer = SVMLayer(
input=next_dropout_layer_input,
n_in=n_in, n_out=n_out,
W = params[count], b = params[count+1])
output_layer = SVMLayer(input = next_layer_input,
W=dropout_output_layer.W * (1 - dropout_rates[-1]),
b=dropout_output_layer.b,
n_in = n_in,
n_out = n_out)
self.layers.append(output_layer)
self.dropout_layers.append(dropout_output_layer)
        self.dropout_hinge_loss = self.dropout_layers[-1].ova_svm_cost
        self.hinge_loss = self.layers[-1].ova_svm_cost
# Use the negative log likelihood of the logistic regression layer as
# the objective.
self.dropout_errors = self.dropout_layers[-1].errors
self.errors = self.layers[-1].errors
        self.predicts_dropouts = self.dropout_layers[-1].y_pred
self.predicts = self.layers[-1].y_pred
self.params = [ param for layer in self.dropout_layers for param in layer.params ]
if svm_flag is True:
self.probabilities = self.layers[-1].output
else:
self.probabilities = self.layers[-1].probabilities
# From theano tutorials
class Conv2DPoolLayer(object):
"""Pool Layer of a convolutional network .. taken from the theano tutorials"""
def __init__(self, rng, input, filter_shape, image_shape, poolsize, activation, W = None, b = None,
#fast_conv = False,
verbose = True):
assert image_shape[1] == filter_shape[1]
if verbose is True:
print " --> initializing convolutional layer with " + str(filter_shape[0]) + " kernels"
print " ....... kernel size [" + str(filter_shape[2]) + " X " + str(filter_shape[3]) +"]"
print " ....... pooling size [" + str(poolsize[0]) + " X " + str(poolsize[1]) + "]"
print " ....... input size [" + str(image_shape[2]) + " " + str(image_shape[3]) + "]"
print " ....... input number of feature maps is " +str(image_shape[1])
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
if W is None:
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size =filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
else:
self.W = W
# the bias is a 1D tensor -- one bias per output feature map
if b is None:
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
else:
self.b = b
# convolve input feature maps with filters
#if fast_conv is False:
conv_out = conv.conv2d(
input=self.input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
"""
else:
conv_op = FilterActs()
input_shuffled = self.input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
filters_shuffled = self.W.dimshuffle(1, 2, 3, 0) # bc01 to c01b
contiguous_input = gpu_contiguous(input_shuffled)
contiguous_filters = gpu_contiguous(filters_shuffled)
out_shuffled = conv_op(contiguous_input, contiguous_filters)
conv_out = out_shuffled.dimshuffle(3, 0, 1, 2) # c01b to bc01
# directly lifted from http://benanne.github.io/2014/04/03/faster-convolutions-in-theano.html - Thank you.
# I am not sure if the dimshuffle makes the performance update of the conv_op any better. But hey lets give a try,
# if not always revert back to using fast_conv = 0.
"""
# downsample each feature map individually, using maxpooling
#if fast_conv is False:
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
"""
else:
pool_op = MaxPool(ds=poolsize, stride = 1)
contiguous_input = gpu_contiguous(out_shuffled)
out_shuffled = pool_op(contiguous_input)
pooled_out = out_shuffled.dimshuffle(3, 0, 1, 2) # c01b to bc01
"""
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = activation(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
self.img_shape = (filter_shape[2], filter_shape[3])
        self.tile_shape = (numpy.asarray(numpy.ceil(numpy.sqrt(filter_shape[0]*filter_shape[1])), dtype='int32'),
                           numpy.asarray(numpy.ceil(filter_shape[0]*filter_shape[1]/numpy.ceil(numpy.sqrt(filter_shape[0]*filter_shape[1]))), dtype='int32') )
self.filter_img = self.W.reshape((filter_shape[0],filter_shape[1],filter_shape[2],filter_shape[3]))
| |
"""Tests for certbot_apache._internal.parser."""
import shutil
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
import util
class BasicParserTest(util.ParserTest):
"""Apache Parser Test."""
def setUp(self): # pylint: disable=arguments-differ
super().setUp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
def test_bad_parse(self):
self.parser.parse_file(os.path.join(self.parser.root,
"conf-available", "bad_conf_file.conf"))
self.assertRaises(
errors.PluginError, self.parser.check_parsing_errors, "httpd.aug")
def test_bad_save(self):
mock_save = mock.Mock()
mock_save.side_effect = IOError
self.parser.aug.save = mock_save
self.assertRaises(errors.PluginError, self.parser.unsaved_files)
def test_aug_version(self):
mock_match = mock.Mock(return_value=["something"])
self.parser.aug.match = mock_match
# pylint: disable=protected-access
self.assertEqual(self.parser.check_aug_version(),
["something"])
self.parser.aug.match.side_effect = RuntimeError
self.assertFalse(self.parser.check_aug_version())
def test_find_config_root_no_root(self):
# pylint: disable=protected-access
os.remove(self.parser.loc["root"])
self.assertRaises(
errors.NoInstallationError, self.parser._find_config_root)
def test_parse_file(self):
"""Test parse_file.
certbot.conf is chosen as the test file as it will not be
included during the normal course of execution.
"""
file_path = os.path.join(
self.config_path, "not-parsed-by-default", "certbot.conf")
self.parser.parse_file(file_path) # pylint: disable=protected-access
# search for the httpd incl
matches = self.parser.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % file_path)
self.assertTrue(matches)
def test_find_dir(self):
test = self.parser.find_dir("Listen", "80")
# This will only look in enabled hosts
test2 = self.parser.find_dir("documentroot")
self.assertEqual(len(test), 1)
self.assertEqual(len(test2), 8)
def test_add_dir(self):
aug_default = "/files" + self.parser.loc["default"]
self.parser.add_dir(aug_default, "AddDirective", "test")
self.assertTrue(
self.parser.find_dir("AddDirective", "test", aug_default))
self.parser.add_dir(aug_default, "AddList", ["1", "2", "3", "4"])
matches = self.parser.find_dir("AddList", None, aug_default)
for i, match in enumerate(matches):
self.assertEqual(self.parser.aug.get(match), str(i + 1))
def test_add_dir_beginning(self):
aug_default = "/files" + self.parser.loc["default"]
self.parser.add_dir_beginning(aug_default,
"AddDirectiveBeginning",
"testBegin")
self.assertTrue(
self.parser.find_dir("AddDirectiveBeginning", "testBegin", aug_default))
self.assertEqual(
self.parser.aug.get(aug_default+"/directive[1]"),
"AddDirectiveBeginning")
self.parser.add_dir_beginning(aug_default, "AddList", ["1", "2", "3", "4"])
matches = self.parser.find_dir("AddList", None, aug_default)
for i, match in enumerate(matches):
self.assertEqual(self.parser.aug.get(match), str(i + 1))
for name in ("empty.conf", "no-directives.conf"):
conf = "/files" + os.path.join(self.parser.root, "sites-available", name)
self.parser.add_dir_beginning(conf, "AddDirectiveBeginning", "testBegin")
self.assertTrue(self.parser.find_dir("AddDirectiveBeginning", "testBegin", conf))
def test_empty_arg(self):
self.assertEqual(None,
self.parser.get_arg("/files/whatever/nonexistent"))
def test_add_dir_to_ifmodssl(self):
"""test add_dir_to_ifmodssl.
Path must be valid before attempting to add to augeas
"""
from certbot_apache._internal.parser import get_aug_path
# This makes sure that find_dir will work
self.parser.modules["mod_ssl.c"] = "/fake/path"
self.parser.add_dir_to_ifmodssl(
get_aug_path(self.parser.loc["default"]),
"FakeDirective", ["123"])
matches = self.parser.find_dir("FakeDirective", "123")
self.assertEqual(len(matches), 1)
self.assertTrue("IfModule" in matches[0])
def test_add_dir_to_ifmodssl_multiple(self):
from certbot_apache._internal.parser import get_aug_path
# This makes sure that find_dir will work
self.parser.modules["mod_ssl.c"] = "/fake/path"
self.parser.add_dir_to_ifmodssl(
get_aug_path(self.parser.loc["default"]),
"FakeDirective", ["123", "456", "789"])
matches = self.parser.find_dir("FakeDirective")
self.assertEqual(len(matches), 3)
self.assertTrue("IfModule" in matches[0])
def test_get_aug_path(self):
from certbot_apache._internal.parser import get_aug_path
self.assertEqual("/files/etc/apache", get_aug_path("/etc/apache"))
def test_set_locations(self):
with mock.patch("certbot_apache._internal.parser.os.path") as mock_path:
mock_path.isfile.side_effect = [False, False]
# pylint: disable=protected-access
results = self.parser._set_locations()
self.assertEqual(results["default"], results["listen"])
self.assertEqual(results["default"], results["name"])
@mock.patch("certbot_apache._internal.parser.ApacheParser.find_dir")
@mock.patch("certbot_apache._internal.parser.ApacheParser.get_arg")
def test_parse_modules_bad_syntax(self, mock_arg, mock_find):
mock_find.return_value = ["1", "2", "3", "4", "5", "6", "7", "8"]
mock_arg.return_value = None
with mock.patch("certbot_apache._internal.parser.logger") as mock_logger:
self.parser.parse_modules()
# Make sure that we got None return value and logged the file
self.assertTrue(mock_logger.debug.called)
@mock.patch("certbot_apache._internal.parser.ApacheParser.find_dir")
@mock.patch("certbot_apache._internal.apache_util._get_runtime_cfg")
def test_update_runtime_variables(self, mock_cfg, _):
define_val = (
'ServerRoot: "/etc/apache2"\n'
'Main DocumentRoot: "/var/www"\n'
'Main ErrorLog: "/var/log/apache2/error.log"\n'
'Mutex ssl-stapling: using_defaults\n'
'Mutex ssl-cache: using_defaults\n'
'Mutex default: dir="/var/lock/apache2" mechanism=fcntl\n'
'Mutex watchdog-callback: using_defaults\n'
'PidFile: "/var/run/apache2/apache2.pid"\n'
'Define: TEST\n'
'Define: DUMP_RUN_CFG\n'
'Define: U_MICH\n'
'Define: TLS=443\n'
'Define: WITH_ASSIGNMENT=URL=http://example.com\n'
'Define: EMPTY=\n'
'Define: example_path=Documents/path\n'
'User: name="www-data" id=33 not_used\n'
'Group: name="www-data" id=33 not_used\n'
)
inc_val = (
'Included configuration files:\n'
' (*) /etc/apache2/apache2.conf\n'
' (146) /etc/apache2/mods-enabled/access_compat.load\n'
' (146) /etc/apache2/mods-enabled/alias.load\n'
' (146) /etc/apache2/mods-enabled/auth_basic.load\n'
' (146) /etc/apache2/mods-enabled/authn_core.load\n'
' (146) /etc/apache2/mods-enabled/authn_file.load\n'
' (146) /etc/apache2/mods-enabled/authz_core.load\n'
' (146) /etc/apache2/mods-enabled/authz_host.load\n'
' (146) /etc/apache2/mods-enabled/authz_user.load\n'
' (146) /etc/apache2/mods-enabled/autoindex.load\n'
' (146) /etc/apache2/mods-enabled/deflate.load\n'
' (146) /etc/apache2/mods-enabled/dir.load\n'
' (146) /etc/apache2/mods-enabled/env.load\n'
' (146) /etc/apache2/mods-enabled/filter.load\n'
' (146) /etc/apache2/mods-enabled/mime.load\n'
' (146) /etc/apache2/mods-enabled/mpm_event.load\n'
' (146) /etc/apache2/mods-enabled/negotiation.load\n'
' (146) /etc/apache2/mods-enabled/reqtimeout.load\n'
' (146) /etc/apache2/mods-enabled/setenvif.load\n'
' (146) /etc/apache2/mods-enabled/socache_shmcb.load\n'
' (146) /etc/apache2/mods-enabled/ssl.load\n'
' (146) /etc/apache2/mods-enabled/status.load\n'
' (147) /etc/apache2/mods-enabled/alias.conf\n'
' (147) /etc/apache2/mods-enabled/autoindex.conf\n'
' (147) /etc/apache2/mods-enabled/deflate.conf\n'
)
mod_val = (
'Loaded Modules:\n'
' core_module (static)\n'
' so_module (static)\n'
' watchdog_module (static)\n'
' http_module (static)\n'
' log_config_module (static)\n'
' logio_module (static)\n'
' version_module (static)\n'
' unixd_module (static)\n'
' access_compat_module (shared)\n'
' alias_module (shared)\n'
' auth_basic_module (shared)\n'
' authn_core_module (shared)\n'
' authn_file_module (shared)\n'
' authz_core_module (shared)\n'
' authz_host_module (shared)\n'
' authz_user_module (shared)\n'
' autoindex_module (shared)\n'
' deflate_module (shared)\n'
' dir_module (shared)\n'
' env_module (shared)\n'
' filter_module (shared)\n'
' mime_module (shared)\n'
' mpm_event_module (shared)\n'
' negotiation_module (shared)\n'
' reqtimeout_module (shared)\n'
' setenvif_module (shared)\n'
' socache_shmcb_module (shared)\n'
' ssl_module (shared)\n'
' status_module (shared)\n'
)
def mock_get_vars(cmd):
"""Mock command output"""
if cmd[-1] == "DUMP_RUN_CFG":
return define_val
elif cmd[-1] == "DUMP_INCLUDES":
return inc_val
elif cmd[-1] == "DUMP_MODULES":
return mod_val
return None # pragma: no cover
mock_cfg.side_effect = mock_get_vars
expected_vars = {"TEST": "", "U_MICH": "", "TLS": "443",
"example_path": "Documents/path",
"WITH_ASSIGNMENT": "URL=http://example.com",
"EMPTY": "",
}
self.parser.modules = {}
with mock.patch(
"certbot_apache._internal.parser.ApacheParser.parse_file") as mock_parse:
self.parser.update_runtime_variables()
self.assertEqual(self.parser.variables, expected_vars)
self.assertEqual(len(self.parser.modules), 58)
# None of the includes in inc_val should be in parsed paths.
# Make sure we tried to include them all.
self.assertEqual(mock_parse.call_count, 25)
@mock.patch("certbot_apache._internal.parser.ApacheParser.find_dir")
@mock.patch("certbot_apache._internal.apache_util._get_runtime_cfg")
def test_update_runtime_variables_alt_values(self, mock_cfg, _):
inc_val = (
'Included configuration files:\n'
' (*) {0}\n'
' (146) /etc/apache2/mods-enabled/access_compat.load\n'
' (146) {1}/mods-enabled/alias.load\n'
).format(self.parser.loc["root"],
os.path.dirname(self.parser.loc["root"]))
mock_cfg.return_value = inc_val
self.parser.modules = {}
with mock.patch(
"certbot_apache._internal.parser.ApacheParser.parse_file") as mock_parse:
self.parser.update_runtime_variables()
# No matching modules should have been found
self.assertEqual(len(self.parser.modules), 0)
# Only one of the three includes do not exist in already parsed
# path derived from root configuration Include statements
self.assertEqual(mock_parse.call_count, 1)
@mock.patch("certbot_apache._internal.apache_util.subprocess.run")
def test_update_runtime_vars_bad_ctl(self, mock_run):
mock_run.side_effect = OSError
self.assertRaises(
errors.MisconfigurationError,
self.parser.update_runtime_variables)
@mock.patch("certbot_apache._internal.apache_util.subprocess.run")
def test_update_runtime_vars_bad_exit(self, mock_run):
mock_proc = mock_run.return_value
mock_proc.stdout = ""
mock_proc.stderr = ""
mock_proc.returncode = -1
self.assertRaises(
errors.MisconfigurationError,
self.parser.update_runtime_variables)
def test_add_comment(self):
from certbot_apache._internal.parser import get_aug_path
self.parser.add_comment(get_aug_path(self.parser.loc["name"]), "123456")
comm = self.parser.find_comments("123456")
self.assertEqual(len(comm), 1)
self.assertTrue(self.parser.loc["name"] in comm[0])
class ParserInitTest(util.ApacheTest):
def setUp(self): # pylint: disable=arguments-differ
super().setUp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
@mock.patch("certbot_apache._internal.parser.init_augeas")
def test_prepare_no_augeas(self, mock_init_augeas):
from certbot_apache._internal.parser import ApacheParser
mock_init_augeas.side_effect = errors.NoInstallationError
self.config.config_test = mock.Mock()
self.assertRaises(
errors.NoInstallationError, ApacheParser,
os.path.relpath(self.config_path), "/dummy/vhostpath",
version=(2, 4, 22), configurator=self.config)
def test_init_old_aug(self):
from certbot_apache._internal.parser import ApacheParser
with mock.patch("certbot_apache._internal.parser.ApacheParser.check_aug_version") as mock_c:
mock_c.return_value = False
self.assertRaises(
errors.NotSupportedError,
ApacheParser, os.path.relpath(self.config_path),
"/dummy/vhostpath", version=(2, 4, 22), configurator=self.config)
@mock.patch("certbot_apache._internal.apache_util._get_runtime_cfg")
def test_unparseable(self, mock_cfg):
from certbot_apache._internal.parser import ApacheParser
mock_cfg.return_value = ('Define: TEST')
self.assertRaises(
errors.PluginError,
ApacheParser, os.path.relpath(self.config_path),
"/dummy/vhostpath", version=(2, 2, 22), configurator=self.config)
def test_root_normalized(self):
from certbot_apache._internal.parser import ApacheParser
with mock.patch("certbot_apache._internal.parser.ApacheParser."
"update_runtime_variables"):
path = os.path.join(
self.temp_dir,
"debian_apache_2_4/////multiple_vhosts/../multiple_vhosts/apache2")
parser = ApacheParser(path, "/dummy/vhostpath", configurator=self.config)
self.assertEqual(parser.root, self.config_path)
def test_root_absolute(self):
from certbot_apache._internal.parser import ApacheParser
with mock.patch("certbot_apache._internal.parser.ApacheParser."
"update_runtime_variables"):
parser = ApacheParser(
os.path.relpath(self.config_path),
"/dummy/vhostpath", configurator=self.config)
self.assertEqual(parser.root, self.config_path)
def test_root_no_trailing_slash(self):
from certbot_apache._internal.parser import ApacheParser
with mock.patch("certbot_apache._internal.parser.ApacheParser."
"update_runtime_variables"):
parser = ApacheParser(
self.config_path + os.path.sep,
"/dummy/vhostpath", configurator=self.config)
self.assertEqual(parser.root, self.config_path)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| |
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Copyright (c) 2015 Shane O'Connor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
map_pdb_residues.py
Python functions to map PDB residue IDs to Rosetta/pose IDs by using the features database.
Warning: The inputs to subprocess.getstatusoutput (one of these is an executable path) are unsanitized. Only use these functions if you trust the caller.
Sample command line:
python map_pdb_residues.py -d ~/rosetta/main/database -e ~/rosetta/main/source/bin/rosetta_scripts.static.linuxgccrelease -f 1QG8.pdb -c A
Created by Shane O'Connor 2013
"""
import sys
import os
import tempfile
import subprocess
import traceback
from optparse import OptionParser # todo: deprecated since Python 2.7
from klab.fs.fsio import write_temp_file
script = '''<ROSETTASCRIPTS>
<MOVERS>
<SavePoseMover name="init_struct" reference_name="init_struct"/>
<ReportToDB name="features_reporter" database_name="%s">
<ResidueFeatures/>
<PdbDataFeatures/>
</ReportToDB>
</MOVERS>
<PROTOCOLS>
<Add mover_name="init_struct"/>
<Add mover_name="features_reporter"/>
</PROTOCOLS>
</ROSETTASCRIPTS>'''
def get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path = None, pdb_id = None, extra_flags = ''):
'''Takes a string containing a PDB file, the RosettaScripts executable, and the Rosetta database and then uses the features database to map PDB residue IDs to pose residue IDs.
On success, (True, the residue mapping) is returned. On failure, (False, a list of errors) is returned.
Note: extra_flags should typically include '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.'''
filename = write_temp_file("/tmp", pdb_file_contents)
success, mapping = get_pdb_to_pose_residue_map(filename, rosetta_scripts_path, rosetta_database_path = rosetta_database_path, pdb_id = pdb_id, extra_flags = extra_flags)
os.remove(filename)
return success, mapping
def get_pdb_to_pose_residue_map(pdb_path, rosetta_scripts_path, rosetta_database_path = None, pdb_id = None, extra_flags = ''):
'''Takes a path to a PDB file, the RosettaScripts executable, and the Rosetta database and then uses the features database to map PDB residue IDs to pose residue IDs.
On success, (True, the residue mapping) is returned. On failure, (False, a list of errors) is returned.
The mapping maps residue IDs to a dict with the three letter residue code and the Rosetta pose id e.g.
mapping = {
u'B 435 ': {'name3': u'GLN', 'pose_residue_id': 370, 'res_type': u'GLN'},
...
}
Note: extra_flags should typically include '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.'''
errors = []
exit_code = 0
F, script_path = tempfile.mkstemp(dir=".")
script_handle = os.fdopen(F, "w")
try:
db_path = script_path + ".db3"
script_handle.write(script % db_path)
script_handle.close()
if rosetta_database_path:
command_line = '%s -database %s -constant_seed -in:file:s %s -parser:protocol %s -overwrite -out:nooutput %s' % (rosetta_scripts_path, rosetta_database_path, pdb_path, script_path, extra_flags)
else:
command_line = '%s -constant_seed -in:file:s %s -parser:protocol %s -overwrite -out:nooutput %s' % (rosetta_scripts_path, pdb_path, script_path, extra_flags)
exit_code, stdout = subprocess.getstatusoutput(command_line)
if exit_code != 0:
errors.append("An error occured during execution. The exit code was %d. The output was:\n\n%s" % (exit_code, stdout))
else:
try:
mapping = get_mapping_from_db3_file( db_path )
except Exception as e:
errors.append(str(e))
errors.append(traceback.format_exc())
errors.append("The features database does not seem to have been correctly created. Check to see if the command '%s' is correct." % command_line)
except Exception as e:
errors.append(str(e))
errors.append(traceback.format_exc())
exit_code = 1
if errors and ((extra_flags.find('-ignore_zero_occupancy false') == -1) or (extra_flags.find('-ignore_unrecognized_res') == -1)):
errors.append("Note: extra_flags should typically include both '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.")
if os.path.exists(script_path):
os.remove(script_path)
if os.path.exists(db_path):
os.remove(db_path)
if exit_code or errors:
return False, errors
return True, mapping
def get_mapping_from_db3_file( db_path ):
'''
Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping
'''
import sqlite3 # should be moved to the top but we do this here for CentOS 5 support
conn = sqlite3.connect(db_path)
results = conn.cursor().execute('''
SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type
FROM residue_pdb_identification
INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum
''')
# Create the mapping from PDB residues to Rosetta residues
rosetta_residue_ids = []
mapping = {}
for r in results:
mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = {'pose_residue_id' : r[4], 'name3' : r[5], 'res_type' : r[6]}
rosetta_residue_ids.append(r[4])
    # Ensure that the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective
raw_residue_list = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
return mapping
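# An illustrative note (not in the original): the mapping keys above concatenate
# the chain ID, the PDB residue number right-justified to four characters, and
# the insertion code, so chain B, residue 435, no insertion code becomes the
# six-character key 'B 435 ' shown in the docstring example earlier.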
def strip_pdb(pdb_path, chains = [], strip_hetatms = False):
'''Takes a PDB file and strips all lines except ATOM and HETATM records. If chains is specified, only those chains are kept. If strip_hetatms is True then HETATM lines are also stripped.
Returns (True, a path to the stripped PDB file) on success and (False, a list of errors) on failure.'''
chains = set(chains)
contents = open(pdb_path).read().split("\n") # file handle should get garbage collected
if strip_hetatms:
if chains:
atom_lines = [l for l in contents if l.startswith("ATOM ") and l[21] in chains]
else:
atom_lines = [l for l in contents if l.startswith("ATOM ")]
else:
if chains:
atom_lines = [l for l in contents if (l.startswith("ATOM ") or l.startswith("HETATM")) and l[21] in chains]
else:
atom_lines = [l for l in contents if (l.startswith("ATOM ") or l.startswith("HETATM"))]
existing_chains = set([l[21] for l in atom_lines])
if chains.difference(existing_chains):
return False, ["Error: The following chains do not exist in the PDB file - %s" % ", ".join(list(chains.difference(existing_chains)))]
F, temp_pdb_path = tempfile.mkstemp(dir=".")
temp_pdb_handle = os.fdopen(F, "w")
temp_pdb_handle.write("\n".join(atom_lines))
temp_pdb_handle.close()
return True, temp_pdb_path
def get_stripped_pdb_to_pose_residue_map(input_pdb_path, rosetta_scripts_path, rosetta_database_path, chains = [], strip_hetatms = False):
'''Takes a path to an input PDB file, the path to the RosettaScripts executable and Rosetta database, an optional list of chains to strip the PDB down to, and an optional flag specifying whether HETATM lines should be stripped from the PDB.
On success, a pair (True, mapping between PDB and pose residues) is returned. On failure, a pair (False, a list of errors) is returned.'''
success, result = strip_pdb(input_pdb_path, chains = chains, strip_hetatms = strip_hetatms)
if success:
assert(os.path.exists(result))
success, mapping = get_pdb_to_pose_residue_map(result, rosetta_scripts_path, rosetta_database_path)
os.remove(result)
if success:
return True, mapping
else:
return False, mapping
return False, result
if __name__ == '__main__':
chains = []
parser = OptionParser()
parser.add_option("-e", "--executable", dest="rosetta_scripts_path", help="The location of the RosettaScripts executable e.g. ~/bin/rosetta_scripts.linuxgccrelease", metavar="EXECUTABLE")
parser.add_option("-d", "--database", dest="rosetta_database_path", help="The location of the Rosetta database", metavar="DATABASE")
parser.add_option("-f", "--file", dest="filename", help="The input PDB", metavar="FILE")
parser.add_option("-c", "--chains", dest="chains", default=[], help="A comma-separated list of chains to keep (all other chains will be discarded). The default behavior is to keep all chains.")
parser.add_option("-s", "--strip_hetatms", dest="strip_hetatms", action="store_true", default=False, help="Use this option to strip HETATM lines from the input PDB file. The default behavior is to keep HETATM lines.")
(options, args) = parser.parse_args()
parser.set_usage(None)
filename = options.filename
rosetta_database_path = options.rosetta_database_path
rosetta_scripts_path = options.rosetta_scripts_path
chains = options.chains
strip_hetatms = options.strip_hetatms
if not filename:
print("\nError: A filename must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(filename)):
print(("\nError: File '%s' does not exist.\n" % filename))
sys.exit(1)
if not rosetta_database_path:
print("\nError: The path to the Rosetta database corresponding with the RosettaScripts executable must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(rosetta_database_path)):
print(("\nError: The path '%s' does not exist.\n" % rosetta_database_path))
sys.exit(1)
if not rosetta_scripts_path:
print("\nError: The path to the RosettaScripts executable must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(rosetta_scripts_path)):
if os.path.exists(os.path.join(os.getcwd(), rosetta_scripts_path)):
rosetta_scripts_path = "./%s" % os.path.join(os.getcwd(), rosetta_scripts_path)
if not os.path.exists(rosetta_scripts_path):
print(("\nError: The path '%s' does not exist.\n" % rosetta_scripts_path))
sys.exit(1)
rosetta_scripts_path = os.path.abspath(rosetta_scripts_path)
if chains:
chains = chains.split(",")
for c in chains:
if not len(c) == 1:
print(("\nError: Chain ID '%s' is invalid. PDB chain identifiers are one character in length.\n" % c))
sys.exit(1)
success, result = get_stripped_pdb_to_pose_residue_map(filename, rosetta_scripts_path, rosetta_database_path, chains = chains, strip_hetatms = strip_hetatms)
if success:
print("{")
for k, v in sorted(result.items()):
print(("'%s': %s," % (k, v)))
print("}")
else:
print(("\n".join(result)))
sys.exit(1)
| |
import collections
import base64
import binascii
import hashlib
import hmac
import json
from datetime import (
date,
datetime,
timedelta,
)
import re
import string
import time
import warnings
from webob.compat import (
PY3,
text_type,
bytes_,
text_,
native_,
string_types,
)
from webob.util import strings_differ
__all__ = ['Cookie', 'CookieProfile', 'SignedCookieProfile', 'SignedSerializer',
'JSONSerializer', 'Base64Serializer', 'make_cookie']
_marker = object()
class RequestCookies(collections.MutableMapping):
_cache_key = 'webob._parsed_cookies'
def __init__(self, environ):
self._environ = environ
@property
def _cache(self):
env = self._environ
header = env.get('HTTP_COOKIE', '')
cache, cache_header = env.get(self._cache_key, ({}, None))
if cache_header == header:
return cache
d = lambda b: b.decode('utf8')
cache = dict((d(k), d(v)) for k,v in parse_cookie(header))
env[self._cache_key] = (cache, header)
return cache
def _mutate_header(self, name, value):
header = self._environ.get('HTTP_COOKIE')
had_header = header is not None
header = header or ''
if PY3: # pragma: no cover
header = header.encode('latin-1')
bytes_name = bytes_(name, 'ascii')
if value is None:
replacement = None
else:
bytes_val = _value_quote(bytes_(value, 'utf-8'))
replacement = bytes_name + b'=' + bytes_val
matches = _rx_cookie.finditer(header)
found = False
for match in matches:
start, end = match.span()
match_name = match.group(1)
if match_name == bytes_name:
found = True
if replacement is None: # remove value
header = header[:start].rstrip(b' ;') + header[end:]
else: # replace value
header = header[:start] + replacement + header[end:]
break
else:
if replacement is not None:
if header:
header += b'; ' + replacement
else:
header = replacement
if header:
self._environ['HTTP_COOKIE'] = native_(header, 'latin-1')
elif had_header:
self._environ['HTTP_COOKIE'] = ''
return found
def _valid_cookie_name(self, name):
if not isinstance(name, string_types):
raise TypeError(name, 'cookie name must be a string')
if not isinstance(name, text_type):
name = text_(name, 'utf-8')
try:
bytes_cookie_name = bytes_(name, 'ascii')
except UnicodeEncodeError:
raise TypeError('cookie name must be encodable to ascii')
if not _valid_cookie_name(bytes_cookie_name):
raise TypeError('cookie name must be valid according to RFC 6265')
return name
def __setitem__(self, name, value):
name = self._valid_cookie_name(name)
if not isinstance(value, string_types):
raise ValueError(value, 'cookie value must be a string')
if not isinstance(value, text_type):
try:
value = text_(value, 'utf-8')
except UnicodeDecodeError:
raise ValueError(
value, 'cookie value must be utf-8 binary or unicode')
self._mutate_header(name, value)
def __getitem__(self, name):
return self._cache[name]
def get(self, name, default=None):
return self._cache.get(name, default)
def __delitem__(self, name):
name = self._valid_cookie_name(name)
found = self._mutate_header(name, None)
if not found:
raise KeyError(name)
def keys(self):
return self._cache.keys()
def values(self):
return self._cache.values()
def items(self):
return self._cache.items()
if not PY3:
def iterkeys(self):
return self._cache.iterkeys()
def itervalues(self):
return self._cache.itervalues()
def iteritems(self):
return self._cache.iteritems()
def __contains__(self, name):
return name in self._cache
def __iter__(self):
return self._cache.__iter__()
def __len__(self):
return len(self._cache)
def clear(self):
self._environ['HTTP_COOKIE'] = ''
def __repr__(self):
return '<RequestCookies (dict-like) with values %r>' % (self._cache,)
class Cookie(dict):
def __init__(self, input=None):
if input:
self.load(input)
def load(self, data):
morsel = {}
for key, val in _parse_cookie(data):
if key.lower() in _c_keys:
morsel[key] = val
else:
morsel = self.add(key, val)
def add(self, key, val):
if not isinstance(key, bytes):
key = key.encode('ascii', 'replace')
if not _valid_cookie_name(key):
return {}
r = Morsel(key, val)
dict.__setitem__(self, key, r)
return r
__setitem__ = add
def serialize(self, full=True):
return '; '.join(m.serialize(full) for m in self.values())
def values(self):
return [m for _, m in sorted(self.items())]
__str__ = serialize
def __repr__(self):
return '<%s: [%s]>' % (self.__class__.__name__,
', '.join(map(repr, self.values())))
def _parse_cookie(data):
if PY3: # pragma: no cover
data = data.encode('latin-1')
for key, val in _rx_cookie.findall(data):
yield key, _unquote(val)
def parse_cookie(data):
"""
Parse cookies ignoring anything except names and values
"""
return ((k,v) for k,v in _parse_cookie(data) if _valid_cookie_name(k))
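# An illustrative sketch (not part of the original module): on Python 3,
# parsing a raw Cookie header yields (bytes, bytes) pairs, with invalid
# names (e.g. ones starting with '$') filtered out and quoting undone.
#
#   >>> dict(parse_cookie('a=1; $bad=2; b="3"'))
#   {b'a': b'1', b'b': b'3'}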
def cookie_property(key, serialize=lambda v: v):
def fset(self, v):
self[key] = serialize(v)
return property(lambda self: self[key], fset)
def serialize_max_age(v):
if isinstance(v, timedelta):
v = str(v.seconds + v.days*24*60*60)
elif isinstance(v, int):
v = str(v)
return bytes_(v)
def serialize_cookie_date(v):
if v is None:
return None
elif isinstance(v, bytes):
return v
elif isinstance(v, text_type):
return v.encode('ascii')
elif isinstance(v, int):
v = timedelta(seconds=v)
if isinstance(v, timedelta):
v = datetime.utcnow() + v
if isinstance(v, (datetime, date)):
v = v.timetuple()
r = time.strftime('%%s, %d-%%s-%Y %H:%M:%S GMT', v)
return bytes_(r % (weekdays[v[6]], months[v[1]]), 'ascii')
class Morsel(dict):
__slots__ = ('name', 'value')
def __init__(self, name, value):
self.name = bytes_(name, encoding='ascii')
self.value = bytes_(value, encoding='ascii')
assert _valid_cookie_name(self.name)
self.update(dict.fromkeys(_c_keys, None))
path = cookie_property(b'path')
domain = cookie_property(b'domain')
comment = cookie_property(b'comment')
expires = cookie_property(b'expires', serialize_cookie_date)
max_age = cookie_property(b'max-age', serialize_max_age)
httponly = cookie_property(b'httponly', bool)
secure = cookie_property(b'secure', bool)
def __setitem__(self, k, v):
k = bytes_(k.lower(), 'ascii')
if k in _c_keys:
dict.__setitem__(self, k, v)
def serialize(self, full=True):
result = []
add = result.append
add(self.name + b'=' + _value_quote(self.value))
if full:
for k in _c_valkeys:
v = self[k]
if v:
info = _c_renames[k]
name = info['name']
quoter = info['quoter']
add(name + b'=' + quoter(v))
expires = self[b'expires']
if expires:
add(b'expires=' + expires)
if self.secure:
add(b'secure')
if self.httponly:
add(b'HttpOnly')
return native_(b'; '.join(result), 'ascii')
__str__ = serialize
def __repr__(self):
return '<%s: %s=%r>' % (self.__class__.__name__,
native_(self.name),
native_(self.value)
)
#
# parsing
#
_re_quoted = r'"(?:\\"|.)*?"' # any doublequoted string
_legal_special_chars = "~!@#$%^&*()_+=-`.?|:/(){}<>'"
_re_legal_char = r"[\w\d%s]" % re.escape(_legal_special_chars)
_re_expires_val = r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT"
_re_cookie_str_key = r"(%s+?)" % _re_legal_char
_re_cookie_str_equal = r"\s*=\s*"
_re_unquoted_val = r"(?:%s|\\(?:[0-3][0-7][0-7]|.))*" % _re_legal_char
_re_cookie_str_val = r"(%s|%s|%s)" % (_re_quoted, _re_expires_val,
_re_unquoted_val)
_re_cookie_str = _re_cookie_str_key + _re_cookie_str_equal + _re_cookie_str_val
_rx_cookie = re.compile(bytes_(_re_cookie_str, 'ascii'))
_rx_unquote = re.compile(bytes_(r'\\([0-3][0-7][0-7]|.)', 'ascii'))
_bchr = (lambda i: bytes([i])) if PY3 else chr
_ch_unquote_map = dict((bytes_('%03o' % i), _bchr(i))
for i in range(256)
)
_ch_unquote_map.update((v, v) for v in list(_ch_unquote_map.values()))
_b_dollar_sign = ord('$') if PY3 else '$'
_b_quote_mark = ord('"') if PY3 else '"'
def _unquote(v):
#assert isinstance(v, bytes)
if v and v[0] == v[-1] == _b_quote_mark:
v = v[1:-1]
return _rx_unquote.sub(_ch_unquote, v)
def _ch_unquote(m):
return _ch_unquote_map[m.group(1)]
#
# serializing
#
# these chars can be in cookie value see
# http://tools.ietf.org/html/rfc6265#section-4.1.1 and
# https://github.com/Pylons/webob/pull/104#issuecomment-28044314
#
# ! (0x21), "#$%&'()*+" (0x25-0x2B), "-./0123456789:" (0x2D-0x3A),
# "<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[" (0x3C-0x5B),
# "]^_`abcdefghijklmnopqrstuvwxyz{|}~" (0x5D-0x7E)
_allowed_special_chars = "!#$%&'()*+-./:<=>?@[]^_`{|}~"
_allowed_cookie_chars = (string.ascii_letters + string.digits +
_allowed_special_chars)
_allowed_cookie_bytes = bytes_(_allowed_cookie_chars)
# these are the characters accepted in cookie *names*
# From http://tools.ietf.org/html/rfc2616#section-2.2:
# token = 1*<any CHAR except CTLs or separators>
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
# | "/" | "[" | "]" | "?" | "="
# | "{" | "}" | SP | HT
#
# CTL = <any US-ASCII control character
# (octets 0 - 31) and DEL (127)>
#
_valid_token_chars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_valid_token_bytes = bytes_(_valid_token_chars)
# this is a map used to escape the values
_escape_noop_chars = _allowed_cookie_chars + ' '
_escape_map = dict((chr(i), '\\%03o' % i) for i in range(256))
_escape_map.update(zip(_escape_noop_chars, _escape_noop_chars))
if PY3: # pragma: no cover
# convert to {int -> bytes}
_escape_map = dict(
(ord(k), bytes_(v, 'ascii')) for k, v in _escape_map.items()
)
_escape_char = _escape_map.__getitem__
weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
months = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')
# This is temporary, until we can remove this from _value_quote
_should_raise = None
def __warn_or_raise(text, warn_class, to_raise, raise_reason):
if _should_raise:
raise to_raise(raise_reason)
else:
warnings.warn(text, warn_class, stacklevel=2)
def _value_quote(v):
    # This looks scary, but is simple: we remove all valid characters from
    # the string; if we end up with leftovers (the result is longer than 0),
    # we have invalid characters in our value.
leftovers = v.translate(None, _allowed_cookie_bytes)
if leftovers:
__warn_or_raise(
"Cookie value contains invalid bytes: (%s). Future versions "
"will raise ValueError upon encountering invalid bytes." %
(leftovers,),
RuntimeWarning, ValueError, 'Invalid characters in cookie value'
)
#raise ValueError('Invalid characters in cookie value')
return b'"' + b''.join(map(_escape_char, v)) + b'"'
return v
def _valid_cookie_name(key):
return isinstance(key, bytes) and not (
key.translate(None, _valid_token_bytes)
# Not explicitly required by RFC6265, may consider removing later:
or key[0] == _b_dollar_sign
or key.lower() in _c_keys
)
def _path_quote(v):
return b''.join(map(_escape_char, v))
_domain_quote = _path_quote
_max_age_quote = _path_quote
_c_renames = {
b"path" : {'name':b"Path", 'quoter':_path_quote},
b"comment" : {'name':b"Comment", 'quoter':_value_quote},
b"domain" : {'name':b"Domain", 'quoter':_domain_quote},
b"max-age" : {'name':b"Max-Age", 'quoter':_max_age_quote},
}
_c_valkeys = sorted(_c_renames)
_c_keys = set(_c_renames)
_c_keys.update([b'expires', b'secure', b'httponly'])
def make_cookie(name, value, max_age=None, path='/', domain=None,
secure=False, httponly=False, comment=None):
""" Generate a cookie value. If ``value`` is None, generate a cookie value
with an expiration date in the past"""
# We are deleting the cookie, override max_age and expires
if value is None:
value = b''
# Note that the max-age value of zero is technically contraspec;
# RFC6265 says that max-age cannot be zero. However, all browsers
# appear to support this to mean "delete immediately".
# http://www.timwilson.id.au/news-three-critical-problems-with-rfc6265.html
max_age = 0
expires = 'Wed, 31-Dec-97 23:59:59 GMT'
# Convert max_age to seconds
elif isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
expires = max_age
else:
expires = max_age
morsel = Morsel(name, value)
if domain is not None:
morsel.domain = bytes_(domain)
if path is not None:
morsel.path = bytes_(path)
if httponly:
morsel.httponly = True
if secure:
morsel.secure = True
if max_age is not None:
morsel.max_age = max_age
if expires is not None:
morsel.expires = expires
if comment is not None:
morsel.comment = bytes_(comment)
return morsel.serialize()
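# An illustrative sketch (not part of the original module): make_cookie
# returns a native string ready to be used as a Set-Cookie header value.
# The 'expires' date depends on the current time, so it is elided here.
#
#   >>> make_cookie('session', b'abc123', max_age=300, httponly=True)
#   'session=abc123; Max-Age=300; Path=/; expires=...; HttpOnly'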
class JSONSerializer(object):
""" A serializer which uses `json.dumps`` and ``json.loads``"""
def dumps(self, appstruct):
return bytes_(json.dumps(appstruct), encoding='utf-8')
def loads(self, bstruct):
# NB: json.loads raises ValueError if no json object can be decoded
# so we don't have to do it explicitly here.
return json.loads(text_(bstruct, encoding='utf-8'))
class Base64Serializer(object):
""" A serializer which uses base64 to encode/decode data"""
def __init__(self, serializer=None):
if serializer is None:
serializer = JSONSerializer()
self.serializer = serializer
def dumps(self, appstruct):
"""
Given an ``appstruct``, serialize and sign the data.
Returns a bytestring.
"""
cstruct = self.serializer.dumps(appstruct) # will be bytes
return base64.urlsafe_b64encode(cstruct)
def loads(self, bstruct):
"""
Given a ``bstruct`` (a bytestring), verify the signature and then
deserialize and return the deserialized value.
A ``ValueError`` will be raised if the signature fails to validate.
"""
try:
cstruct = base64.urlsafe_b64decode(bytes_(bstruct))
except (binascii.Error, TypeError) as e:
raise ValueError('Badly formed base64 data: %s' % e)
return self.serializer.loads(cstruct)
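# An illustrative round-trip sketch (not part of the original module),
# shown with Python 3 byte strings:
#
#   >>> ser = Base64Serializer()
#   >>> ser.dumps({'a': 1})
#   b'eyJhIjogMX0='
#   >>> ser.loads(b'eyJhIjogMX0=')
#   {'a': 1}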
class SignedSerializer(object):
"""
A helper to cryptographically sign arbitrary content using HMAC.
The serializer accepts arbitrary functions for performing the actual
serialization and deserialization.
``secret``
A string which is used to sign the cookie. The secret should be at
least as long as the block size of the selected hash algorithm. For
      ``sha512`` this would mean a 128 byte (128 character) secret.
``salt``
A namespace to avoid collisions between different uses of a shared
secret.
``hashalg``
The HMAC digest algorithm to use for signing. The algorithm must be
supported by the :mod:`hashlib` library. Default: ``'sha512'``.
``serializer``
      An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
should accept bytes and return a Python object. The ``dumps`` method
should accept a Python object and return bytes. A ``ValueError`` should
      be raised for malformed inputs. Default: ``None``, which will use a
      derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
secret,
salt,
hashalg='sha512',
serializer=None,
):
self.salt = salt
self.secret = secret
self.hashalg = hashalg
try:
# bwcompat with webob <= 1.3.1, leave latin-1 as the default
self.salted_secret = bytes_(salt or '') + bytes_(secret)
except UnicodeEncodeError:
self.salted_secret = (
bytes_(salt or '', 'utf-8') + bytes_(secret, 'utf-8'))
self.digestmod = lambda string=b'': hashlib.new(self.hashalg, string)
self.digest_size = self.digestmod().digest_size
if serializer is None:
serializer = JSONSerializer()
self.serializer = serializer
def dumps(self, appstruct):
"""
Given an ``appstruct``, serialize and sign the data.
Returns a bytestring.
"""
cstruct = self.serializer.dumps(appstruct) # will be bytes
sig = hmac.new(self.salted_secret, cstruct, self.digestmod).digest()
return base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
def loads(self, bstruct):
"""
Given a ``bstruct`` (a bytestring), verify the signature and then
deserialize and return the deserialized value.
A ``ValueError`` will be raised if the signature fails to validate.
"""
try:
b64padding = b'=' * (-len(bstruct) % 4)
fstruct = base64.urlsafe_b64decode(bytes_(bstruct) + b64padding)
except (binascii.Error, TypeError) as e:
raise ValueError('Badly formed base64 data: %s' % e)
cstruct = fstruct[self.digest_size:]
expected_sig = fstruct[:self.digest_size]
sig = hmac.new(
self.salted_secret, bytes_(cstruct), self.digestmod).digest()
if strings_differ(sig, expected_sig):
raise ValueError('Invalid signature')
return self.serializer.loads(cstruct)
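# An illustrative round-trip sketch (not part of the original module);
# the secret and salt are throwaway example values. A tampered token
# fails the HMAC check and makes loads() raise ValueError.
#
#   >>> ser = SignedSerializer('seekrit', 'authn')
#   >>> token = ser.dumps({'a': 1})   # urlsafe base64 of HMAC sig + JSON
#   >>> ser.loads(token)
#   {'a': 1}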
_default = object()
class CookieProfile(object):
"""
A helper class that helps bring some sanity to the insanity that is cookie
handling.
The helper is capable of generating multiple cookies if necessary to
support subdomains and parent domains.
``cookie_name``
The name of the cookie used for sessioning. Default: ``'session'``.
``max_age``
The maximum age of the cookie used for sessioning (in seconds).
Default: ``None`` (browser scope).
``secure``
The 'secure' flag of the session cookie. Default: ``False``.
``httponly``
Hide the cookie from Javascript by setting the 'HttpOnly' flag of the
session cookie. Default: ``False``.
``path``
The path used for the session cookie. Default: ``'/'``.
``domains``
The domain(s) used for the session cookie. Default: ``None`` (no domain).
      Can be passed an iterable containing multiple domains; this will set
      multiple cookies, one for each domain.
``serializer``
An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
should accept a bytestring and return a Python object. The ``dumps``
method should accept a Python object and return bytes. A ``ValueError``
should be raised for malformed inputs. Default: ``None``, which will use
a derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
cookie_name,
secure=False,
max_age=None,
httponly=None,
path='/',
domains=None,
serializer=None
):
self.cookie_name = cookie_name
self.secure = secure
self.max_age = max_age
self.httponly = httponly
self.path = path
self.domains = domains
if serializer is None:
serializer = Base64Serializer()
self.serializer = serializer
self.request = None
def __call__(self, request):
""" Bind a request to a copy of this instance and return it"""
return self.bind(request)
def bind(self, request):
""" Bind a request to a copy of this instance and return it"""
selfish = CookieProfile(
self.cookie_name,
self.secure,
self.max_age,
self.httponly,
self.path,
self.domains,
self.serializer,
)
selfish.request = request
return selfish
def get_value(self):
""" Looks for a cookie by name in the currently bound request, and
returns its value. If the cookie profile is not bound to a request,
this method will raise a :exc:`ValueError`.
        Looks for the cookie in the cookie jar, and if it can find it, it will
attempt to deserialize it. Returns ``None`` if there is no cookie or
if the value in the cookie cannot be successfully deserialized.
"""
if not self.request:
raise ValueError('No request bound to cookie profile')
cookie = self.request.cookies.get(self.cookie_name)
if cookie is not None:
try:
return self.serializer.loads(bytes_(cookie))
except ValueError:
return None
def set_cookies(self, response, value, domains=_default, max_age=_default,
path=_default, secure=_default, httponly=_default):
""" Set the cookies on a response."""
cookies = self.get_headers(
value,
domains=domains,
max_age=max_age,
path=path,
secure=secure,
httponly=httponly
)
response.headerlist.extend(cookies)
return response
def get_headers(self, value, domains=_default, max_age=_default,
path=_default, secure=_default, httponly=_default):
""" Retrieve raw headers for setting cookies.
Returns a list of headers that should be set for the cookies to
be correctly tracked.
"""
if value is None:
max_age = 0
bstruct = None
else:
bstruct = self.serializer.dumps(value)
return self._get_cookies(
bstruct,
domains=domains,
max_age=max_age,
path=path,
secure=secure,
httponly=httponly
)
def _get_cookies(self, value, domains, max_age, path, secure, httponly):
"""Internal function
This returns a list of cookies that are valid HTTP Headers.
:environ: The request environment
:value: The value to store in the cookie
:domains: The domains, overrides any set in the CookieProfile
:max_age: The max_age, overrides any set in the CookieProfile
:path: The path, overrides any set in the CookieProfile
:secure: Set this cookie to secure, overrides any set in CookieProfile
:httponly: Set this cookie to HttpOnly, overrides any set in CookieProfile
"""
# If the user doesn't provide values, grab the defaults
if domains is _default:
domains = self.domains
if max_age is _default:
max_age = self.max_age
if path is _default:
path = self.path
if secure is _default:
secure = self.secure
if httponly is _default:
httponly = self.httponly
# Length selected based upon http://browsercookielimits.x64.me
if value is not None and len(value) > 4093:
raise ValueError(
'Cookie value is too long to store (%s bytes)' %
len(value)
)
cookies = []
if not domains:
cookievalue = make_cookie(
self.cookie_name,
value,
path=path,
max_age=max_age,
httponly=httponly,
secure=secure
)
cookies.append(('Set-Cookie', cookievalue))
else:
for domain in domains:
cookievalue = make_cookie(
self.cookie_name,
value,
path=path,
domain=domain,
max_age=max_age,
httponly=httponly,
secure=secure,
)
cookies.append(('Set-Cookie', cookievalue))
return cookies
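# An illustrative usage sketch (not part of the original module);
# `request` and `response` stand for assumed webob Request/Response
# objects:
#
#   profile = CookieProfile('session')
#   headers = profile.get_headers({'theme': 'dark'})  # [('Set-Cookie', ...)]
#   value = profile.bind(request).get_value()         # dict or None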
class SignedCookieProfile(CookieProfile):
"""
A helper for generating cookies that are signed to prevent tampering.
By default this will create a single cookie, given a value it will
serialize it, then use HMAC to cryptographically sign the data. Finally
    the result is base64-encoded for transport. This way a remote user
    cannot tamper with the value without uncovering the secret/salt used.
``secret``
A string which is used to sign the cookie. The secret should be at
least as long as the block size of the selected hash algorithm. For
      ``sha512`` this would mean a 128 byte (128 character) secret.
``salt``
A namespace to avoid collisions between different uses of a shared
secret.
``hashalg``
The HMAC digest algorithm to use for signing. The algorithm must be
supported by the :mod:`hashlib` library. Default: ``'sha512'``.
``cookie_name``
The name of the cookie used for sessioning. Default: ``'session'``.
``max_age``
The maximum age of the cookie used for sessioning (in seconds).
Default: ``None`` (browser scope).
``secure``
The 'secure' flag of the session cookie. Default: ``False``.
``httponly``
Hide the cookie from Javascript by setting the 'HttpOnly' flag of the
session cookie. Default: ``False``.
``path``
The path used for the session cookie. Default: ``'/'``.
``domains``
The domain(s) used for the session cookie. Default: ``None`` (no domain).
      Can be passed an iterable containing multiple domains; this will set
      multiple cookies, one for each domain.
``serializer``
      An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
      should accept bytes and return a Python object. The ``dumps`` method
      should accept a Python object and return bytes. A ``ValueError`` should
      be raised for malformed inputs. Default: ``None``, which will use a
      derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
secret,
salt,
cookie_name,
secure=False,
max_age=None,
httponly=False,
path="/",
domains=None,
hashalg='sha512',
serializer=None,
):
self.secret = secret
self.salt = salt
self.hashalg = hashalg
self.original_serializer = serializer
signed_serializer = SignedSerializer(
secret,
salt,
hashalg,
serializer=self.original_serializer,
)
CookieProfile.__init__(
self,
cookie_name,
secure=secure,
max_age=max_age,
httponly=httponly,
path=path,
domains=domains,
serializer=signed_serializer,
)
def bind(self, request):
""" Bind a request to a copy of this instance and return it"""
selfish = SignedCookieProfile(
self.secret,
self.salt,
self.cookie_name,
self.secure,
self.max_age,
self.httponly,
self.path,
self.domains,
self.hashalg,
self.original_serializer,
)
selfish.request = request
return selfish
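# An illustrative usage sketch (not part of the original module): the
# same interface as CookieProfile, but values are HMAC-signed, so a
# tampered cookie fails verification and get_value() returns None.
#
#   profile = SignedCookieProfile('seekrit', 'authn', 'session')
#   headers = profile.get_headers({'user': 'alice'})
#   value = profile.bind(request).get_value()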
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrap various UIA windows controls"""
import locale
import comtypes
import six
from .. import uia_element_info
from .. import findbestmatch
from .. import timings
from .. import uia_defines as uia_defs
from . import uiawrapper
from . import win32_controls
from . import common_controls
from ..uia_element_info import UIAElementInfo
from ..uia_defines import IUIA
from ..uia_defines import NoPatternInterfaceError
from ..uia_defines import toggle_state_on
from ..uia_defines import get_elem_interface
# ====================================================================
class ButtonWrapper(uiawrapper.UIAWrapper):
"""Wrap a UIA-compatible Button, CheckBox or RadioButton control"""
_control_types = ['Button',
'CheckBox',
'RadioButton',
]
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(ButtonWrapper, self).__init__(elem)
# -----------------------------------------------------------
def toggle(self):
"""
An interface to Toggle method of the Toggle control pattern.
Control supporting the Toggle pattern cycles through its
toggle states in the following order:
ToggleState_On, ToggleState_Off and,
if supported, ToggleState_Indeterminate
        Usually applied to the check box control.
        The radio button control does not implement IToggleProvider,
        because it is not capable of cycling through its valid states,
        so toggling is not supported for it; use the 'select' method instead.
https://msdn.microsoft.com/en-us/library/windows/desktop/ee671290(v=vs.85).aspx
"""
name = self.element_info.name
control_type = self.element_info.control_type
self.iface_toggle.Toggle()
if name and control_type:
self.actions.log('Toggled ' + control_type.lower() + ' "' + name + '"')
# Return itself so that action can be chained
return self
# -----------------------------------------------------------
def get_toggle_state(self):
"""
Get a toggle state of a check box control.
The toggle state is represented by an integer
0 - unchecked
1 - checked
2 - indeterminate
The following constants are defined in the uia_defines module
toggle_state_off = 0
toggle_state_on = 1
toggle_state_inderteminate = 2
"""
return self.iface_toggle.CurrentToggleState
# -----------------------------------------------------------
def is_dialog(self):
"""Buttons are never dialogs so return False"""
return False
# -----------------------------------------------------------
def click(self):
"""Click the Button control by using Invoke or Select patterns"""
try:
self.invoke()
except NoPatternInterfaceError:
self.select()
# Return itself so that action can be chained
return self
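# An illustrative usage sketch (not part of the original module); the
# application and control lookups below are hypothetical:
#
#   from pywinauto import Application
#   app = Application(backend='uia').connect(title='Some App')
#   box = app.top_window().child_window(control_type='CheckBox').wrapper_object()
#   box.toggle()              # cycles On -> Off (-> Indeterminate)
#   box.get_toggle_state()    # 0 (off), 1 (on) or 2 (indeterminate)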
# ====================================================================
class ComboBoxWrapper(uiawrapper.UIAWrapper):
"""Wrap a UIA CoboBox control"""
_control_types = ['ComboBox']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(ComboBoxWrapper, self).__init__(elem)
# -----------------------------------------------------------
def expand(self):
if self.is_expanded():
return self
try:
super(ComboBoxWrapper, self).expand()
except NoPatternInterfaceError:
# workaround for WinForms combo box using Open button
open_buttons = self.children(title='Open', control_type='Button')
if open_buttons:
open_buttons[0].invoke()
else:
try:
self.invoke()
except NoPatternInterfaceError:
                    raise NoPatternInterfaceError('There is no ExpandCollapsePattern and '
                                                  'no "Open" button in .children(); '
                                                  '.click_input() may be the only way to expand.')
return self
# -----------------------------------------------------------
def collapse(self):
if not self.is_expanded():
return self
try:
super(ComboBoxWrapper, self).collapse()
except NoPatternInterfaceError:
# workaround for WinForms combo box using Open button
close_buttons = self.children(title='Close', control_type='Button')
if not close_buttons:
if self.element_info.framework_id == 'WinForm':
return self # simple WinForms combo box is always expanded
else:
raise RuntimeError('There is no ExpandCollapsePattern and no "Close" button for the combo box')
if self.is_editable():
close_buttons[0].click_input()
else:
close_buttons[0].invoke()
return self
# -----------------------------------------------------------
def is_editable(self):
edit_children = self.children(control_type="Edit")
return len(edit_children) > 0
# -----------------------------------------------------------
def get_expand_state(self):
try:
return super(ComboBoxWrapper, self).get_expand_state()
except NoPatternInterfaceError:
# workaround for WinForms combo box
children_list = self.children(control_type="List")
if children_list and children_list[0].is_visible():
if self.element_info.framework_id == 'Qt':
# TODO: find the way to get expand_collapse_state
return uia_defs.expand_state_collapsed
return uia_defs.expand_state_expanded
else:
return uia_defs.expand_state_collapsed
# -----------------------------------------------------------
def texts(self):
"""Return the text of the items in the combobox"""
texts = []
# ComboBox has to be expanded to populate a list of its children items
try:
super(ComboBoxWrapper, self).expand()
for c in self.children():
texts.append(c.window_text())
except NoPatternInterfaceError:
children_lists = self.children(control_type='List')
if children_lists:
# workaround for Qt5 and WinForms
return children_lists[0].children_texts()
elif self.handle:
# workaround using "win32" backend
win32_combo = win32_controls.ComboBoxWrapper(self.handle)
texts.extend(win32_combo.item_texts())
else:
# Make sure we collapse back
super(ComboBoxWrapper, self).collapse()
return texts
# -----------------------------------------------------------
def select(self, item):
"""
Select the ComboBox item
The item can be either a 0 based index of the item to select
or it can be the string that you want to select
"""
# ComboBox has to be expanded to populate a list of its children items
self.expand()
try:
self._select(item)
except (IndexError, NoPatternInterfaceError):
# Try to access the underlying ListBox explicitly
children_lst = self.children(control_type='List')
if len(children_lst) > 0:
children_lst[0]._select(item)
# do health check and apply workaround for Qt5 combo box if necessary
if isinstance(item, six.string_types):
item = children_lst[0].children(title=item)[0]
if self.selected_text() != item:
# workaround for WinForms combo box
item.invoke()
if self.selected_text() != item:
# workaround for Qt5 combo box
item.click_input()
if self.selected_text() != item:
item.click_input()
elif self.selected_index() != item:
items = children_lst[0].children(control_type='ListItem')
if item < len(items):
items[item].invoke()
else:
raise IndexError('Item number #{} is out of range ' \
'({} items in total)'.format(item, len(items)))
else:
raise IndexError("item '{0}' not found or can't be accessed".format(item))
finally:
# Make sure we collapse back in any case
self.collapse()
return self
# -----------------------------------------------------------
# TODO: add selected_texts for a combobox with a multi-select support
def selected_text(self):
"""
Return the selected text or None
Notice, that in case of multi-select it will be only the text from
a first selected item
"""
try:
selection = self.get_selection()
if selection:
return selection[0].name
else:
return None
except NoPatternInterfaceError:
# Try to fall back to Value interface pattern
return self.iface_value.CurrentValue
# -----------------------------------------------------------
# TODO: add selected_indices for a combobox with multi-select support
def selected_index(self):
"""Return the selected index"""
try:
return self.selected_item_index()
except NoPatternInterfaceError:
# workaround for Qt5 and WinForms
return self.texts().index(self.selected_text())
# -----------------------------------------------------------
def item_count(self):
"""
Return the number of items in the combobox
The interface is kept mostly for a backward compatibility with
the native ComboBox interface
"""
children_list = self.children(control_type="List")
if children_list:
return children_list[0].control_count()
else:
self.expand()
try:
children_list = self.children(control_type="List")
if children_list:
return children_list[0].control_count()
else:
return self.control_count()
finally:
self.collapse()
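# An illustrative usage sketch (not part of the original module); the
# item text is hypothetical:
#
#   combo = dlg.child_window(control_type='ComboBox').wrapper_object()
#   combo.select('Consolas')       # by text, or combo.select(0) by index
#   combo.selected_text(), combo.selected_index(), combo.item_count()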
# ====================================================================
class EditWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Edit control"""
# TODO: this class supports only 1-line textboxes so there is no point
# TODO: in methods such as line_count(), line_length(), get_line(), etc
_control_types = ['Edit']
has_title = False
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(EditWrapper, self).__init__(elem)
# -----------------------------------------------------------
@property
def writable_props(self):
"""Extend default properties list."""
props = super(EditWrapper, self).writable_props
props.extend(['selection_indices'])
return props
# -----------------------------------------------------------
def line_count(self):
"""Return how many lines there are in the Edit"""
return self.window_text().count("\n") + 1
# -----------------------------------------------------------
def line_length(self, line_index):
"""Return how many characters there are in the line"""
# need to first get a character index of that line
lines = self.window_text().splitlines()
if line_index < len(lines):
return len(lines[line_index])
elif line_index == self.line_count() - 1:
return 0
else:
raise IndexError("There are only {0} lines but given index is {1}".format(self.line_count(), line_index))
# -----------------------------------------------------------
def get_line(self, line_index):
"""Return the line specified"""
lines = self.window_text().splitlines()
if line_index < len(lines):
return lines[line_index]
elif line_index == self.line_count() - 1:
return ""
else:
raise IndexError("There are only {0} lines but given index is {1}".format(self.line_count(), line_index))
# -----------------------------------------------------------
def get_value(self):
"""Return the current value of the element"""
return self.iface_value.CurrentValue
# -----------------------------------------------------------
def texts(self):
"""Get the text of the edit control"""
texts = [ self.get_line(i) for i in range(self.line_count()) ]
return texts
# -----------------------------------------------------------
def text_block(self):
"""Get the text of the edit control"""
return self.window_text()
# -----------------------------------------------------------
def selection_indices(self):
"""The start and end indices of the current selection"""
selected_text = self.iface_text.GetSelection().GetElement(0).GetText(-1)
start = self.window_text().find(selected_text)
end = start + len(selected_text)
return (start, end)
# -----------------------------------------------------------
def set_window_text(self, text, append=False):
"""Override set_window_text for edit controls because it should not be
used for Edit controls.
Edit Controls should either use set_edit_text() or type_keys() to modify
the contents of the edit control.
"""
self.verify_actionable()
if append:
text = self.window_text() + text
self.set_focus()
# Set text using IUIAutomationValuePattern
self.iface_value.SetValue(text)
raise UserWarning("set_window_text() should probably not be called for Edit Controls")
# -----------------------------------------------------------
def set_edit_text(self, text, pos_start=None, pos_end=None):
"""Set the text of the edit control"""
self.verify_actionable()
# allow one or both of pos_start and pos_end to be None
if pos_start is not None or pos_end is not None:
# if only one has been specified - then set the other
# to the current selection start or end
start, end = self.selection_indices()
if pos_start is None:
pos_start = start
if pos_end is None and not isinstance(start, six.string_types):
pos_end = end
else:
pos_start = 0
pos_end = len(self.window_text())
if isinstance(text, six.text_type):
if six.PY3:
aligned_text = text
else:
aligned_text = text.encode(locale.getpreferredencoding())
elif isinstance(text, six.binary_type):
if six.PY3:
aligned_text = text.decode(locale.getpreferredencoding())
else:
aligned_text = text
else:
# convert a non-string input
if six.PY3:
aligned_text = six.text_type(text)
else:
aligned_text = six.binary_type(text)
# Calculate new text value
current_text = self.window_text()
new_text = current_text[:pos_start] + aligned_text + current_text[pos_end:]
# Set text using IUIAutomationValuePattern
self.iface_value.SetValue(new_text)
#win32functions.WaitGuiThreadIdle(self)
#time.sleep(Timings.after_editsetedittext_wait)
if isinstance(aligned_text, six.text_type):
self.actions.log('Set text to the edit box: ' + aligned_text)
else:
self.actions.log(b'Set text to the edit box: ' + aligned_text)
# return this control so that actions can be chained.
return self
# set set_text as an alias to set_edit_text
set_text = set_edit_text
# -----------------------------------------------------------
def select(self, start=0, end=None):
"""Set the edit selection of the edit control"""
self.verify_actionable()
self.set_focus()
# if we have been asked to select a string
if isinstance(start, six.text_type):
string_to_select = start
elif isinstance(start, six.binary_type):
string_to_select = start.decode(locale.getpreferredencoding())
elif isinstance(start, six.integer_types):
if isinstance(end, six.integer_types) and start > end:
start, end = end, start
string_to_select = self.window_text()[start:end]
if string_to_select:
document_range = self.iface_text.DocumentRange
search_range = document_range.FindText(string_to_select, False, False)
try:
search_range.Select()
except ValueError:
raise RuntimeError("Text '{0}' hasn't been found".format(string_to_select))
# return this control so that actions can be chained.
return self
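# An illustrative usage sketch (not part of the original module); the
# control lookup is hypothetical:
#
#   edit = dlg.child_window(control_type='Edit').wrapper_object()
#   edit.set_edit_text('hello')                           # replace all text
#   edit.set_edit_text(' world', pos_start=5, pos_end=5)  # insert at index 5
#   edit.select(0, 5)                                     # select 'hello'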
# ====================================================================
class TabControlWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Tab control"""
_control_types = ['Tab']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(TabControlWrapper, self).__init__(elem)
# ----------------------------------------------------------------
def get_selected_tab(self):
"""Return an index of a selected tab"""
return self.selected_item_index()
# ----------------------------------------------------------------
def tab_count(self):
"""Return a number of tabs"""
return self.control_count()
# ----------------------------------------------------------------
def select(self, item):
"""Select a tab by index or by name"""
self._select(item)
return self
# ----------------------------------------------------------------
def texts(self):
"""Tabs texts"""
return self.children_texts()
# ====================================================================
class SliderWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Slider control"""
_control_types = ['Slider']
has_title = False
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(SliderWrapper, self).__init__(elem)
# -----------------------------------------------------------
def min_value(self):
"""Get the minimum value of the Slider"""
return self.iface_range_value.CurrentMinimum
# -----------------------------------------------------------
def max_value(self):
"""Get the maximum value of the Slider"""
return self.iface_range_value.CurrentMaximum
# -----------------------------------------------------------
def small_change(self):
"""
Get a small change of slider's thumb
This change is achieved by pressing left and right arrows
when slider's thumb has keyboard focus.
"""
return self.iface_range_value.CurrentSmallChange
# -----------------------------------------------------------
def large_change(self):
"""
Get a large change of slider's thumb
This change is achieved by pressing PgUp and PgDown keys
when slider's thumb has keyboard focus.
"""
return self.iface_range_value.CurrentLargeChange
# -----------------------------------------------------------
def value(self):
"""Get a current position of slider's thumb"""
return self.iface_range_value.CurrentValue
# -----------------------------------------------------------
def set_value(self, value):
"""Set position of slider's thumb"""
if isinstance(value, float):
value_to_set = value
elif isinstance(value, six.integer_types):
value_to_set = value
elif isinstance(value, six.text_type):
value_to_set = float(value)
else:
raise ValueError("value should be either string or number")
min_value = self.min_value()
max_value = self.max_value()
if not (min_value <= value_to_set <= max_value):
raise ValueError("value should be bigger than {0} and smaller than {1}".format(min_value, max_value))
self.iface_range_value.SetValue(value_to_set)
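# An illustrative usage sketch (not part of the original module): move
# the thumb to the middle of its range.
#
#   slider = dlg.child_window(control_type='Slider').wrapper_object()
#   slider.set_value((slider.min_value() + slider.max_value()) / 2.0)
#   slider.value()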
# ====================================================================
class HeaderWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Header control"""
_control_types = ['Header']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(HeaderWrapper, self).__init__(elem)
# ====================================================================
class HeaderItemWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Header Item control"""
_control_types = ['HeaderItem']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(HeaderItemWrapper, self).__init__(elem)
# ====================================================================
class ListItemWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible ListViewItem control"""
_control_types = ['DataItem', 'ListItem', ]
# -----------------------------------------------------------
def __init__(self, elem, container=None):
"""Initialize the control"""
super(ListItemWrapper, self).__init__(elem)
# Init a pointer to the item's container wrapper.
# It must be set by a container wrapper producing the item.
# Notice that the self.parent property isn't the same
# because it results in a different instance of a wrapper.
self.container = container
# -----------------------------------------------------------
def is_checked(self):
"""Return True if the ListItem is checked
Only items supporting Toggle pattern should answer.
Raise NoPatternInterfaceError if the pattern is not supported
"""
return self.iface_toggle.ToggleState_On == toggle_state_on
def texts(self):
"""Return a list of item texts"""
content = [ch.window_text() for ch in self.children(content_only=True)]
if content:
return content
else:
# For native list with small icons
return super(ListItemWrapper, self).texts()
# ====================================================================
class ListViewWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible ListView control"""
_control_types = ['DataGrid', 'List', 'Table']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(ListViewWrapper, self).__init__(elem)
# Check if control supports Grid pattern
# Control is actually a DataGrid or a List with Grid pattern support
try:
if self.iface_grid:
self.iface_grid_support = True
except NoPatternInterfaceError:
self.iface_grid_support = False
self.is_table = not self.iface_grid_support and self.element_info.control_type == "Table"
self.row_header = False
self.col_header = False
def __getitem__(self, key):
return self.get_item(key)
def __raise_not_implemented(self):
        raise NotImplementedError("This method does not work properly for WinForms DataGrid; use cells()")
def __update_row_header(self):
try:
self.row_header = all(isinstance(six.next(row.iter_children()), HeaderWrapper) for row in self.children())
except StopIteration:
self.row_header = False
def __update_col_header(self):
try:
self.col_header = all(isinstance(col, HeaderWrapper) for col in six.next(self.iter_children()).children())
except StopIteration:
self.col_header = False
def __resolve_row_index(self, ind):
self.__update_col_header()
return ind + 1 if self.col_header and self.is_table else ind
def __resolve_col_index(self, ind):
self.__update_row_header()
return ind + 1 if self.row_header and self.is_table else ind
def __resolve_row_count(self, cnt):
self.__update_col_header()
return cnt - 1 if self.col_header and self.is_table else cnt
# -----------------------------------------------------------
def item_count(self):
"""A number of items in the ListView"""
if self.iface_grid_support:
return self.iface_grid.CurrentRowCount
else:
# TODO: This could be implemented by getting custom ItemCount Property using RegisterProperty
# TODO: See https://msdn.microsoft.com/ru-ru/library/windows/desktop/ff486373%28v=vs.85%29.aspx for details
# TODO: comtypes doesn't seem to support IUIAutomationRegistrar interface
return self.__resolve_row_count(len(self.children()))
# -----------------------------------------------------------
def column_count(self):
"""Return the number of columns"""
if self.iface_grid_support:
return self.iface_grid.CurrentColumnCount
elif self.is_table:
self.__raise_not_implemented()
# ListBox doesn't have columns
return 0
# -----------------------------------------------------------
def get_header_controls(self):
"""Return Header controls associated with the Table"""
return [cell for row in self.children() for cell in row.children() if isinstance(cell, HeaderWrapper)]
# -----------------------------------------------------------
def get_header_control(self):
"""Return Header control associated with the ListView"""
try:
# A data grid control may have no header
hdr = self.children(control_type="Header")[0]
except(IndexError, NoPatternInterfaceError):
hdr = None
return hdr
# -----------------------------------------------------------
def get_column(self, col_index):
"""Get the information for a column of the ListView"""
col = None
try:
col = self.columns()[col_index]
except comtypes.COMError:
raise IndexError
return col
# -----------------------------------------------------------
def columns(self):
"""Get the information on the columns of the ListView"""
if self.iface_grid_support:
arr = self.iface_table.GetCurrentColumnHeaders()
cols = uia_element_info.elements_from_uia_array(arr)
return [uiawrapper.UIAWrapper(e) for e in cols]
elif self.is_table:
self.__raise_not_implemented()
else:
return []
# -----------------------------------------------------------
def cells(self):
"""Return list of list of cells for any type of contol"""
row_start_index = self.__resolve_row_index(0)
col_start_index = self.__resolve_col_index(0)
rows = self.children(content_only=True)
return [row.children(content_only=True)[col_start_index:] for row in rows[row_start_index:]]
# -----------------------------------------------------------
def cell(self, row, column):
"""Return a cell in the ListView control
Only for controls with Grid pattern support
* **row** is an index of a row in the list.
* **column** is an index of a column in the specified row.
The returned cell can be of different control types.
Mostly: TextBlock, ImageControl, EditControl, DataItem
or even another layer of data items (Group, DataGrid)
"""
if not isinstance(row, six.integer_types) or not isinstance(column, six.integer_types):
raise TypeError("row and column must be numbers")
if self.iface_grid_support:
try:
e = self.iface_grid.GetItem(row, column)
elem_info = uia_element_info.UIAElementInfo(e)
cell_elem = uiawrapper.UIAWrapper(elem_info)
except (comtypes.COMError, ValueError):
raise IndexError
elif self.is_table:
# Workaround for WinForms, DataGrid equals list of lists
_row = self.get_item(row)
cell_elem = _row.children()[self.__resolve_col_index(column)]
else:
return None
return cell_elem
# -----------------------------------------------------------
def get_item(self, row):
"""Return an item of the ListView control
* **row** can be either an index of the row or a string
with the text of a cell in the row you want returned.
"""
# Verify arguments
if isinstance(row, six.string_types):
# Try to find item using FindItemByProperty
# That way we can get access to virtualized (unloaded) items
try:
com_elem = self.iface_item_container.FindItemByProperty(0, IUIA().UIA_dll.UIA_NamePropertyId, row)
# Try to load element using VirtualizedItem pattern
try:
get_elem_interface(com_elem, "VirtualizedItem").Realize()
itm = uiawrapper.UIAWrapper(uia_element_info.UIAElementInfo(com_elem))
except NoPatternInterfaceError:
# Item doesn't support VirtualizedItem pattern - item is already on screen or com_elem is NULL
itm = uiawrapper.UIAWrapper(uia_element_info.UIAElementInfo(com_elem))
except (NoPatternInterfaceError, ValueError):
# com_elem is NULL pointer or item doesn't support ItemContainer pattern
# Get DataGrid row
try:
itm = self.descendants(title=row)[0]
# Applications like explorer.exe usually return ListItem
# directly while other apps can return only a cell.
# In this case we need to take its parent - the whole row.
if not isinstance(itm, ListItemWrapper):
itm = itm.parent()
except IndexError:
raise ValueError("Element '{0}' not found".format(row))
elif isinstance(row, six.integer_types):
# Get the item by a row index
# TODO: Can't get virtualized items that way
# TODO: See TODO section of item_count() method for details
list_items = self.children(content_only=True)
itm = list_items[self.__resolve_row_index(row)]
else:
raise TypeError("String type or integer is expected")
# Give to the item a pointer on its container
itm.container = self
return itm
item = get_item # this is an alias to be consistent with other content elements
# -----------------------------------------------------------
def get_items(self):
"""Return all items of the ListView control"""
return self.children(content_only=True)
items = get_items # this is an alias to be consistent with other content elements
# -----------------------------------------------------------
def get_item_rect(self, item_index):
"""Return the bounding rectangle of the list view item
The method is kept mostly for a backward compatibility
with the native ListViewWrapper interface
"""
itm = self.get_item(item_index)
return itm.rectangle()
# -----------------------------------------------------------
def get_selected_count(self):
"""Return a number of selected items
        The call can be quite expensive as we retrieve all
the selected items in order to count them
"""
selection = self.get_selection()
if selection:
return len(selection)
else:
return 0
# -----------------------------------------------------------
def texts(self):
"""Return a list of item texts"""
return [elem.texts() for elem in self.children(content_only=True)]
# -----------------------------------------------------------
@property
def writable_props(self):
"""Extend default properties list."""
props = super(ListViewWrapper, self).writable_props
props.extend(['column_count',
'item_count',
'columns',
# 'items',
])
return props
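# An illustrative usage sketch (not part of the original module); the
# cell text is hypothetical:
#
#   lv = dlg.child_window(control_type='List').wrapper_object()
#   lv.get_item(0)              # row by index, same as lv[0]
#   lv.get_item('report.txt')   # row by the text of one of its cells
#   lv.cell(0, 1)               # single cell, if the Grid pattern is supported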
# ====================================================================
class MenuItemWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible MenuItem control"""
_control_types = ['MenuItem']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(MenuItemWrapper, self).__init__(elem)
# -----------------------------------------------------------
def items(self):
"""Find all items of the menu item"""
return self.children(control_type="MenuItem")
# -----------------------------------------------------------
def select(self):
"""Apply Select pattern"""
try:
self.iface_selection_item.Select()
except(NoPatternInterfaceError):
try:
self.iface_invoke.Invoke()
except(NoPatternInterfaceError):
raise AttributeError
# ====================================================================
class MenuWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible MenuBar or Menu control"""
_control_types = ['MenuBar', 'Menu', ]
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(MenuWrapper, self).__init__(elem)
# -----------------------------------------------------------
def items(self):
"""Find all menu items"""
return self.children(control_type="MenuItem")
# -----------------------------------------------------------
def item_by_index(self, idx):
"""Find a menu item specified by the index"""
item = self.items()[idx]
return item
# -----------------------------------------------------------
@staticmethod
def _activate(item):
"""Activate the specified item"""
if not item.is_active():
item.set_focus()
try:
item.expand()
except(NoPatternInterfaceError):
pass
# -----------------------------------------------------------
def _sub_item_by_text(self, menu, name, exact):
"""Find a menu sub-item by the specified text"""
sub_item = None
items = menu.items()
if items:
if exact:
for i in items:
if name == i.window_text():
sub_item = i
break
else:
texts = []
for i in items:
texts.append(i.window_text())
sub_item = findbestmatch.find_best_match(name, texts, items)
self._activate(sub_item)
return sub_item
# -----------------------------------------------------------
def _sub_item_by_idx(self, menu, idx):
"""Find a menu sub-item by the specified index"""
sub_item = None
items = menu.items()
if items:
sub_item = items[idx]
self._activate(sub_item)
return sub_item
# -----------------------------------------------------------
def item_by_path(self, path, exact=False):
"""Find a menu item specified by the path
The full path syntax is specified in:
:py:meth:`.controls.menuwrapper.Menu.get_menu_path`
Note: $ - specifier is not supported
"""
# Get the path parts
part0, parts = path.split("->", 1)
part0 = part0.strip()
if len(part0) == 0:
raise IndexError()
# Find a top level menu item and select it. After selecting this item
# a new Menu control is created and placed on the dialog. It can be
# a direct child or a descendant.
# Sometimes we need to re-discover Menu again
try:
menu = None
if part0.startswith("#"):
menu = self._sub_item_by_idx(self, int(part0[1:]))
else:
menu = self._sub_item_by_text(self, part0, exact)
if not menu.items():
self._activate(menu)
timings.wait_until(
timings.Timings.window_find_timeout,
timings.Timings.window_find_retry,
lambda: len(self.top_level_parent().descendants(control_type="Menu")) > 0)
menu = self.top_level_parent().descendants(control_type="Menu")[0]
for cur_part in [p.strip() for p in parts.split("->")]:
if cur_part.startswith("#"):
menu = self._sub_item_by_idx(menu, int(cur_part[1:]))
else:
menu = self._sub_item_by_text(menu, cur_part, exact)
except(AttributeError):
raise IndexError()
return menu
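# An illustrative usage sketch (not part of the original module) of the
# item_by_path() syntax: levels are separated by '->' and '#N' picks a
# sub-item by its 0-based index; the menu names are hypothetical.
#
#   menu = dlg.child_window(control_type='MenuBar').wrapper_object()
#   menu.item_by_path('File->Open').select()
#   menu.item_by_path('#0->#2').select()
#   menu.item_by_path('File->Open', exact=True).select()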
# ====================================================================
class TooltipWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Tooltip control"""
_control_types = ['ToolTip']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(TooltipWrapper, self).__init__(elem)
# ====================================================================
class ToolbarWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible ToolBar control
The control's children usually are: Buttons, SplitButton,
MenuItems, ThumbControls, TextControls, Separators, CheckBoxes.
Notice that ToolTip controls are children of the top window and
not of the toolbar.
"""
_control_types = ['ToolBar']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(ToolbarWrapper, self).__init__(elem)
self.win32_wrapper = None
if not self.children() and self.element_info.handle is not None:
self.win32_wrapper = common_controls.ToolbarWrapper(self.element_info.handle)
@property
def writable_props(self):
"""Extend default properties list."""
props = super(ToolbarWrapper, self).writable_props
props.extend(['button_count'])
return props
# ----------------------------------------------------------------
def texts(self):
"""Return texts of the Toolbar"""
return [c.window_text() for c in self.buttons()]
#----------------------------------------------------------------
def button_count(self):
"""Return a number of buttons on the ToolBar"""
if self.win32_wrapper is not None:
return self.win32_wrapper.button_count()
else:
return len(self.children())
# ----------------------------------------------------------------
def buttons(self):
"""Return all available buttons"""
if self.win32_wrapper is not None:
btn_count = self.win32_wrapper.button_count()
cc = []
for btn_num in range(btn_count):
relative_point = self.win32_wrapper.get_button_rect(btn_num).mid_point()
button_coord_x, button_coord_y = self.client_to_screen(relative_point)
btn_elem_info = UIAElementInfo.from_point(button_coord_x, button_coord_y)
cc.append(uiawrapper.UIAWrapper(btn_elem_info))
else:
cc = self.children()
return cc
# ----------------------------------------------------------------
def button(self, button_identifier, exact=True):
"""Return a button by the specified identifier
* **button_identifier** can be either an index of a button or
a string with the text of the button.
* **exact** flag specifies if the exact match for the text look up
has to be applied.
"""
cc = self.buttons()
texts = [c.window_text() for c in cc]
if isinstance(button_identifier, six.string_types):
self.actions.log('Toolbar buttons: ' + str(texts))
if exact:
try:
button_index = texts.index(button_identifier)
except ValueError:
raise findbestmatch.MatchError(items=texts, tofind=button_identifier)
else:
# one of these will be returned for the matching text
indices = [i for i in range(0, len(texts))]
# find which index best matches that text
button_index = findbestmatch.find_best_match(button_identifier, texts, indices)
else:
button_index = button_identifier
return cc[button_index]
# ----------------------------------------------------------------
def check_button(self, button_identifier, make_checked, exact=True):
"""Find where the button is and toggle it
* **button_identifier** can be either an index of the button or
a string with the text on the button.
* **make_checked** specifies the required toggled state of the button.
If the button is already in the specified state the state isn't changed.
* **exact** flag specifies if the exact match for the text look up
has to be applied
"""
self.actions.logSectionStart('Checking "' + self.window_text() +
'" toolbar button "' + str(button_identifier) + '"')
button = self.button(button_identifier, exact=exact)
if make_checked:
self.actions.log('Pressing down toolbar button "' + str(button_identifier) + '"')
else:
self.actions.log('Pressing up toolbar button "' + str(button_identifier) + '"')
if not button.is_enabled():
self.actions.log('Toolbar button is not enabled!')
raise RuntimeError("Toolbar button is not enabled!")
res = (button.get_toggle_state() == toggle_state_on)
if res != make_checked:
button.toggle()
self.actions.logSectionEnd()
return button
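# A minimal usage sketch (hedged): 'main_window' is assumed to be a
# WindowSpecification of a running application, and the 'Bold' button name is
# hypothetical. Defining the helper has no import-time side effects.
def _demo_toolbar_usage(main_window):
    """Toggle a hypothetical 'Bold' toolbar button and return its wrapper."""
    toolbar = main_window.child_window(control_type="ToolBar").wrapper_object()
    # exact=False enables the fuzzy find_best_match() text lookup
    return toolbar.check_button('Bold', make_checked=True, exact=False)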
# ====================================================================
class TreeItemWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible TreeItem control
In addition to the provided methods of the wrapper
additional inherited methods can be especially helpful:
select(), extend(), collapse(), is_extended(), is_collapsed(),
click_input(), rectangle() and many others
"""
_control_types = ['TreeItem']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(TreeItemWrapper, self).__init__(elem)
# -----------------------------------------------------------
def is_checked(self):
"""Return True if the TreeItem is checked
Only items supporting Toggle pattern should answer.
Raise NoPatternInterfaceError if the pattern is not supported
"""
        return self.iface_toggle.CurrentToggleState == toggle_state_on
# -----------------------------------------------------------
def ensure_visible(self):
"""Make sure that the TreeView item is visible"""
self.iface_scroll_item.ScrollIntoView()
# -----------------------------------------------------------
def get_child(self, child_spec, exact=False):
"""Return the child item of this item
Accepts either a string or an index.
If a string is passed then it returns the child item
with the best match for the string.
"""
cc = self.children(control_type='TreeItem')
if isinstance(child_spec, six.string_types):
texts = [c.window_text() for c in cc]
if exact:
if child_spec in texts:
index = texts.index(child_spec)
else:
raise IndexError('There is no child equal to "' + str(child_spec) + '" in ' + str(texts))
else:
indices = range(0, len(texts))
index = findbestmatch.find_best_match(
child_spec, texts, indices, limit_ratio=.6)
else:
index = child_spec
return cc[index]
# -----------------------------------------------------------
def _calc_click_coords(self):
"""Override the BaseWrapper helper method
Try to get coordinates of a text box inside the item.
        If no text box is found, set the coordinates
        close to the left part of the item rectangle.
        The returned coordinates are always absolute.
"""
tt = self.children(control_type="Text")
if tt:
point = tt[0].rectangle().mid_point()
# convert from POINT to a simple tuple
coords = (point.x, point.y)
else:
rect = self.rectangle()
coords = (rect.left + int(float(rect.width()) / 4.),
rect.top + int(float(rect.height()) / 2.))
return coords
# -----------------------------------------------------------
def sub_elements(self):
"""Return a list of all visible sub-items of this control"""
return self.descendants(control_type="TreeItem")
# ====================================================================
class TreeViewWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Tree control"""
_control_types = ['Tree']
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(TreeViewWrapper, self).__init__(elem)
@property
def writable_props(self):
"""Extend default properties list."""
props = super(TreeViewWrapper, self).writable_props
props.extend(['item_count'])
return props
# -----------------------------------------------------------
def item_count(self):
"""Return a number of items in TreeView"""
return len(self.descendants(control_type="TreeItem"))
# -----------------------------------------------------------
def roots(self):
"""Return root elements of TreeView"""
return self.children(control_type="TreeItem")
# -----------------------------------------------------------
def get_item(self, path, exact=False):
r"""Read a TreeView item
* **path** a path to the item to return. This can be one of
the following:
* A string separated by \\ characters. The first character must
be \\. This string is split on the \\ characters and each of
these is used to find the specific child at each level. The
\\ represents the root item - so you don't need to specify the
root itself.
* A list/tuple of strings - The first item should be the root
element.
            * A list/tuple of integers - the first item is the index of the
              root to select. Indexing always starts from zero: get_item((0, 2, 3))
        * **exact** a flag to request an exact match of the strings in the path,
          or to apply fuzzy best_match logic, allowing non-exact path specifiers
"""
if not self.item_count():
return None
# Ensure the path is absolute
if isinstance(path, six.string_types):
if not path.startswith("\\"):
raise RuntimeError(
"Only absolute paths allowed - "
"please start the path with \\")
path = path.split("\\")[1:]
current_elem = None
# find the correct root elem
if isinstance(path[0], int):
current_elem = self.roots()[path[0]]
else:
roots = self.roots()
texts = [r.window_text() for r in roots]
if exact:
if path[0] in texts:
current_elem = roots[texts.index(path[0])]
else:
raise IndexError("There is no root element equal to '{0}'".format(path[0]))
else:
try:
current_elem = findbestmatch.find_best_match(
path[0], texts, roots, limit_ratio=.6)
except IndexError:
raise IndexError("There is no root element similar to '{0}'".format(path[0]))
# now for each of the lower levels
        # just index into its children
for child_spec in path[1:]:
try:
# ensure that the item is expanded as this is sometimes
# required for loading tree view branches
current_elem.expand()
current_elem = current_elem.get_child(child_spec, exact)
except IndexError:
if isinstance(child_spec, six.string_types):
raise IndexError("Item '{0}' does not have a child '{1}'".format(
current_elem.window_text(), child_spec))
else:
raise IndexError("Item '{0}' does not have {1} children".format(
current_elem.window_text(), child_spec + 1))
except comtypes.COMError:
raise IndexError("Item '{0}' does not have a child '{1}'".format(
current_elem.window_text(), child_spec))
return current_elem
# -----------------------------------------------------------
def print_items(self):
"""Print all items with line indents"""
self.text = ""
def _print_one_level(item, ident):
"""Get texts for the item and its children"""
self.text += " " * ident + item.window_text() + "\n"
for child in item.children(control_type="TreeItem"):
_print_one_level(child, ident + 1)
for root in self.roots():
_print_one_level(root, 0)
return self.text
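# A minimal usage sketch of get_item(); the tree item texts are hypothetical.
def _demo_tree_usage(tree_wrapper):
    """Select an item addressed by an absolute backslash-separated path."""
    item = tree_wrapper.get_item('\\Root\\Branch\\Leaf', exact=False)
    item.ensure_visible()
    item.select()
    return item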
# ====================================================================
class StaticWrapper(uiawrapper.UIAWrapper):
"""Wrap an UIA-compatible Text control"""
_control_types = ['Text']
can_be_label = True
# -----------------------------------------------------------
def __init__(self, elem):
"""Initialize the control"""
super(StaticWrapper, self).__init__(elem)
# ====================================================================================================
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
#----------------------------------------------------------------------------------------------------
# download the vgg net parameters
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
#----------------------------------------------------------------------------------------------------
# download the flowers dataset
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
    with tarfile.open('flower_photos.tar.gz') as tar:
        tar.extractall()
#----------------------------------------------------------------------------------------------------
# Compute and record the convnet codes (bottleneck features)
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 25
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
vgg = vgg16.Vgg16()
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
                # Get the values from the relu6 layer of the VGG network
                codes_batch = sess.run(vgg.relu6, feed_dict={input_: images})
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file (open in binary mode: tofile() writes raw bytes)
with open('codes', 'wb') as f:
    codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
#----------------------------------------------------------------------------------------------------
# Building the classifier
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes', 'rb') as f:
    codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
#----------------------------------------------------------------------------------------------------
# Data prep
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)
# split and shuffle the dataset using sklearn
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
splitter = sss.split(codes, labels_vecs)
# get the indices for the training and validation sets
train_idx, val_idx = next(splitter)
# now take 50% of the validation samples to be used as a test set
val_set_size = int(len(val_idx) / 2)
val_idx, test_idx = val_idx[:val_set_size], val_idx[val_set_size:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
#-------------------------------------------------------------------------------------
# Classifier layers
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
# Classifier layers and operations
fc = tf.contrib.layers.fully_connected(inputs_, 256)
logits = tf.layers.dense(fc, labels_vecs.shape[1], activation=None)
# cross entropy loss
ce = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)
cost = tf.reduce_mean(ce)
# training optimizer
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
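# Sanity note: `accuracy` above is top-1 accuracy, i.e. the mean rate at which
# the argmax of the softmax output matches the argmax of the one-hot labels.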
#-------------------------------------------------------------------------------------
# Batches!
def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x) // n_batches
for ii in range(0, n_batches * batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches - 1) * batch_size:
X, Y = x[ii: ii + batch_size], y[ii: ii + batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
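# A quick sanity check of the generator (a sketch; it only slices the arrays,
# so it is cheap to materialize): the last batch absorbs the remainder, and
# the batches together cover the whole training set.
_example_batches = list(get_batches(train_x, train_y))
assert sum(len(bx) for bx, _ in _example_batches) == len(train_x)
print("get_batches yields {} batches".format(len(_example_batches)))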
# -------------------------------------------------------------------------------------
# Training
num_epochs = 100
saver = tf.train.Saver()
with tf.Session() as sess:
    # Training loop
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
        for batch_i, (x, y) in enumerate(get_batches(train_x, train_y)):
            feed = {inputs_: x,
                    labels_: y}
            _, train_loss = sess.run([optimizer, cost], feed_dict=feed)
            print("Epoch: {}/{}".format(epoch_i, num_epochs),
                  "Iteration: {}".format(batch_i),
                  "Train loss: {:.5f}".format(train_loss))
            if batch_i % 5 == 0:  # every 5 iterations, check accuracy on the validation set
                feed = {inputs_: val_x, labels_: val_y}
                val_acc = sess.run(accuracy, feed_dict=feed)
                print("------Epoch: {}/{}".format(epoch_i, num_epochs),
                      "Iteration: {}".format(batch_i),
                      "Validation acc: {:.4f}".format(val_acc))
saver.save(sess, "checkpoints/flowers.ckpt")
# -------------------------------------------------------------------------------------
# Testing
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
import matplotlib.pyplot as plt
from scipy.ndimage import imread  # removed in SciPy >= 1.2; imageio.imread is a drop-in replacement here
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
# ====================================================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
# LazyLoader is used to break the circular import: list_ops -> control_flow_ops -> tensor_array_ops -> list_ops
control_flow_ops = LazyLoader(
"control_flow_ops", globals(),
"tensorflow.python.ops.control_flow_ops")
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
element_dtype,
max_num_elements=None,
name=None):
if max_num_elements is None:
max_num_elements = -1
return gen_list_ops.empty_tensor_list(
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
max_num_elements=max_num_elements,
name=name)
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
return gen_list_ops.tensor_list_reserve(
element_shape=_build_element_shape(element_shape),
num_elements=num_elements,
element_dtype=element_dtype,
name=name)
def tensor_list_from_tensor(tensor, element_shape, name=None):
return gen_list_ops.tensor_list_from_tensor(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
name=name)
def tensor_list_get_item(input_handle, index, element_dtype, name=None):
return gen_list_ops.tensor_list_get_item(
input_handle=input_handle,
index=index,
element_shape=-1,
element_dtype=element_dtype,
name=name)
def tensor_list_pop_back(input_handle, element_dtype, name=None):
return gen_list_ops.tensor_list_pop_back(
input_handle=input_handle,
element_shape=-1,
element_dtype=element_dtype,
name=name)
def tensor_list_gather(input_handle, indices, element_dtype, name=None):
return gen_list_ops.tensor_list_gather(
input_handle=input_handle,
indices=indices,
element_shape=-1,
element_dtype=element_dtype,
name=name)
def tensor_list_stack(input_handle, element_dtype, num_elements=-1, name=None):
return gen_list_ops.tensor_list_stack(
input_handle=input_handle,
element_shape=-1,
element_dtype=element_dtype,
num_elements=num_elements,
name=name)
def tensor_list_concat(input_handle, element_dtype, element_shape=None,
name=None):
# Ignore the lengths output of TensorListConcat. It is only used during
# gradient computation.
return gen_list_ops.tensor_list_concat(
input_handle=input_handle, element_dtype=element_dtype,
element_shape=element_shape, name=name)[0]
def tensor_list_split(tensor, element_shape, lengths, name=None):
return gen_list_ops.tensor_list_split(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
lengths=lengths,
name=name)
def tensor_list_set_item(input_handle,
index,
item,
resize_if_index_out_of_bounds=False,
name=None):
"""Sets `item` at `index` in input list."""
if resize_if_index_out_of_bounds:
input_list_size = gen_list_ops.tensor_list_length(input_handle)
# TODO(srbs): This could cause some slowdown. Consider fusing resize
# functionality in the SetItem op.
input_handle = control_flow_ops.cond(
index >= input_list_size,
lambda: gen_list_ops.tensor_list_resize( # pylint: disable=g-long-lambda
input_handle, index + 1),
lambda: input_handle)
return gen_list_ops.tensor_list_set_item(
input_handle=input_handle, index=index, item=item, name=name)
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult,
element_shape=array_ops.shape(op.inputs[1]),
element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
if dlist is None:
dlist = empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
return gen_list_ops.tensor_list_push_back(dlist, delement), None
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:]), None
@ops.RegisterGradient("TensorListConcat")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
# TODO(srbs): We lose the element_shape information in tensor_list_concat.
# Consider providing that as an output of TensorListConcat?
if dtensor.shape.rank is None:
element_shape = None
else:
element_shape = [None] + dtensor.shape.as_list()[1:]
return tensor_list_split(
dtensor,
element_shape=_build_element_shape(element_shape),
lengths=op.outputs[1])
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
return tensor_list_concat(dlist, element_dtype=op.inputs[0].dtype), None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
t = op.inputs[0]
if t.shape.dims and t.shape.dims[0].value is not None:
num_elements = t.shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = empty_tensor_list(
element_dtype=t.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist,
element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]),
element_dtype=t.dtype,
num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
element_shape_grad = None
return list_grad, index_grad, element_shape_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
"""Gradient function for TensorListSetItem."""
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = gen_list_ops.tensor_list_get_item(
dlist,
index,
element_shape=array_ops.shape(item),
element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListResize")
def _TensorListResizeGrad(op, dlist):
input_list, _ = op.inputs
input_list_size = gen_list_ops.tensor_list_length(input_list)
return gen_list_ops.tensor_list_resize(dlist, input_list_size), None
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
"""Gradient function for TensorListGather."""
input_list, indices, _ = op.inputs
dlist = gen_list_ops.tensor_list_scatter(
tensor=dtensor,
indices=indices,
element_shape=ops.convert_to_tensor(-1, dtype=dtypes.int32))
# TensorListScatter returns a list with size `max(indices) + 1`
# so we manually resize it to match the size of the input list.
input_list_size = gen_list_ops.tensor_list_length(input_list)
dlist = gen_list_ops.tensor_list_resize(dlist, input_list_size)
return dlist, None, None
@ops.RegisterGradient("TensorListScatter")
def _TensorListScatterGrad(op, dlist):
t, indices, _ = op.inputs
return gen_list_ops.tensor_list_gather(
dlist,
indices,
element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]),
element_dtype=t.dtype), None, None
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
  If shape is a scalar, an int32 tensor with an empty list is returned. Note we
  do not directly return the empty list, since ops.convert_to_tensor would
  convert it to a float32 tensor, which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is a scalar.
if not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
return [d if d is not None else -1 for d in shape]
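# A minimal usage sketch of the ops above (graph-mode TF1 style; the shapes
# and values are illustrative assumptions, not part of any documented API).
def _demo_tensor_list():
  """Build a list of two [2]-vectors and stack it into a [2, 2] tensor."""
  handle = empty_tensor_list(element_shape=[2], element_dtype=dtypes.float32)
  handle = gen_list_ops.tensor_list_push_back(handle, [1.0, 2.0])
  handle = gen_list_ops.tensor_list_push_back(handle, [3.0, 4.0])
  return tensor_list_stack(handle, element_dtype=dtypes.float32)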
# ====================================================================================================
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import copy
import traceback
from functools import partial
from typing import List, TYPE_CHECKING, Tuple, NamedTuple, Any, Dict, Optional, Union
from . import bitcoin
from . import keystore
from . import mnemonic
from .bip32 import is_bip32_derivation, xpub_type, normalize_bip32_derivation, BIP32Node
from .keystore import bip44_derivation, purpose48_derivation, Hardware_KeyStore, KeyStore, bip39_to_seed
from .wallet import (Imported_Wallet, Standard_Wallet, Multisig_Wallet,
wallet_types, Wallet, Abstract_Wallet)
from .storage import WalletStorage, StorageEncryptionVersion
from .wallet_db import WalletDB
from .i18n import _
from .util import UserCancelled, InvalidPassword, WalletFileException, UserFacingException
from .simple_config import SimpleConfig
from .plugin import Plugins, HardwarePluginLibraryUnavailable
from .logging import Logger
from .plugins.hw_wallet.plugin import OutdatedHwFirmwareException, HW_PluginBase
if TYPE_CHECKING:
from .plugin import DeviceInfo, BasePlugin
# hardware device setup purpose
HWD_SETUP_NEW_WALLET, HWD_SETUP_DECRYPT_WALLET = range(0, 2)
class ScriptTypeNotSupported(Exception): pass
class GoBack(Exception): pass
class ReRunDialog(Exception): pass
class ChooseHwDeviceAgain(Exception): pass
class WizardStackItem(NamedTuple):
action: Any
args: Any
kwargs: Dict[str, Any]
db_data: dict
class WizardWalletPasswordSetting(NamedTuple):
password: Optional[str]
encrypt_storage: bool
storage_enc_version: StorageEncryptionVersion
encrypt_keystore: bool
class BaseWizard(Logger):
def __init__(self, config: SimpleConfig, plugins: Plugins):
super(BaseWizard, self).__init__()
Logger.__init__(self)
self.config = config
self.plugins = plugins
self.data = {}
self.pw_args = None # type: Optional[WizardWalletPasswordSetting]
self._stack = [] # type: List[WizardStackItem]
self.plugin = None # type: Optional[BasePlugin]
self.keystores = [] # type: List[KeyStore]
self.is_kivy = config.get('gui') == 'kivy'
self.seed_type = None
def set_icon(self, icon):
pass
def run(self, *args, **kwargs):
action = args[0]
args = args[1:]
db_data = copy.deepcopy(self.data)
self._stack.append(WizardStackItem(action, args, kwargs, db_data))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
f(self, *args, **kwargs)
elif hasattr(self, action):
f = getattr(self, action)
f(*args, **kwargs)
else:
raise Exception("unknown action", action)
def can_go_back(self):
return len(self._stack) > 1
def go_back(self, *, rerun_previous: bool = True) -> None:
if not self.can_go_back():
return
# pop 'current' frame
self._stack.pop()
prev_frame = self._stack[-1]
# try to undo side effects since we last entered 'previous' frame
# FIXME only self.data is properly restored
self.data = copy.deepcopy(prev_frame.db_data)
if rerun_previous:
# pop 'previous' frame
self._stack.pop()
# rerun 'previous' frame
self.run(prev_frame.action, *prev_frame.args, **prev_frame.kwargs)
def reset_stack(self):
self._stack = []
def new(self):
title = _("Create new wallet")
message = '\n'.join([
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Import Bitcoin addresses or private keys")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def upgrade_db(self, storage, db):
exc = None # type: Optional[Exception]
def on_finished():
if exc is None:
self.terminate(storage=storage, db=db)
else:
raise exc
def do_upgrade():
nonlocal exc
try:
db.upgrade()
except Exception as e:
exc = e
self.waiting_dialog(do_upgrade, _('Upgrading wallet format...'), on_finished=on_finished)
def run_task_without_blocking_gui(self, task, *, msg: str = None) -> Any:
"""Perform a task in a thread without blocking the GUI.
Returns the result of 'task', or raises the same exception.
This method blocks until 'task' is finished.
"""
raise NotImplementedError()
def load_2fa(self):
self.data['wallet_type'] = '2fa'
self.data['use_trustedcoin'] = True
self.plugin = self.plugins.load_plugin('trustedcoin')
def on_wallet_type(self, choice):
self.data['wallet_type'] = self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.load_2fa()
action = self.plugin.get_action(self.data)
elif choice == 'imported':
action = 'import_addresses_or_keys'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
multisig_type = "%dof%d" % (m, n)
self.data['wallet_type'] = multisig_type
self.n = n
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
i = len(self.keystores)
        title = _('Add cosigner') + ' (%d of %d)' % (i + 1, self.n) if self.wallet_type == 'multisig' else _('Keystore')
        if self.wallet_type == 'standard' or i == 0:
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_from_seed', _('I already have a seed')),
('restore_from_key', _('Use a master key')),
]
if not self.is_kivy:
choices.append(('choose_hw_device', _('Use a hardware device')))
else:
message = _('Add a cosigner to your multi-sig wallet')
choices = [
('restore_from_key', _('Enter cosigner key')),
('restore_from_seed', _('Enter cosigner seed')),
]
if not self.is_kivy:
choices.append(('choose_hw_device', _('Cosign with hardware device')))
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def import_addresses_or_keys(self):
v = lambda x: keystore.is_address_list(x) or keystore.is_private_key_list(x, raise_on_error=True)
title = _("Import Monacoin Addresses")
message = _("Enter a list of Monacoin addresses (this will create a watching-only wallet), or a list of private keys(Please do not enter except secret key which starts with T).")
self.add_xpub_dialog(title=title, message=message, run_next=self.on_import,
is_valid=v, allow_multi=True, show_wif_help=True)
def on_import(self, text):
        # text is already sanitized by is_address_list and is_private_key_list
if keystore.is_address_list(text):
self.data['addresses'] = {}
for addr in text.split():
assert bitcoin.is_address(addr)
self.data['addresses'][addr] = {}
elif keystore.is_private_key_list(text):
self.data['addresses'] = {}
k = keystore.Imported_KeyStore({})
keys = keystore.get_private_keys(text)
for pk in keys:
assert bitcoin.is_private_key(pk)
txin_type, pubkey = k.import_privkey(pk, None)
addr = bitcoin.pubkey_to_address(txin_type, pubkey)
self.data['addresses'][addr] = {'type':txin_type, 'pubkey':pubkey}
self.keystores.append(k)
else:
return self.terminate(aborted=True)
return self.run('create_wallet')
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_master_key
title = _("Create keystore from a master key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub/ypub/zpub)."),
_("To create a spending wallet, please enter a master private key (xprv/yprv/zprv).")
])
self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
else:
i = len(self.keystores) + 1
self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=keystore.is_bip32_key)
def on_restore_from_key(self, text):
k = keystore.from_master_key(text)
self.on_keystore(k)
def choose_hw_device(self, purpose=HWD_SETUP_NEW_WALLET, *, storage: WalletStorage = None):
while True:
try:
self._choose_hw_device(purpose=purpose, storage=storage)
except ChooseHwDeviceAgain:
pass
else:
break
def _choose_hw_device(self, *, purpose, storage: WalletStorage = None):
title = _('Hardware Keystore')
# check available plugins
supported_plugins = self.plugins.get_hardware_support()
devices = [] # type: List[Tuple[str, DeviceInfo]]
devmgr = self.plugins.device_manager
debug_msg = ''
def failed_getting_device_infos(name, e):
nonlocal debug_msg
err_str_oneline = ' // '.join(str(e).splitlines())
self.logger.warning(f'error getting device infos for {name}: {err_str_oneline}')
indented_error_msg = ' '.join([''] + str(e).splitlines(keepends=True))
debug_msg += f' {name}: (error getting device infos)\n{indented_error_msg}\n'
# scan devices
try:
scanned_devices = self.run_task_without_blocking_gui(task=devmgr.scan_devices,
msg=_("Scanning devices..."))
except BaseException as e:
self.logger.info('error scanning devices: {}'.format(repr(e)))
debug_msg = ' {}:\n {}'.format(_('Error scanning devices'), e)
else:
for splugin in supported_plugins:
name, plugin = splugin.name, splugin.plugin
# plugin init errored?
if not plugin:
e = splugin.exception
indented_error_msg = ' '.join([''] + str(e).splitlines(keepends=True))
debug_msg += f' {name}: (error during plugin init)\n'
debug_msg += ' {}\n'.format(_('You might have an incompatible library.'))
debug_msg += f'{indented_error_msg}\n'
continue
# see if plugin recognizes 'scanned_devices'
try:
# FIXME: side-effect: unpaired_device_info sets client.handler
device_infos = devmgr.unpaired_device_infos(None, plugin, devices=scanned_devices,
include_failing_clients=True)
except HardwarePluginLibraryUnavailable as e:
failed_getting_device_infos(name, e)
continue
except BaseException as e:
self.logger.exception('')
failed_getting_device_infos(name, e)
continue
device_infos_failing = list(filter(lambda di: di.exception is not None, device_infos))
for di in device_infos_failing:
failed_getting_device_infos(name, di.exception)
device_infos_working = list(filter(lambda di: di.exception is None, device_infos))
devices += list(map(lambda x: (name, x), device_infos_working))
if not debug_msg:
debug_msg = ' {}'.format(_('No exceptions encountered.'))
if not devices:
msg = (_('No hardware device detected.') + '\n' +
_('To trigger a rescan, press \'Next\'.') + '\n\n')
if sys.platform == 'win32':
msg += _('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", '
'and do "Remove device". Then, plug your device again.') + '\n'
msg += _('While this is less than ideal, it might help if you run Electrum as Administrator.') + '\n'
else:
msg += _('On Linux, you might have to add a new permission to your udev rules.') + '\n'
msg += '\n\n'
msg += _('Debug message') + '\n' + debug_msg
self.confirm_dialog(title=title, message=msg,
run_next=lambda x: None)
raise ChooseHwDeviceAgain()
# select device
self.devices = devices
choices = []
for name, info in devices:
state = _("initialized") if info.initialized else _("wiped")
label = info.label or _("An unnamed {}").format(name)
            try:
                transport_str = info.device.transport_ui_string[:20]
            except Exception:
                transport_str = 'unknown transport'
descr = f"{label} [{info.model_name or name}, {state}, {transport_str}]"
choices.append(((name, info), descr))
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices,
run_next=lambda *args: self.on_device(*args, purpose=purpose, storage=storage))
def on_device(self, name, device_info: 'DeviceInfo', *, purpose, storage: WalletStorage = None):
self.plugin = self.plugins.get_plugin(name)
assert isinstance(self.plugin, HW_PluginBase)
devmgr = self.plugins.device_manager
try:
client = self.plugin.setup_device(device_info, self, purpose)
except OSError as e:
self.show_error(_('We encountered an error while connecting to your device:')
+ '\n' + str(e) + '\n'
+ _('To try to fix this, we will now re-pair with your device.') + '\n'
+ _('Please try again.'))
devmgr.unpair_id(device_info.device.id_)
raise ChooseHwDeviceAgain()
except OutdatedHwFirmwareException as e:
if self.question(e.text_ignore_old_fw_and_continue(), title=_("Outdated device firmware")):
self.plugin.set_ignore_outdated_fw()
# will need to re-pair
devmgr.unpair_id(device_info.device.id_)
raise ChooseHwDeviceAgain()
except GoBack:
raise ChooseHwDeviceAgain()
except (UserCancelled, ReRunDialog):
raise
except UserFacingException as e:
self.show_error(str(e))
raise ChooseHwDeviceAgain()
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
raise ChooseHwDeviceAgain()
if purpose == HWD_SETUP_NEW_WALLET:
def f(derivation, script_type):
derivation = normalize_bip32_derivation(derivation)
self.run('on_hw_derivation', name, device_info, derivation, script_type)
self.derivation_and_script_type_dialog(f)
elif purpose == HWD_SETUP_DECRYPT_WALLET:
password = client.get_password_for_storage_encryption()
try:
storage.decrypt(password)
except InvalidPassword:
# try to clear session so that user can type another passphrase
if hasattr(client, 'clear_session'): # FIXME not all hw wallet plugins have this
client.clear_session()
raise
else:
raise Exception('unknown purpose: %s' % purpose)
def derivation_and_script_type_dialog(self, f, *, get_account_xpub=None):
message1 = _('Choose the type of addresses in your wallet.')
message2 = ' '.join([
_('You can override the suggested derivation path.'),
_('If you are not sure what this is, leave this field unchanged.')
])
hide_choices = False
if self.wallet_type == 'multisig':
# There is no general standard for HD multisig.
# For legacy, this is partially compatible with BIP45; assumes index=0
# For segwit, a custom path is used, as there is no standard at all.
default_choice_idx = 2
choices = [
('standard', 'legacy multisig (p2sh)', normalize_bip32_derivation("m/45'/0")),
('p2wsh-p2sh', 'p2sh-segwit multisig (p2wsh-p2sh)', purpose48_derivation(0, xtype='p2wsh-p2sh')),
('p2wsh', 'native segwit multisig (p2wsh)', purpose48_derivation(0, xtype='p2wsh')),
]
# if this is not the first cosigner, pre-select the expected script type,
# and hide the choices
script_type = self.get_script_type_of_wallet()
if script_type is not None:
script_types = [*zip(*choices)][0]
chosen_idx = script_types.index(script_type)
default_choice_idx = chosen_idx
hide_choices = True
else:
default_choice_idx = 2
choices = [
('standard', 'legacy (p2pkh)', bip44_derivation(0, bip43_purpose=44)),
('p2wpkh-p2sh', 'p2sh-segwit (p2wpkh-p2sh)', bip44_derivation(0, bip43_purpose=49)),
('p2wpkh', 'native segwit (p2wpkh)', bip44_derivation(0, bip43_purpose=84)),
]
while True:
try:
self.derivation_and_script_type_gui_specific_dialog(
run_next=f,
title=_('Script type and Derivation path'),
message1=message1,
message2=message2,
choices=choices,
test_text=is_bip32_derivation,
default_choice_idx=default_choice_idx,
get_account_xpub=get_account_xpub,
hide_choices=hide_choices,
)
return
except ScriptTypeNotSupported as e:
self.show_error(e)
# let the user choose again
def on_hw_derivation(self, name, device_info: 'DeviceInfo', derivation, xtype):
from .keystore import hardware_keystore
devmgr = self.plugins.device_manager
assert isinstance(self.plugin, HW_PluginBase)
try:
xpub = self.plugin.get_xpub(device_info.device.id_, derivation, xtype, self)
client = devmgr.client_by_id(device_info.device.id_, scan_now=False)
if not client: raise Exception("failed to find client for device id")
root_fingerprint = client.request_root_fingerprint_from_device()
label = client.label() # use this as device_info.label might be outdated!
soft_device_id = client.get_soft_device_id() # use this as device_info.device_id might be outdated!
except ScriptTypeNotSupported:
raise # this is handled in derivation_dialog
except BaseException as e:
self.logger.exception('')
self.show_error(e)
raise ChooseHwDeviceAgain()
d = {
'type': 'hardware',
'hw_type': name,
'derivation': derivation,
'root_fingerprint': root_fingerprint,
'xpub': xpub,
'label': label,
'soft_device_id': soft_device_id,
}
try:
client.manipulate_keystore_dict_during_wizard_setup(d)
except Exception as e:
self.logger.exception('')
self.show_error(e)
raise ChooseHwDeviceAgain()
k = hardware_keystore(d)
self.on_keystore(k)
def passphrase_dialog(self, run_next, is_restoring=False):
title = _('Seed extension')
message = '\n'.join([
_('You may extend your seed with custom words.'),
_('Your seed extension must be saved together with your seed.'),
])
warning = '\n'.join([
_('Note that this is NOT your encryption password.'),
_('If you do not know what this is, leave this field empty.'),
])
warn_issue4566 = is_restoring and self.seed_type == 'bip39'
self.line_dialog(title=title, message=message, warning=warning,
default='', test=lambda x:True, run_next=run_next,
warn_issue4566=warn_issue4566)
def restore_from_seed(self):
self.opt_bip39 = True
self.opt_slip39 = True
self.opt_ext = True
is_cosigning_seed = lambda x: mnemonic.seed_type(x) in ['standard', 'segwit']
test = mnemonic.is_seed if self.wallet_type == 'standard' else is_cosigning_seed
f = lambda *args: self.run('on_restore_seed', *args)
self.restore_seed_dialog(run_next=f, test=test)
def on_restore_seed(self, seed, seed_type, is_ext):
self.seed_type = seed_type if seed_type != 'electrum' else mnemonic.seed_type(seed)
if self.seed_type == 'bip39':
def f(passphrase):
root_seed = bip39_to_seed(seed, passphrase)
self.on_restore_bip43(root_seed)
self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
elif self.seed_type == 'slip39':
def f(passphrase):
root_seed = seed.decrypt(passphrase)
self.on_restore_bip43(root_seed)
self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
elif self.seed_type in ['standard', 'segwit']:
f = lambda passphrase: self.run('create_keystore', seed, passphrase)
self.passphrase_dialog(run_next=f, is_restoring=True) if is_ext else f('')
elif self.seed_type == 'old':
self.run('create_keystore', seed, '')
elif mnemonic.is_any_2fa_seed_type(self.seed_type):
self.load_2fa()
self.run('on_restore_seed', seed, is_ext)
else:
raise Exception('Unknown seed type', self.seed_type)
def on_restore_bip43(self, root_seed):
def f(derivation, script_type):
derivation = normalize_bip32_derivation(derivation)
self.run('on_bip43', root_seed, derivation, script_type)
if self.wallet_type == 'standard':
def get_account_xpub(account_path):
root_node = BIP32Node.from_rootseed(root_seed, xtype="standard")
account_node = root_node.subkey_at_private_derivation(account_path)
account_xpub = account_node.to_xpub()
return account_xpub
else:
get_account_xpub = None
self.derivation_and_script_type_dialog(f, get_account_xpub=get_account_xpub)
def create_keystore(self, seed, passphrase):
k = keystore.from_seed(seed, passphrase, self.wallet_type == 'multisig')
if k.can_have_deterministic_lightning_xprv():
self.data['lightning_xprv'] = k.get_lightning_xprv(None)
self.on_keystore(k)
def on_bip43(self, root_seed, derivation, script_type):
k = keystore.from_bip43_rootseed(root_seed, derivation, xtype=script_type)
self.on_keystore(k)
def get_script_type_of_wallet(self) -> Optional[str]:
if len(self.keystores) > 0:
ks = self.keystores[0]
if isinstance(ks, keystore.Xpub):
return xpub_type(ks.xpub)
return None
def on_keystore(self, k: KeyStore):
has_xpub = isinstance(k, keystore.Xpub)
if has_xpub:
t1 = xpub_type(k.xpub)
if self.wallet_type == 'standard':
if has_xpub and t1 not in ['standard', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_error(_('Wrong key type') + ' %s'%t1)
self.run('choose_keystore')
return
self.keystores.append(k)
self.run('create_wallet')
elif self.wallet_type == 'multisig':
assert has_xpub
if t1 not in ['standard', 'p2wsh', 'p2wsh-p2sh']:
self.show_error(_('Wrong key type') + ' %s'%t1)
self.run('choose_keystore')
return
if k.xpub in map(lambda x: x.xpub, self.keystores):
self.show_error(_('Error: duplicate master public key'))
self.run('choose_keystore')
return
if len(self.keystores)>0:
t2 = xpub_type(self.keystores[0].xpub)
if t1 != t2:
self.show_error(_('Cannot add this cosigner:') + '\n' + "Their key type is '%s', we are '%s'"%(t1, t2))
self.run('choose_keystore')
return
if len(self.keystores) == 0:
xpub = k.get_master_public_key()
self.reset_stack()
self.keystores.append(k)
self.run('show_xpub_and_add_cosigners', xpub)
return
self.reset_stack()
self.keystores.append(k)
if len(self.keystores) < self.n:
self.run('choose_keystore')
else:
self.run('create_wallet')
def create_wallet(self):
encrypt_keystore = any(k.may_have_password() for k in self.keystores)
# note: the following condition ("if") is duplicated logic from
# wallet.get_available_storage_encryption_version()
if self.wallet_type == 'standard' and isinstance(self.keystores[0], Hardware_KeyStore):
# offer encrypting with a pw derived from the hw device
k = self.keystores[0] # type: Hardware_KeyStore
assert isinstance(self.plugin, HW_PluginBase)
try:
k.handler = self.plugin.create_handler(self)
password = k.get_password_for_storage_encryption()
except UserCancelled:
devmgr = self.plugins.device_manager
devmgr.unpair_xpub(k.xpub)
raise ChooseHwDeviceAgain()
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
raise ChooseHwDeviceAgain()
self.request_storage_encryption(
run_next=lambda encrypt_storage: self.on_password(
password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.XPUB_PASSWORD,
encrypt_keystore=False))
else:
# reset stack to disable 'back' button in password dialog
self.reset_stack()
# prompt the user to set an arbitrary password
self.request_password(
run_next=lambda password, encrypt_storage: self.on_password(
password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=encrypt_keystore),
force_disable_encrypt_cb=not encrypt_keystore)
def on_password(self, password, *, encrypt_storage: bool,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore: bool):
for k in self.keystores:
if k.may_have_password():
k.update_password(None, password)
if self.wallet_type == 'standard':
self.data['seed_type'] = self.seed_type
keys = self.keystores[0].dump()
self.data['keystore'] = keys
elif self.wallet_type == 'multisig':
for i, k in enumerate(self.keystores):
self.data['x%d/'%(i+1)] = k.dump()
elif self.wallet_type == 'imported':
if len(self.keystores) > 0:
keys = self.keystores[0].dump()
self.data['keystore'] = keys
else:
raise Exception('Unknown wallet type')
self.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=storage_enc_version,
encrypt_keystore=encrypt_keystore)
self.terminate()
def create_storage(self, path) -> Tuple[WalletStorage, WalletDB]:
if os.path.exists(path):
raise Exception('file already exists at path')
        assert self.pw_args, "pw_args not set?!"
pw_args = self.pw_args
self.pw_args = None # clean-up so that it can get GC-ed
storage = WalletStorage(path)
if pw_args.encrypt_storage:
storage.set_password(pw_args.password, enc_version=pw_args.storage_enc_version)
db = WalletDB('', manual_upgrades=False)
db.set_keystore_encryption(bool(pw_args.password) and pw_args.encrypt_keystore)
for key, value in self.data.items():
db.put(key, value)
db.load_plugins()
db.write(storage)
return storage, db
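    # Typical completion flow (a sketch; the caller-side names are assumptions):
    # a GUI subclass drives the wizard until terminate() is reached, then the
    # caller builds the wallet from the returned storage and db, e.g.
    #   storage, db = wizard.create_storage(path)
    #   wallet = Wallet(db, storage, config=config)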
def terminate(self, *, storage: WalletStorage = None,
db: WalletDB = None,
aborted: bool = False) -> None:
raise NotImplementedError() # implemented by subclasses
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def choose_seed_type(self):
seed_type = 'standard' if self.config.get('nosegwit') else 'segwit'
self.create_seed(seed_type)
def create_seed(self, seed_type):
from . import mnemonic
self.seed_type = seed_type
seed = mnemonic.Mnemonic('en').make_seed(seed_type=self.seed_type)
self.opt_bip39 = False
self.opt_ext = True
self.opt_slip39 = False
f = lambda x: self.request_passphrase(seed, x)
self.show_seed_dialog(run_next=f, seed_text=seed)
def request_passphrase(self, seed, opt_passphrase):
if opt_passphrase:
f = lambda x: self.confirm_seed(seed, x)
self.passphrase_dialog(run_next=f)
else:
self.run('confirm_seed', seed, '')
def confirm_seed(self, seed, passphrase):
f = lambda x: self.confirm_passphrase(seed, passphrase)
self.confirm_seed_dialog(run_next=f, seed=seed if self.config.get('debug_seed') else '', test=lambda x: x==seed)
def confirm_passphrase(self, seed, passphrase):
f = lambda x: self.run('create_keystore', seed, x)
if passphrase:
title = _('Confirm Seed Extension')
message = '\n'.join([
_('Your seed extension must be saved together with your seed.'),
_('Please type it here.'),
])
self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x==passphrase)
else:
f('')
def show_error(self, msg: Union[str, BaseException]) -> None:
raise NotImplementedError()
# ====================================================================================================
import collections
import re
import occi
from render_base import Renderer, check_url
eol = '\r\n'
def text_attribute_def(ad=None):
s = ad['name']
immutable = ('immutable' in ad) and ad['immutable']
required = ('required' in ad) and ad['required']
if immutable and required:
s += '{immutable required}'
elif immutable and not required:
s += '{immutable}'
elif not immutable and required:
s += '{required}'
return s
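# Example (sketch): text_attribute_def({'name': 'occi.core.id', 'immutable': True,
# 'required': True}) returns 'occi.core.id{immutable required}'.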
def text_attribute_defs(ads=None):
text_ads = []
if ads:
for ad in ads:
text_ads.append(text_attribute_def(ad))
return ' '.join(text_ads)
def text_actions(actions=None):
if actions:
return ' '.join(actions)
else:
return None
def text_category(category=None):
s = '%s;scheme="%s";class="%s"' % (category['term'], category['scheme'], category['class'])
for item in ['title', 'rel', 'location']:
if item in category:
s += ';%s="%s"' % (item, category[item])
if 'attributes' in category:
s += ';%s="%s"' % ('attributes', text_attribute_defs(category['attributes']))
if 'actions' in category:
s += ';%s="%s"' % ('actions', text_actions(category['actions']))
return s
def text_attribute_value(attribute):
"""Render OCCI Attribute value.
:param occi.Attribute attribute: attribute with a value to render
:return: attribute value rendering
:rtype: string
"""
    # avoid shadowing the built-in type()
    attr_type = attribute['type'] if 'type' in attribute else 'string'
    value = attribute['value']
    if attr_type in ['string', 'enum']:
        return '"' + value + '"'
    elif attr_type == 'number':
        return repr(value)
    elif attr_type == 'bool':
if value:
return "true"
else:
return "false"
def text_attribute_repr(attribute):
"""Render one OCCI Attribute.
"""
return attribute['name'] + '=' + text_attribute_value(attribute)
def text_link_attribute(key, value):
"""Render Link Attribute
"""
# term or quoted string, using only the quotes now
return key + '=' + '"' + value + '"'
def text_link(link):
s = '<%s>;rel="%s"' % (link['uri'], ' '.join(link['rel']))
if 'self' in link:
s += ';self="%s"' % link['self']
if 'category' in link:
s += ';category="%s"' % ' '.join(link['category'])
if 'attributes' in link:
        for key, value in link['attributes'].items():
s += ';%s' % text_link_attribute(key, value)
return s
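# A minimal sketch of the render helpers above; the dict fields mirror what
# text_category() reads, and the values are illustrative only.
def _demo_text_category():
    """Return a sample plain-text category rendering."""
    category = {
        'term': 'compute',
        'scheme': 'http://schemas.ogf.org/occi/infrastructure#',
        'class': 'kind',
        'title': 'Compute Resource',
    }
    # -> 'compute;scheme="http://schemas.ogf.org/occi/infrastructure#";class="kind";title="Compute Resource"'
    return text_category(category)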
class TextRenderer(Renderer):
"""Plain Text OCCI Renderer
Empty array is always returned as headers during rendering.
"""
reChunks = re.compile(r';\s*')
    reCategory = re.compile(r'^category:\s*', re.I)
    reLink = re.compile(r'^link:\s*', re.I)
    reAttribute = re.compile(r'^x-occi-attribute:\s*', re.I)
reKeyValue = re.compile(r'\s*?=\s*')
reKeyCheck = re.compile(r'[A-Za-z0-9_\.-]*$')
reQuoted = re.compile(r'^"(.*)"$')
reSP = re.compile(r'\s+')
reAttributes = re.compile(r'([^\{ ]+)(\{[^\}]*\})?\s*')
    reLocation = re.compile(r'^(x-occi-location|location):\s*(.*)', re.I)
reQuotedLink = re.compile(r'^<(.*)>$')
reStringUnescape = re.compile(r'\\(.)')
reNumber = re.compile(r'^([0-9\.+-]+)$')
reIntNumber = re.compile(r'^([0-9+-]+)$')
reBool = re.compile(r'^(true|false)$')
def render_category(self, category):
"""Render OCCI Category
:param occi.Category category: OCCI Category object
:return: render result
:rtype: [string, string[]]
"""
return ['Category: ' + text_category(category), []]
def render_categories(self, categories):
"""Render OCCI Category collection
:param occi.Category category[]: OCCI Category array
:return: render result
:rtype: [string, string[]]
"""
res = []
for category in categories:
cat_s, cat_h = self.render_category(category)
res.append(cat_s)
return [eol.join(res) + eol, []]
def render_resource(self, categories, links=None, attributes=None):
"""Render OCCI Resource instance
:param occi.Category category: OCCI Category object
:param occi.Link links[]: OCCI Link array
:param occi.Attribute attributes[]: OCCI Attribute array
:return: render result
:rtype: [string, string[]]
:return: render result
"""
Renderer.render_resource(self, categories, links, attributes)
cat_s, cat_h = self.render_categories(categories)
res = []
if links is not None:
for link in links:
res.append(self.render_link(link))
if attributes is not None:
for attr in attributes:
res.append(self.render_attribute(attr))
if res:
return [cat_s + eol.join(res) + eol, []]
else:
return [cat_s, []]
def render_link(self, link):
""" Render OCCI Link
:param occi.Link link: OCCI Link object
:return: render result
:rtype: string
"""
return 'Link: ' + text_link(link)
def render_attribute(self, attribute):
""" Render Attribute
:param occi.Attribute attribute: OCCI Attribute object
:return: render result
:rtype: string
"""
return 'X-OCCI-Attribute: ' + text_attribute_repr(attribute)
def render_locations(self, locations):
""" Render Locations
:param string location[]: location URI
:return: render result
:rtype: [string, string[]]
"""
        if not locations:
            return ['', []]
s = []
for location in locations:
s.append('X-OCCI-Location: ' + location + eol)
return [''.join(s), []]
def parse_attribute_defs(self, body):
""" Parse OCCI Attribute Definitions.
Example::
occi.core.id{immutable required} occi.core.title occi.core.target occi.core.source{required}
:param string body: text to parse
:return: array of OCCI Attribute Definition
:rtype: occi.AttributeDefinition[]
"""
result = []
        while True:
            m = TextRenderer.reAttributes.match(body)
            if not m:
                break
matches = m.groups()
name = matches[0]
attrs = matches[1]
body = body[m.end():]
if attrs:
attrs = attrs[1:-1]
attrs = TextRenderer.reSP.split(attrs)
attribute = occi.AttributeDefinition({'name': name})
if attrs:
for a in attrs:
if a == 'required':
attribute['required'] = True
elif a == 'immutable':
attribute['immutable'] = True
else:
raise occi.ParseError('Unknown field in OCCI attribute definitions', a)
result.append(attribute)
if body:
raise occi.ParseError('Error parsing OCCI attribute definitions', body)
return result
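    # Example (sketch): parse_attribute_defs('occi.core.id{immutable required} occi.core.title')
    # returns two occi.AttributeDefinition objects; the first carries both flags.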
def parse_actions(self, body):
"""Parse OCCI Actions.
Example::
http://schemas.ogf.org/occi/infrastructure/compute/action#start http://schemas.ogf.org/occi/infrastructure/compute/action#stop http://schemas.ogf.org/occi/infrastructure/compute/action#restart http://schemas.ogf.org/occi/infrastructure/compute/action#suspend
:param string body: text to parse
:return: array of string
:rtype: string[]
"""
actions = TextRenderer.reSP.split(body)
for action in actions:
# let's require scheme and hostname in scheme URI
if not check_url(action, scheme=True, host=True):
raise occi.ParseError('URI expected as an action', action)
return actions
def parse_category_body(self, body):
"""Parse OCCI Category body
Example::
entity;scheme="http://schemas.ogf.org/occi/core#";class="kind";title="entity";location="/entity/";attributes="occi.core.id{immutable required} occi.core.title"
:param string body: text to parse
:return: OCCI Category
:rtype: occi.Category
"""
category = occi.Category()
chunks = TextRenderer.reChunks.split(body)
if not chunks[0]:
raise occi.ParseError('Invalid format of category, term expected', body)
category['term'] = chunks[0]
# skip the first chunk (category term)
for chunk in chunks[1:]:
keyvalue = TextRenderer.reKeyValue.split(chunk, 1)
if len(keyvalue) != 2:
raise occi.ParseError('Key/value pair expected in category', chunk)
key = keyvalue[0]
value = keyvalue[1]
keymatch = TextRenderer.reKeyCheck.match(key)
if keymatch is None:
raise occi.ParseError('Invalid characters in category property', chunk)
            # every value must be quoted; only the 'class' value may omit quotes
valuematch = TextRenderer.reQuoted.match(value)
if valuematch is None and key != 'class':
raise occi.ParseError('Category value not properly quoted or unexpected EOF', chunk)
if valuematch:
value = valuematch.group(1)
# sanity check: there should not be any quotes now
if value[0] == '"' or (len(value) >= 2 and value[-1] == '"'):
raise occi.ParseError('Unexpected quotes in category', chunk)
if key == 'location':
if not check_url(value):
raise occi.ParseError('URL is not valid in OCCI Category location', chunk)
category[key] = value
elif key == 'scheme':
if not check_url(value):
raise occi.ParseError('URL is not valid in OCCI Category scheme', chunk)
category[key] = value
elif key == 'attributes':
category[key] = self.parse_attribute_defs(value)
elif key == 'actions':
category[key] = self.parse_actions(value)
elif key in ['class', 'title', 'rel']:
category[key] = value
else:
raise occi.ParseError('Unknown key "%s" in category' % key, chunk)
if not category.validate():
raise occi.ParseError('Missing fields in OCCI Category', body)
return category
def parse_link_body(self, body):
"""Parse OCCI Link body
Example::
</storage/0>;rel="http://schemas.ogf.org/occi/infrastructure#storage";self="/link/storagelink/compute_103_disk_0";category="http://schemas.ogf.org/occi/infrastructure#storagelink http://opennebula.org/occi/infrastructure#storagelink";occi.core.id="compute_103_disk_0";occi.core.title="ttylinux";occi.core.target="/storage/0";occi.core.source="/compute/103";occi.storagelink.deviceid="/dev/hda";occi.storagelink.state="active"
:param string body: text to parse
:return: OCCI Link
:rtype: occi.Link
"""
link = occi.Link()
chunks = TextRenderer.reChunks.split(body)
if not chunks[0]:
raise occi.ParseError('Invalid format of OCCI Link, URI and "rel" expected', body)
matched = TextRenderer.reQuotedLink.match(chunks[0])
if not matched:
raise occi.ParseError('URI is not properly quoted in OCCI Link', body)
link['uri'] = matched.group(1)
if not check_url(link['uri']):
raise occi.ParseError('URL is not valid in OCCI Link', link['uri'])
# skip the first chunk (URI)
for chunk in chunks[1:]:
            keyvalue = TextRenderer.reKeyValue.split(chunk, 1)
            if len(keyvalue) != 2:
                raise occi.ParseError('Key/value pair expected in OCCI Link', chunk)
            key = keyvalue[0]
            value = keyvalue[1]
keymatch = TextRenderer.reKeyCheck.match(key)
if keymatch is None:
raise occi.ParseError('Invalid characters in link property', chunk)
valuematch = TextRenderer.reQuoted.match(value)
# mandatory quoting
if key in ['rel', 'self', 'category']:
if valuematch is None:
raise occi.ParseError('Link value not properly quoted or unexpected EOF', chunk)
# quoting of the other attributes optional
if valuematch is not None:
value = valuematch.group(1)
# sanity check: there should not be any quotes now
if value[0] == '"' or (len(value) >= 2 and value[-1] == '"'):
raise occi.ParseError('Unexpected quotes in OCCI Link values', chunk)
if key == 'scheme':
if not check_url(value):
raise occi.ParseError('URL is not valid in OCCI Category scheme', chunk)
link[key] = value
elif key in ['rel', 'category']:
link[key] = TextRenderer.reSP.split(value)
elif key in ['self']:
link[key] = value
else:
if 'attributes' not in link:
link['attributes'] = collections.OrderedDict()
link['attributes'][key] = value
if not link.validate():
raise occi.ParseError('Missing fields in OCCI Link', body)
return link
def parse_attribute_value(self, body):
"""Parse OCCI Attribute value and detect its type
string, number, and boolean types are detected, enum is returned as string.
:param string body: text to parse
:return: attribute type and value
:rtype: [string, any]
"""
if not body:
raise occi.ParseError('OCCI Attribute value expected')
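        # a quoted value is a string: unquote it, then unescape any
        # backslash-escaped characters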
matched = TextRenderer.reQuoted.match(body)
if matched is not None:
t = 'string'
value = matched.group(1)
value = TextRenderer.reStringUnescape.sub(r'\1', value)
if len(value) + 2 < len(body):
raise occi.ParseError('Unexpected quotes in OCCI Attribute value', body)
return [t, value]
matched = TextRenderer.reNumber.match(body)
if matched is not None:
t = 'number'
if TextRenderer.reIntNumber.match(body) is not None:
value = int(matched.group(1))
else:
value = float(matched.group(1))
return [t, value]
matched = TextRenderer.reBool.match(body)
if matched is not None:
t = 'boolean'
if matched.group(1) == 'false':
value = False
else:
value = True
return [t, value]
raise occi.ParseError('Unexpected format of OCCI Attribute value', body)
def parse_attribute_body(self, body):
"""Parse OCCI Attribute body
:param string body: text to parse
:return: attribute type and value
:rtype: occi.Attribute
"""
keyvalue = TextRenderer.reKeyValue.split(body, 1)
if len(keyvalue) != 2:
raise occi.ParseError('Attribute invalid syntax', body)
key = keyvalue[0]
value = keyvalue[1]
keymatch = TextRenderer.reKeyCheck.match(key)
if keymatch is None:
raise occi.ParseError('Invalid characters in attribute name', key)
t, v = self.parse_attribute_value(value)
return occi.Attribute({'name': key, 'type': t, 'value': v})
def parse_categories(self, body, headers):
"""Parse OCCI Category Collection
:param string body[]: text to parse
        :param string headers[]: headers to parse (unused in text/plain)
:return: Array of OCCI Categories
:rtype: occi.Category[]
"""
categories = []
category_ids = set()
for line in body:
if not line.strip():
continue
matched = TextRenderer.reCategory.match(line)
if not matched:
raise occi.ParseError('"category" expected', line)
category = self.parse_category_body(line[matched.end():])
# check uniqueness
key = category['term'] + category['scheme']
if key in category_ids:
raise occi.ParseError('Category not unique (term "%s", scheme "%s")' % (category['term'], category['scheme']), line)
category_ids.add(key)
categories.append(category)
return categories
def parse_locations(self, body, headers):
"""Parse OCCI Entity collection
:param string body[]: text to parse
:param string headers[]: headers to parse (unused in text/plain)
        :return: Array of location URIs
:rtype: string[]
"""
locations = []
for line in body:
if not line.strip():
continue
matched = TextRenderer.reLocation.match(line)
if not matched:
raise occi.ParseError('OCCI Location expected in OCCI Entity collection', line)
uri = matched.group(2)
if not check_url(uri, scheme=True, host=True):
raise occi.ParseError('Invalid URI in OCCI Entity collection', line)
locations.append(uri)
return locations
    def parse_resource(self, body, headers):
"""Parse OCCI Resource instance
:param string body[]: text to parse
:param string headers[]: headers to parse (unused in text/plain)
:return: categories, links, and attributes
:rtype: [occi.Category categories[], occi.Link links[], occi.Attribute attributes[]]
"""
categories = []
links = []
attributes = []
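        # each non-empty line must start with Category, Link, or
        # X-OCCI-Attribute; anything else is a parse error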
for line in body:
if not line.strip():
continue
line = line.rstrip('\r\n')
matched = TextRenderer.reCategory.match(line)
if matched is not None:
s = line[matched.end():]
categories.append(self.parse_category_body(s))
continue
matched = TextRenderer.reLink.match(line)
if matched is not None:
s = line[matched.end():]
links.append(self.parse_link_body(s))
continue
            matched = TextRenderer.reAttribute.match(line)
            if matched is not None:
                s = line[matched.end():]
                attributes.append(self.parse_attribute_body(s))
                continue
            raise occi.ParseError('Unexpected content of OCCI Resource instance', line)
return [categories, links, attributes]
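# A minimal usage sketch (not part of the original module): assuming the occi
# model classes above, a render/parse round trip of a single attribute might
# look like the commented lines below. Constructor shapes are illustrative.
#
#   renderer = TextRenderer()
#   attr = occi.Attribute({'name': 'occi.core.title', 'type': 'string',
#                          'value': 'my-vm'})
#   line = renderer.render_attribute(attr)   # 'X-OCCI-Attribute: ...'
#   prefix = 'X-OCCI-Attribute: '
#   parsed = renderer.parse_attribute_body(line[len(prefix):])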
| |
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import mock
from sqlalchemy.orm.exc import DetachedInstanceError
from cloudify.models_states import VisibilityState
from manager_rest import manager_exceptions, utils
from manager_rest.test import base_test
from manager_rest.storage import models, db
class StorageManagerTests(base_test.BaseServerTestCase):
def test_store_load_delete_blueprint(self):
now = utils.get_formatted_timestamp()
blueprint = models.Blueprint(id='blueprint-id',
created_at=now,
updated_at=now,
description=None,
plan={'name': 'my-bp'},
main_file_name='aaa')
self.sm.put(blueprint)
blueprint_from_list = self.sm.list(models.Blueprint)[0]
blueprint_restored = self.sm.get(models.Blueprint, 'blueprint-id')
bp_from_delete = self.sm.delete(blueprint_restored)
self.assertEqual(blueprint.to_dict(), blueprint_from_list.to_dict())
self.assertEqual(blueprint.to_dict(), blueprint_restored.to_dict())
        # in the blueprint returned from the delete operation only 'id' is
        # guaranteed to be set
self.assertEqual(blueprint.id, bp_from_delete.id)
blueprints_list = self.sm.list(models.Blueprint)
self.assertEqual(0, len(blueprints_list))
def test_get_blueprint_deployments(self):
now = utils.get_formatted_timestamp()
blueprint = models.Blueprint(id='blueprint-id',
created_at=now,
updated_at=now,
description=None,
plan={'name': 'my-bp'},
main_file_name='aaa')
another_blueprint = models.Blueprint(id='another-blueprint-id',
created_at=now,
updated_at=now,
description=None,
plan={'name': 'my-bp'},
main_file_name='aaa')
self.sm.put(blueprint)
self.sm.put(another_blueprint)
deployment1 = models.Deployment(id='dep-1',
created_at=now,
updated_at=now,
permalink=None,
description=None,
workflows={},
inputs={},
policy_types={},
policy_triggers={},
groups={},
scaling_groups={},
outputs={})
deployment1.blueprint = blueprint
self.sm.put(deployment1)
deployment2 = models.Deployment(id='dep-2',
created_at=now,
updated_at=now,
permalink=None,
description=None,
workflows={},
inputs={},
policy_types={},
policy_triggers={},
groups={},
scaling_groups={},
outputs={})
deployment2.blueprint = blueprint
self.sm.put(deployment2)
deployment3 = models.Deployment(id='dep-3',
created_at=now,
updated_at=now,
description=None,
permalink=None,
workflows={},
inputs={},
policy_types={},
policy_triggers={},
groups={},
scaling_groups={},
outputs={})
deployment3.blueprint = another_blueprint
self.sm.put(deployment3)
filters_bp = {'blueprint_id': 'blueprint-id'}
blueprint_deployments = \
self.sm.list(models.Deployment, filters=filters_bp)
self.assertEqual(2, len(blueprint_deployments))
if blueprint_deployments[0].id == deployment1.id:
self.assertEqual(deployment1.to_dict(),
blueprint_deployments[0].to_dict())
self.assertEqual(deployment2.to_dict(),
blueprint_deployments[1].to_dict())
else:
self.assertEqual(deployment2.to_dict(),
blueprint_deployments[0].to_dict())
self.assertEqual(deployment1.to_dict(),
blueprint_deployments[1].to_dict())
def test_model_serialization(self):
now = utils.get_formatted_timestamp()
blueprint = models.Blueprint(id='blueprint-id',
created_at=now,
updated_at=now,
description=None,
plan={'name': 'my-bp'},
main_file_name='aaa')
self.sm.put(blueprint)
now2 = utils.get_formatted_timestamp()
dep = models.Deployment(id='dep-id',
created_at=now2,
updated_at=now2,
permalink=None,
description=None,
workflows={},
inputs={},
policy_types={},
policy_triggers={},
groups={},
scaling_groups={},
outputs={},
capabilities={})
dep.blueprint = blueprint
self.sm.put(dep)
serialized_dep = dep.to_response()
self.assertEqual(40, len(serialized_dep))
self.assertEqual(dep.id, serialized_dep['id'])
self.assertEqual(dep.created_at, serialized_dep['created_at'])
self.assertEqual(dep.updated_at, serialized_dep['updated_at'])
self.assertEqual(dep.blueprint_id, serialized_dep['blueprint_id'])
self.assertEqual(dep.permalink, serialized_dep['permalink'])
self.assertEqual(dep.tenant.name, serialized_dep['tenant_name'])
self.assertEqual(dep.description, None)
self.assertEqual(dep.resource_tags, None)
# `blueprint_id` isn't a regular column, but a relationship
serialized_dep.pop('blueprint_id')
serialized_dep.pop('tenant_name')
serialized_dep.pop('created_by')
serialized_dep.pop('site_name')
serialized_dep.pop('latest_execution_status')
serialized_dep.pop('environment_type')
serialized_dep.pop('latest_execution_total_operations')
serialized_dep.pop('latest_execution_finished_operations')
serialized_dep.pop('has_sub_deployments')
serialized_dep.pop('create_execution')
serialized_dep.pop('latest_execution')
        # Deprecated columns that are still added to the response for
        # backwards compatibility
serialized_dep.pop('resource_availability')
serialized_dep.pop('private_resource')
deserialized_dep = models.Deployment(**serialized_dep)
self.assertEqual(dep.id, deserialized_dep.id)
self.assertEqual(dep.created_at, deserialized_dep.created_at)
self.assertEqual(dep.updated_at, deserialized_dep.updated_at)
self.assertEqual(dep.permalink, deserialized_dep.permalink)
self.assertEqual(dep.description, deserialized_dep.description)
def test_fields_query(self):
now = utils.get_formatted_timestamp()
blueprint = models.Blueprint(id='blueprint-id',
created_at=now,
updated_at=now,
description=None,
plan={'name': 'my-bp'},
main_file_name='aaa')
self.sm.put(blueprint)
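        # detach the stored instance so the following get() must hit the DB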
db.session.expunge(blueprint)
blueprint_restored = self.sm.get(
models.Blueprint,
'blueprint-id',
include=['id', 'created_at']
)
self.assertEqual('blueprint-id', blueprint_restored.id)
self.assertEqual(now, blueprint_restored.created_at)
db.session.expunge(blueprint_restored)
for attrname in ['updated_at', 'plan', 'main_file_name']:
with self.assertRaises(DetachedInstanceError):
# the attribute cannot be loaded - and it was not loaded before
getattr(blueprint_restored, attrname)
@mock.patch('manager_rest.storage.storage_manager.'
'config.instance.default_page_size',
10)
def test_all_results_query(self):
for i in range(20):
secret = models.Secret(id='secret_{}'.format(i),
value='value',
tenant=self.tenant,
creator=self.user,
visibility=VisibilityState.TENANT)
db.session.add(secret)
secret_list = self.sm.list(
models.Secret,
include=['id'],
)
self.assertEqual(10, len(secret_list))
secret_list = self.sm.list(
models.Secret,
include=['id'],
get_all_results=True
)
self.assertEqual(20, len(secret_list))
def test_substr_filter_uses_or_operator(self):
now = utils.get_formatted_timestamp()
for i in range(3):
secret = models.Secret(id=f'secret_{i}',
value=f'value_{i}',
created_at=now,
updated_at=now,
visibility=VisibilityState.TENANT)
self.sm.put(secret)
secrets_list = self.sm.list(
models.Secret,
substr_filters={'id': 'secret_0', 'value': 'value_2'}
)
self.assertEqual({secret.id for secret in secrets_list},
{'secret_0', 'secret_2'})
def test_list_with_empty_filter(self):
secret = models.Secret(id='secret',
value='value',
tenant=self.tenant,
creator=self.user,
visibility=VisibilityState.TENANT)
db.session.add(secret)
retrieved = self.sm.list(models.Secret, filters={'_storage_id': []})
assert len(retrieved) == 0
class TestTransactions(base_test.BaseServerTestCase):
def _make_secret(self, id, value):
# these tests are using secrets, but they could just as well
# use any other model, we just need to create _something_ in the db
now = datetime.utcnow()
return models.Secret(
id=id,
value=value,
created_at=now,
updated_at=now,
visibility=VisibilityState.TENANT
)
def test_commits(self):
"""Items created in the transaction are stored"""
with self.sm.transaction():
self.sm.put(self._make_secret('tx_secret', 'value'))
# rollback the current transaction - if the secret was committed
# indeed, then this will be a no-op
db.session.rollback()
secret = self.sm.get(models.Secret, 'tx_secret')
assert secret.value == 'value'
def test_before_commits(self):
"""Items created before the transaction are stored as well"""
self.sm.put(self._make_secret('tx_secret1', 'value1'))
with self.sm.transaction():
self.sm.put(self._make_secret('tx_secret2', 'value2'))
assert self.sm.get(models.Secret, 'tx_secret1').value == 'value1'
assert self.sm.get(models.Secret, 'tx_secret2').value == 'value2'
def test_exception_rollback(self):
"""If the transaction throws, items created in it are not stored"""
with self.assertRaisesRegex(RuntimeError, 'test error'):
with self.sm.transaction():
self.sm.put(self._make_secret('tx_secret', 'value'))
raise RuntimeError('test error')
with self.assertRaises(manager_exceptions.NotFoundError):
self.sm.get(models.Secret, 'tx_secret')
def test_exception_before_commits(self):
"""items created before a transaction that throws, are still stored"""
with self.assertRaisesRegex(RuntimeError, 'test error'):
self.sm.put(self._make_secret('tx_secret1', 'value1'))
with self.sm.transaction():
self.sm.put(self._make_secret('tx_secret2', 'value2'))
raise RuntimeError('test error')
assert self.sm.get(models.Secret, 'tx_secret1').value == 'value1'
with self.assertRaises(manager_exceptions.NotFoundError):
self.sm.get(models.Secret, 'tx_secret2')
def test_subtransactions(self):
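        # nested transactions are disallowed by the storage manager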
with self.assertRaisesRegex(RuntimeError, 'disallowed'):
with self.sm.transaction():
with self.sm.transaction():
pass
class TestGetErrorFormat(base_test.BaseServerTestCase):
"""Tests for the 404 not found error message formatting"""
def test_get_by_id(self):
# Requested `Deployment` with ID `dep` was not found
message = self._get_err_message('dep')
assert 'not found' in message
assert 'with ID `dep`' in message
assert 'filters' not in message
def test_get_by_id_and_filters(self):
# `Providing an element_id with filters is ambiguous`
# (filters: {'blueprint_id': 'bp'})
message = self._get_err_message('dep',
{'id': 'dep', 'blueprint_id': 'bp'})
assert 'element_id' in message
assert 'filters' in message
assert 'ambiguous' in message
def test_get_by_filters(self):
# Requested `Deployment` with ID `dep` was not found
# (filters: {'blueprint_id': 'bp'})"
message = self._get_err_message(None, {'id': 'dep',
'blueprint_id': 'bp'})
assert 'filters' in message
assert "'blueprint_id': 'bp'" in message
def _get_err_message(self, element_id, filters=None):
message = ''
try:
self.sm.get(models.Deployment, element_id, filters=filters)
except (manager_exceptions.NotFoundError, RuntimeError) as e:
message = str(e)
return message
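# A minimal sketch (not from the original test module) of the transaction
# contract exercised above, assuming a storage manager `sm` like the one the
# tests use:
#
#   with sm.transaction():        # commits on clean exit
#       sm.put(obj)               # visible after the block
#   # an exception inside the block rolls back everything put() inside it,
#   # but not objects stored before the block was entered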
| |
import unittest
import itertools
import numpy as np
from numba.misc.dummyarray import Array
class TestSlicing(unittest.TestCase):
def assertSameContig(self, arr, nparr):
attrs = 'C_CONTIGUOUS', 'F_CONTIGUOUS'
for attr in attrs:
if arr.flags[attr] != nparr.flags[attr]:
if arr.size == 0 and nparr.size == 0:
                    # numpy <= 1.7 bug: some empty arrays are contiguous and
                    # some are not
pass
else:
self.fail("contiguous flag mismatch:\ngot=%s\nexpect=%s" %
(arr.flags, nparr.flags))
#### 1D
def test_slice0_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
self.assertSameContig(arr, nparr)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[x:]
got = arr[x:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice1_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[:x]
got = arr[:x]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice2_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x, y in itertools.product(xx, xx):
expect = nparr[x:y]
got = arr[x:y]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
#### 2D
def test_slice0_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, 0, 1, 2
for x in xx:
expect = nparr[x:]
got = arr[x:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
for x, y in itertools.product(xx, xx):
expect = nparr[x:, y:]
got = arr[x:, y:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice1_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, 0, 2
for x in xx:
expect = nparr[:x]
got = arr[:x]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
self.assertSameContig(got, expect)
for x, y in itertools.product(xx, xx):
expect = nparr[:x, :y]
got = arr[:x, :y]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
self.assertSameContig(got, expect)
def test_slice2_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, 0, 2
for s, t, u, v in itertools.product(xx, xx, xx, xx):
expect = nparr[s:t, u:v]
got = arr[s:t, u:v]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
        for x, y in itertools.product(xx, xx):
            expect = nparr[x:y, x:y]
            got = arr[x:y, x:y]
            self.assertSameContig(got, expect)
            self.assertEqual(got.shape, expect.shape)
            self.assertEqual(got.strides, expect.strides)
#### Strided
def test_strided_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 1, 2
for x in xx:
expect = nparr[::x]
got = arr[::x]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_strided_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 1, 2
for a, b in itertools.product(xx, xx):
expect = nparr[::a, ::b]
got = arr[::a, ::b]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_strided_3d(self):
nparr = np.empty((4, 5, 6))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 1, 2
for a, b, c in itertools.product(xx, xx, xx):
expect = nparr[::a, ::b, ::c]
got = arr[::a, ::b, ::c]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_issue_2766(self):
z = np.empty((1, 2, 3))
z = np.transpose(z, axes=(2, 0, 1))
arr = Array.from_desc(0, z.shape, z.strides, z.itemsize)
self.assertEqual(z.flags['C_CONTIGUOUS'], arr.flags['C_CONTIGUOUS'])
self.assertEqual(z.flags['F_CONTIGUOUS'], arr.flags['F_CONTIGUOUS'])
class TestReshape(unittest.TestCase):
def test_reshape_2d2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(5, 4)
got = arr.reshape(5, 4)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_2d1d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(5 * 4)
got = arr.reshape(5 * 4)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_3d3d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(5, 3, 4)
got = arr.reshape(5, 3, 4)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_3d2d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(3 * 4, 5)
got = arr.reshape(3 * 4, 5)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_3d1d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(3 * 4 * 5)
got = arr.reshape(3 * 4 * 5)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer2d2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(-1, 4)
got = arr.reshape(-1, 4)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer2d1d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(-1)
got = arr.reshape(-1)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer3d3d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(5, -1, 4)
got = arr.reshape(5, -1, 4)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer3d2d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(3, -1)
got = arr.reshape(3, -1)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer3d1d(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
expect = nparr.reshape(-1)
got = arr.reshape(-1)[0]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_reshape_infer_two_unknowns(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
with self.assertRaises(ValueError) as raises:
arr.reshape(-1, -1, 3)
self.assertIn('can only specify one unknown dimension', str(raises.exception))
def test_reshape_infer_invalid_shape(self):
nparr = np.empty((3, 4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
with self.assertRaises(ValueError) as raises:
arr.reshape(-1, 7)
self.assertIn('cannot infer valid shape for unknown dimension', str(raises.exception))
class TestSqueeze(unittest.TestCase):
def test_squeeze(self):
nparr = np.empty((1, 2, 1, 4, 1, 3))
arr = Array.from_desc(
0, nparr.shape, nparr.strides, nparr.dtype.itemsize
)
def _assert_equal_shape_strides(arr1, arr2):
self.assertEqual(arr1.shape, arr2.shape)
self.assertEqual(arr1.strides, arr2.strides)
_assert_equal_shape_strides(arr, nparr)
_assert_equal_shape_strides(arr.squeeze()[0], nparr.squeeze())
for axis in (0, 2, 4, (0, 2), (0, 4), (2, 4), (0, 2, 4)):
_assert_equal_shape_strides(
arr.squeeze(axis=axis)[0], nparr.squeeze(axis=axis)
)
def test_squeeze_invalid_axis(self):
nparr = np.empty((1, 2, 1, 4, 1, 3))
arr = Array.from_desc(
0, nparr.shape, nparr.strides, nparr.dtype.itemsize
)
with self.assertRaises(ValueError):
arr.squeeze(axis=1)
with self.assertRaises(ValueError):
arr.squeeze(axis=(2, 3))
class TestExtent(unittest.TestCase):
def test_extent_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
s, e = arr.extent
self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize)
def test_extent_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
s, e = arr.extent
self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize)
def test_extent_iter_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
[ext] = list(arr.iter_contiguous_extent())
self.assertEqual(ext, arr.extent)
def test_extent_iter_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
[ext] = list(arr.iter_contiguous_extent())
self.assertEqual(ext, arr.extent)
self.assertEqual(len(list(arr[::2].iter_contiguous_extent())), 2)
class TestIterate(unittest.TestCase):
def test_for_loop(self):
        # regression test for issue #4201
N = 5
nparr = np.empty(N)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
x = 0 # just a placeholder
# this loop should not raise AssertionError
for val in arr:
x = val
if __name__ == '__main__':
unittest.main()
| |
""" Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import itertools
import traceback
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
dec
)
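# both helpers encode logical implication (a => b); they are used below in
# assertions such as "if the input is a matrix, the output is a matrix too"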
def ifthen(a, b):
return not a or b
def imply(a, b):
return not a or b
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, **kw):
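    # choose the comparison precision from the input dtype: single precision
    # gets ~6 decimal digits, double precision ~12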
if asarray(a).dtype.type in (single, csingle):
decimal = 6
else:
decimal = 12
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
class LinalgCase(object):
def __init__(self, name, a, b, exception_cls=None):
assert isinstance(name, str)
self.name = name
self.a = a
self.b = b
self.exception_cls = exception_cls
def check(self, do):
if self.exception_cls is None:
do(self.a, self.b)
else:
assert_raises(self.exception_cls, do, self.a, self.b)
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
#
# Base test cases
#
np.random.seed(1234)
SQUARE_CASES = [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble)),
LinalgCase("empty",
atleast_2d(array([], dtype = double)),
atleast_2d(array([], dtype = double)),
linalg.LinAlgError),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
LinalgCase("matrix_b_only",
array([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
matrix([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
]
NONSQUARE_CASES = [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle),
array([2.+1j, 1.+2j, 3.-3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(11)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(5)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(1)),
]
HERMITIAN_CASES = [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2+3j], [2-3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2+3j], [2-3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
atleast_2d(array([], dtype = double)),
None,
linalg.LinAlgError),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_a_and_b",
matrix([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
]
#
# Gufunc test cases
#
GENERALIZED_SQUARE_CASES = []
GENERALIZED_NONSQUARE_CASES = []
GENERALIZED_HERMITIAN_CASES = []
for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES),
(GENERALIZED_NONSQUARE_CASES, NONSQUARE_CASES),
(GENERALIZED_HERMITIAN_CASES, HERMITIAN_CASES)):
for case in src:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2*case.a, 3*case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7*case.b, 6*case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
case.exception_cls)
tgt.append(new_case)
a = np.array([case.a]*2*3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b]*2*3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
case.exception_cls)
tgt.append(new_case)
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)]*x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
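    # for a 3-d input this yields the cartesian product
    # {1, -4} x {1, 3, -4} x {1, 3, -4} of per-axis stride factors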
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a*b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert np.all(xi == x)
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
try:
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
except NotImplementedError:
pass
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
try:
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
except NotImplementedError:
pass
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
try:
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
except NotImplementedError:
pass
for src in (SQUARE_CASES,
NONSQUARE_CASES,
HERMITIAN_CASES,
GENERALIZED_SQUARE_CASES,
GENERALIZED_NONSQUARE_CASES,
GENERALIZED_HERMITIAN_CASES):
new_cases = []
for case in src:
for a, a_tag in _stride_comb_iter(case.a):
for b, b_tag in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_tag + "_" + b_tag, a, b,
exception_cls=case.exception_cls)
new_cases.append(new_case)
src.extend(new_cases)
#
# Test different routines against the above cases
#
def _check_cases(func, cases):
for case in cases:
try:
case.check(func)
except Exception:
msg = "In test case: %r\n\n" % case
msg += traceback.format_exc()
raise AssertionError(msg)
class LinalgTestCase(object):
def test_sq_cases(self):
_check_cases(self.do, SQUARE_CASES)
class LinalgNonsquareTestCase(object):
def test_sq_cases(self):
_check_cases(self.do, NONSQUARE_CASES)
class LinalgGeneralizedTestCase(object):
@dec.slow
def test_generalized_sq_cases(self):
_check_cases(self.do, GENERALIZED_SQUARE_CASES)
class LinalgGeneralizedNonsquareTestCase(object):
@dec.slow
def test_generalized_nonsq_cases(self):
_check_cases(self.do, GENERALIZED_NONSQUARE_CASES)
class HermitianTestCase(object):
def test_herm_cases(self):
_check_cases(self.do, HERMITIAN_CASES)
class HermitianGeneralizedTestCase(object):
@dec.slow
def test_generalized_herm_cases(self):
_check_cases(self.do, GENERALIZED_HERMITIAN_CASES)
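# Helpers that emulate matmul-style broadcasting over stacked matrices, so the
# same checks work for plain 2-d cases and the "generalized" (stacked) cases.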
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0,:]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:,:, 0:0]
result = linalg.solve(a, b[:,:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:,0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class TestInv(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(a, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestEig(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[...,None,:],
rtol=get_rtol(evalues.dtype))
assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
u, s, vt = linalg.svd(a, 0)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[...,None,:],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False)
assert_equal(s.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5)
class TestCond2(LinalgTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5)
class TestCondInf(object):
def test(self):
A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 3.)
class TestPinv(LinalgTestCase):
def do(self, a, b):
a_ginv = linalg.pinv(a)
assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0]))
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
class TestDet(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase):
def do(self, a, b):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if len(np.asarray(b).shape) == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
noninv = array([[1, 0], [0, 0]])
arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
large = identity(10)
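    # swap the first two rows of the identity to get a permutation matrix
    # (still invertible)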
t = large[1,:].copy()
large[1,:] = large[0,:]
large[0,:] = t
def test_large_power(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90)
def test_large_power_trailing_zero(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2))
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity(M.shape[0]))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_one(self):
def tz(M):
mz = matrix_power(M, 1)
assert_equal(mz, M)
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
assert_equal(mz, dot(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
assert_almost_equal(identity(M.shape[0]), dot(mz, M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
yield tz, M
def test_invert_noninvertible(self):
import numpy.linalg
assert_raises(numpy.linalg.linalg.LinAlgError,
lambda: matrix_power(self.noninv, -1))
class TestBoolPower(object):
def test_square(self):
A = array([[True, False], [True, True]])
assert_equal(matrix_power(A, 2), A)
class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
ev.sort(axis=-1)
evalues.sort(axis=-1)
assert_allclose(ev, evalues,
rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
ev2.sort(axis=-1)
assert_allclose(ev2, evalues,
rtol=get_rtol(ev.dtype))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(np.sort(w), tgt, rtol=rtol)
class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
ev.sort(axis=-1)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[...,None,:] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
ev2.sort(axis=-1)
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[...,None,:] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(np.sort(w), tgt, rtol=rtol)
class _TestNorm(object):
dt = None
dec = None
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0/25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_matrix(self):
A = matrix([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84**0.5)
assert_almost_equal(norm(A, 'fro'), 84**0.5)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
assert_almost_equal(norm(A, ord=order), norm(A, ord=order,
axis=(0, 1)))
n = norm(B, ord=order, axis=(1, 2))
expected = [norm(B[k], ord=order) for k in range(B.shape[0])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(2, 1))
expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(0, 2))
expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(0, 1))
expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])]
assert_almost_equal(n, expected)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'` raises a
# ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(ValueError, norm, B, None, 3)
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6+7j
d[1] = -6+7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
class TestNormDouble(_TestNorm):
dt = np.double
dec = 12
class TestNormSingle(_TestNorm):
dt = np.float32
dec = 6
class TestNormInt64(_TestNorm):
dt = np.int64
dec = 12
class TestMatrixRank(object):
def test_matrix_rank(self):
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
# rank deficient matrix
        I = np.eye(4)
        I[-1, -1] = 0.
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
# greater than 2 dimensions raises error
yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
# works on scalar
yield assert_equal, matrix_rank(1), 1
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR(object):
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
def test_qr_empty(self):
a = np.zeros((0, 2))
assert_raises(linalg.LinAlgError, linalg.qr, a)
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
b = a.astype(np.single)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = array([[1, 2], [3, 4]])
b = array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
from nose import SkipTest
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
raise SkipTest("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
if "DORGQR parameter number 5" in str(e):
# success
os._exit(os.EX_OK)
except:
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != os.EX_OK or os.WIFSIGNALED(status):
raise SkipTest('Numpy xerbla not linked in.')
if __name__ == "__main__":
run_module_suite()
| |
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import bisect
from collections import defaultdict
import mmap
import os
import sys
import tempfile
import threading
from .context import reduction, assert_spawning
from . import util
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
import _winapi
class Arena(object):
"""
A shared memory area backed by anonymous memory (Windows).
"""
_rand = tempfile._RandomNameSequence()
def __init__(self, size):
self.size = size
for i in range(100):
name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
buf = mmap.mmap(-1, size, tagname=name)
if _winapi.GetLastError() == 0:
break
# We have reopened a preexisting mmap.
buf.close()
else:
raise FileExistsError('Cannot find name for new mmap')
self.name = name
self.buffer = buf
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
# Reopen existing mmap
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
# XXX Temporarily preventing buildbot failures while determining
# XXX the correct long-term fix. See issue 23060
#assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
else:
class Arena(object):
"""
A shared memory area backed by a temporary file (POSIX).
"""
if sys.platform == 'linux':
_dir_candidates = ['/dev/shm']
else:
_dir_candidates = []
def __init__(self, size, fd=-1):
self.size = size
self.fd = fd
if fd == -1:
# Arena is created anew (if fd != -1, it means we're coming
# from rebuild_arena() below)
self.fd, name = tempfile.mkstemp(
prefix='pym-%d-'%os.getpid(),
dir=self._choose_dir(size))
os.unlink(name)
util.Finalize(self, os.close, (self.fd,))
os.ftruncate(self.fd, size)
self.buffer = mmap.mmap(self.fd, self.size)
def _choose_dir(self, size):
# Choose a non-storage backed directory if possible,
# to improve performance
for d in self._dir_candidates:
st = os.statvfs(d)
if st.f_bavail * st.f_frsize >= size: # enough free space?
return d
return util.get_temp_dir()
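# Illustrative note (the sizes here are assumptions, not part of the module):
# on Linux, _choose_dir() prefers the RAM-backed /dev/shm when it reports
# enough free space for the requested arena, so a small request typically
# lands there while an oversized one falls back to util.get_temp_dir().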
def reduce_arena(a):
if a.fd == -1:
raise ValueError('Arena is unpicklable because '
'forking was enabled when it was created')
return rebuild_arena, (a.size, reduction.DupFd(a.fd))
def rebuild_arena(size, dupfd):
return Arena(size, dupfd.detach())
reduction.register(Arena, reduce_arena)
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
# Minimum malloc() alignment
_alignment = 8
_DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB
_DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
# Current arena allocation size
self._size = size
# A sorted list of available block sizes in arenas
self._lengths = []
# Free block management:
# - map each block size to a list of `(Arena, start, stop)` blocks
self._len_to_seq = {}
# - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
# starting at that offset
self._start_to_block = {}
# - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
# ending at that offset
self._stop_to_block = {}
# Map arenas to their `(Arena, start, stop)` blocks in use
self._allocated_blocks = defaultdict(set)
self._arenas = []
# List of pending blocks to free - see comment in free() below
self._pending_free_blocks = []
# Statistics
self._n_mallocs = 0
self._n_frees = 0
@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask
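    # Worked example for the mask trick above (illustrative values): with
    # alignment=8, mask == 0b111, so _roundup(1, 8) == 8 and
    # _roundup(9, 8) == 16; adding the mask and then clearing the low bits
    # rounds n up to the next multiple of the power-of-two alignment.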
def _new_arena(self, size):
# Create a new arena with at least the given *size*
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
# We carve larger and larger arenas, for efficiency, until we
# reach a large-ish size (roughly L3 cache-sized)
if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
self._size *= 2
util.info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
def _discard_arena(self, arena):
# Possibly delete the given (unused) arena
length = arena.size
# Reusing an existing arena is faster than creating a new one, so
# we only reclaim space if it's large enough.
if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
return
blocks = self._allocated_blocks.pop(arena)
assert not blocks
del self._start_to_block[(arena, 0)]
del self._stop_to_block[(arena, length)]
self._arenas.remove(arena)
seq = self._len_to_seq[length]
seq.remove((arena, 0, length))
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
return self._new_arena(size)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _add_free_block(self, block):
# make block available and try to merge with its neighbours in the arena
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def _remove_allocated_block(self, block):
arena, start, stop = block
blocks = self._allocated_blocks[arena]
blocks.remove((start, stop))
if not blocks:
# Arena is entirely free, discard it from this process
self._discard_arena(arena)
def _free_pending_blocks(self):
# Free all the blocks in the pending list - called with the lock held.
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._add_free_block(block)
self._remove_allocated_block(block)
def free(self, block):
# free a block returned by malloc()
# Since free() can be called asynchronously by the GC, it could happen
# that it's called while self._lock is held: in that case,
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
# immediately, the block is added to a list of blocks to be freed
# synchronously sometimes later from malloc() or free(), by calling
# _free_pending_blocks() (appending and retrieving from a list is not
# strictly thread-safe but under CPython it's atomic thanks to the GIL).
if os.getpid() != self._lastpid:
raise ValueError(
"My pid ({0:n}) is not last pid {1:n}".format(
                    os.getpid(), self._lastpid))
if not self._lock.acquire(False):
# can't acquire the lock right now, add the block to the list of
# pending blocks to free
self._pending_free_blocks.append(block)
else:
# we hold the lock
try:
self._n_frees += 1
self._free_pending_blocks()
self._add_free_block(block)
self._remove_allocated_block(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
if size < 0:
raise ValueError("Size {0:n} out of range".format(size))
if sys.maxsize <= size:
raise OverflowError("Size {0:n} too large".format(size))
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
with self._lock:
self._n_mallocs += 1
# allow pending blocks to be marked available
self._free_pending_blocks()
size = self._roundup(max(size, 1), self._alignment)
(arena, start, stop) = self._malloc(size)
real_stop = start + size
if real_stop < stop:
# if the returned block is larger than necessary, mark
# the remainder available
self._add_free_block((arena, real_stop, stop))
self._allocated_blocks[arena].add((start, real_stop))
return (arena, start, real_stop)
#
# Class wrapping a block allocated out of a Heap -- can be inherited by child process
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
if size < 0:
raise ValueError("Size {0:n} out of range".format(size))
if sys.maxsize <= size:
raise OverflowError("Size {0:n} too large".format(size))
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
util.Finalize(self, BufferWrapper._heap.free, args=(block,))
def create_memoryview(self):
(arena, start, stop), size = self._state
return memoryview(arena.buffer)[start:start+size]
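# Minimal usage sketch (hedged; the variable names are illustrative only):
#
#     bw = BufferWrapper(1024)        # carve 1 KiB out of the shared heap
#     view = bw.create_memoryview()   # zero-copy view over the mmap block
#     view[:5] = b"hello"             # visible to processes inheriting it
#
# The block is returned to the heap by util.Finalize once `bw` is collected.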
| |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""main module."""
import logging
import os
import pathlib
import platform
import pprint
import shutil
import traceback
from logging.handlers import TimedRotatingFileHandler
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Iterator, List, Optional, Tuple
from urllib.parse import urlparse
import cfscrape
import click
import mechanicalsoup
import requests
import structlog
import werkzeug
from bs4 import BeautifulSoup
from flask import Flask
from flask import __version__ as flask_version # type: ignore
from flask import cli as flask_cli
from flask import send_from_directory
from flask_admin import Admin
from flask_restful import Api
from hydrus import Client
from hydrus.utils import yield_chunks
from . import models, parse, views
from .__init__ import __version__, db_version
from .models import iqdb_url_dict
from .utils import default_db_path, thumb_folder, user_data_dir
db = "~/images/! tagged"
DEFAULT_PLACE = "iqdb"
minsim = 75
services = ["1", "2", "3", "4", "5", "6", "10", "11"]
forcegray = False
log = structlog.getLogger()
def get_iqdb_result(image: str, iqdb_url: str = "http://iqdb.org/") -> Any:
"""Get iqdb result."""
files = {"file": open(image, "rb")}
resp = requests.post(iqdb_url, files=files, timeout=10)
html_text = BeautifulSoup(resp.text, "lxml")
return parse.parse_result(html_text)
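# Hedged usage sketch for get_iqdb_result (the path below is illustrative;
# the exact return shape is whatever parse.parse_result extracts):
#
#     matches = get_iqdb_result("/tmp/sample.jpg")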
def init_program(db_path: str = default_db_path) -> None:
"""Init program."""
# create user data dir
pathlib.Path(user_data_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(thumb_folder).mkdir(parents=True, exist_ok=True)
models.init_db(db_path, db_version)
def write_url_from_match_result(match_result: models.ImageMatch, folder: Optional[str] = None) -> None:
"""Write url from match result."""
netloc = urlparse(match_result.link).netloc
sanitized_netloc = netloc.replace(".", "_")
text_file_basename = sanitized_netloc + ".txt"
text_file = os.path.join(folder, text_file_basename) if folder is not None else text_file_basename
with open(text_file, "a") as f:
f.write(match_result.link)
f.write("\n")
def get_result_on_windows(
image: str,
place: str,
resize: Optional[bool] = False,
size: Optional[Tuple[int, int]] = None,
browser: Optional[mechanicalsoup.StatefulBrowser] = None,
) -> List[models.ImageMatch]:
"""Get result on Windows.
Args:
image: image path
place: iqdb place code
resize: resize the image
size: resized image size
browser: browser instance
Returns:
matching items
"""
result = []
# temp_f
with NamedTemporaryFile(mode="w+t", delete=False) as temp_f, NamedTemporaryFile(mode="w+t", delete=False) as thumb_temp_f:
temp_file_name = temp_f.name
thumb_temp_file_name = thumb_temp_f.name
# copy to temp file
shutil.copyfile(image, temp_f.name)
# get image to be posted based on user input
try:
post_img = models.get_posted_image(img_path=temp_f.name, resize=resize, size=size, thumb_path=thumb_temp_f.name)
except OSError as e:
raise OSError(str(e) + " when processing {}".format(image)) from e
# append data to result
for img_m_rel_set in post_img.imagematchrelationship_set:
for item_set in img_m_rel_set.imagematch_set:
if item_set.search_place_verbose == place:
result.append(item_set)
if not result:
url, im_place = iqdb_url_dict[place]
use_requests = place != "e621"
post_img_path = temp_f.name if not resize else thumb_temp_f.name
page = models.get_page_result(image=post_img_path, url=url, browser=browser, use_requests=use_requests)
# if ok, will output: <Response [200]>
page_soup = BeautifulSoup(page, "lxml")
result = list(parse.get_or_create_image_match_from_page(page=page_soup, image=post_img, place=im_place))
result = [x[0] for x in result]
for item in [temp_file_name, thumb_temp_file_name]:
try:
os.remove(item)
except Exception: # pylint: disable=broad-except
log.exception("error removing {}".format(item))
return result
def run_program_for_single_img( # pylint: disable=too-many-branches, too-many-statements
image: str,
resize: bool = False,
size: Optional[Tuple[int, int]] = None,
place: str = DEFAULT_PLACE,
match_filter: Optional[str] = None,
browser: Optional[mechanicalsoup.StatefulBrowser] = None,
scraper: Optional[cfscrape.CloudflareScraper] = None,
disable_tag_print: Optional[bool] = False,
write_tags: Optional[bool] = False,
write_url: Optional[bool] = False,
    minimum_similarity: Optional[float] = None,
) -> Dict[str, Any]:
"""Run program for single image.
Args:
image: image path
resize: resize the image
size: resized image size
place: iqdb place, see `iqdb_url_dict`
match_filter: whitelist matched items
browser: mechanicalsoup browser instance
scraper: cfscrape instance
disable_tag_print: don't print the tag
write_tags: write tags as hydrus tag file
write_url: write matching items' url to file
minimum_similarity: filter result items with minimum similarity
Returns:
iqdb result and collected errors
"""
# compatibility
br = browser # type: ignore
error_set = [] # List[Exception]
tag_textfile = image + ".txt"
folder = os.path.dirname(image)
result = [] # type: List[models.ImageMatch]
if platform.system() == "Windows":
result = get_result_on_windows(image, place, resize=resize, size=size, browser=br)
else:
with NamedTemporaryFile(delete=False) as temp, NamedTemporaryFile(delete=False) as thumb_temp:
shutil.copyfile(image, temp.name)
try:
post_img = models.get_posted_image(
img_path=temp.name,
resize=resize,
size=size,
thumb_path=thumb_temp.name,
)
except OSError as e:
raise OSError(str(e) + " when processing {}".format(image)) from e
for img_m_rel_set in post_img.imagematchrelationship_set:
for item_set in img_m_rel_set.imagematch_set:
if item_set.search_place_verbose == place:
result.append(item_set)
if not result:
url, im_place = iqdb_url_dict[place]
use_requests = place != "e621"
post_img_path = temp.name if not resize else thumb_temp.name
page = models.get_page_result(image=post_img_path, url=url, browser=br, use_requests=use_requests)
# if ok, will output: <Response [200]>
page_soup = BeautifulSoup(page, "lxml")
result = list(parse.get_or_create_image_match_from_page(page=page_soup, image=post_img, place=im_place))
result = [x[0] for x in result]
if match_filter == "best-match":
result = [x for x in result if x.status == x.STATUS_BEST_MATCH]
if minimum_similarity:
result = [x for x in result if float(x.similarity) >= minimum_similarity]
log.debug("Number of valid result", n=len(result))
match_result_tag_pairs = [] # type: List[Tuple[models.Match, List[models.Tag]]]
for item in result:
match_result = item.match.match_result # type: models.Match
url = match_result.link
log.debug("match status", similarity=item.similarity, status=item.status_verbose)
log.debug("url", v=url)
try:
tags = models.get_tags_from_match_result(match_result, browser, scraper)
tags_verbose = [x.full_name for x in tags]
match_result_tag_pairs.append((match_result, tags))
log.debug("{} tag(s) founds".format(len(tags_verbose)))
if tags and not disable_tag_print:
print("\n".join(tags_verbose))
if tags and write_tags:
with open(tag_textfile, "a") as f:
f.write("\n".join(tags_verbose))
f.write("\n")
log.debug("tags written")
if write_url:
write_url_from_match_result(match_result, folder)
except Exception as e: # pylint:disable=broad-except
log.error("Error", e=str(e))
error_set.append(e)
return {"error": error_set, "match result tag pairs": match_result_tag_pairs}
def thumb(basename: str) -> Any:
"""Get thumbnail."""
return send_from_directory(thumb_folder, basename)
def create_app(script_info: Optional[Any] = None) -> Any:
"""Create app."""
app = Flask(__name__)
# logging
if not os.path.exists(user_data_dir):
os.makedirs(user_data_dir)
log_dir = os.path.join(user_data_dir, "log")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
peewee_logger = logging.getLogger("peewee")
peewee_logger.setLevel(logging.INFO)
chardet_logger = logging.getLogger("chardet")
chardet_logger.setLevel(logging.INFO)
default_log_file = os.path.join(log_dir, "iqdb_tagger_server.log")
file_handler = TimedRotatingFileHandler(default_log_file, "midnight")
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(logging.Formatter("<%(asctime)s> <%(levelname)s> %(message)s"))
app.logger.addHandler(file_handler)
app.logger.addHandler(peewee_logger)
app.logger.addHandler(chardet_logger)
# reloader
reloader = app.config["TEMPLATES_AUTO_RELOAD"] = bool(os.getenv("IQDB_TAGGER_RELOADER")) or app.config["TEMPLATES_AUTO_RELOAD"] # NOQA
if reloader:
app.jinja_env.auto_reload = True
app.config["SECRET_KEY"] = os.getenv("IQDB_TAGGER_SECRET_KEY") or os.urandom(24)
app.config["WTF_CSRF_ENABLED"] = False
# debug
debug = app.config["DEBUG"] = bool(os.getenv("IQDB_TAGGER_DEBUG")) or app.config["DEBUG"]
if debug:
app.config["DEBUG"] = True
app.config["LOGGER_HANDLER_POLICY"] = "debug"
logging.basicConfig(level=logging.DEBUG)
pprint.pprint(app.config)
print("Log file: {}".format(default_log_file))
print("script info:{}".format(script_info))
db_path = os.getenv("IQDB_TAGGER_DB_PATH") or default_db_path
init_program()
models.init_db(db_path)
# app and db
app.app_context().push()
@app.shell_context_processor
def shell_context() -> Dict["str", Any]: # pylint: disable=unused-variable
return {"app": app}
# api
api = Api(app)
api.add_resource(views.MatchViewList, "/api/matchview")
# flask-admin
app_admin = Admin(
app,
name="IQDB Tagger",
template_mode="bootstrap3",
index_view=views.HomeView(name="Home", template="iqdb_tagger/index.html", url="/"),
)
app_admin.add_view(views.MatchView())
# app_admin.add_view(ModelView(ImageMatch, category='DB'))
# app_admin.add_view(ModelView(ImageMatchRelationship, category='DB'))
# app_admin.add_view(ModelView(ImageModel, category='DB'))
# app_admin.add_view(ModelView(MatchTagRelationship, category='DB'))
# routing
app.add_url_rule("/thumb/<path:basename>", view_func=thumb)
return app
class FlaskGroup(flask_cli.FlaskGroup):
"""Custom Flask Group."""
def __init__(self, **kwargs: Any) -> None:
"""Class init."""
super().__init__(**kwargs)
if hasattr(self.params[0], "help"):
self.params[0].help = "Show the program version" # type: ignore
self.params[0].callback = get_version
def get_version(ctx: Any, _: Any, value: Any) -> None:
"""Get version."""
if not value or ctx.resilient_parsing:
return
message = "%(app_name)s %(app_version)s\nPython %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s"
click.echo(
message
% {
"app_name": "Iqdb-Tagger",
"app_version": __version__,
"python": platform.python_version(),
"flask": flask_version,
"werkzeug": werkzeug.__version__,
},
color=ctx.color,
)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app)
def cli() -> None:
"""Run cli. This is a management script for application."""
@cli.command()
@click.version_option()
@click.option(
"--place",
type=click.Choice(iqdb_url_dict.keys()),
default=DEFAULT_PLACE,
help="Specify iqdb place, default:{}".format(DEFAULT_PLACE),
)
@click.option("--minimum-similarity", type=float, help="Minimum similarity.")
@click.option("--resize", is_flag=True, help="Use resized image.")
@click.option("--size", help="Specify resized image, format: 'w,h'.")
@click.option("--db-path", help="Specify Database path.")
@click.option(
"--match-filter",
type=click.Choice(["default", "best-match"]),
default="default",
help="Filter the result.",
)
@click.option("--write-tags", is_flag=True, help="Write best match's tags to text.")
@click.option("--write-url", is_flag=True, help="Write match url to text.")
@click.option(
"--input-mode",
type=click.Choice(["default", "folder"]),
default="default",
help="Set input mode.",
)
@click.option("--verbose", "-v", is_flag=True, help="Verbose output.")
@click.option("--debug", "-d", is_flag=True, help="Print debug output.")
@click.option("--abort-on-error", is_flag=True, help="Stop program when error occured") # pylint: disable=too-many-branches
@click.argument("prog-input")
def cli_run(
    prog_input: Optional[str] = None,
resize: bool = False,
size: Optional[str] = None,
db_path: str = default_db_path,
place: str = DEFAULT_PLACE,
match_filter: str = "default",
input_mode: str = "default",
verbose: bool = False,
debug: bool = False,
abort_on_error: bool = False,
write_tags: bool = False,
write_url: bool = False,
    minimum_similarity: Optional[float] = None,
) -> None:
"""Get similar image from iqdb."""
assert prog_input is not None, "Input is not a valid path"
# logging
log_level = None
if verbose:
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
if log_level:
logging.basicConfig(
handlers=[logging.FileHandler(os.path.join(user_data_dir, "output.log"), "w", "utf-8")],
level=log_level,
)
init_program(db_path)
br = mechanicalsoup.StatefulBrowser(soup_config={"features": "lxml"})
br.raise_on_404 = True
scraper = cfscrape.CloudflareScraper()
    # variables used in both input modes
error_set = []
size_tuple: Optional[Tuple[int, int]] = None
if size is not None:
size_tuple = tuple(map(int, size.split(",", 1))) # type: ignore
if input_mode == "folder":
        assert os.path.isdir(prog_input), "Input is not a valid folder"
files = [os.path.join(prog_input, x) for x in os.listdir(prog_input)]
if not files:
print("No files found.")
return
sorted_files = sorted(files, key=lambda x: os.path.splitext(x)[1])
for idx, ff in enumerate(sorted_files):
log.debug("file", f=os.path.basename(ff), idx=idx, total=len(files))
result = {}
try:
result = run_program_for_single_img(
ff,
resize,
size_tuple,
place,
match_filter,
browser=br,
scraper=scraper,
disable_tag_print=True,
write_tags=write_tags,
write_url=write_url,
minimum_similarity=minimum_similarity,
)
except Exception as e: # pylint:disable=broad-except
if abort_on_error:
raise e
error_set.append((ff, e))
if result is not None and result.get("error"):
error_set.extend([(ff, x) for x in result["error"]])
else:
image = prog_input
result = run_program_for_single_img(
image,
resize,
size_tuple,
place,
match_filter,
browser=br,
scraper=scraper,
write_tags=write_tags,
write_url=write_url,
minimum_similarity=minimum_similarity,
)
if result is not None and result.get("error"):
error_set.extend([(image, x) for x in result["error"]])
if error_set:
log.error("Found error(s)")
for x in error_set:
log.error("path: " + x[0] + "\nerror: " + str(x[1]))
def get_hydrus_set(search_tags: List[str], client: Client, resize: bool = True) -> Iterator[Dict[str, Any]]:
"""Get hydrus result.
Args:
search_tags: tags used to search hydrus
        client: client instance
        resize: resize the image before posting it to iqdb
    Yields:
hydrus metadata and iqdb results
"""
# compatibility
cl = client
file_ids = cl.search_files(search_tags)
if not file_ids:
print("No File id found.")
return
file_ids_chunks = yield_chunks(file_ids, 100)
metadata_sets = [] # type: List[Dict[str, Any]]
for file_ids in file_ids_chunks:
metadata_sets.extend(cl.file_metadata(file_ids=file_ids, only_identifiers=True))
for idx, metadata in enumerate(metadata_sets):
f_id, f_hash = metadata["file_id"], metadata["hash"]
log.info("Metadata", idx=idx, total=len(metadata_sets), id=f_id, hash=f_hash)
f_content = cl.get_file(file_id=f_id).content
init_program()
with NamedTemporaryFile(delete=False) as f:
try:
f.write(f_content)
except TypeError:
f.write(f_content.content)
try:
res_set = run_program_for_single_img(
f.name,
resize=resize,
place="iqdb",
match_filter="best-match",
disable_tag_print=True,
)
except OSError as err:
if "can't identify image file" in str(err):
log.error("File is not identified as an image")
else:
log.error(str(err))
continue
yield {"metadata": metadata, "iqdb_result": res_set}
@cli.command()
@click.argument("tag", nargs=-1)
@click.option("--access_key", help="Hydrus access key")
@click.option("--hydrus_url", help="URL for hydrus client e.g. http://127.0.0.1:45869/")
@click.option("--no-resize", help="Don't resize image when upload", is_flag=True)
def search_hydrus_and_send_url(
tag: List[str],
access_key: Optional[str] = None,
hydrus_url: Optional[str] = "http://127.0.0.1:45869/",
no_resize: bool = False,
) -> None:
"""Search hydrus and send url."""
# compatibility
search_tags = tag
if Client is None:
print("Hydrus package is required")
return
args = [access_key]
if hydrus_url:
args.append(hydrus_url)
cl = Client(*args)
for res_dict in get_hydrus_set(search_tags, cl, resize=not no_resize):
match_results = [x[0] for x in res_dict["iqdb_result"]["match result tag pairs"]]
if match_results:
for item in match_results:
cl.add_url(item.link)
@cli.command()
@click.argument("tag", nargs=-1)
@click.option("--access_key", help="Hydrus access key")
@click.option("--hydrus_url", help="URL for hydrus client e.g. http://127.0.0.1:45869/")
@click.option("--tag_repo", help="tag repo name e.g. local tags", default="local tags")
@click.option("--no-resize", help="Don't resize image when upload", is_flag=True)
def search_hydrus_and_send_tag(
tag: List[str],
access_key: Optional[str] = None,
hydrus_url: Optional[str] = "http://127.0.0.1:45869/",
tag_repo: Optional[str] = "local tags",
no_resize: bool = False,
) -> None:
"""Search hydrus and send tag."""
# compatibility
search_tags = tag
if Client is None:
print("Hydrus package is required")
return
args = [access_key]
if hydrus_url:
args.append(hydrus_url)
cl = Client(*args)
for res_dict in get_hydrus_set(search_tags, cl, resize=not no_resize):
f_hash = res_dict["metadata"]["hash"]
tag_sets = [x[1] for x in res_dict["iqdb_result"]["match result tag pairs"]]
tags = list(set(sum(tag_sets, [])))
full_name_tags = [x.full_name for x in tags]
if full_name_tags:
try:
cl.add_tags([f_hash], service_to_tags={tag_repo: full_name_tags})
except Exception: # pylint: disable=broad-except
traceback.print_exc()
if __name__ == "__main__":
cli()
| |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various longitudinal commands provided by freesurfer
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import os.path
from ... import logging
from ...utils.filemanip import split_filename, copyfile
from .base import (FSCommand, FSTraitedSpec,
FSScriptCommand,
FSScriptOutputSpec,
FSCommandOpenMP,
FSTraitedSpecOpenMP)
from ..base import (isdefined, TraitedSpec, File, traits, Directory)
__docformat__ = 'restructuredtext'
iflogger = logging.getLogger('interface')
class MPRtoMNI305InputSpec(FSTraitedSpec):
# environment variables, required
# usedefault=True is hack for on_trait_change in __init__
reference_dir = Directory(
"", exists=True, mandatory=True, usedefault=True, desc="TODO")
target = traits.String(
"", mandatory=True, usedefault=True, desc="input atlas file")
# required
in_file = File(argstr='%s', usedefault=True,
desc="the input file prefix for MPRtoMNI305")
class MPRtoMNI305OutputSpec(FSScriptOutputSpec):
out_file = File(
exists=False, desc="The output file '<in_file>_to_<target>_t4_vox2vox.txt'")
class MPRtoMNI305(FSScriptCommand):
"""
For complete details, see FreeSurfer documentation
Examples
========
>>> from nipype.interfaces.freesurfer import MPRtoMNI305, Info
>>> mprtomni305 = MPRtoMNI305()
>>> mprtomni305.inputs.target = 'structural.nii'
>>> mprtomni305.inputs.reference_dir = '.' # doctest: +SKIP
>>> mprtomni305.cmdline # doctest: +SKIP
'mpr2mni305 output'
>>> mprtomni305.inputs.out_file = 'struct_out' # doctest: +SKIP
>>> mprtomni305.cmdline # doctest: +SKIP
'mpr2mni305 struct_out' # doctest: +SKIP
>>> mprtomni305.inputs.environ['REFDIR'] == os.path.join(Info.home(), 'average') # doctest: +SKIP
True
>>> mprtomni305.inputs.environ['MPR2MNI305_TARGET'] # doctest: +SKIP
'structural'
>>> mprtomni305.run() # doctest: +SKIP
"""
_cmd = "mpr2mni305"
input_spec = MPRtoMNI305InputSpec
output_spec = MPRtoMNI305OutputSpec
def __init__(self, **inputs):
super(MPRtoMNI305, self).__init__(**inputs)
self.inputs.on_trait_change(self._environ_update, 'target')
self.inputs.on_trait_change(self._environ_update, 'reference_dir')
def _format_arg(self, opt, spec, val):
if opt in ['target', 'reference_dir']:
return ""
elif opt == 'in_file':
_, retval, ext = split_filename(val)
# Need to copy file to working cache directory!
copyfile(val, os.path.abspath(retval + ext),
copy=True, hashmethod='content')
return retval
return super(MPRtoMNI305, self)._format_arg(opt, spec, val)
def _environ_update(self):
# refdir = os.path.join(Info.home(), val)
refdir = self.inputs.reference_dir
target = self.inputs.target
self.inputs.environ['MPR2MNI305_TARGET'] = target
self.inputs.environ["REFDIR"] = refdir
def _get_fname(self, fname):
return split_filename(fname)[1]
def _list_outputs(self):
outputs = super(MPRtoMNI305, self)._list_outputs()
fullname = "_".join([self._get_fname(self.inputs.in_file), "to",
self.inputs.target, "t4", "vox2vox.txt"])
outputs['out_file'] = os.path.abspath(fullname)
return outputs
class RegisterAVItoTalairachInputSpec(FSTraitedSpec):
in_file = File(argstr='%s', exists=True, mandatory=True,
position=0, desc="The input file")
target = File(argstr='%s', exists=True, mandatory=True,
position=1, desc="The target file")
vox2vox = File(argstr='%s', exists=True, mandatory=True,
position=2, desc="The vox2vox file")
out_file = File('talairach.auto.xfm', usedefault=True,
argstr='%s',
position=3, desc="The transform output")
class RegisterAVItoTalairachOutputSpec(FSScriptOutputSpec):
out_file = traits.File(
exists=False, desc="The output file for RegisterAVItoTalairach")
class RegisterAVItoTalairach(FSScriptCommand):
"""
converts the vox2vox from talairach_avi to a talairach.xfm file
This is a script that converts the vox2vox from talairach_avi to a
talairach.xfm file. It is meant to replace the following cmd line:
tkregister2_cmdl \
--mov $InVol \
--targ $FREESURFER_HOME/average/mni305.cor.mgz \
--xfmout ${XFM} \
--vox2vox talsrcimg_to_${target}_t4_vox2vox.txt \
--noedit \
--reg talsrcimg.reg.tmp.dat
set targ = $FREESURFER_HOME/average/mni305.cor.mgz
set subject = mgh-02407836-v2
set InVol = $SUBJECTS_DIR/$subject/mri/orig.mgz
set vox2vox = $SUBJECTS_DIR/$subject/mri/transforms/talsrcimg_to_711-2C_as_mni_average_305_t4_vox2vox.txt
Examples
========
>>> from nipype.interfaces.freesurfer import RegisterAVItoTalairach
>>> register = RegisterAVItoTalairach()
>>> register.inputs.in_file = 'structural.mgz' # doctest: +SKIP
>>> register.inputs.target = 'mni305.cor.mgz' # doctest: +SKIP
>>> register.inputs.vox2vox = 'talsrcimg_to_structural_t4_vox2vox.txt' # doctest: +SKIP
>>> register.cmdline # doctest: +SKIP
'avi2talxfm structural.mgz mni305.cor.mgz talsrcimg_to_structural_t4_vox2vox.txt talairach.auto.xfm'
>>> register.run() # doctest: +SKIP
"""
_cmd = "avi2talxfm"
input_spec = RegisterAVItoTalairachInputSpec
output_spec = RegisterAVItoTalairachOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class EMRegisterInputSpec(FSTraitedSpecOpenMP):
# required
in_file = File(argstr="%s", exists=True, mandatory=True,
position=-3, desc="in brain volume")
template = File(argstr="%s", exists=True, mandatory=True,
position=-2, desc="template gca")
out_file = File(argstr="%s", exists=False,
name_source=['in_file'], name_template="%s_transform.lta",
hash_files=False, keep_extension=False,
position=-1, desc="output transform")
# optional
skull = traits.Bool(
argstr="-skull", desc="align to atlas containing skull (uns=5)")
mask = File(argstr="-mask %s", exists=True,
desc="use volume as a mask")
nbrspacing = traits.Int(argstr="-uns %d",
desc="align to atlas containing skull setting unknown_nbr_spacing = nbrspacing")
transform = File(argstr="-t %s", exists=True,
desc="Previously computed transform")
class EMRegisterOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="output transform")
class EMRegister(FSCommandOpenMP):
""" This program creates a tranform in lta format
Examples
========
>>> from nipype.interfaces.freesurfer import EMRegister
>>> register = EMRegister()
>>> register.inputs.in_file = 'norm.mgz'
>>> register.inputs.template = 'aseg.mgz'
>>> register.inputs.out_file = 'norm_transform.lta'
>>> register.inputs.skull = True
>>> register.inputs.nbrspacing = 9
>>> register.cmdline # doctest: +IGNORE_UNICODE
'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta'
"""
_cmd = 'mri_em_register'
input_spec = EMRegisterInputSpec
output_spec = EMRegisterOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class RegisterInputSpec(FSTraitedSpec):
# required
in_surf = File(argstr="%s", exists=True, mandatory=True, position=-3,
copyfile=True,
desc="Surface to register, often {hemi}.sphere")
target = File(argstr="%s", exists=True, mandatory=True, position=-2,
desc="The data to register to. In normal recon-all usage, " +
"this is a template file for average surface.")
in_sulc = File(exists=True, mandatory=True, copyfile=True,
desc="Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc ")
out_file = File(argstr="%s", exists=False, position=-1, genfile=True,
desc="Output surface file to capture registration")
# optional
curv = traits.Bool(argstr="-curv", requires=['in_smoothwm'],
desc="Use smoothwm curvature for final alignment")
in_smoothwm = File(exists=True, copyfile=True,
desc="Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm ")
class RegisterOutputSpec(TraitedSpec):
out_file = File(
exists=False, desc="Output surface file to capture registration")
class Register(FSCommand):
""" This program registers a surface to an average surface template.
Examples
========
>>> from nipype.interfaces.freesurfer import Register
>>> register = Register()
>>> register.inputs.in_surf = 'lh.pial'
>>> register.inputs.in_smoothwm = 'lh.pial'
>>> register.inputs.in_sulc = 'lh.pial'
>>> register.inputs.target = 'aseg.mgz'
>>> register.inputs.out_file = 'lh.pial.reg'
>>> register.inputs.curv = True
>>> register.cmdline # doctest: +IGNORE_UNICODE
'mris_register -curv lh.pial aseg.mgz lh.pial.reg'
"""
_cmd = 'mris_register'
input_spec = RegisterInputSpec
output_spec = RegisterOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'curv':
return spec.argstr
return super(Register, self)._format_arg(opt, spec, val)
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
outputs['out_file'] = os.path.abspath(self.inputs.in_surf) + '.reg'
return outputs
class PaintInputSpec(FSTraitedSpec):
# required
in_surf = File(argstr="%s", exists=True, mandatory=True, position=-2,
desc="Surface file with grid (vertices) onto which the " +
"template data is to be sampled or 'painted'")
template = File(argstr="%s", exists=True, mandatory=True, position=-3,
desc="Template file")
# optional
template_param = traits.Int(desc="Frame number of the input template")
averages = traits.Int(argstr="-a %d",
desc="Average curvature patterns")
out_file = File(argstr="%s", exists=False, position=-1,
name_template="%s.avg_curv", hash_files=False,
name_source=['in_surf'], keep_extension=False,
desc="File containing a surface-worth of per-vertex values, " +
"saved in 'curvature' format.")
class PaintOutputSpec(TraitedSpec):
out_file = File(exists=False,
desc="File containing a surface-worth of per-vertex values, saved in 'curvature' format.")
class Paint(FSCommand):
"""
This program is useful for extracting one of the arrays ("a variable")
from a surface-registration template file. The output is a file
containing a surface-worth of per-vertex values, saved in "curvature"
format. Because the template data is sampled to a particular surface
mesh, this conjures the idea of "painting to a surface".
Examples
========
>>> from nipype.interfaces.freesurfer import Paint
>>> paint = Paint()
>>> paint.inputs.in_surf = 'lh.pial'
>>> paint.inputs.template = 'aseg.mgz'
>>> paint.inputs.averages = 5
>>> paint.inputs.out_file = 'lh.avg_curv'
>>> paint.cmdline # doctest: +IGNORE_UNICODE
'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv'
"""
_cmd = 'mrisp_paint'
input_spec = PaintInputSpec
output_spec = PaintOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'template':
if isdefined(self.inputs.template_param):
return spec.argstr % (val + '#' + str(self.inputs.template_param))
return super(Paint, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
| |
import copy
import json
import logging
import math
import os
import random
import shutil
from typing import Callable, Dict, List, Optional, Tuple, Union
from ray.tune import trial_runner
from ray.tune import trial_executor
from ray.tune.error import TuneError
from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
from ray.tune.suggest import SearchGenerator
from ray.tune.utils.util import SafeFallbackEncoder
from ray.tune.sample import Domain, Function
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.suggest.variant_generator import format_vars
from ray.tune.trial import Trial, Checkpoint
from ray.util.debug import log_once
logger = logging.getLogger(__name__)
class PBTTrialState:
"""Internal PBT state tracked per-trial."""
def __init__(self, trial: Trial):
self.orig_tag = trial.experiment_tag
self.last_score = None
self.last_checkpoint = None
self.last_perturbation_time = 0
self.last_train_time = 0 # Used for synchronous mode.
self.last_result = None # Used for synchronous mode.
def __repr__(self) -> str:
return str(
(
self.last_score,
self.last_checkpoint,
self.last_train_time,
self.last_perturbation_time,
)
)
def explore(
config: Dict,
mutations: Dict,
resample_probability: float,
custom_explore_fn: Optional[Callable],
) -> Dict:
"""Return a config perturbed as specified.
Args:
config (dict): Original hyperparameter configuration.
mutations (dict): Specification of mutations to perform as documented
in the PopulationBasedTraining scheduler.
resample_probability (float): Probability of allowing resampling of a
particular variable.
        custom_explore_fn (func): Custom explore function, applied after the
            built-in config perturbations.
"""
new_config = copy.deepcopy(config)
for key, distribution in mutations.items():
if isinstance(distribution, dict):
new_config.update(
{key: explore(config[key], mutations[key], resample_probability, None)}
)
elif isinstance(distribution, list):
if (
random.random() < resample_probability
or config[key] not in distribution
):
new_config[key] = random.choice(distribution)
elif random.random() > 0.5:
new_config[key] = distribution[
max(0, distribution.index(config[key]) - 1)
]
else:
new_config[key] = distribution[
min(len(distribution) - 1, distribution.index(config[key]) + 1)
]
else:
if random.random() < resample_probability:
new_config[key] = (
distribution.sample(None)
if isinstance(distribution, Domain)
else distribution()
)
elif random.random() > 0.5:
new_config[key] = config[key] * 1.2
else:
new_config[key] = config[key] * 0.8
if isinstance(config[key], int):
new_config[key] = int(new_config[key])
if custom_explore_fn:
new_config = custom_explore_fn(new_config)
assert new_config is not None, "Custom explore fn failed to return new config"
return new_config
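# Worked example (illustrative numbers): with
#   mutations = {"lr": [0.001, 0.01, 0.1]} and config = {"lr": 0.01},
# explore() resamples lr from the list with probability
# resample_probability; otherwise it moves lr to an adjacent entry
# (0.001 or 0.1), each with probability ~0.5. Continuous values take the
# analogous branch and are scaled by 0.8 or 1.2 instead.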
def make_experiment_tag(orig_tag: str, config: Dict, mutations: Dict) -> str:
"""Appends perturbed params to the trial name to show in the console."""
resolved_vars = {}
for k in mutations.keys():
resolved_vars[("config", k)] = config[k]
return "{}@perturbed[{}]".format(orig_tag, format_vars(resolved_vars))
def fill_config(
config: Dict, attr: str, search_space: Union[Callable, Domain, list, dict]
):
"""Add attr to config by sampling from search_space."""
if callable(search_space):
config[attr] = search_space()
elif isinstance(search_space, Domain):
config[attr] = search_space.sample(None)
elif isinstance(search_space, list):
config[attr] = random.choice(search_space)
elif isinstance(search_space, dict):
config[attr] = {}
for k, v in search_space.items():
fill_config(config[attr], k, v)
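# Example (illustrative): fill_config(cfg, "lr", tune.uniform(1e-4, 1e-1))
# stores one sample from the search space at cfg["lr"]; a list would be
# sampled with random.choice, and a dict fills nested keys recursively.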
class PopulationBasedTraining(FIFOScheduler):
"""Implements the Population Based Training (PBT) algorithm.
https://deepmind.com/blog/population-based-training-neural-networks
PBT trains a group of models (or agents) in parallel. Periodically, poorly
performing models clone the state of the top performers, and a random
mutation is applied to their hyperparameters in the hopes of
outperforming the current top models.
Unlike other hyperparameter search algorithms, PBT mutates hyperparameters
during training time. This enables very fast hyperparameter discovery and
also automatically discovers good annealing schedules.
This Tune PBT implementation considers all trials added as part of the
PBT population. If the number of trials exceeds the cluster capacity,
    they will be time-multiplexed so as to balance training progress across the
population. To run multiple trials, use `tune.run(num_samples=<int>)`.
In {LOG_DIR}/{MY_EXPERIMENT_NAME}/, all mutations are logged in
`pbt_global.txt` and individual policy perturbations are recorded
in pbt_policy_{i}.txt. Tune logs: [target trial tag, clone trial tag,
target trial iteration, clone trial iteration, old config, new config]
on each perturbation step.
Args:
time_attr (str): The training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric (str): The training result objective value attribute. Stopping
procedures will use this attribute. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
perturbation_interval (float): Models will be considered for
perturbation at this interval of `time_attr`. Note that
perturbation incurs checkpoint overhead, so you shouldn't set this
to be too frequent.
burn_in_period (float): Models will not be considered for
perturbation before this interval of `time_attr` has passed. This
guarantees that models are trained for at least a certain amount
of time or timesteps before being perturbed.
hyperparam_mutations (dict): Hyperparams to mutate. The format is
as follows: for each key, either a list, function,
or a tune search space object (tune.loguniform, tune.uniform,
etc.) can be provided. A list specifies an allowed set of
categorical values. A function or tune search space object
specifies the distribution of a continuous parameter. You must
use tune.choice, tune.uniform, tune.loguniform, etc.. Arbitrary
tune.sample_from objects are not supported.
You must specify at least one of `hyperparam_mutations` or
`custom_explore_fn`.
Tune will use the search space provided by
`hyperparam_mutations` for the initial samples if the
corresponding attributes are not present in `config`.
quantile_fraction (float): Parameters are transferred from the top
`quantile_fraction` fraction of trials to the bottom
`quantile_fraction` fraction. Needs to be between 0 and 0.5.
Setting it to 0 essentially implies doing no exploitation at all.
resample_probability (float): The probability of resampling from the
original distribution when applying `hyperparam_mutations`. If not
resampled, the value will be perturbed by a factor of 1.2 or 0.8
if continuous, or changed to an adjacent value if discrete.
custom_explore_fn (func): You can also specify a custom exploration
function. This function is invoked as `f(config)` after built-in
perturbations from `hyperparam_mutations` are applied, and should
return `config` updated as needed. You must specify at least one of
`hyperparam_mutations` or `custom_explore_fn`.
log_config (bool): Whether to log the ray config of each model to
local_dir at each exploit. Allows config schedule to be
reconstructed.
require_attrs (bool): Whether to require time_attr and metric to appear
in result for every iteration. If True, error will be raised
if these values are not present in trial result.
synch (bool): If False, will use asynchronous implementation of
PBT. Trial perturbations occur every perturbation_interval for each
trial independently. If True, will use synchronous implementation
of PBT. Perturbations will occur only after all trials are
synced at the same time_attr every perturbation_interval.
Defaults to False. See Appendix A.1 here
https://arxiv.org/pdf/1711.09846.pdf.
.. code-block:: python
import random
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
pbt = PopulationBasedTraining(
time_attr="training_iteration",
metric="episode_reward_mean",
mode="max",
perturbation_interval=10, # every 10 `time_attr` units
# (training_iterations in this case)
hyperparam_mutations={
# Perturb factor1 by scaling it by 0.8 or 1.2. Resampling
# resets it to a value sampled from the lambda function.
"factor_1": lambda: random.uniform(0.0, 20.0),
# Alternatively, use tune search space primitives.
# The search space for factor_1 is equivalent to factor_2.
"factor_2": tune.uniform(0.0, 20.0),
# Perturb factor3 by changing it to an adjacent value, e.g.
# 10 -> 1 or 10 -> 100. Resampling will choose at random.
"factor_3": [1, 10, 100, 1000, 10000],
# Using tune.choice is NOT equivalent to the above.
# factor_4 is treated as a continuous hyperparameter.
"factor_4": tune.choice([1, 10, 100, 1000, 10000]),
})
tune.run({...}, num_samples=8, scheduler=pbt)
"""
def __init__(
self,
time_attr: str = "time_total_s",
metric: Optional[str] = None,
mode: Optional[str] = None,
perturbation_interval: float = 60.0,
burn_in_period: float = 0.0,
        hyperparam_mutations: Optional[Dict] = None,
quantile_fraction: float = 0.25,
resample_probability: float = 0.25,
custom_explore_fn: Optional[Callable] = None,
log_config: bool = True,
require_attrs: bool = True,
synch: bool = False,
):
hyperparam_mutations = hyperparam_mutations or {}
for value in hyperparam_mutations.values():
if not (isinstance(value, (list, dict, Domain)) or callable(value)):
raise TypeError(
"`hyperparam_mutation` values must be either "
"a List, Dict, a tune search space object, or "
"a callable."
)
if isinstance(value, Function):
raise ValueError(
"arbitrary tune.sample_from objects are not "
"supported for `hyperparam_mutation` values."
"You must use other built in primitives like"
"tune.uniform, tune.loguniform, etc."
)
if not hyperparam_mutations and not custom_explore_fn:
raise TuneError(
"You must specify at least one of `hyperparam_mutations` "
"or `custom_explore_fn` to use PBT."
)
if quantile_fraction > 0.5 or quantile_fraction < 0:
raise ValueError(
"You must set `quantile_fraction` to a value between 0 and"
"0.5. Current value: '{}'".format(quantile_fraction)
)
if perturbation_interval <= 0:
raise ValueError(
"perturbation_interval must be a positive number greater "
"than 0. Current value: '{}'".format(perturbation_interval)
)
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
FIFOScheduler.__init__(self)
self._metric = metric
self._mode = mode
self._metric_op = None
if self._mode == "max":
self._metric_op = 1.0
elif self._mode == "min":
self._metric_op = -1.0
self._time_attr = time_attr
self._perturbation_interval = perturbation_interval
self._burn_in_period = burn_in_period
self._hyperparam_mutations = hyperparam_mutations
self._quantile_fraction = quantile_fraction
self._resample_probability = resample_probability
self._trial_state = {}
self._custom_explore_fn = custom_explore_fn
self._log_config = log_config
self._require_attrs = require_attrs
self._synch = synch
self._next_perturbation_sync = self._perturbation_interval
# Metrics
self._num_checkpoints = 0
self._num_perturbations = 0
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], **spec
) -> bool:
if self._metric and metric:
return False
if self._mode and mode:
return False
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.0
elif self._mode == "min":
self._metric_op = -1.0
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
return True
def on_trial_add(self, trial_runner: "trial_runner.TrialRunner", trial: Trial):
if trial_runner.search_alg is not None and isinstance(
trial_runner.search_alg, SearchGenerator
):
raise ValueError(
"Search algorithms cannot be used with {} "
"schedulers. Please remove {}.".format(
self.__class__.__name__, trial_runner.search_alg
)
)
if not self._metric or not self._metric_op:
raise ValueError(
"{} has been instantiated without a valid `metric` ({}) or "
"`mode` ({}) parameter. Either pass these parameters when "
"instantiating the scheduler, or pass them as parameters "
"to `tune.run()`".format(
self.__class__.__name__, self._metric, self._mode
)
)
self._trial_state[trial] = PBTTrialState(trial)
for attr in self._hyperparam_mutations.keys():
if attr not in trial.config:
if log_once(attr + "-missing"):
                    logger.debug(
                        "Cannot find {} in config. Using search "
                        "space provided by hyperparam_mutations.".format(attr)
                    )
# Add attr to trial's config by sampling search space from
# hyperparam_mutations.
fill_config(trial.config, attr, self._hyperparam_mutations[attr])
# Make sure this attribute is added to CLI output.
trial.evaluated_params[attr] = trial.config[attr]
def on_trial_result(
self, trial_runner: "trial_runner.TrialRunner", trial: Trial, result: Dict
) -> str:
if self._time_attr not in result:
time_missing_msg = (
"Cannot find time_attr {} "
"in trial result {}. Make sure that this "
"attribute is returned in the "
"results of your Trainable.".format(self._time_attr, result)
)
if self._require_attrs:
raise RuntimeError(
time_missing_msg
+ "If this error is expected, you can change this to "
"a warning message by "
"setting PBT(require_attrs=False)"
)
else:
if log_once("pbt-time_attr-error"):
logger.warning(time_missing_msg)
if self._metric not in result:
metric_missing_msg = (
"Cannot find metric {} in trial result {}. "
"Make sure that this attribute is returned "
"in the "
"results of your Trainable.".format(self._metric, result)
)
if self._require_attrs:
raise RuntimeError(
metric_missing_msg + "If this error is expected, "
"you can change this to a warning message by "
"setting PBT(require_attrs=False)"
)
else:
if log_once("pbt-metric-error"):
logger.warning(metric_missing_msg)
if self._metric not in result or self._time_attr not in result:
return TrialScheduler.CONTINUE
time = result[self._time_attr]
state = self._trial_state[trial]
# Continue training if burn-in period has not been reached, yet.
if time < self._burn_in_period:
return TrialScheduler.CONTINUE
# Continue training if perturbation interval has not been reached, yet.
if time - state.last_perturbation_time < self._perturbation_interval:
return TrialScheduler.CONTINUE # avoid checkpoint overhead
self._save_trial_state(state, time, result, trial)
if not self._synch:
state.last_perturbation_time = time
lower_quantile, upper_quantile = self._quantiles()
decision = TrialScheduler.CONTINUE
for other_trial in trial_runner.get_trials():
if other_trial.status in [Trial.PENDING, Trial.PAUSED]:
decision = TrialScheduler.PAUSE
break
self._checkpoint_or_exploit(
trial, trial_runner.trial_executor, upper_quantile, lower_quantile
)
return TrialScheduler.NOOP if trial.status == Trial.PAUSED else decision
else:
# Synchronous mode.
if any(
self._trial_state[t].last_train_time < self._next_perturbation_sync
and t != trial
for t in trial_runner.get_trials()
):
logger.debug("Pausing trial {}".format(trial))
else:
# All trials are synced at the same timestep.
lower_quantile, upper_quantile = self._quantiles()
all_trials = trial_runner.get_trials()
not_in_quantile = []
for t in all_trials:
if t not in lower_quantile and t not in upper_quantile:
not_in_quantile.append(t)
# Move upper quantile trials to beginning and lower quantile
# to end. This ensures that checkpointing of strong trials
# occurs before exploiting of weaker ones.
all_trials = upper_quantile + not_in_quantile + lower_quantile
for t in all_trials:
logger.debug("Perturbing Trial {}".format(t))
self._trial_state[t].last_perturbation_time = time
self._checkpoint_or_exploit(
t, trial_runner.trial_executor, upper_quantile, lower_quantile
)
all_train_times = [
self._trial_state[t].last_train_time
for t in trial_runner.get_trials()
]
max_last_train_time = max(all_train_times)
self._next_perturbation_sync = max(
self._next_perturbation_sync + self._perturbation_interval,
max_last_train_time,
)
# In sync mode we should pause all trials once result comes in.
# Once a perturbation step happens for all trials, they should
# still all be paused.
# choose_trial_to_run will then pick the next trial to run out of
# the paused trials.
return (
TrialScheduler.NOOP
if trial.status == Trial.PAUSED
else TrialScheduler.PAUSE
)
def _save_trial_state(
self, state: PBTTrialState, time: int, result: Dict, trial: Trial
):
"""Saves necessary trial information when result is received.
Args:
state (PBTTrialState): The state object for the trial.
time (int): The current timestep of the trial.
result (dict): The trial's result dictionary.
            trial (Trial): The trial object.
"""
# This trial has reached its perturbation interval.
# Record new state in the state object.
score = self._metric_op * result[self._metric]
state.last_score = score
state.last_train_time = time
state.last_result = result
return score
def _checkpoint_or_exploit(
self,
trial: Trial,
trial_executor: "trial_runner.RayTrialExecutor",
upper_quantile: List[Trial],
lower_quantile: List[Trial],
):
"""Checkpoint if in upper quantile, exploits if in lower."""
state = self._trial_state[trial]
if trial in upper_quantile:
# The trial last result is only updated after the scheduler
# callback. So, we override with the current result.
logger.debug("Trial {} is in upper quantile".format(trial))
logger.debug("Checkpointing {}".format(trial))
if trial.status == Trial.PAUSED:
# Paused trial will always have an in-memory checkpoint.
state.last_checkpoint = trial.checkpoint
else:
state.last_checkpoint = trial_executor.save(
trial, Checkpoint.MEMORY, result=state.last_result
)
self._num_checkpoints += 1
else:
state.last_checkpoint = None # not a top trial
if trial in lower_quantile:
logger.debug("Trial {} is in lower quantile".format(trial))
trial_to_clone = random.choice(upper_quantile)
assert trial is not trial_to_clone
if not self._trial_state[trial_to_clone].last_checkpoint:
logger.info(
"[pbt]: no checkpoint for trial."
" Skip exploit for Trial {}".format(trial)
)
return
self._exploit(trial_executor, trial, trial_to_clone)
def _log_config_on_step(
self,
trial_state: PBTTrialState,
new_state: PBTTrialState,
trial: Trial,
trial_to_clone: Trial,
new_config: Dict,
):
"""Logs transition during exploit/exploit step.
For each step, logs: [target trial tag, clone trial tag, target trial
iteration, clone trial iteration, old config, new config].
"""
trial_name, trial_to_clone_name = (trial_state.orig_tag, new_state.orig_tag)
trial_id = trial.trial_id
trial_to_clone_id = trial_to_clone.trial_id
trial_path = os.path.join(trial.local_dir, "pbt_policy_" + trial_id + ".txt")
trial_to_clone_path = os.path.join(
trial_to_clone.local_dir, "pbt_policy_" + trial_to_clone_id + ".txt"
)
policy = [
trial_name,
trial_to_clone_name,
trial.last_result.get(TRAINING_ITERATION, 0),
trial_to_clone.last_result.get(TRAINING_ITERATION, 0),
trial_to_clone.config,
new_config,
]
# Log to global file.
with open(os.path.join(trial.local_dir, "pbt_global.txt"), "a+") as f:
print(json.dumps(policy, cls=SafeFallbackEncoder), file=f)
# Overwrite state in target trial from trial_to_clone.
if os.path.exists(trial_to_clone_path):
shutil.copyfile(trial_to_clone_path, trial_path)
# Log new exploit in target trial log.
with open(trial_path, "a+") as f:
f.write(json.dumps(policy, cls=SafeFallbackEncoder) + "\n")
def _get_new_config(self, trial, trial_to_clone):
"""Gets new config for trial by exploring trial_to_clone's config."""
return explore(
trial_to_clone.config,
self._hyperparam_mutations,
self._resample_probability,
self._custom_explore_fn,
)
def _exploit(
self,
trial_executor: "trial_executor.TrialExecutor",
trial: Trial,
trial_to_clone: Trial,
):
"""Transfers perturbed state from trial_to_clone -> trial.
If specified, also logs the updated hyperparam state.
"""
trial_state = self._trial_state[trial]
new_state = self._trial_state[trial_to_clone]
logger.info(
"[exploit] transferring weights from trial "
"{} (score {}) -> {} (score {})".format(
trial_to_clone, new_state.last_score, trial, trial_state.last_score
)
)
new_config = self._get_new_config(trial, trial_to_clone)
# Only log mutated hyperparameters and not entire config.
old_hparams = {
k: v
for k, v in trial_to_clone.config.items()
if k in self._hyperparam_mutations
}
new_hparams = {
k: v for k, v in new_config.items() if k in self._hyperparam_mutations
}
logger.info(
"[explore] perturbed config from {} -> {}".format(old_hparams, new_hparams)
)
if self._log_config:
self._log_config_on_step(
trial_state, new_state, trial, trial_to_clone, new_config
)
new_tag = make_experiment_tag(
trial_state.orig_tag, new_config, self._hyperparam_mutations
)
if trial.status == Trial.PAUSED:
# If trial is paused we update it with a new checkpoint.
# When the trial is started again, the new checkpoint is used.
if not self._synch:
raise TuneError(
"Trials should be paused here only if in "
"synchronous mode. If you encounter this error"
" please raise an issue on Ray Github."
)
else:
trial_executor.stop_trial(trial)
trial_executor.set_status(trial, Trial.PAUSED)
trial.set_experiment_tag(new_tag)
trial.set_config(new_config)
trial.on_checkpoint(new_state.last_checkpoint)
self._num_perturbations += 1
# Transfer over the last perturbation time as well
trial_state.last_perturbation_time = new_state.last_perturbation_time
trial_state.last_train_time = new_state.last_train_time
def _quantiles(self) -> Tuple[List[Trial], List[Trial]]:
"""Returns trials in the lower and upper `quantile` of the population.
If there is not enough data to compute this, returns empty lists.
"""
trials = []
for trial, state in self._trial_state.items():
logger.debug("Trial {}, state {}".format(trial, state))
if trial.is_finished():
logger.debug("Trial {} is finished".format(trial))
if state.last_score is not None and not trial.is_finished():
trials.append(trial)
trials.sort(key=lambda t: self._trial_state[t].last_score)
if len(trials) <= 1:
return [], []
else:
num_trials_in_quantile = int(
math.ceil(len(trials) * self._quantile_fraction)
)
if num_trials_in_quantile > len(trials) / 2:
num_trials_in_quantile = int(math.floor(len(trials) / 2))
return (trials[:num_trials_in_quantile], trials[-num_trials_in_quantile:])
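    # Worked example of the quantile cut above (hypothetical numbers): with
    # 10 eligible trials and quantile_fraction=0.25, ceil(10 * 0.25) = 3, so
    # the 3 lowest-scoring trials form the lower quantile and the 3
    # highest-scoring trials form the upper quantile.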
def choose_trial_to_run(
self, trial_runner: "trial_runner.TrialRunner"
) -> Optional[Trial]:
"""Ensures all trials get fair share of time (as defined by time_attr).
This enables the PBT scheduler to support a greater number of
concurrent trials than can fit in the cluster at any given time.
"""
candidates = []
for trial in trial_runner.get_trials():
if (
trial.status
in [
Trial.PENDING,
Trial.PAUSED,
]
and trial_runner.trial_executor.has_resources_for_trial(trial)
):
if not self._synch:
candidates.append(trial)
elif (
self._trial_state[trial].last_train_time
< self._next_perturbation_sync
):
candidates.append(trial)
candidates.sort(key=lambda trial: self._trial_state[trial].last_train_time)
return candidates[0] if candidates else None
# Unit test only. TODO(xwjiang): Remove test-specific APIs.
def reset_stats(self):
self._num_perturbations = 0
self._num_checkpoints = 0
# Unit test only. TODO(xwjiang): Remove test-specific APIs.
def last_scores(self, trials: List[Trial]) -> List[float]:
scores = []
for trial in trials:
state = self._trial_state[trial]
if state.last_score is not None and not trial.is_finished():
scores.append(state.last_score)
return scores
def debug_string(self) -> str:
return "PopulationBasedTraining: {} checkpoints, {} perturbs".format(
self._num_checkpoints, self._num_perturbations
)
class PopulationBasedTrainingReplay(FIFOScheduler):
"""Replays a Population Based Training run.
Population Based Training does not return a single hyperparameter
configuration, but rather a schedule of configurations. For instance,
PBT might discover that a larger learning rate leads to good results
in the first training iterations, but that a smaller learning rate
is preferable later.
This scheduler enables replaying these parameter schedules from
a finished PBT run. This requires that population based training has
been run with ``log_config=True``, which is the default setting.
The scheduler will only accept and train a single trial. It will
start with the initial config of the existing trial and update the
config according to the schedule.
Args:
policy_file (str): The PBT policy file. Usually this is
stored in ``~/ray_results/experiment_name/pbt_policy_xxx.txt``
where ``xxx`` is the trial ID.
Example:
.. code-block:: python
# Replaying a result from ray.tune.examples.pbt_convnet_example
from ray import tune
from ray.tune.examples.pbt_convnet_example import PytorchTrainable
from ray.tune.schedulers import PopulationBasedTrainingReplay
replay = PopulationBasedTrainingReplay(
"~/ray_results/pbt_test/pbt_policy_XXXXX_00001.txt")
tune.run(
PytorchTrainable,
scheduler=replay,
stop={"training_iteration": 100})
"""
def __init__(self, policy_file: str):
policy_file = os.path.expanduser(policy_file)
if not os.path.exists(policy_file):
raise ValueError("Policy file not found: {}".format(policy_file))
self.policy_file = policy_file
# Find and read pbt policy file, potentially raise error
initial_config, self._policy = self._load_policy(self.policy_file)
self.experiment_tag = "replay_{}".format(os.path.basename(self.policy_file))
self.config = initial_config
self.current_config = self.config
self._trial = None
self._current_step = 0
self._num_perturbations = 0
self._policy_iter = iter(self._policy)
self._next_policy = next(self._policy_iter, None)
def _load_policy(self, policy_file: str) -> Tuple[Dict, List[Tuple[int, Dict]]]:
raw_policy = []
with open(policy_file, "rt") as fp:
for row in fp.readlines():
try:
parsed_row = json.loads(row)
except json.JSONDecodeError:
raise ValueError(
"Could not read PBT policy file: {}.".format(policy_file)
) from None
raw_policy.append(tuple(parsed_row))
# Loop through policy from end to start to obtain changepoints
policy = []
last_new_tag = None
last_old_conf = None
for (old_tag, new_tag, old_step, new_step, old_conf, new_conf) in reversed(
raw_policy
):
if last_new_tag and old_tag != last_new_tag:
# Tag chain ended. This means that previous changes were
# overwritten by the last change and should be ignored.
break
last_new_tag = new_tag
last_old_conf = old_conf
policy.append((new_step, new_conf))
return last_old_conf, list(reversed(policy))
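    # Each row of the policy file is a JSON list written by
    # `_log_config_on_step` above:
    #     [old_tag, new_tag, old_step, new_step, old_config, new_config]
    # so `_load_policy` keeps only the chain of changes that ends at the
    # final tag, walking the file backwards.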
def on_trial_add(self, trial_runner: "trial_runner.TrialRunner", trial: Trial):
if self._trial:
raise ValueError(
"More than one trial added to PBT replay run. This "
"means the same schedule will be trained multiple "
"times. Do you want to set `n_samples=1`?"
)
self._trial = trial
if self._trial.config and self._policy:
logger.warning(
"Trial was initialized with a config, which was overwritten. "
"Did you start the PBT replay with a `config` parameter?"
)
elif self._trial.config and not self._policy:
# Only train with initial policy
self.config = self._trial.config
elif not self._trial.config and not self._policy:
raise ValueError(
"No replay policy found and trial initialized without a "
"valid config. Either pass a `config` argument to `tune.run()`"
"or consider not using PBT replay for this run."
)
self._trial.set_config(self.config)
def on_trial_result(
self, trial_runner: "trial_runner.TrialRunner", trial: Trial, result: Dict
) -> str:
if TRAINING_ITERATION not in result:
# No time reported
return TrialScheduler.CONTINUE
if not self._next_policy:
# No more changes in the config
return TrialScheduler.CONTINUE
step = result[TRAINING_ITERATION]
self._current_step = step
change_at, new_config = self._next_policy
if step < change_at:
# Don't change the policy just yet
return TrialScheduler.CONTINUE
logger.info(
"Population Based Training replay is now at step {}. "
"Configuration will be changed to {}.".format(step, new_config)
)
checkpoint = trial_runner.trial_executor.save(
trial, Checkpoint.MEMORY, result=result
)
new_tag = make_experiment_tag(self.experiment_tag, new_config, new_config)
trial_executor = trial_runner.trial_executor
trial_executor.stop_trial(trial)
trial_executor.set_status(trial, Trial.PAUSED)
trial.set_experiment_tag(new_tag)
trial.set_config(new_config)
trial.on_checkpoint(checkpoint)
self.current_config = new_config
self._num_perturbations += 1
self._next_policy = next(self._policy_iter, None)
return TrialScheduler.NOOP
def debug_string(self) -> str:
return "PopulationBasedTraining replay: Step {}, perturb {}".format(
self._current_step, self._num_perturbations
)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from oslo.utils import excutils
import six
from six.moves.urllib import parse as urlparse
from sahara import conductor as c
from sahara import context
from sahara.openstack.common import log as logging
from sahara.plugins import base as plugin_base
from sahara.plugins import provisioning
from sahara.utils import general as g
from sahara.utils.notification import sender
from sahara.utils.openstack import nova
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
OPS = None
def setup_service_api(ops):
global OPS
OPS = ops
# Cluster ops
def get_clusters():
return conductor.cluster_get_all(context.ctx())
def get_cluster(id):
return conductor.cluster_get(context.ctx(), id)
def scale_cluster(id, data):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
existing_node_groups = data.get('resize_node_groups', [])
additional_node_groups = data.get('add_node_groups', [])
# the next map is the main object we will work with
# to_be_enlarged : {node_group_id: desired_amount_of_instances}
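    # Hypothetical example of the incoming `data` payload:
    #     {"resize_node_groups": [{"name": "workers", "count": 5}],
    #      "add_node_groups": [{"name": "extra-workers", "count": 2}]}
    # (additional node group fields may accompany "add_node_groups" entries)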
to_be_enlarged = {}
for ng in existing_node_groups:
ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
to_be_enlarged.update({ng_id: ng['count']})
additional = construct_ngs_for_scaling(cluster, additional_node_groups)
cluster = conductor.cluster_get(ctx, cluster)
try:
cluster = g.change_cluster_status(cluster, "Validating")
plugin.validate_scaling(cluster, to_be_enlarged, additional)
except Exception:
with excutils.save_and_reraise_exception():
g.clean_cluster_from_empty_ng(cluster)
g.change_cluster_status(cluster, "Active")
# If we are here validation is successful.
# So let's update to_be_enlarged map:
to_be_enlarged.update(additional)
for node_group in cluster.node_groups:
if node_group.id not in to_be_enlarged:
to_be_enlarged[node_group.id] = node_group.count
OPS.provision_scaled_cluster(id, to_be_enlarged)
return cluster
def create_cluster(values):
ctx = context.ctx()
cluster = conductor.cluster_create(ctx, values)
sender.notify(ctx, cluster.id, cluster.name, "New",
"create")
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
# validating cluster
try:
cluster = g.change_cluster_status(cluster, "Validating")
plugin.validate(cluster)
except Exception as e:
with excutils.save_and_reraise_exception():
g.change_cluster_status(cluster, "Error",
status_description=six.text_type(e))
OPS.provision_cluster(cluster.id)
return cluster
def terminate_cluster(id):
cluster = g.change_cluster_status(id, "Deleting")
OPS.terminate_cluster(id)
sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
"delete")
# ClusterTemplate ops
def get_cluster_templates():
return conductor.cluster_template_get_all(context.ctx())
def get_cluster_template(id):
return conductor.cluster_template_get(context.ctx(), id)
def create_cluster_template(values):
return conductor.cluster_template_create(context.ctx(), values)
def terminate_cluster_template(id):
return conductor.cluster_template_destroy(context.ctx(), id)
# NodeGroupTemplate ops
def get_node_group_templates():
return conductor.node_group_template_get_all(context.ctx())
def get_node_group_template(id):
return conductor.node_group_template_get(context.ctx(), id)
def create_node_group_template(values):
return conductor.node_group_template_create(context.ctx(), values)
def terminate_node_group_template(id):
return conductor.node_group_template_destroy(context.ctx(), id)
# Plugins ops
def get_plugins():
return plugin_base.PLUGINS.get_plugins(
base=provisioning.ProvisioningPluginBase)
def get_plugin(plugin_name, version=None):
plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
if plugin:
res = plugin.as_resource()
if version:
if version in plugin.get_versions():
configs = plugin.get_configs(version)
res._info['configs'] = [c.dict for c in configs]
processes = plugin.get_node_processes(version)
res._info['node_processes'] = processes
required_image_tags = plugin.get_required_image_tags(version)
res._info['required_image_tags'] = required_image_tags
else:
return None
return res
def convert_to_cluster_template(plugin_name, version, template_name,
config_file):
plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
return plugin.convert(config_file, plugin_name, version,
urlparse.unquote(template_name),
conductor.cluster_template_create)
def construct_ngs_for_scaling(cluster, additional_node_groups):
ctx = context.ctx()
additional = {}
for ng in additional_node_groups:
count = ng['count']
ng['count'] = 0
ng_id = conductor.node_group_add(ctx, cluster, ng)
additional.update({ng_id: count})
return additional
# Image Registry
def get_images(tags):
return nova.client().images.list_registered(tags)
def get_image(**kwargs):
if len(kwargs) == 1 and 'id' in kwargs:
return nova.client().images.get(kwargs['id'])
else:
return nova.client().images.find(**kwargs)
def register_image(image_id, username, description=None):
client = nova.client()
client.images.set_description(image_id, username, description)
return client.images.get(image_id)
def unregister_image(image_id):
client = nova.client()
client.images.unset_description(image_id)
return client.images.get(image_id)
def add_image_tags(image_id, tags):
client = nova.client()
client.images.tag(image_id, tags)
return client.images.get(image_id)
def remove_image_tags(image_id, tags):
client = nova.client()
client.images.untag(image_id, tags)
return client.images.get(image_id)
| |
#!/usr/bin/env python3
import itertools
import argparse
import socket
import curses
import time
from multiprocessing import Process
from subprocess import Popen, PIPE, DEVNULL
import pychromecast
import chardet
from twisted.web import http
from twisted.web.server import Site, Request, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor, endpoints
from twisted.web.static import File, Data, NoRangeStaticProducer
from twisted.python.compat import networkString
VIDEO_PATH = 'video'
SUB_PATH = 'sub'
DEFAULT_MIME = 'video/mp4'
DEFAULT_BITRATE = '6000k'
def to_webvtt(sub_file, video_file=None):
encoding = None
if sub_file:
encoding = detect_encoding(sub_file)
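    # The ffmpeg invocation below appears intended to pick the subtitle
    # stream(s) ('-map s?', where the '?' makes the mapping optional so a
    # video without subtitles does not error) and to write WebVTT to stdout
    # ('-f webvtt' with output '-'), which is read back as bytes.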
sub_transcoder = Popen(['ffmpeg',
'-y', '-nostdin'] +
(['-sub_charenc', encoding] if encoding else []) +
['-i', sub_file or video_file,
'-map', 's?',
'-f', 'webvtt',
'-loglevel', 'error',
'-'],
stdout=PIPE,
stderr=DEVNULL)
return sub_transcoder.stdout.read()
def detect_encoding(filename):
with open(filename, 'rb') as f:
result = chardet.detect(f.read())
return result['encoding']
def serve(port, video_path, vtt_data, interface='',
chunked=False, transcode_bitrate=None):
if transcode_bitrate:
video = ChunkedPipe(get_transcoder(video_path, transcode_bitrate))
elif chunked:
video = ChunkedFile(video_path,
defaultType=DEFAULT_MIME)
else:
video = File(video_path, defaultType=DEFAULT_MIME)
root = Resource()
root.putChild(SUB_PATH.encode('utf-8'), Data(vtt_data, 'text/vtt'))
root.putChild(VIDEO_PATH.encode('utf-8'), video)
endpoint = endpoints.TCP4ServerEndpoint(reactor, port, interface=interface)
endpoint.listen(Site(root, requestFactory=CORSRequest))
reactor.run()
class CORSRequest(Request):
def process(self):
self.setHeader(b'Access-Control-Allow-Origin', b'*')
super().process()
class ChunkedFile(File):
def makeProducer(self, request, fileForReading):
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
def render_GET(self, request):
res = super().render_GET(request)
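        # Removing accept-ranges and content-length below makes Twisted fall
        # back to chunked transfer encoding and stops clients from seeking,
        # which suits a file that is still being written.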
request.responseHeaders.removeHeader(b'accept-ranges')
request.responseHeaders.removeHeader(b'content-length')
return res
class ChunkedPipe(ChunkedFile):
def __init__(self, fileForReading, defaultType=DEFAULT_MIME):
Resource.__init__(self)
self.fileForReading = fileForReading
self.type = self.defaultType = defaultType
def render_GET(self, request):
if request.method == b'HEAD':
self._setContentHeaders(request)
return b''
producer = self.makeProducer(request, self.fileForReading)
producer.start()
return NOT_DONE_YET
def _setContentHeaders(self, request, size=None):
if self.type:
request.setHeader(b'content-type', networkString(self.type))
def get_src_ip_addr(dest_addr='8.8.8.8', dest_port=53):
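    # connect() on a UDP socket sends no packets; it only asks the kernel
    # for a routing decision, so getsockname() yields the local address
    # that would be used to reach dest_addr.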
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((dest_addr, dest_port))
src_addr, _src_port = s.getsockname()
return src_addr
def find_cast(friendly_name=None):
chromecasts = pychromecast.get_chromecasts()
return next(cc for cc in chromecasts
if not friendly_name or
cc.device.friendly_name == friendly_name)
def get_transcoder(infile, video_bitrate):
transcoder = Popen(['ffmpeg',
'-y', '-nostdin',
'-i', infile,
'-preset', 'ultrafast',
'-f', 'mp4',
'-frag_duration', '3000',
'-b:v', video_bitrate,
'-loglevel', 'error',
'-vcodec', 'h264',
'-acodec', 'aac',
'-'],
stdout=PIPE)
return transcoder.stdout
def play(cast, video_url, sub_url=None, unseekable=False):
cast.wait()
mc = cast.media_controller
mc.play_media(video_url,
DEFAULT_MIME,
subtitles=sub_url)
mc.block_until_active()
control_loop(cast, mc, unseekable=unseekable)
def control_loop(cast, mc, unseekable=False):
# Based on https://github.com/stefanor/chromecastplayer
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
stdscr.nodelay(True)
started = False
try:
while True:
try:
c = stdscr.getch()
if c == curses.ERR:
time.sleep(1)
elif c == ord(' '):
if mc.status.player_state == 'PAUSED':
mc.play()
else:
mc.pause()
elif c == ord('q'):
mc.stop()
break
elif c == curses.KEY_RIGHT and not unseekable:
mc.seek(mc.status.current_time + 10)
elif c == curses.KEY_LEFT and not unseekable:
mc.seek(max(mc.status.current_time - 10, 0))
elif c == curses.KEY_PPAGE and not unseekable:
mc.seek(mc.status.current_time + 60)
elif c == curses.KEY_NPAGE and not unseekable:
mc.seek(max(mc.status.current_time - 60, 0))
elif c == curses.KEY_UP:
cast.set_volume(min(cast.status.volume_level + 0.1, 1))
elif c == curses.KEY_DOWN:
cast.set_volume(max(cast.status.volume_level - 0.1, 0))
if mc.status:
stdscr.addstr(0, 0, mc.status.player_state)
stdscr.clrtoeol()
minutes, seconds = divmod(mc.status.current_time, 60)
hours, minutes = divmod(minutes, 60)
stdscr.addstr(1, 0, "%02i:%02i:%02i"
% (hours, minutes, seconds))
idle_state = mc.status.player_state == 'IDLE'
if not idle_state:
started = True
if started and idle_state:
break
mc.update_status()
stdscr.move(2, 0)
stdscr.refresh()
except pychromecast.error.UnsupportedNamespace:
pass
finally:
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', required=True,
help='Video file')
parser.add_argument('-c', '--chunked', action='store_true',
help='Unseekable stream using chunked-encoding '
'(for incomplete files)')
parser.add_argument('-t', '--transcode', action='store_true',
help='Transcode to mp4 using ffmpeg (implies -c)')
parser.add_argument('-b', '--bitrate',
help='Video bitrate for transcoding (implies -t)')
parser.add_argument('-s', '--subtitles',
help='Subtitle file (.vtt or .srt)')
parser.add_argument('-p', '--port', type=int, default=7000,
help='Port to listen')
parser.add_argument('-i', '--ip',
help='IPv4 address to listen')
parser.add_argument('-d', '--device',
help='ChromeCast device name')
args = parser.parse_args()
cast = find_cast(friendly_name=args.device)
port = args.port
ip = args.ip or get_src_ip_addr()
video = args.video
transcode = args.transcode or args.bitrate
chunked = transcode or args.chunked
transcode_bitrate = transcode and (args.bitrate or DEFAULT_BITRATE)
subtitles = to_webvtt(args.subtitles, video)
base_url = 'http://{}:{}'.format(ip, port)
video_url = '{}/{}'.format(base_url, VIDEO_PATH)
sub_url = subtitles and '{}/{}'.format(base_url, SUB_PATH)
server = Process(target=serve,
args=(port,
video,
subtitles,
ip,
chunked,
transcode_bitrate))
server.start()
play(cast, video_url, sub_url=sub_url, unseekable=chunked)
server.terminate()
if __name__ == '__main__':
main()
| |
# -*- coding: utf8 -*-
import sys
import umsgpack
from twisted.python import log
from twisted.internet import reactor, defer
from brainer.lib.base import BaseREP
from brainer.lib.hash import ConsistentHash
from brainer.lib.mixins import SerializerMixin
from brainer.lib.exceptions import ZeroNodeError
from brainer.node.client import NodeClient
class Broker(BaseREP, SerializerMixin):
"""This is a broker.
"""
def __init__(self, factory, endpoint, *args, **kwargs):
"""
:param factory: A `txzmq.ZmqFactory` object.
:param endpoint: A `txzmq.ZmqEndpoint` object.
:param node_manager: A Node Manager. Defaults to `NodeManager`.
:param serializer: A serializer, defaults to umsgpack.
:param debug: If True, will log debug messages. Defaults to False.
"""
self._init_instance(*args, **kwargs)
log.msg('Broker started!!! Serializer: {}'.format(
self._serializer.__name__))
super(Broker, self).__init__(factory, endpoint)
def _init_instance(self, *args, **kwargs):
self._serializer = kwargs.pop('serializer', umsgpack)
self._debug = kwargs.pop('debug', False)
self._publisher_address = kwargs.get(
'publisher', 'ipc:///tmp/publisher.sock')
# A list of node-ids, the index is the node_number
self._nodes = []
# Key: Value = node-id: connection obj
self._nodes_connections = {}
self._allowed_actions = (
'register', 'unregister', 'ping',
'route', 'set', 'get', 'remove')
def register_node(self, node_id, address):
"""
:param node_id: The node id sent down by the Node.
"""
self._nodes.append(node_id)
node_number = self._nodes.index(node_id)
node_connection = NodeClient.create(address)
self._nodes_connections[node_id] = node_connection
return node_number
def clean_connection(self, node_id):
"""Shutdown the connection and remove any reference
to it. Called when the node unregisters.
:param node_id: The node id sent down by the Node.
"""
if node_id not in self._nodes_connections:
return
connection = self._nodes_connections[node_id]
connection.shutdown()
del self._nodes_connections[node_id]
def unregister_node(self, node_id):
"""Entry-point for unregistering a node.
It shuts down the connection, removes it from the list of nodes.
:param node_id: The node id.
"""
self.clean_connection(node_id)
if node_id in self._nodes:
self._nodes.remove(node_id)
def get_node_by_key(self, key):
"""Gets the right machine based on the ky.
:param key: The key to be set. It will be used to define which
node that key should go.
:returns: A `brainer.node.client.NodeClient` object.
"""
if not self._nodes:
raise ZeroNodeError
hashing = ConsistentHash(len(self._nodes))
node_number = hashing.get_machine(key)
node_id = self._nodes[node_number]
if self._debug:
log.msg('Machine {} ({}) picked for key {}'.format(
node_id, node_number, key))
return self._nodes_connections[node_id]
def gotMessage(self, message_id, *messageParts):
"""Any message received is processed here.
:param message_id: The message id (Generated by ZeroMQ).
:param messageParts: The message itself.
"""
message = self.unpack(messageParts[0])
if self._debug:
log.msg("New Message: {}".format(message))
action = message['action']
        if action not in self._allowed_actions:
            self.reply_error(
                message_id, "FORBIDDEN", "You cannot run this command.")
            return
        method = getattr(self, action, None)
        if method is None:
            self.reply_error(
                message_id, "NOT_IMPLEMENTED", "Command not implemented.")
            return
try:
d = defer.maybeDeferred(method, message_id, message)
except ZeroNodeError:
self.reply_error(
message_id, "ZERO_NODES", "There are no nodes registered.")
return
d.addErrback(self._on_error, method, message_id)
def _on_error(self, f, method, message_id):
"""On error, we reply a failure to the customer.
This is not proper failure handling. Needs to be improved.
:param f: A `twisted.python.failure.Failure`.
:param method: The method that the client requested.
:param message_id: The request message id.
"""
log.err(f, "Method {} failed.".format(method))
self.reply_error(message_id, "UNKNOWN_ERROR", "Verify server log")
def register(self, message_id, message):
"""Registers a node and kick of the process of
creating connections.
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
server_id, address = message['id'], message['address']
self.register_node(server_id, address)
is_first = len(self._nodes_connections) == 1
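        # If this is the first node there is nobody to copy data from, so we
        # reply with an empty snapshot; otherwise the newcomer is seeded from
        # an existing node before the registration reply is sent.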
if not is_first:
d = self.snapshot(server_id)
else:
d = defer.succeed(None)
d.addCallback(lambda snapshot: self.reply(
message_id, {"action": "register", "snapshot": snapshot}))
def unregister(self, message_id, message):
"""Unregisters a node and kicks off the process of
cleaning connections.
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
node_id = message['id']
if self._debug:
log.msg(
'Unregister request received for node ID {}'.format(node_id))
self.unregister_node(node_id)
self.reply(message_id, {"action": "unregister", "unregistered": True})
def snapshot(self, requester_id):
"""Requests a snapshot from any other node that is not the requester.
:param requester_id: A server id to filter out.
"""
node = None
for server_id, connection in self._nodes_connections.items():
if server_id != requester_id:
node = connection
break
return node.snapshot()
def batch(self, main_node, wait_all, method, *args, **kwargs):
"""Performs an operation in every node. Used for write operations.
:param main_node: The main node connection picked by the
hashing method.
:param wait_all: If True, the deferred will fire only when all
nodes have replied to the write. If False, it fires immediately
after the main node replies the write.
:param method: A method to call in every node connection.
E.g.: 'set'
"""
func = getattr(main_node, method)
main_defer = func(*args, **kwargs)
dlist = [main_defer]
for server_id, connection in self._nodes_connections.items():
if connection == main_node:
continue
func = getattr(connection, method)
d = func(*args, **kwargs)
if wait_all:
dlist.append(d)
d = defer.DeferredList(dlist)
return d
def set(self, message_id, message):
"""Sets a key-value pair in the nodes.
The set is tunable per-query. You can pick speed or consistency.
This is defined by the parameter 'wait_all' in the message.
        If set to True, we only reply to the client after all nodes
        get the data. If False, we reply as soon as the main node
        (selected by the hashing) gets the data.
It currently defaults to True (consistency). So in the case a
node goes down, it's harder to lose data, by default.
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
return self._fire_batch('set', message_id, message)
def get(self, message_id, message):
"""Get a value based on a key.
We will consult the right node based on the key (determined
by hashing). But in case a node goes down, all other nodes
should have the same data (see `set` documentation).
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
node = self.get_node_by_key(message['key'])
d = node.get(message)
d.addCallback(lambda reply: self.reply(message_id, reply))
return d
def remove(self, message_id, message):
"""Removes a key.
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
return self._fire_batch('remove', message_id, message)
def _fire_batch(self, action, message_id, message):
"""Convenience method that wraps any write-operation.
For now, write and remove use it.
:param action: The action to perform. This needs to map to
an actual method to be called.
:param message_id: The message id (generated by ZeroMQ)
:param message: The message itself.
"""
main_node = self.get_node_by_key(message['key'])
wait_all = message.get('wait_all', True)
d = self.batch(main_node, wait_all, action, message)
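        # `batch` returns a DeferredList that fires with a list of
        # (success, result) pairs, with the main node's deferred first;
        # replies[0][1] below is therefore the main, hash-selected node's
        # result, which is what gets sent back to the client.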
d.addCallback(
lambda replies: self.reply(message_id, replies[0][1]))
return d
def run_broker(host, debug=False):
log.startLogging(sys.stdout)
Broker.create(host, debug=debug)
reactor.run()
if __name__ == '__main__':
    # run_broker requires a ZeroMQ endpoint string; the address below is
    # only an illustrative placeholder.
    run_broker('tcp://127.0.0.1:5555')
| |
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
from test import test_support
import mmap
import uuid
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.exists(test_support.TESTFN):
os.unlink(test_support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, "a")
@test_support.cpython_only
def test_rename(self):
path = unicode(test_support.TESTFN)
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
def setUp(self):
self.files = []
os.mkdir(test_support.TESTFN)
def tearDown(self):
for name in self.files:
os.unlink(name)
os.rmdir(test_support.TESTFN)
def check_tempfile(self, name):
# make sure it doesn't already exist:
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
# make sure we can create the file
open(name, "w")
self.files.append(name)
def test_tempnam(self):
if not hasattr(os, "tempnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
self.check_tempfile(os.tempnam())
name = os.tempnam(test_support.TESTFN)
self.check_tempfile(name)
name = os.tempnam(test_support.TESTFN, "pfx")
self.assertTrue(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
def test_tmpfile(self):
if not hasattr(os, "tmpfile"):
return
# As with test_tmpnam() below, the Windows implementation of tmpfile()
# attempts to create a file in the root directory of the current drive.
# On Vista and Server 2008, this test will always fail for normal users
# as writing to the root directory requires elevated privileges. With
# XP and below, the semantics of tmpfile() are the same, but the user
# running the test is more likely to have administrative privileges on
# their account already. If that's the case, then os.tmpfile() should
# work. In order to make this test as useful as possible, rather than
# trying to detect Windows versions or whether or not the user has the
# right permissions, just try and create a file in the root directory
# and see if it raises a 'Permission denied' OSError. If it does, then
# test that a subsequent call to os.tmpfile() raises the same error. If
# it doesn't, assume we're on XP or below and the user running the test
# has administrative privileges, and proceed with the test as normal.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
if sys.platform == 'win32':
name = '\\python_test_os_test_tmpfile.txt'
if os.path.exists(name):
os.remove(name)
try:
fp = open(name, 'w')
except IOError, first:
# open() failed, assert tmpfile() fails in the same way.
# Although open() raises an IOError and os.tmpfile() raises an
# OSError(), 'args' will be (13, 'Permission denied') in both
# cases.
try:
fp = os.tmpfile()
except OSError, second:
self.assertEqual(first.args, second.args)
else:
self.fail("expected os.tmpfile() to raise OSError")
return
else:
# open() worked, therefore, tmpfile() should work. Close our
# dummy file and proceed with the test as normal.
fp.close()
os.remove(name)
fp = os.tmpfile()
fp.write("foobar")
fp.seek(0,0)
s = fp.read()
fp.close()
self.assertTrue(s == "foobar")
def test_tmpnam(self):
if not hasattr(os, "tmpnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)
name = os.tmpnam()
if sys.platform in ("win32",):
# The Windows tmpnam() seems useless. From the MS docs:
#
# The character string that tmpnam creates consists of
# the path prefix, defined by the entry P_tmpdir in the
# file STDIO.H, followed by a sequence consisting of the
# digit characters '0' through '9'; the numerical value
# of this string is in the range 1 - 65,535. Changing the
# definitions of L_tmpnam or P_tmpdir in STDIO.H does not
# change the operation of tmpnam.
#
# The really bizarre part is that, at least under MSVC6,
# P_tmpdir is "\\". That is, the path returned refers to
# the root of the current drive. That's a terrible place to
# put temp files, and, depending on privileges, the user
# may not even be able to open a file in the root directory.
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
else:
self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(test_support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(test_support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_string_buffer("", 100)
if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(test_support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_large_time(self):
t1 = 5000000000 # some day in 2128
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError, e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
def setUp(self):
self.__save = dict(os.environ)
os.environ.clear()
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
# Bug 1110478
def test_update2(self):
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
# On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
# #13415).
@unittest.skipIf(sys.platform.startswith(('freebsd', 'darwin')),
"due to known OS bug: see issue #13415")
def test_unset_error(self):
if sys.platform == "win32":
# an environment variable is limited to 32,767 characters
key = 'x' * 50000
self.assertRaises(ValueError, os.environ.__delitem__, key)
else:
# "=" is not allowed in a variable name
key = 'key='
self.assertRaises(OSError, os.environ.__delitem__, key)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
    #         link/           a symlink to TEST2
# TEST2/
# tmp4 a lone file
walk_path = join(test_support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(test_support.TESTFN, "TEST2")
tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = file(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if hasattr(os, "symlink"):
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if hasattr(os, "symlink"):
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(test_support.TESTFN)
class MakedirTests (unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
def test_makedir(self):
base = test_support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def tearDown(self):
path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != test_support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests (unittest.TestCase):
def test_devnull(self):
f = file(os.devnull, 'w')
f.write('hello')
f.close()
f = file(os.devnull, 'r')
self.assertEqual(f.read(), '')
f.close()
class URandomTests (unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
# We need to use repr() and eval() to avoid line ending conversions
# under Windows.
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.write(repr(data))',
'sys.stdout.flush()',
'print >> sys.stderr, (len(data), data)'))
cmd_line = [sys.executable, '-c', code]
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(p.wait(), 0, (p.wait(), err))
out = eval(out)
self.assertEqual(len(out), count, err)
return out
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, test_support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)
def test_mkdir(self):
f = open(test_support.TESTFN, "w")
try:
self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
finally:
f.close()
os.unlink(test_support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)
def test_chmod(self):
self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
    # We omit close because it doesn't raise an exception on some platforms.
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
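    # The loop above stamps one test_<name> method per entry in `singles`
    # onto the class namespace; each generated helper calls
    # self.check(getattr(os, f)) only if os actually has that function on
    # this platform.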
def check(self, f, *args):
try:
f(test_support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = test_support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.check(os.ftruncate, 0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, " ")
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
else:
class PosixUidGidTests(unittest.TestCase):
pass
@unittest.skipUnless(sys.platform == "win32" and hasattr(os,'kill'),
"Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value)
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = '0'
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 20
while count < max and proc.poll() is None:
if m[0] == '1':
break
time.sleep(0.5)
count += 1
else:
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting CTRL+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle CTRL+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
def test_main():
test_support.run_unittest(
FileTests,
TemporaryFileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
Win32KillTests
)
if __name__ == "__main__":
test_main()
#
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from evennia.players.models import PlayerDB
from evennia.typeclasses.admin import AttributeInline, TagInline
from evennia.utils import create
# handle the custom User editor
class PlayerDBChangeForm(UserChangeForm):
"""
Modify the playerdb class.
"""
class Meta:
model = PlayerDB
fields = '__all__'
username = forms.RegexField(
label="Username",
max_length=30,
regex=r'^[\w. @+-]+$',
widget=forms.TextInput(
attrs={'size': '30'}),
error_messages={
'invalid': "This value may contain only letters, spaces, numbers "
"and @/./+/-/_ characters."},
help_text="30 characters or fewer. Letters, spaces, digits and "
"@/./+/-/_ only.")
def clean_username(self):
"""
Clean the username and check its existence.
"""
        username = self.cleaned_data['username']
        if username.upper() == self.instance.username.upper():
            return username
        elif PlayerDB.objects.filter(username__iexact=username).exists():
            raise forms.ValidationError('A player with that name '
                                        'already exists.')
        return username
class PlayerDBCreationForm(UserCreationForm):
"""
Create a new PlayerDB instance.
"""
class Meta:
model = PlayerDB
fields = '__all__'
username = forms.RegexField(
label="Username",
max_length=30,
regex=r'^[\w. @+-]+$',
widget=forms.TextInput(
attrs={'size': '30'}),
error_messages={
'invalid': "This value may contain only letters, spaces, numbers "
"and @/./+/-/_ characters."},
help_text="30 characters or fewer. Letters, spaces, digits and "
"@/./+/-/_ only.")
    def clean_username(self):
        username = self.cleaned_data['username']
        if PlayerDB.objects.filter(username__iexact=username).exists():
            raise forms.ValidationError('A player with that name already '
                                        'exists.')
        return username
class PlayerForm(forms.ModelForm):
"""
Defines how to display Players
"""
class Meta:
model = PlayerDB
fields = '__all__'
db_key = forms.RegexField(
label="Username",
initial="PlayerDummy",
max_length=30,
regex=r'^[\w. @+-]+$',
required=False,
widget=forms.TextInput(attrs={'size': '30'}),
error_messages={
'invalid': "This value may contain only letters, spaces, numbers"
" and @/./+/-/_ characters."},
help_text="This should be the same as the connected Player's key "
"name. 30 characters or fewer. Letters, spaces, digits and "
"@/./+/-/_ only.")
db_typeclass_path = forms.CharField(
label="Typeclass",
initial=settings.BASE_PLAYER_TYPECLASS,
widget=forms.TextInput(
attrs={'size': '78'}),
help_text="Required. Defines what 'type' of entity this is. This "
"variable holds a Python path to a module with a valid "
"Evennia Typeclass. Defaults to "
"settings.BASE_PLAYER_TYPECLASS.")
db_permissions = forms.CharField(
label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(
attrs={'size': '78'}),
help_text="In-game permissions. A comma-separated list of text "
"strings checked by certain locks. They are often used for "
"hierarchies, such as letting a Player have permission "
"'Wizards', 'Builders' etc. A Player permission can be "
"overloaded by the permissions of a controlled Character. "
"Normal players use 'Players' by default.")
db_lock_storage = forms.CharField(
label="Locks",
widget=forms.Textarea(attrs={'cols': '100', 'rows': '2'}),
required=False,
help_text="In-game lock definition string. If not given, defaults "
"will be used. This string should be on the form "
"<i>type:lockfunction(args);type2:lockfunction2(args);...")
db_cmdset_storage = forms.CharField(
label="cmdset",
initial=settings.CMDSET_PLAYER,
widget=forms.TextInput(attrs={'size': '78'}),
required=False,
help_text="python path to player cmdset class (set in "
"settings.CMDSET_PLAYER by default)")
class PlayerInline(admin.StackedInline):
"""
Inline creation of Player
"""
model = PlayerDB
template = "admin/players/stacked.html"
form = PlayerForm
fieldsets = (
("In-game Permissions and Locks",
{'fields': ('db_lock_storage',),
#{'fields': ('db_permissions', 'db_lock_storage'),
'description': "<i>These are permissions/locks for in-game use. "
"They are unrelated to website access rights.</i>"}),
("In-game Player data",
{'fields': ('db_typeclass_path', 'db_cmdset_storage'),
'description': "<i>These fields define in-game-specific properties "
"for the Player object in-game.</i>"}))
extra = 1
max_num = 1
class PlayerTagInline(TagInline):
"""
Inline Player Tags.
"""
model = PlayerDB.db_tags.through
class PlayerAttributeInline(AttributeInline):
"""
Inline Player Attributes.
"""
model = PlayerDB.db_attributes.through
class PlayerDBAdmin(BaseUserAdmin):
"""
This is the main creation screen for Users/players
"""
list_display = ('username', 'email', 'is_staff', 'is_superuser')
form = PlayerDBChangeForm
add_form = PlayerDBCreationForm
inlines = [PlayerTagInline, PlayerAttributeInline]
fieldsets = (
(None, {'fields': ('username', 'password', 'email')}),
('Website profile', {
'fields': ('first_name', 'last_name'),
'description': "<i>These are not used "
"in the default system.</i>"}),
('Website dates', {
'fields': ('last_login', 'date_joined'),
'description': '<i>Relevant only to the website.</i>'}),
('Website Permissions', {
'fields': ('is_active', 'is_staff', 'is_superuser',
'user_permissions', 'groups'),
'description': "<i>These are permissions/permission groups for "
"accessing the admin site. They are unrelated to "
"in-game access rights.</i>"}),
('Game Options', {
'fields': ('db_typeclass_path', 'db_cmdset_storage',
'db_lock_storage'),
'description': '<i>These are attributes that are more relevant '
'to gameplay.</i>'}))
# ('Game Options', {'fields': (
# 'db_typeclass_path', 'db_cmdset_storage',
# 'db_permissions', 'db_lock_storage'),
# 'description': '<i>These are attributes that are '
# 'more relevant to gameplay.</i>'}))
add_fieldsets = (
(None,
{'fields': ('username', 'password1', 'password2', 'email'),
'description': "<i>These account details are shared by the admin "
"system and the game.</i>"},),)
def save_model(self, request, obj, form, change):
"""
Custom save actions.
Args:
request (Request): Incoming request.
obj (Object): Object to save.
form (Form): Related form instance.
change (bool): False if this is a new save and not an update.
"""
obj.save()
if not change:
#calling hooks for new player
ply = obj
ply.basetype_setup()
ply.at_player_creation()
## TODO! Remove User reference!
#def save_formset(self, request, form, formset, change):
# """
# Run all hooks on the player object
# """
# super(PlayerDBAdmin, self).save_formset(request, form, formset, change)
# userobj = form.instance
# userobj.name = userobj.username
# if not change:
# # uname, passwd, email = str(request.POST.get(u"username")), \
# # str(request.POST.get(u"password1")), \
# # str(request.POST.get(u"email"))
# typeclass = str(request.POST.get(
# u"playerdb_set-0-db_typeclass_path"))
# create.create_player("", "", "",
# user=userobj,
# typeclass=typeclass,
# player_dbobj=userobj)
admin.site.register(PlayerDB, PlayerDBAdmin)
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
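# Illustrative only (not part of botocore): for a successful, non-streaming
# operation the dict built above looks roughly like
#   {'headers': {...}, 'status_code': 200,
#    'context': {'operation_name': 'GetObject'},
#    'body': b'...raw payload...'}
# with 'body' swapped for the raw stream or a StreamingBody in the
# event-stream/streaming branches.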
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
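    # Example (illustrative): for S3's GetObject the service_id hyphenizes to
    # 's3', so the event emitted above is 'request-created.s3.GetObject'.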
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
        else:
            # Request needs to be retried, and we need to sleep
            # for the specified number of seconds.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
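    # Handler contract illustrated by the logic above: a 'needs-retry' handler
    # returning None means "do not retry"; returning a number such as 0.5
    # means "sleep that many seconds, then send the request again".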
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
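# Illustrative precedence for _get_verify_value (derived from the code above):
#   verify=False                                 -> False (explicit value wins)
#   verify=None, REQUESTS_CA_BUNDLE=/tmp/ca.pem  -> '/tmp/ca.pem'
#   verify=None, REQUESTS_CA_BUNDLE unset        -> True (verify certificates)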
import aslam_backend as aopt
import aslam_splines as asp
import IccUtil as util
import incremental_calibration as inc
import kalibr_common as kc
import sm
import gc
import numpy as np
import multiprocessing
import sys
# make numpy print prettier
np.set_printoptions(suppress=True)
CALIBRATION_GROUP_ID = 0
HELPER_GROUP_ID = 1
def addSplineDesignVariables(problem, dvc, setActive=True, group_id=HELPER_GROUP_ID):
for i in range(0,dvc.numDesignVariables()):
dv = dvc.designVariable(i)
dv.setActive(setActive)
problem.addDesignVariable(dv, group_id)
class IccCalibrator(object):
def __init__(self):
self.ImuList = []
def initDesignVariables(self, problem, poseSpline, noTimeCalibration, noChainExtrinsics=True, \
estimateGravityLength=False, initialGravityEstimate=np.array([0.0,9.81,0.0])):
# Initialize the system pose spline (always attached to imu0)
self.poseDv = asp.BSplinePoseDesignVariable( poseSpline )
addSplineDesignVariables(problem, self.poseDv)
# Add the calibration target orientation design variable. (expressed as gravity vector in target frame)
if estimateGravityLength:
self.gravityDv = aopt.EuclideanPointDv( initialGravityEstimate )
else:
self.gravityDv = aopt.EuclideanDirection( initialGravityEstimate )
self.gravityExpression = self.gravityDv.toExpression()
self.gravityDv.setActive( True )
problem.addDesignVariable(self.gravityDv, HELPER_GROUP_ID)
#Add all DVs for all IMUs
for imu in self.ImuList:
imu.addDesignVariables( problem )
#Add all DVs for the camera chain
self.CameraChain.addDesignVariables( problem, noTimeCalibration, noChainExtrinsics )
    def addPoseMotionTerms(self, problem, tv, rv):
        wt = 1.0/tv
        wr = 1.0/rv
        W = np.diag([wt,wt,wt,wr,wr,wr])
        # NOTE: `errorOrder` was undefined in the original; penalizing the
        # first derivative (order 1) is an assumed value.
        errorOrder = 1
        asp.addMotionErrorTerms(problem, self.poseDv, W, errorOrder)
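        # Worked example (illustrative): with buildProblem's defaults
        # mrTranslationVariance=1e6 and mrRotationVariance=1e5, wt=1e-6 and
        # wr=1e-5, i.e. W = diag(1e-6, 1e-6, 1e-6, 1e-5, 1e-5, 1e-5).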
#add camera to sensor list (create list if necessary)
def registerCamChain(self, sensor):
self.CameraChain = sensor
def registerImu(self, sensor):
self.ImuList.append( sensor )
def buildProblem( self,
splineOrder=6,
poseKnotsPerSecond=70,
biasKnotsPerSecond=70,
doPoseMotionError=False,
mrTranslationVariance=1e6,
mrRotationVariance=1e5,
doBiasMotionError=True,
blakeZisserCam=-1,
huberAccel=-1,
huberGyro=-1,
noTimeCalibration=False,
noChainExtrinsics=True,
maxIterations=20,
gyroNoiseScale=1.0,
accelNoiseScale=1.0,
timeOffsetPadding=0.02,
verbose=False ):
print "\tSpline order: %d" % (splineOrder)
print "\tPose knots per second: %d" % (poseKnotsPerSecond)
print "\tDo pose motion regularization: %s" % (doPoseMotionError)
print "\t\txddot translation variance: %f" % (mrTranslationVariance)
print "\t\txddot rotation variance: %f" % (mrRotationVariance)
print "\tBias knots per second: %d" % (biasKnotsPerSecond)
print "\tDo bias motion regularization: %s" % (doBiasMotionError)
print "\tBlake-Zisserman on reprojection errors %s" % blakeZisserCam
print "\tAcceleration Huber width (sigma): %f" % (huberAccel)
print "\tGyroscope Huber width (sigma): %f" % (huberGyro)
print "\tDo time calibration: %s" % (not noTimeCalibration)
print "\tMax iterations: %d" % (maxIterations)
print "\tTime offset padding: %f" % (timeOffsetPadding)
############################################
## initialize camera chain
############################################
#estimate the timeshift for all cameras to the main imu
self.noTimeCalibration = noTimeCalibration
if not noTimeCalibration:
for cam in self.CameraChain.camList:
cam.findTimeshiftCameraImuPrior(self.ImuList[0], verbose)
#obtain orientation prior between main imu and camera chain (if no external input provided)
#and initial estimate for the direction of gravity
self.CameraChain.findOrientationPriorCameraChainToImu(self.ImuList[0])
estimatedGravity = self.CameraChain.getEstimatedGravity()
############################################
## init optimization problem
############################################
#initialize a pose spline using the camera poses in the camera chain
poseSpline = self.CameraChain.initializePoseSplineFromCameraChain(splineOrder, poseKnotsPerSecond, timeOffsetPadding)
# Initialize bias splines for all IMUs
for imu in self.ImuList:
imu.initBiasSplines(poseSpline, splineOrder, biasKnotsPerSecond)
# Now I can build the problem
problem = inc.CalibrationOptimizationProblem()
# Initialize all design variables.
self.initDesignVariables(problem, poseSpline, noTimeCalibration, noChainExtrinsics, initialGravityEstimate = estimatedGravity)
############################################
## add error terms
############################################
#Add calibration target reprojection error terms for all camera in chain
self.CameraChain.addCameraChainErrorTerms(problem, self.poseDv, blakeZissermanDf=blakeZisserCam, timeOffsetPadding=timeOffsetPadding)
# Initialize IMU error terms.
for imu in self.ImuList:
imu.addAccelerometerErrorTerms(problem, self.poseDv, self.gravityExpression, mSigma=huberAccel, accelNoiseScale=accelNoiseScale)
imu.addGyroscopeErrorTerms(problem, self.poseDv, mSigma=huberGyro, gyroNoiseScale=gyroNoiseScale, g_w=self.gravityExpression)
# Add the bias motion terms.
if doBiasMotionError:
imu.addBiasMotionTerms(problem)
# Add the pose motion terms.
if doPoseMotionError:
self.addPoseMotionTerms(problem, mrTranslationVariance, mrRotationVariance)
        # A gravity prior could be added here (none is added currently).
self.problem = problem
def optimize(self, options=None, maxIterations=30, recoverCov=False):
if options is None:
options = aopt.Optimizer2Options()
options.verbose = True
options.doLevenbergMarquardt = True
options.levenbergMarquardtLambdaInit = 10.0
options.nThreads = max(1,multiprocessing.cpu_count()-1)
options.convergenceDeltaX = 1e-5
options.convergenceDeltaJ = 1e-2
options.maxIterations = maxIterations
options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(options.levenbergMarquardtLambdaInit)
options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
#run the optimization
self.optimizer = aopt.Optimizer2(options)
self.optimizer.setProblem(self.problem)
optimizationFailed=False
try:
retval = self.optimizer.optimize()
if retval.linearSolverFailure:
optimizationFailed = True
        except Exception:
optimizationFailed = True
if optimizationFailed:
sm.logError("Optimization failed!")
raise RuntimeError("Optimization failed!")
#free some memory
del self.optimizer
gc.collect()
if recoverCov:
self.recoverCovariance()
def recoverCovariance(self):
#Covariance ordering (=dv ordering)
#ORDERING: N=num cams
# 1. transformation imu-cam0 --> 6
# 2. camera time2imu --> 1*numCams (only if enabled)
print "Recovering covariance..."
estimator = inc.IncrementalEstimator(CALIBRATION_GROUP_ID)
rval = estimator.addBatch(self.problem, True)
est_stds = np.sqrt(estimator.getSigma2Theta().diagonal())
#split and store the variance
self.std_trafo_ic = np.array(est_stds[0:6])
self.std_times = np.array(est_stds[6:])
def saveImuSetParametersYaml(self, resultFile):
imuSetConfig = kc.ImuSetParameters(resultFile, True)
for imu in self.ImuList:
imuConfig = imu.getImuConfig()
imuSetConfig.addImuParameters(imu_parameters=imuConfig)
imuSetConfig.writeYaml(resultFile)
def saveCamChainParametersYaml(self, resultFile):
chain = self.CameraChain.chainConfig
nCams = len(self.CameraChain.camList)
# Calibration results
for camNr in range(0,nCams):
#cam-cam baselines
if camNr > 0:
T_cB_cA, baseline = self.CameraChain.getResultBaseline(camNr-1, camNr)
chain.setExtrinsicsLastCamToHere(camNr, T_cB_cA)
#imu-cam trafos
T_ci = self.CameraChain.getResultTrafoImuToCam(camNr)
chain.setExtrinsicsImuToCam(camNr, T_ci)
if not self.noTimeCalibration:
#imu to cam timeshift
timeshift = float(self.CameraChain.getResultTimeShift(camNr))
chain.setTimeshiftCamImu(camNr, timeshift)
try:
chain.writeYaml(resultFile)
        except Exception:
print "ERROR: Could not write parameters to file: {0}\n".format(resultFile)
# -*- coding: utf-8 -*-
"""Tests for gam.AdditiveModel and GAM with Polynomials compared to OLS and GLM
Created on Sat Nov 05 14:16:07 2011
Author: Josef Perktold
License: BSD
Notes
-----
TODO: TestGAMGamma: has test failure (GLM looks good),
adding log-link did not help
resolved: gamma does not fail anymore after tightening the
convergence criterion (rtol=1e-6)
TODO: TestGAMNegativeBinomial: rvs generation does not work,
nbinom needs 2 parameters
TODO: TestGAMGaussianLogLink: test failure,
but maybe precision issue, not completely off
but something is wrong, either the testcase or with the link
>>> tt3.__class__
<class '__main__.TestGAMGaussianLogLink'>
>>> tt3.res2.mu_pred.mean()
3.5616368292650766
>>> tt3.res1.mu_pred.mean()
3.6144278964707679
>>> tt3.mu_true.mean()
34.821904835958122
>>>
>>> tt3.y_true.mean()
2.685225067611543
>>> tt3.res1.y_pred.mean()
0.52991541684645616
>>> tt3.res2.y_pred.mean()
0.44626406889363229
one possible change
~~~~~~~~~~~~~~~~~~~
add average, integral based tests, instead of or additional to sup
* for example mean squared error for mu and eta (predict, fittedvalues)
or mean absolute error, what's the scale for this? required precision?
* this will also work for real non-parametric tests
example: Gamma looks good in average bias and average RMSE (RMISE)
>>> tt3 = _estGAMGamma()
>>> np.mean((tt3.res2.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
-0.0051829977497423706
>>> np.mean((tt3.res2.y_pred - tt3.y_true))/tt3.y_true.mean()
0.00015255264651864049
>>> np.mean((tt3.res1.y_pred - tt3.y_true))/tt3.y_true.mean()
0.00015255538823786711
>>> np.mean((tt3.res1.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
-0.0051937668989744494
>>> np.sqrt(np.mean((tt3.res1.mu_pred - tt3.mu_true)**2))/tt3.mu_true.mean()
0.022946118520401692
>>> np.sqrt(np.mean((tt3.res2.mu_pred - tt3.mu_true)**2))/tt3.mu_true.mean()
0.022953913332599746
>>> maxabs = lambda x: np.max(np.abs(x))
>>> maxabs((tt3.res1.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
0.079540546242707733
>>> maxabs((tt3.res2.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
0.079578857986784574
>>> maxabs((tt3.res2.y_pred - tt3.y_true))/tt3.y_true.mean()
0.016282852522951426
>>> maxabs((tt3.res1.y_pred - tt3.y_true))/tt3.y_true.mean()
0.016288391235613865
"""
from statsmodels.compat.python import lrange
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from scipy import stats
import pytest
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family, links
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS
class Dummy(object):
pass
class CheckAM(object):
def test_predict(self):
assert_almost_equal(self.res1.y_pred,
self.res2.y_pred, decimal=2)
assert_almost_equal(self.res1.y_predshort,
self.res2.y_pred[:10], decimal=2)
@pytest.mark.xfail(reason="Unknown, results do not match expected",
raises=AssertionError, strict=True)
def test_fitted(self):
# check definition of fitted in GLM: eta or mu
assert_almost_equal(self.res1.y_pred,
self.res2.fittedvalues, decimal=2)
assert_almost_equal(self.res1.y_predshort,
self.res2.fittedvalues[:10], decimal=2)
def test_params(self):
#note: only testing slope coefficients
#constant is far off in example 4 versus 2
assert_almost_equal(self.res1.params[1:],
self.res2.params[1:], decimal=2)
        # the constant (params[0]) is not compared because it is far off (see
        # note above); re-check the first slope coefficient instead
        assert_almost_equal(self.res1.params[1],
                            self.res2.params[1], decimal=2)
@pytest.mark.xfail(reason="res_ps attribute does not exist",
strict=True, raises=AttributeError)
def test_df(self):
# not used yet, copied from PolySmoother tests
assert_equal(self.res_ps.df_model(), self.res2.df_model)
assert_equal(self.res_ps.df_fit(), self.res2.df_model) #alias
assert_equal(self.res_ps.df_resid(), self.res2.df_resid)
class CheckGAM(CheckAM):
def test_mu(self):
# problem with scale for precision
assert_almost_equal(self.res1.mu_pred,
self.res2.mu_pred, decimal=0)
def test_prediction(self):
# problem with scale for precision
assert_almost_equal(self.res1.y_predshort,
self.res2.y_pred[:10], decimal=2)
class BaseAM(object):
@classmethod
def setup_class(cls):
#DGP: simple polynomial
order = 3
nobs = 200
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*1, 1.*x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
#z = y_true #alias check
#d = x
cls.nobs = nobs
cls.y_true, cls.x, cls.exog = y_true, x, exog_reduced
class TestAdditiveModel(BaseAM, CheckAM):
@classmethod
def setup_class(cls):
super(TestAdditiveModel, cls).setup_class() #initialize DGP
nobs = cls.nobs
y_true, x, exog = cls.y_true, cls.x, cls.exog
np.random.seed(8765993)
sigma_noise = 0.1
y = y_true + sigma_noise * np.random.randn(nobs)
m = AdditiveModel(x)
m.fit(y)
res_gam = m.results #TODO: currently attached to class
res_ols = OLS(y, exog).fit()
#Note: there still are some naming inconsistencies
cls.res1 = res1 = Dummy() #for gam model
#res2 = Dummy() #for benchmark
cls.res2 = res2 = res_ols #reuse existing ols results, will add additional
res1.y_pred = res_gam.predict(x)
res2.y_pred = res_ols.model.predict(res_ols.params, exog)
res1.y_predshort = res_gam.predict(x[:10])
slopes = [i for ss in m.smoothers for i in ss.params[1:]]
const = res_gam.alpha + sum([ss.params[1] for ss in m.smoothers])
#print const, slopes
res1.params = np.array([const] + slopes)
def test_fitted(self):
# We have to override the base class because this case does not fail,
# while all others in this module do (as of 2019-05-22)
super(TestAdditiveModel, self).test_fitted()
class BaseGAM(BaseAM, CheckGAM):
@classmethod
def init(cls):
nobs = cls.nobs
y_true, x, exog = cls.y_true, cls.x, cls.exog
if not hasattr(cls, 'scale'):
scale = 1
else:
scale = cls.scale
f = cls.family
cls.mu_true = mu_true = f.link.inverse(y_true)
np.random.seed(8765993)
# Discrete distributions do not take `scale`.
try:
y_obs = cls.rvs(mu_true, scale=scale, size=nobs)
except TypeError:
y_obs = cls.rvs(mu_true, size=nobs)
        m = GAM(y_obs, x, family=f)  # TODO: y_obs is passed twice, to __init__ and to fit
m.fit(y_obs, maxiter=100)
res_gam = m.results
cls.res_gam = res_gam #attached for debugging
cls.mod_gam = m #attached for debugging
res_glm = GLM(y_obs, exog, family=f).fit()
#Note: there still are some naming inconsistencies
cls.res1 = res1 = Dummy() #for gam model
#res2 = Dummy() #for benchmark
cls.res2 = res2 = res_glm #reuse existing glm results, will add additional
#eta in GLM terminology
res2.y_pred = res_glm.model.predict(res_glm.params, exog, linear=True)
res1.y_pred = res_gam.predict(x)
res1.y_predshort = res_gam.predict(x[:10]) #, linear=True)
#mu
res2.mu_pred = res_glm.model.predict(res_glm.params, exog, linear=False)
res1.mu_pred = res_gam.mu
#parameters
slopes = [i for ss in m.smoothers for i in ss.params[1:]]
const = res_gam.alpha + sum([ss.params[1] for ss in m.smoothers])
res1.params = np.array([const] + slopes)
class TestGAMPoisson(BaseGAM):
@classmethod
def setup_class(cls):
super(TestGAMPoisson, cls).setup_class() #initialize DGP
cls.family = family.Poisson()
cls.rvs = stats.poisson.rvs
cls.init()
class TestGAMBinomial(BaseGAM):
@classmethod
def setup_class(cls):
super(TestGAMBinomial, cls).setup_class() #initialize DGP
cls.family = family.Binomial()
cls.rvs = stats.bernoulli.rvs
cls.init()
@pytest.mark.xfail(reason="Unknown, results do not match expected.",
strict=True, raises=AssertionError)
class TestGAMGaussianLogLink(BaseGAM):
#test failure, but maybe precision issue, not far off
#>>> np.mean(np.abs(tt.res2.mu_pred - tt.mu_true))
#0.80409736263199649
#>>> np.mean(np.abs(tt.res2.mu_pred - tt.mu_true))/tt.mu_true.mean()
#0.023258245077813208
#>>> np.mean((tt.res2.mu_pred - tt.mu_true)**2)/tt.mu_true.mean()
#0.022989403735692578
@classmethod
def setup_class(cls):
super(TestGAMGaussianLogLink, cls).setup_class() # initialize DGP
cls.family = family.Gaussian(links.log())
cls.rvs = stats.norm.rvs
cls.scale = 5
cls.init()
class TestGAMGamma(BaseGAM):
@classmethod
def setup_class(cls):
super(TestGAMGamma, cls).setup_class() #initialize DGP
cls.family = family.Gamma(links.log())
cls.rvs = stats.gamma.rvs
cls.init()
@pytest.mark.xfail(reason="Passing wrong number of args/kwargs "
"to _parse_args_rvs",
strict=True, raises=TypeError)
class TestGAMNegativeBinomial(BaseGAM):
# TODO: rvs generation does not work, nbinom needs 2 parameters
@classmethod
def setup_class(cls):
super(TestGAMNegativeBinomial, cls).setup_class() # initialize DGP
cls.family = family.NegativeBinomial()
cls.rvs = stats.nbinom.rvs
cls.init()
@pytest.mark.xfail(reason="Passing wrong number of args/kwargs "
"to _parse_args_rvs",
strict=True, raises=TypeError)
def test_fitted(self):
# We have to override the base class method in order to correctly
# specify the type of failure we are expecting.
super(TestGAMNegativeBinomial, self).test_fitted()
@pytest.mark.xfail(reason="Passing wrong number of args/kwargs "
"to _parse_args_rvs",
strict=True, raises=TypeError)
def test_df(self):
# We have to override the base class method in order to correctly
# specify the type of failure we are expecting.
super(TestGAMNegativeBinomial, self).test_df()
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import ZincCompile
from pants.fs.archive import TarArchiver
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_walk
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
from pants_test.cache.cache_server import cache_server
class JavaCompileIntegrationTest(BaseCompileIT):
def test_basic_binary(self):
with temporary_dir() as cache_dir:
config = {'cache.compile.zinc': {'write_to': [cache_dir]}}
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
['compile',
'testprojects/src/java/org/pantsbuild/testproject/publish/hello/main:',
],
workdir, config)
self.assert_success(pants_run)
def test_nocache(self):
with temporary_dir() as cache_dir:
bad_artifact_dir = os.path.join(cache_dir,
ZincCompile.stable_name(),
'testprojects.src.java.org.pantsbuild.testproject.nocache.nocache')
good_artifact_dir = os.path.join(cache_dir,
ZincCompile.stable_name(),
'testprojects.src.java.org.pantsbuild.testproject.nocache.cache_me')
config = {'cache.compile.zinc': {'write_to': [cache_dir]}}
pants_run = self.run_pants(['compile',
'testprojects/src/java/org/pantsbuild/testproject/nocache::'],
config)
self.assert_success(pants_run)
# The nocache target is labeled with no_cache so it should not be written to the
# artifact cache.
self.assertFalse(os.path.exists(bad_artifact_dir))
# But cache_me should be written.
self.assertEqual(len(os.listdir(good_artifact_dir)), 1)
# TODO(John Sirois): Factor up a shared utility for reuse by
# tests/python/pants_test/backend/core/tasks/test_cache_cleanup_integration.py
def create_platform_args(self, version):
return [("""--jvm-platform-platforms={{'default': {{'target': '{version}'}}}}"""
.format(version=version)),
'--jvm-platform-default-platform=default']
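  # Illustrative: create_platform_args(6) returns
  #   ["--jvm-platform-platforms={'default': {'target': '6'}}",
  #    '--jvm-platform-default-platform=default']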
def test_java_compile_produces_different_artifact_depending_on_java_version(self):
# Ensure that running java compile with java 6 and then java 7
# produces two different artifacts.
with temporary_dir() as cache_dir:
artifact_dir = os.path.join(cache_dir, ZincCompile.stable_name(),
'testprojects.src.java.org.pantsbuild.testproject.unicode.main.main')
config = {'cache.compile.zinc': {'write_to': [cache_dir]}}
pants_run = self.run_pants(self.create_platform_args(6) +
['compile',
'testprojects/src/java/org/pantsbuild/testproject/unicode/main'],
config)
self.assert_success(pants_run)
# One artifact for java 6
self.assertEqual(len(os.listdir(artifact_dir)), 1)
# Rerun for java 7
pants_run = self.run_pants(self.create_platform_args(7) +
['compile',
'testprojects/src/java/org/pantsbuild/testproject/unicode/main'],
config)
self.assert_success(pants_run)
# One artifact for java 6 and one for 7
self.assertEqual(len(os.listdir(artifact_dir)), 2)
def test_java_compile_reads_resource_mapping(self):
# Ensure that if an annotation processor produces a resource-mapping,
# the artifact contains that resource mapping.
with temporary_dir() as cache_dir:
artifact_dir = os.path.join(cache_dir, ZincCompile.stable_name(),
'testprojects.src.java.org.pantsbuild.testproject.annotation.main.main')
config = {'cache.compile.zinc': {'write_to': [cache_dir]}}
pants_run = self.run_pants(['compile',
'testprojects/src/java/org/pantsbuild/testproject/annotation/main'],
config)
self.assert_success(pants_run)
self.assertTrue(os.path.exists(artifact_dir))
artifacts = os.listdir(artifact_dir)
self.assertEqual(len(artifacts), 1)
with temporary_dir() as extract_dir:
TarArchiver.extract(os.path.join(artifact_dir, artifacts[0]), extract_dir)
all_files = set()
for dirpath, dirs, files in safe_walk(extract_dir):
for name in files:
path = os.path.join(dirpath, name)
all_files.add(path)
# Locate the report file on the classpath.
report_file_name = 'deprecation_report.txt'
reports = [f for f in all_files if f.endswith(report_file_name)]
self.assertEquals(1, len(reports),
'Expected exactly one {} file; got: {}'.format(report_file_name,
all_files))
with open(reports[0]) as fp:
annotated_classes = [line.rstrip() for line in fp.read().splitlines()]
self.assertEquals(
{'org.pantsbuild.testproject.annotation.main.Main',
'org.pantsbuild.testproject.annotation.main.Main$TestInnerClass'},
set(annotated_classes))
def test_java_compile_with_changes_in_resources_dependencies(self):
with self.source_clone('testprojects/src/java/org/pantsbuild/testproject/resdependency') as resdependency:
with self.temporary_workdir() as workdir:
with self.temporary_cachedir() as cachedir:
target = os.path.join(resdependency, 'java:testsources')
first_run = self.run_test_compile(workdir, cachedir, target, clean_all=True)
self.assert_success(first_run)
self.assertTrue("Compiling" in first_run.stdout_data)
with open(os.path.join(resdependency, 'resources/resource.xml'), 'w') as xml_resource:
xml_resource.write('<xml>Changed Hello World</xml>\n')
second_run = self.run_test_compile(workdir, cachedir, target, clean_all=False)
self.assert_success(second_run)
self.assertTrue("Compiling" not in second_run.stdout_data,
"In case of resources change nothing should be recompiled")
def test_java_compile_with_different_resolved_jars_produce_different_artifacts(self):
    # Since unforced dependencies resolve to the highest version (including
    # transitive jars), we want to ensure that running java compile with
    # binary-incompatible libraries produces two different artifacts.
with self.temporary_workdir() as workdir, temporary_dir() as cache_dir:
path_prefix = 'testprojects/src/java/org/pantsbuild/testproject/jarversionincompatibility'
dotted_path = path_prefix.replace(os.path.sep, '.')
artifact_dir = os.path.join(cache_dir, ZincCompile.stable_name(),
'{}.jarversionincompatibility'.format(dotted_path))
config = {
'cache.compile.zinc': {
'write_to': [cache_dir],
'read_from': [cache_dir],
},
'compile.zinc': {
'incremental_caching': True,
},
}
pants_run = self.run_pants_with_workdir(['compile',
('{}:only-15-directly'.format(path_prefix))],
workdir,
config)
self.assert_success(pants_run)
# One artifact for guava 15
self.assertEqual(len(os.listdir(artifact_dir)), 1)
# Rerun for guava 16
pants_run = self.run_pants_with_workdir(['compile',
(u'{}:alongside-16'.format(path_prefix))],
workdir,
config)
self.assert_success(pants_run)
# One artifact for guava 15 and one for guava 16
self.assertEqual(len(os.listdir(artifact_dir)), 2)
def test_java_compile_with_corrupt_remote(self):
"""Tests that a corrupt artifact in the remote cache still results in a successful compile."""
with self.temporary_workdir() as workdir, temporary_dir() as cachedir:
with cache_server(cache_root=cachedir) as server:
target = 'testprojects/tests/java/org/pantsbuild/testproject/matcher'
config = {
'cache.compile.zinc': {
'write_to': [server.url],
'read_from': [server.url],
},
}
# Compile to populate the cache, and actually run the tests to help verify runtime.
first_run = self.run_pants_with_workdir(['test', target], workdir, config)
self.assert_success(first_run)
self.assertTrue("Compiling" in first_run.stdout_data)
# Build again to hit the cache.
second_run = self.run_pants_with_workdir(['clean-all', 'test', target], workdir, config)
self.assert_success(second_run)
self.assertFalse("Compiling" in second_run.stdout_data)
# Corrupt the remote artifact.
self.assertTrue(server.corrupt_artifacts(r'.*zinc.*matcher.*') == 1)
# Ensure that the third run succeeds, despite a failed attempt to fetch.
third_run = self.run_pants_with_workdir(['clean-all', 'test', target], workdir, config)
self.assert_success(third_run)
self.assertTrue("Compiling" in third_run.stdout_data)
class JavaCompileIntegrationTestWithZjar(JavaCompileIntegrationTest):
_EXTRA_TASK_ARGS = ['--compile-zinc-use-classpath-jars']
from flask import (Blueprint, render_template, request, current_app, flash,
redirect, url_for, session, g)
from forms import LoginForm, RegisterForm, ForgotForm, ResetPassForm
from libs.User import User
from flask.ext.login import login_user, login_required, logout_user, confirm_login, current_user
from flask.ext.principal import Identity, AnonymousIdentity, identity_changed
from app import login_manager, flask_bcrypt, mail, app
from flask.ext.mail import Message
from models import Comment, PasswordResetRequest
from uuid import uuid4
from datetime import datetime
from decorators import async
MIN_PASSWORD_LENGTH = 4
auth_login = Blueprint('auth_login', __name__, template_folder='templates')
# General Request Handlers
@app.before_request
def before_request():
g.user = current_user
# Implement user last seen from Miguel Grinberg part 6
@auth_login.route('/register', methods=["GET", "POST"])
def register():
form = RegisterForm(request.form)
current_app.logger.info(request.form)
if request.method == "POST" and form.validate() == False:
current_app.logger.info(form.errors)
return "Registration Error"
elif request.method == "POST" and form.validate():
email = request.form['email']
username = request.form['username']
# generate password hash
password_hash = flask_bcrypt.generate_password_hash(request.form['password'])
user = User(email, password_hash, True, username)
try:
user.save()
            if login_user(user, remember=False):
flash("Logged in!")
return redirect(request.args.get('next') or '/jobs')
else:
flash("Unable to log you in")
        except Exception:
flash("Unable to register with that email address")
current_app.logger.error("Error on registration - possible duplicate emails")
return render_template('forms/register.html', form = form)
@auth_login.route('/login', methods=["GET", "POST"])
def login():
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('index'))
if request.method == "POST" and "email" in request.form:
email = request.form["email"]
userObj = User()
user = userObj.get_by_email_w_password(email)
if user and user.is_active() and flask_bcrypt.check_password_hash(user.password, request.form["password"]):
remember = request.form.get("remember", "no") == "yes"
if login_user(user, remember=remember):
flash("Logged In!")
identity_changed.send(current_app._get_current_object(),
identity = Identity(user.id))
return redirect(request.args.get('next') or '/jobs')
else:
flash("Unable to log you in")
form = LoginForm(request.form)
return render_template('forms/login.html', form=form)
def generate_password_reset_link(user_id):
# Clear out all old requests for this member
for r in PasswordResetRequest.objects(user_id=user_id):
session.pop(r.user_id, None)
# Generate new password reset request
    reset_code = str(uuid4())
reset_code_hash = flask_bcrypt.generate_password_hash(reset_code)
reset_request = PasswordResetRequest(reset_code_hash=reset_code_hash,
user_id=user_id,
timestamp=datetime.now())
reset_request.save()
session.update({user_id : reset_code})
# Return the reset password link
return url_for('auth_login.reset_password', _external=True,
id=reset_request.id, reset_code=reset_code)
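# Illustrative: assuming the blueprint is mounted at the site root, the link
# returned above looks like
#   https://<host>/reset_password?id=<request id>&reset_code=<uuid4 string>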
@async
def send_async_email(message):
with mail.connect() as conn:
with app.app_context():
conn.send(message)
def send_email(subject, sender, recipients, text_body, html_body):
message = Message(subject, sender=sender, recipients=recipients)
message.body = text_body
message.html = html_body
send_async_email(message)
@auth_login.route('/forgot', methods=["GET", "POST"])
def forgot():
if request.method == "POST" and "email" in request.form:
email = request.form["email"]
userObj = User()
user = userObj.get_by_email_w_password(email)
if user:
reset_link = generate_password_reset_link(str(user.id))
subject = "Plytos Password Reset"
recipients = [email]
body = ("Hello {name}! We received a password reset request "
"from you. If you did not make this request, please "
"ignore this email.\n"
"\n"
"You can reset your password using this link:\n"
"{reset_link}\n"
"\n"
"Thank you,\n"
"\n"
"Plytos Team"
).format(name="placeholder", reset_link=reset_link)
send_email(subject=subject,
sender="team@plytos.com",
recipients=recipients,
text_body=body,
html_body=None)
flash("Request has been sent! Check your email for a link "
"to reset your password.", "success")
return redirect(url_for('index'))
else:
flash("Email not found", "danger")
form = ForgotForm(request.form)
return render_template('forms/forgot.html', form = form)
@auth_login.route('/reset_password', methods=['GET', 'POST'])
def reset_password():
reset_request_id = request.args.get('id')
reset_code = request.args.get('reset_code')
reset_request = PasswordResetRequest.objects.get_or_404(id=reset_request_id)
if not reset_request:
flash("You do not have access to that page.", "danger")
return redirect(url_for('index'))
if not reset_request.validate_reset_code(reset_code):
flash("You do not have access to that page", "danger")
return redirect(url_for('index'))
if not reset_request.validate_timestamp():
flash("Password reset has expired", "danger")
return redirect(url_for('index'))
if request.method == "POST":
password = request.form.get('password').strip()
confirm = request.form.get('confirm').strip()
has_errors = False
if len(password) < MIN_PASSWORD_LENGTH:
flash("Password must be at least {0} "
"characters".format(MIN_PASSWORD_LENGTH), "danger")
has_errors = True
if password != confirm:
flash("Password and confirmation do not match", "danger")
has_errors = True
if not has_errors:
userObj = User()
password_hash = flask_bcrypt.generate_password_hash(password)
try:
userObj.reset_password(reset_request.user_id, password_hash)
reset_request.delete()
session.pop(reset_request.user_id, None)
flash("You have successfully reset your password!", "success")
return redirect(url_for('auth_login.login'))
            except Exception:
                flash("Unable to reset password", "danger")
                current_app.logger.error("Error on password reset")
form = ResetPassForm(request.form)
return render_template('forms/reset_password.html', form = form)
@auth_login.route("/logout")
@login_required
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
flash("Logged out.")
return redirect(request.args.get('next') or '/')
@login_manager.unauthorized_handler
def unauthorized_callback():
flash('Please login', 'warning')
return redirect('/login?next=' + request.path)
@login_manager.user_loader
def load_user(id):
    if id is None:
        # user_loader must return None for unknown users; the original called
        # redirect() here but discarded its result, which had no effect
        return None
user = User()
user.get_by_id(id)
if user.is_active():
return user
else:
return None
@auth_login.route('/<username>/')
@login_required
def profile(username):
userObj = User()
user = userObj.get_by_username(username)
    if user is None:
flash('User ' + username + ' not found.')
return redirect(url_for('index'))
#FIXME: just mock data
posts = [
{ 'author': user, 'body': 'Test post #1' },
{ 'author': user, 'body': 'Test post #2' }
]
return render_template('profile/user.html',
user = user,
posts = posts)
| |
"""
Support "typing" onto an object.
This module creates a Texture and PNMImage, makes that texture a second
TextureStage on the original object, and copies changes when a character is struck.
"""
import random
from panda3d.core import Vec3, Point3, Texture, PNMImage, TextureStage, PNMTextMaker, Vec4
from panda3d.core import Geom
from panda3d.core import GeomNode
from panda3d.core import GeomPoints
from panda3d.core import GeomVertexArrayFormat
from panda3d.core import GeomVertexData
from panda3d.core import GeomVertexFormat
from panda3d.core import GeomVertexWriter
from panda3d.core import NodePath
from panda3d.core import RenderAttrib
from panda3d.core import Shader
from panda3d.core import TexGenAttrib
from panda3d.core import loadPrcFileData
from direct.interval.LerpInterval import LerpPosInterval
from scheduler import Scheduler
from utils import fonts
global globalClock
class Typist(object):
TARGETS = { 'paper': {
'model': 'paper',
'textureRoot': 'Front',
'scale': Point3(0.85, 0.85, 1),
'hpr' : Point3(0, 0, 0),
}
}
def __init__(self, base, typewriterNP, underDeskClip, sounds):
self.base = base
self.sounds = sounds
self.underDeskClip = underDeskClip
self.typeIndex = 0
self.typewriterNP = typewriterNP
self.rollerAssemblyNP = typewriterNP.find("**/roller assembly")
assert self.rollerAssemblyNP
self.rollerNP = typewriterNP.find("**/roller")
assert self.rollerNP
self.carriageNP = typewriterNP.find("**/carriage")
assert self.carriageNP
self.baseCarriagePos = self.carriageNP.getPos()
self.carriageBounds = self.carriageNP.getTightBounds()
self.font = base.loader.loadFont('Harting.ttf', pointSize=32)
self.pnmFont = PNMTextMaker(self.font)
self.fontCharSize, _, _ = fonts.measureFont(self.pnmFont, 32)
print "font char size: ",self.fontCharSize
self.pixelsPerLine = int(round(self.pnmFont.getLineHeight()))
self.target = None
""" panda3d.core.NodePath """
self.targetRoot = None
""" panda3d.core.NodePath """
self.paperY = 0.0
""" range from 0 to 1 """
self.paperX = 0.0
""" range from 0 to 1 """
self.createRollerBase()
self.tex = None
self.texImage = None
self.setupTexture()
self.scheduler = Scheduler()
task = self.base.taskMgr.add(self.tick, 'timerTask')
task.setDelay(0.01)
def tick(self, task):
self.scheduler.tick(globalClock.getRealTime())
return task.cont
def setupTexture(self):
"""
This is the overlay/decal/etc. which contains the typed characters.
The texture size and the font size are currently tied together.
:return:
"""
self.texImage = PNMImage(1024, 1024)
self.texImage.addAlpha()
self.texImage.fill(1.0)
self.texImage.alphaFill(1.0)
self.tex = Texture('typing')
self.tex.setMagfilter(Texture.FTLinear)
self.tex.setMinfilter(Texture.FTLinear)
self.typingStage = TextureStage('typing')
self.typingStage.setMode(TextureStage.MModulate)
self.tex.load(self.texImage)
# ensure we can quickly update subimages
self.tex.setKeepRamImage(True)
# temp for drawing chars
self.chImage = PNMImage(*self.fontCharSize)
def drawCharacter(self, ch, px, py):
"""
Draw a character onto the texture
:param ch:
:param px: paperX
:param py: paperY
:return: the paper-relative size of the character
"""
h = self.fontCharSize[1]
if ch != ' ':
# position -> pixel, applying margins
x = int(self.tex.getXSize() * (px * 0.8 + 0.1))
y = int(self.tex.getYSize() * (py * 0.8 + 0.1))
# always draw onto the paper, to capture
# incremental character overstrikes
self.pnmFont.generateInto(ch, self.texImage, x, y)
if False:
#print ch,"to",x,y,"w=",g.getWidth()
self.tex.load(self.texImage)
else:
# copy an area (presumably) encompassing the character
g = self.pnmFont.getGlyph(ord(ch))
cx, cy = self.fontCharSize
# a glyph is minimally sized and "moves around" in its text box
# (think ' vs. ,), so it has been drawn somewhere relative to
# the 'x' and 'y' we wanted.
x += g.getLeft()
y -= g.getTop()
self.chImage.copySubImage(
self.texImage,
0, 0, # from
x, y, # to
cx, cy # size
)
self.tex.loadSubImage(self.chImage, x, y)
# toggle for a typewriter that uses non-proportional spacing
#w = self.paperCharWidth(g.getWidth())
w = self.paperCharWidth()
else:
w = self.paperCharWidth()
return w, h
def start(self):
self.target = None
self.setTarget('paper')
self.hookKeyboard()
def createRollerBase(self):
""" The paper moves such that it is tangent to the roller.
This nodepath keeps a coordinate space relative to that, so that
the paper can be positioned from (0,0,0) to (0,0,1) to "roll" it
along the roller.
"""
bb = self.rollerNP.getTightBounds()
#self.rollerNP.showTightBounds()
self.paperRollerBase = self.rollerAssemblyNP.attachNewNode('rollerBase')
self.paperRollerBase.setHpr(0, -20, 0)
print "roller:",bb
rad = abs(bb[0].y - bb[1].y) / 2
center = Vec3(-(bb[0].x+bb[1].x)/2 - 0.03,
(bb[0].y-bb[1].y)/2,
(bb[0].z+bb[1].z)/2)
self.paperRollerBase.setPos(center)
def setTarget(self, name):
if self.target:
self.target.removeNode()
# load and transform the model
target = self.TARGETS[name]
self.target = self.base.loader.loadModel(target['model'])
#self.target.setScale(target['scale'])
self.target.setHpr(target['hpr'])
# put it in the world
self.target.reparentTo(self.paperRollerBase)
rbb = self.rollerNP.getTightBounds()
tbb = self.target.getTightBounds()
rs = (rbb[1] - rbb[0])
ts = (tbb[1] - tbb[0])
self.target.setScale(rs.x / ts.x, 1, 1)
# apply the texture
self.targetRoot = self.target
if 'textureRoot' in target:
self.targetRoot = self.target.find("**/" + target['textureRoot'])
assert self.targetRoot
self.targetRoot.setTexture(self.typingStage, self.tex)
#self.setupTargetClip()
# reset
self.paperX = self.paperY = 0.
newPos = self.calcPaperPos(self.paperY)
self.target.setPos(newPos)
self.moveCarriage()
def setupTargetClip(self):
"""
The target is fed in to the typewriter but until we invent "geom curling",
it shouldn't be visible under the typewriter under the desk.
The @underDeskClip node has a world-relative bounding box, which
we can convert to the target-relative bounding box, and pass to a
shader that can clip the nodes.
"""
shader = Shader.make(
Shader.SLGLSL,
"""
#version 120
attribute vec4 p3d_MultiTexCoord0;
attribute vec4 p3d_MultiTexCoord1;
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = p3d_MultiTexCoord0;
gl_TexCoord[1] = p3d_MultiTexCoord1;
}
""",
"""
#version 120
uniform sampler2D baseTex;
uniform sampler2D charTex;
const vec4 zero = vec4(0, 0, 0, 0);
const vec4 one = vec4(1, 1, 1, 1);
const vec4 half = vec4(0.5, 0.5, 0.5, 0);
void main() {
vec4 baseColor = texture2D(baseTex, gl_TexCoord[0].st);
vec4 typeColor = texture2D(charTex, gl_TexCoord[1].st);
gl_FragColor = baseColor * typeColor;
}"""
)
self.target.setShader(shader)
baseTex = self.targetRoot.getTexture()
print "Base Texture:",baseTex
self.target.setShaderInput("baseTex", baseTex)
self.target.setShaderInput("charTex", self.tex)
def hookKeyboard(self):
"""
Hook events so we can respond to keypresses.
"""
self.base.buttonThrowers[0].node().setKeystrokeEvent('keystroke')
self.base.accept('keystroke', self.schedTypeCharacter)
self.base.accept('backspace', self.schedBackspace)
self.base.accept('arrow_up', lambda: self.schedAdjustPaper(-5))
self.base.accept('arrow_up-repeat', lambda: self.schedAdjustPaper(-1))
self.base.accept('arrow_down', lambda: self.schedAdjustPaper(5))
self.base.accept('arrow_down-repeat', lambda: self.schedAdjustPaper(1))
self.base.accept('arrow_left', lambda: self.schedAdjustCarriage(-1))
self.base.accept('arrow_left-repeat', lambda: self.schedAdjustCarriage(-1))
self.base.accept('arrow_right', lambda: self.schedAdjustCarriage(1))
self.base.accept('arrow_right-repeat', lambda: self.schedAdjustCarriage(1))
def paperCharWidth(self, pixels=None):
if not pixels:
pixels = self.fontCharSize[0]
return float(pixels) / self.tex.getXSize()
def paperLineHeight(self):
return float(self.fontCharSize[1] * 1.2) / self.tex.getYSize()
def schedScroll(self):
if self.scheduler.isQueueEmpty():
self.schedRollPaper(1)
self.schedResetCarriage()
def schedBackspace(self):
if self.scheduler.isQueueEmpty():
def doit():
if self.paperX > 0:
self.schedAdjustCarriage(-1)
self.scheduler.schedule(0.01, doit)
def createMoveCarriageInterval(self, newX, curX=None):
if curX is None:
curX = self.paperX
here = self.calcCarriage(curX)
there = self.calcCarriage(newX)
posInterval = LerpPosInterval(
self.carriageNP, abs(newX - curX),
there,
startPos = here,
blendType='easeIn')
posInterval.setDoneEvent('carriageReset')
def isReset():
self.paperX = newX
self.base.acceptOnce('carriageReset', isReset)
return posInterval
def schedResetCarriage(self):
if self.paperX > 0.1:
self.sounds['pullback'].play()
invl = self.createMoveCarriageInterval(0)
self.scheduler.scheduleInterval(0, invl)
def calcCarriage(self, paperX):
"""
Calculate where the carriage should be offset based
on the position on the paper
:param paperX: 0...1
:return: pos for self.carriageNP
"""
x = (0.5 - paperX) * 0.69 * 0.8 + 0.01
bb = self.carriageBounds
return self.baseCarriagePos + Point3(x * (bb[1].x-bb[0].x), 0, 0)
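# Worked numbers (illustrative): ignoring the bounding-box scale, paperX=0
# gives x = 0.5*0.69*0.8 + 0.01 = 0.286 and paperX=1 gives
# x = -0.5*0.69*0.8 + 0.01 = -0.266, so the carriage sweeps symmetrically
# around a small positive offset.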
def moveCarriage(self):
pos = self.calcCarriage(self.paperX)
self.carriageNP.setPos(pos)
def schedMoveCarriage(self, curX, newX):
if self.scheduler.isQueueEmpty():
#self.scheduler.schedule(0.1, self.moveCarriage)
invl = self.createMoveCarriageInterval(newX, curX=curX)
invl.start()
def schedAdjustCarriage(self, bx):
if self.scheduler.isQueueEmpty():
def doit():
self.paperX = max(0.0, min(1.0, self.paperX + bx * self.paperCharWidth()))
self.moveCarriage()
self.scheduler.schedule(0.1, doit)
def calcPaperPos(self, paperY):
# center over roller, peek out a little
z = paperY * 0.8 - 0.5 + 0.175
bb = self.target.getTightBounds()
return Point3(-0.5, 0, z * (bb[1].z-bb[0].z))
def createMovePaperInterval(self, newY):
here = self.calcPaperPos(self.paperY)
there = self.calcPaperPos(newY)
posInterval = LerpPosInterval(
self.target, abs(newY - self.paperY),
there,
startPos = here,
blendType='easeInOut')
posInterval.setDoneEvent('scrollDone')
def isDone():
self.paperY = newY
self.base.acceptOnce('scrollDone', isDone)
return posInterval
def schedAdjustPaper(self, by):
if self.scheduler.isQueueEmpty():
def doit():
self.schedRollPaper(by)
self.scheduler.schedule(0.1, doit)
def schedRollPaper(self, by):
"""
Position the paper such that @percent of it is rolled over roller
:param percent:
:return:
"""
def doit():
self.sounds['scroll'].play()
newY = min(1.0, max(0.0, self.paperY + self.paperLineHeight() * by))
invl = self.createMovePaperInterval(newY)
invl.start()
self.scheduler.schedule(0.1, doit)
def schedTypeCharacter(self, keyname):
# filter for visibility
if ord(keyname) == 13:
self.schedScroll()
elif ord(keyname) >= 32 and ord(keyname) != 127:
if self.scheduler.isQueueEmpty():
curX, curY = self.paperX, self.paperY
self.typeCharacter(keyname, curX, curY)
def typeCharacter(self, ch, curX, curY):
newX = curX
w, h = self.drawCharacter(ch, curX, curY)
newX += w
if ch != ' ':
# alternate typing sound
#self.typeIndex = (self.typeIndex+1) % 3
self.typeIndex = random.randint(0, 2)
self.sounds['type' + str(self.typeIndex+1)].play()
else:
self.sounds['advance'].play()
if newX >= 1:
self.sounds['bell'].play()
newX = 1
self.schedMoveCarriage(self.paperX, newX)
# move first, to avoid overtype
self.paperX = newX
"""
Test case for iperf example.
This test case might have problem running on windows:
1. direct use of `make`
2. use `sudo killall iperf` to force kill iperf, didn't implement windows version
The test env Example_ShieldBox do need the following config::
Example_ShieldBox:
ap_list:
- ssid: "ssid"
password: "password"
outlet: 1
apc_ip: "192.168.1.88"
attenuator_port: "/dev/ttyUSB0"
iperf: "/dev/ttyUSB1"
apc_ip: "192.168.1.88"
pc_nic: "eth0"
"""
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from builtins import range
from builtins import object
import re
import os
import time
import subprocess
from tiny_test_fw import TinyFW, DUT, Utility
import ttfw_idf
from idf_iperf_test_util import (Attenuator, PowerControl, LineChart, TestReport)
# configurations
TEST_TIME = TEST_TIMEOUT = 60
WAIT_AP_POWER_ON_TIMEOUT = 90
SCAN_TIMEOUT = 3
SCAN_RETRY_COUNT = 3
RETRY_COUNT_FOR_BEST_PERFORMANCE = 2
ATTEN_VALUE_LIST = range(0, 60, 2)
# constants
FAILED_TO_SCAN_RSSI = -97
INVALID_HEAP_SIZE = 0xFFFFFFFF
PC_IPERF_TEMP_LOG_FILE = ".tmp_iperf.log"
CONFIG_NAME_PATTERN = re.compile(r"sdkconfig\.ci\.(.+)")
# We need to auto compare the difference between adjacent configs (01 -> 00, 02 -> 01, ...) and put them to reports.
# Using numbers for config will make this easy.
# Use default value `99` for config with best performance.
BEST_PERFORMANCE_CONFIG = "99"
class TestResult(object):
""" record, analysis test result and convert data to output format """
PC_BANDWIDTH_LOG_PATTERN = re.compile(r"(\d+)\.0\s*-\s*(\d+)\.0\s+sec\s+[\d.]+\s+MBytes\s+([\d.]+)\s+Mbits/sec")
DUT_BANDWIDTH_LOG_PATTERN = re.compile(r"(\d+)-\s+(\d+)\s+sec\s+([\d.]+)\s+Mbits/sec")
ZERO_POINT_THRESHOLD = -88 # RSSI, dbm
ZERO_THROUGHPUT_THRESHOLD = -92 # RSSI, dbm
BAD_POINT_RSSI_THRESHOLD = -85 # RSSI, dbm
BAD_POINT_MIN_THRESHOLD = 3 # Mbps
BAD_POINT_PERCENTAGE_THRESHOLD = 0.3
# at least half of the samples must be valid to qualify the test result
THROUGHPUT_QUALIFY_COUNT = TEST_TIME // 2
def __init__(self, proto, direction, config_name):
self.proto = proto
self.direction = direction
self.config_name = config_name
self.throughput_by_rssi = dict()
self.throughput_by_att = dict()
self.att_rssi_map = dict()
self.heap_size = INVALID_HEAP_SIZE
self.error_list = []
def _save_result(self, throughput, ap_ssid, att, rssi, heap_size):
"""
save the test results:
* record the better throughput if att/rssi is the same.
* record the min heap size.
"""
if ap_ssid not in self.att_rssi_map:
# for new ap, create empty dict()
self.throughput_by_att[ap_ssid] = dict()
self.throughput_by_rssi[ap_ssid] = dict()
self.att_rssi_map[ap_ssid] = dict()
self.att_rssi_map[ap_ssid][att] = rssi
def record_throughput(database, key_value):
try:
# we save the larger value for same att
if throughput > database[ap_ssid][key_value]:
database[ap_ssid][key_value] = throughput
except KeyError:
database[ap_ssid][key_value] = throughput
record_throughput(self.throughput_by_att, att)
record_throughput(self.throughput_by_rssi, rssi)
if int(heap_size) < self.heap_size:
self.heap_size = int(heap_size)
def add_result(self, raw_data, ap_ssid, att, rssi, heap_size):
"""
add result for one test
:param raw_data: iperf raw data
:param ap_ssid: ap ssid that tested
:param att: attenuate value
:param rssi: AP RSSI
:param heap_size: min heap size during test
:return: throughput
"""
fall_to_0_recorded = 0
throughput_list = []
result_list = self.PC_BANDWIDTH_LOG_PATTERN.findall(raw_data)
if not result_list:
# failed to find raw data by PC pattern, it might be DUT pattern
result_list = self.DUT_BANDWIDTH_LOG_PATTERN.findall(raw_data)
for result in result_list:
if int(result[1]) - int(result[0]) != 1:
# this could be summary, ignore this
continue
throughput_list.append(float(result[2]))
if float(result[2]) == 0 and rssi > self.ZERO_POINT_THRESHOLD \
and fall_to_0_recorded < 1:
# throughput fell to 0; we only record one such error per test
self.error_list.append("[Error][fall to 0][{}][att: {}][rssi: {}]: 0 throughput interval: {}-{}"
.format(ap_ssid, att, rssi, result[0], result[1]))
fall_to_0_recorded += 1
if len(throughput_list) > self.THROUGHPUT_QUALIFY_COUNT:
throughput = sum(throughput_list) / len(throughput_list)
else:
throughput = 0.0
if throughput == 0 and rssi > self.ZERO_THROUGHPUT_THRESHOLD:
self.error_list.append("[Error][Fatal][{}][att: {}][rssi: {}]: No throughput data found"
.format(ap_ssid, att, rssi))
self._save_result(throughput, ap_ssid, att, rssi, heap_size)
return throughput
def post_analysis(self):
"""
some rules need to be checked after we collected all test raw data:
1. throughput value 30% worse than the next point with lower RSSI
2. throughput value 30% worse than the next point with larger attenuate
"""
def analysis_bad_point(data, index_type):
for ap_ssid in data:
result_dict = data[ap_ssid]
index_list = list(result_dict.keys())
index_list.sort()
if index_type == "att":
index_list.reverse()
for i, index_value in enumerate(index_list[1:]):
if index_value < self.BAD_POINT_RSSI_THRESHOLD or \
result_dict[index_list[i]] < self.BAD_POINT_MIN_THRESHOLD:
continue
_percentage = result_dict[index_value] / result_dict[index_list[i]]
if _percentage < 1 - self.BAD_POINT_PERCENTAGE_THRESHOLD:
self.error_list.append("[Error][Bad point][{}][{}: {}]: drop {:.02f}%"
.format(ap_ssid, index_type, index_value,
(1 - _percentage) * 100))
analysis_bad_point(self.throughput_by_rssi, "rssi")
analysis_bad_point(self.throughput_by_att, "att")
@staticmethod
def _convert_to_draw_format(data, label):
keys = sorted(data.keys())
return {
"x-axis": keys,
"y-axis": [data[x] for x in keys],
"label": label,
}
def draw_throughput_figure(self, path, ap_ssid, draw_type):
"""
:param path: folder to save figure. make sure the folder is already created.
:param ap_ssid: ap ssid string or a list of ap ssid string
:param draw_type: "att" or "rssi"
:return: file_name
"""
if draw_type == "rssi":
type_name = "RSSI"
data = self.throughput_by_rssi
elif draw_type == "att":
type_name = "Att"
data = self.throughput_by_att
else:
raise AssertionError("draw type not supported")
if isinstance(ap_ssid, list):
file_name = "ThroughputVs{}_{}_{}_{}.png".format(type_name, self.proto, self.direction,
hash(ap_ssid)[:6])
data_list = [self._convert_to_draw_format(data[_ap_ssid], _ap_ssid)
for _ap_ssid in ap_ssid]
else:
file_name = "ThroughputVs{}_{}_{}_{}.png".format(type_name, self.proto, self.direction, ap_ssid)
data_list = [self._convert_to_draw_format(data[ap_ssid], ap_ssid)]
LineChart.draw_line_chart(os.path.join(path, file_name),
"Throughput Vs {} ({} {})".format(type_name, self.proto, self.direction),
"Throughput (Mbps)",
"{} (dbm)".format(type_name),
data_list)
return file_name
def draw_rssi_vs_att_figure(self, path, ap_ssid):
"""
:param path: folder to save figure. make sure the folder is already created.
:param ap_ssid: ap to use
:return: file_name
"""
if isinstance(ap_ssid, list):
file_name = "AttVsRSSI_{}.png".format(hash(ap_ssid)[:6])
data_list = [self._convert_to_draw_format(self.att_rssi_map[_ap_ssid], _ap_ssid)
for _ap_ssid in ap_ssid]
else:
file_name = "AttVsRSSI_{}.png".format(ap_ssid)
data_list = [self._convert_to_draw_format(self.att_rssi_map[ap_ssid], ap_ssid)]
LineChart.draw_line_chart(os.path.join(path, file_name),
"Att Vs RSSI",
"Att (dbm)",
"RSSI (dbm)",
data_list)
return file_name
def get_best_throughput(self):
""" get the best throughput during test """
best_for_aps = [max(self.throughput_by_att[ap_ssid].values())
for ap_ssid in self.throughput_by_att]
return max(best_for_aps)
def __str__(self):
"""
returns summary for this test:
1. test result (success or fail)
2. best performance for each AP
3. min free heap size during test
"""
if self.throughput_by_att:
ret = "[{}_{}][{}]: {}\r\n\r\n".format(self.proto, self.direction, self.config_name,
"Fail" if self.error_list else "Success")
ret += "Performance for each AP:\r\n"
for ap_ssid in self.throughput_by_att:
ret += "[{}]: {:.02f} Mbps\r\n".format(ap_ssid, max(self.throughput_by_att[ap_ssid].values()))
if self.heap_size != INVALID_HEAP_SIZE:
ret += "Minimum heap size: {}".format(self.heap_size)
else:
ret = ""
return ret
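# Illustrative sketch (not part of the test flow): how the two bandwidth
# patterns above match typical interval lines. Both sample lines are
# assumptions modelled on standard iperf 2.x output and on the regexes
# themselves, not captured logs.
def _example_parse_bandwidth_lines():
    pc_line = "[  3]  3.0- 4.0 sec  1.25 MBytes  10.49 Mbits/sec"
    dut_line = "3-   4 sec       10.49 Mbits/sec"
    # each findall yields [('3', '4', '10.49')]: interval start, end, Mbps
    print(TestResult.PC_BANDWIDTH_LOG_PATTERN.findall(pc_line))
    print(TestResult.DUT_BANDWIDTH_LOG_PATTERN.findall(dut_line))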
class IperfTestUtility(object):
""" iperf test implementation """
def __init__(self, dut, config_name, ap_ssid, ap_password,
pc_nic_ip, pc_iperf_log_file, test_result=None):
self.config_name = config_name
self.dut = dut
self.pc_iperf_log_file = pc_iperf_log_file
self.ap_ssid = ap_ssid
self.ap_password = ap_password
self.pc_nic_ip = pc_nic_ip
if test_result:
self.test_result = test_result
else:
self.test_result = {
"tcp_tx": TestResult("tcp", "tx", config_name),
"tcp_rx": TestResult("tcp", "rx", config_name),
"udp_tx": TestResult("udp", "tx", config_name),
"udp_rx": TestResult("udp", "rx", config_name),
}
def setup(self):
"""
setup iperf test:
1. kill current iperf process
2. reboot DUT (currently iperf is not very robust, need to reboot DUT)
3. scan to get AP RSSI
4. connect to AP
"""
try:
subprocess.check_output("sudo killall iperf 2>&1 > /dev/null", shell=True)
except subprocess.CalledProcessError:
pass
self.dut.write("restart")
self.dut.expect("esp32>")
self.dut.write("scan {}".format(self.ap_ssid))
for _ in range(SCAN_RETRY_COUNT):
try:
rssi = int(self.dut.expect(re.compile(r"\[{}]\[rssi=(-\d+)]".format(self.ap_ssid)),
timeout=SCAN_TIMEOUT)[0])
break
except DUT.ExpectTimeout:
continue
else:
raise AssertionError("Failed to scan AP")
self.dut.write("sta {} {}".format(self.ap_ssid, self.ap_password))
dut_ip = self.dut.expect(re.compile(r"sta ip: ([\d.]+), mask: ([\d.]+), gw: ([\d.]+)"))[0]
return dut_ip, rssi
def _save_test_result(self, test_case, raw_data, att, rssi, heap_size):
return self.test_result[test_case].add_result(raw_data, self.ap_ssid, att, rssi, heap_size)
def _test_once(self, proto, direction):
""" do measure once for one type """
# connect and scan to get RSSI
dut_ip, rssi = self.setup()
assert direction in ["rx", "tx"]
assert proto in ["tcp", "udp"]
# run iperf test
if direction == "tx":
with open(PC_IPERF_TEMP_LOG_FILE, "w") as f:
if proto == "tcp":
process = subprocess.Popen(["iperf", "-s", "-B", self.pc_nic_ip,
"-t", str(TEST_TIME), "-i", "1", "-f", "m"],
stdout=f, stderr=f)
self.dut.write("iperf -c {} -i 1 -t {}".format(self.pc_nic_ip, TEST_TIME))
else:
process = subprocess.Popen(["iperf", "-s", "-u", "-B", self.pc_nic_ip,
"-t", str(TEST_TIME), "-i", "1", "-f", "m"],
stdout=f, stderr=f)
self.dut.write("iperf -c {} -u -i 1 -t {}".format(self.pc_nic_ip, TEST_TIME))
for _ in range(TEST_TIMEOUT):
if process.poll() is not None:
break
time.sleep(1)
else:
process.terminate()
with open(PC_IPERF_TEMP_LOG_FILE, "r") as f:
pc_raw_data = server_raw_data = f.read()
else:
with open(PC_IPERF_TEMP_LOG_FILE, "w") as f:
if proto == "tcp":
self.dut.write("iperf -s -i 1 -t {}".format(TEST_TIME))
process = subprocess.Popen(["iperf", "-c", dut_ip,
"-t", str(TEST_TIME), "-f", "m"],
stdout=f, stderr=f)
else:
self.dut.write("iperf -s -u -i 1 -t {}".format(TEST_TIME))
process = subprocess.Popen(["iperf", "-c", dut_ip, "-u", "-b", "100M",
"-t", str(TEST_TIME), "-f", "m"],
stdout=f, stderr=f)
for _ in range(TEST_TIMEOUT):
if process.poll() is not None:
break
time.sleep(1)
else:
process.terminate()
server_raw_data = self.dut.read()
with open(PC_IPERF_TEMP_LOG_FILE, "r") as f:
pc_raw_data = f.read()
# save PC iperf logs to console
with open(self.pc_iperf_log_file, "a+") as f:
f.write("## [{}] `{}`\r\n##### {}"
.format(self.config_name,
"{}_{}".format(proto, direction),
time.strftime("%m-%d %H:%M:%S", time.localtime(time.time()))))
f.write('\r\n```\r\n\r\n' + pc_raw_data + '\r\n```\r\n')
self.dut.write("heap")
heap_size = self.dut.expect(re.compile(r"min heap size: (\d+)\D"))[0]
# return server raw data (for parsing test results) and RSSI
return server_raw_data, rssi, heap_size
def run_test(self, proto, direction, atten_val):
"""
run test for one type, with specified atten_value and save the test result
:param proto: tcp or udp
:param direction: tx or rx
:param atten_val: attenuate value
"""
rssi = FAILED_TO_SCAN_RSSI
heap_size = INVALID_HEAP_SIZE
try:
server_raw_data, rssi, heap_size = self._test_once(proto, direction)
throughput = self._save_test_result("{}_{}".format(proto, direction),
server_raw_data, atten_val,
rssi, heap_size)
Utility.console_log("[{}][{}_{}][{}][{}]: {:.02f}"
.format(self.config_name, proto, direction, rssi, self.ap_ssid, throughput))
except Exception as e:
self._save_test_result("{}_{}".format(proto, direction), "", atten_val, rssi, heap_size)
Utility.console_log("Failed during test: {}".format(e))
def run_all_cases(self, atten_val):
"""
run test for all types (udp_tx, udp_rx, tcp_tx, tcp_rx).
:param atten_val: attenuator value
"""
self.run_test("tcp", "tx", atten_val)
self.run_test("tcp", "rx", atten_val)
self.run_test("udp", "tx", atten_val)
self.run_test("udp", "rx", atten_val)
def wait_ap_power_on(self):
"""
AP need to take sometime to power on. It changes for different APs.
This method will scan to check if the AP powers on.
:return: True or False
"""
self.dut.write("restart")
self.dut.expect("esp32>")
for _ in range(WAIT_AP_POWER_ON_TIMEOUT // SCAN_TIMEOUT):
try:
self.dut.write("scan {}".format(self.ap_ssid))
self.dut.expect(re.compile(r"\[{}]\[rssi=(-\d+)]".format(self.ap_ssid)),
timeout=SCAN_TIMEOUT)
ret = True
break
except DUT.ExpectTimeout:
pass
else:
ret = False
return ret
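# Console formats assumed by setup() above (illustrative samples modelled
# on the expect() regexes, not captured output):
#   scan reply: [myssid][rssi=-45] -> rssi parsed as -45
#   sta reply:  sta ip: 192.168.1.2, mask: 255.255.255.0, gw: 192.168.1.1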
@ttfw_idf.idf_example_test(env_tag="Example_ShieldBox_Basic", category="stress")
def test_wifi_throughput_with_different_configs(env, extra_data):
"""
steps: |
1. build iperf with specified configs
2. test throughput for all routers
"""
pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
ap_info = {
"ssid": env.get_variable("ap_ssid"),
"password": env.get_variable("ap_password"),
}
# decode: check_output returns bytes on Python 3
config_names_raw = subprocess.check_output(["ls", os.path.dirname(os.path.abspath(__file__))]).decode("utf-8")
config_names = CONFIG_NAME_PATTERN.findall(config_names_raw)
if not config_names:
raise ValueError("no configs found in {}".format(os.path.dirname(__file__)))
test_result = dict()
sdkconfig_files = dict()
for config_name in config_names:
# 1. get the config
sdkconfig_files[config_name] = os.path.join(os.path.dirname(__file__),
"sdkconfig.ci.{}".format(config_name))
# 2. get DUT and download
dut = env.get_dut("iperf", "examples/wifi/iperf", dut_class=ttfw_idf.ESP32DUT,
app_config_name=config_name)
dut.start_app()
dut.expect("esp32>")
# 3. run test for each required att value
test_result[config_name] = {
"tcp_tx": TestResult("tcp", "tx", config_name),
"tcp_rx": TestResult("tcp", "rx", config_name),
"udp_tx": TestResult("udp", "tx", config_name),
"udp_rx": TestResult("udp", "rx", config_name),
}
test_utility = IperfTestUtility(dut, config_name, ap_info["ssid"],
ap_info["password"], pc_nic_ip, pc_iperf_log_file, test_result[config_name])
for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
test_utility.run_all_cases(0)
for result_type in test_result[config_name]:
summary = str(test_result[config_name][result_type])
if summary:
Utility.console_log(summary, color="orange")
# 4. check test results
env.close_dut("iperf")
# 5. generate report
report = TestReport.ThroughputForConfigsReport(os.path.join(env.log_path, "ThroughputForConfigsReport"),
ap_info["ssid"], test_result, sdkconfig_files)
report.generate_report()
@ttfw_idf.idf_example_test(env_tag="Example_ShieldBox", category="stress")
def test_wifi_throughput_vs_rssi(env, extra_data):
"""
steps: |
1. build with best performance config
2. switch on one router
3. set attenuator value from 0-60 for each router
4. test TCP tx rx and UDP tx rx throughput
"""
att_port = env.get_variable("attenuator_port")
ap_list = env.get_variable("ap_list")
pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
apc_ip = env.get_variable("apc_ip")
pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
test_result = {
"tcp_tx": TestResult("tcp", "tx", BEST_PERFORMANCE_CONFIG),
"tcp_rx": TestResult("tcp", "rx", BEST_PERFORMANCE_CONFIG),
"udp_tx": TestResult("udp", "tx", BEST_PERFORMANCE_CONFIG),
"udp_rx": TestResult("udp", "rx", BEST_PERFORMANCE_CONFIG),
}
# 1. get DUT and download
dut = env.get_dut("iperf", "examples/wifi/iperf", dut_class=ttfw_idf.ESP32DUT,
app_config_name=BEST_PERFORMANCE_CONFIG)
dut.start_app()
dut.expect("esp32>")
# 2. run test for each required att value
for ap_info in ap_list:
test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info["ssid"], ap_info["password"],
pc_nic_ip, pc_iperf_log_file, test_result)
PowerControl.Control.control_rest(apc_ip, ap_info["outlet"], "OFF")
PowerControl.Control.control(apc_ip, {ap_info["outlet"]: "ON"})
Attenuator.set_att(att_port, 0)
if not test_utility.wait_ap_power_on():
Utility.console_log("[{}] failed to power on, skip testing this AP"
.format(ap_info["ssid"]), color="red")
continue
for atten_val in ATTEN_VALUE_LIST:
assert Attenuator.set_att(att_port, atten_val) is True
test_utility.run_all_cases(atten_val)
# 3. check test results
env.close_dut("iperf")
# 4. generate report
report = TestReport.ThroughputVsRssiReport(os.path.join(env.log_path, "ThroughputVsRssiReport"),
test_result)
report.generate_report()
@ttfw_idf.idf_example_test(env_tag="Example_ShieldBox_Basic")
def test_wifi_throughput_basic(env, extra_data):
"""
steps: |
1. test TCP tx rx and UDP tx rx throughput
2. compare with the pre-defined pass standard
"""
pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
ap_info = {
"ssid": env.get_variable("ap_ssid"),
"password": env.get_variable("ap_password"),
}
# 1. get DUT
dut = env.get_dut("iperf", "examples/wifi/iperf", dut_class=ttfw_idf.ESP32DUT,
app_config_name=BEST_PERFORMANCE_CONFIG)
dut.start_app()
dut.expect("esp32>")
# 2. preparing
test_result = {
"tcp_tx": TestResult("tcp", "tx", BEST_PERFORMANCE_CONFIG),
"tcp_rx": TestResult("tcp", "rx", BEST_PERFORMANCE_CONFIG),
"udp_tx": TestResult("udp", "tx", BEST_PERFORMANCE_CONFIG),
"udp_rx": TestResult("udp", "rx", BEST_PERFORMANCE_CONFIG),
}
test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info["ssid"],
ap_info["password"], pc_nic_ip, pc_iperf_log_file, test_result)
# 3. run test for TCP Tx, Rx and UDP Tx, Rx
for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
test_utility.run_all_cases(0)
# 4. log performance and compare with pass standard
performance_items = []
for throughput_type in test_result:
ttfw_idf.log_performance("{}_throughput".format(throughput_type),
"{:.02f} Mbps".format(test_result[throughput_type].get_best_throughput()))
performance_items.append(["{}_throughput".format(throughput_type),
"{:.02f} Mbps".format(test_result[throughput_type].get_best_throughput())])
# 5. save to report
TinyFW.JunitReport.update_performance(performance_items)
# check after logging; otherwise the test exits immediately on a failed check and some results would never be logged
for throughput_type in test_result:
ttfw_idf.check_performance("{}_throughput".format(throughput_type),
test_result[throughput_type].get_best_throughput())
env.close_dut("iperf")
if __name__ == '__main__':
test_wifi_throughput_basic(env_config_file="EnvConfig.yml")
test_wifi_throughput_with_different_configs(env_config_file="EnvConfig.yml")
test_wifi_throughput_vs_rssi(env_config_file="EnvConfig.yml")
#!/usr/bin/env python3
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import glob
import itertools
import os
import sys
from dataclasses import dataclass
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
from scipy import stats
from scipy.stats.mstats import winsorize
# ------------------------------------------------------------------------------
# Plot parameters
# ------------------------------------------------------------------------------
MARKER_SIZE = 5
ZSCORE_THRESHOLD = 3
# ------------------------------------------------------------------------------
# Plot Labels
# ------------------------------------------------------------------------------
RUN_TIME_LABEL = "Time to Full FSM Coverage (Relative)"
NUM_STATES_LABEL = "# states"
WIDTH_LABEL = "width"
INSTR_TYPE_LABEL = "Components Instrumented"
OPT_TYPE_LABEL = "Fork Server Init."
INSTR_TYPE_MAPPINGS = {
"full": "All",
"duttb": "DUT & TB",
"dut": "DUT only",
# "full": "full",
# "duttb": "duttb",
# "dut": "dut",
}
OPT_TYPE_MAPPINGS = {
False: "TB Entrypoint",
True: "After DUT Reset",
}
# ------------------------------------------------------------------------------
# Experiment Status Labels
# ------------------------------------------------------------------------------
NUM_TRIALS_COMPLETED_LABEL = "# Trials Completed"
TRIALS_MISSING_LABEL = "Trials Missing"
# ------------------------------------------------------------------------------
# Experiment Parameters
# ------------------------------------------------------------------------------
EXP_BASE_NAMES = [
"exp002-cpp-afl-lock-%dstates-%dwidth-full-instr",
"exp003-cpp-afl-lock-%dstates-%dwidth-duttb-instr",
"exp004-cpp-afl-lock-%dstates-%dwidth-dut-instr",
"exp005-cpp-afl-lock-%dstates-%dwidth-full-instr-wopt",
"exp006-cpp-afl-lock-%dstates-%dwidth-duttb-instr-wopt",
"exp007-cpp-afl-lock-%dstates-%dwidth-dut-instr-wopt",
]
STATES = [8, 16, 32, 64]
# STATES = [16, 32, 64, 128]
WIDTHS = [4]
TRIALS = range(50)
# ------------------------------------------------------------------------------
# Other defines
# ------------------------------------------------------------------------------
TERMINAL_ROWS, TERMINAL_COLS = os.popen('stty size', 'r').read().split()
LINE_SEP = "=" * int(TERMINAL_COLS)
@dataclass
class FuzzingData:
num_states: int = -1
width: int = -1
instr_type: str = ""
fs_opt: bool = False
trial_num: int = -1
data_path: str = ""
def __post_init__(self):
self.afl_data = self._load_afl_data()
self.runtime = self._load_run_time()
def _load_csv_data(self, csv_file):
return pd.read_csv(csv_file,
delimiter=',',
index_col=None,
engine='python')
def _load_run_time(self):
run_time_path = "%s/logs/fuzz_time.log" % self.data_path
if not os.path.exists(run_time_path):
print(red("ERROR: run time data (%s) does not exist." % run_time_path))
sys.exit(1)
with open(run_time_path, "r") as lf:
for line in lf:
line = line.strip()
if line.startswith("real"):
line_list = line.split()
rt_min = float(line_list[1].split("m")[0])
rt_sec = float(line_list[1].split("m")[1].rstrip("s"))
return ((rt_min * 60) + rt_sec)
def _load_afl_data(self):
afl_glob_path = os.path.join(self.data_path, "out", "afl_*_interactive",
"plot_data")
afl_plot_data_files = glob.glob(afl_glob_path)
if len(afl_plot_data_files) != 1:
print(red("ERROR: AFL plot_data file no found."))
sys.exit(1)
# Load data into Pandas DataFrame
afl_df = self._load_csv_data(afl_plot_data_files[0])
# Remove leading/trailing white space from column names
afl_df = afl_df.rename(columns=lambda x: x.strip())
# Adjust time stamps to be relative to start time
afl_df.loc[:, "# unix_time"] -= afl_df.loc[0, "# unix_time"]
return afl_df
def _drop_outliers_in_range(values, lower_percentile=30, upper_percentile=70):
lower_bound, upper_bound = np.percentile(
values, [lower_percentile, upper_percentile])
trimmed_values = []
for i in range(len(values)):
if lower_bound <= values[i] < upper_bound:
trimmed_values.append(values[i])
return trimmed_values
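# Worked example (illustrative): for values [1, 2, ..., 10] the 30th/70th
# percentiles are 3.7 and 7.3, so only 4, 5, 6 and 7 survive the trim.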
def _aggregate_instr_complex_rts(exp2data):
exp2rts = {}
for exp_name, fd_list in exp2data.items():
if fd_list[0].instr_type != "duttb" and fd_list[0].fs_opt is False:
# if fd_list[0].fs_opt is False:
runtimes = []
for trial in TRIALS:
fd = fd_list[trial]
runtimes.append(fd.runtime)
runtimes = _drop_outliers_in_range(runtimes)
exp2rts[(fd.num_states, fd.instr_type, fd.fs_opt)] = runtimes
return exp2rts
def build_instr_complex_rts_df(exp2data):
print(yellow("Building instruction complexity dataframe ..."))
INSTR_TYPE_BASELINE = "dut"
# Create an empty dictionary that will be used to build a Pandas
# DataFrame that looks like the following:
# +------------------------------------------------+
# | # states | instrumentation level | runtime (s) |
# +------------------------------------------------+
# | ... | ... | ... |
runtimes_dict = {
NUM_STATES_LABEL: [],
INSTR_TYPE_LABEL: [],
RUN_TIME_LABEL: [],
}
# Aggregate data into a dictionary
exp2rts = _aggregate_instr_complex_rts(exp2data)
# Compute scale factors for each set of num_states experiments
states2scales = {}
for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
if instr_type == INSTR_TYPE_BASELINE and fs_opt is False:
scale_factor = np.median(runtimes)
states2scales[num_states] = scale_factor
# Build the dataframe for plotting
for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
runtimes = list(map(lambda x: x / states2scales[num_states], runtimes))
runtimes_dict[NUM_STATES_LABEL].extend([num_states] * len(runtimes))
runtimes_dict[INSTR_TYPE_LABEL].extend([INSTR_TYPE_MAPPINGS[instr_type]] *
len(runtimes))
runtimes_dict[RUN_TIME_LABEL].extend(runtimes)
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(runtimes_dict)
def _aggregate_fs_opt_rts(exp2data):
exp2rts = {}
for exp_name, fd_list in exp2data.items():
if fd_list[0].instr_type == "full":
runtimes = []
for trial in TRIALS:
fd = fd_list[trial]
runtimes.append(fd.runtime)
runtimes = _drop_outliers_in_range(runtimes)
exp2rts[(fd.num_states, fd.instr_type, fd.fs_opt)] = runtimes
return exp2rts
def build_fs_opt_rts_df(exp2data):
print(yellow("Building fork server optimization dataframe ..."))
FS_OPT_BASELINE = True
# Create an empty dictionary that will be used to build a Pandas
# DataFrame that looks like the following:
# +----------------------------------------------------+
# | # states | fork server optimization? | runtime (s) |
# +----------------------------------------------------+
# | ... | ... | ... |
runtimes_dict = {
NUM_STATES_LABEL: [],
OPT_TYPE_LABEL: [],
RUN_TIME_LABEL: [],
}
# Aggregate data into a dictionary
exp2rts = _aggregate_fs_opt_rts(exp2data)
# Compute scale factors for each set of num_states experiments
states2scales = {}
for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
if instr_type == "full" and fs_opt is FS_OPT_BASELINE:
scale_factor = np.median(runtimes)
states2scales[num_states] = scale_factor
# Build the dataframe for plotting
for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
runtimes = list(map(lambda x: x / states2scales[num_states], runtimes))
runtimes_dict[NUM_STATES_LABEL].extend([num_states] * len(runtimes))
runtimes_dict[OPT_TYPE_LABEL].extend([OPT_TYPE_MAPPINGS[fs_opt]] *
len(runtimes))
runtimes_dict[RUN_TIME_LABEL].extend(runtimes)
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(runtimes_dict)
def load_fuzzing_data(data_root):
print(yellow("Loading data ..."))
exp2data = collections.defaultdict(list)
# TODO: change this to automatically extract names from a single exp. number
# extract each data file into a Pandas dataframe
exp_combos = list(itertools.product(STATES, WIDTHS, EXP_BASE_NAMES))
for num_states, width, exp_base_name in exp_combos:
for trial in TRIALS:
# Build complete path to data files
exp_name_wo_trialnum = exp_base_name % (num_states, width)
exp_name = "%s-%d" % (exp_name_wo_trialnum, trial)
data_path = os.path.join(data_root, exp_name)
# Extract experiment info.
exp_name_list = exp_name.split("-")
instr_type = exp_name_list[6]
if len(exp_name_list) > 9:
fs_opt = True
else:
fs_opt = False
# Load fuzzing data into an object
exp2data[exp_name_wo_trialnum].append(
FuzzingData(num_states, width, instr_type, fs_opt, trial, data_path))
print(green("Done."))
print(LINE_SEP)
return exp2data
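# Worked example (illustrative): trial 0 of
# "exp005-cpp-afl-lock-8states-4width-full-instr-wopt" yields the name
# "exp005-cpp-afl-lock-8states-4width-full-instr-wopt-0", which splits into
# 10 tokens: token 6 ("full") is the instrumentation type, and the extra
# "wopt" token (length > 9) marks the fork-server optimization.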
def compute_instr_type_mann_whitney(instr_rts):
print(
yellow(
"Computing Mann-Whitney U-test on instrumentation complexity data ..."
))
for num_states in STATES:
sub_rt_df = instr_rts[instr_rts[NUM_STATES_LABEL] == num_states]
full_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
INSTR_TYPE_MAPPINGS["full"]][RUN_TIME_LABEL]
duttb_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
INSTR_TYPE_MAPPINGS["duttb"]][RUN_TIME_LABEL]
dut_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
INSTR_TYPE_MAPPINGS["dut"]][RUN_TIME_LABEL]
# mw_full_duttb = stats.mannwhitneyu(full_instr_data, duttb_instr_data)
mw_full_dut = stats.mannwhitneyu(full_instr_data, dut_instr_data)
# mw_duttb_dut = stats.mannwhitneyu(duttb_instr_data, dut_instr_data)
print("%d States - Mann-Whitney:" % num_states)
# print(
# "\t%s vs. %s:" %
# (INSTR_TYPE_MAPPINGS["full"], INSTR_TYPE_MAPPINGS["duttb"]),
# mw_full_duttb)
print(
"\t%s vs. %s:" %
(INSTR_TYPE_MAPPINGS["full"], INSTR_TYPE_MAPPINGS["dut"]), mw_full_dut)
# print(
# "\t%s vs. %s:" %
# (INSTR_TYPE_MAPPINGS["duttb"], INSTR_TYPE_MAPPINGS["dut"]),
# mw_duttb_dut)
print(green("Done."))
print(LINE_SEP)
def compute_fs_opt_mann_whitney(instr_rts):
print(yellow("Computing Mann-Whitney U-test on fork server opt. data ..."))
for num_states in STATES:
sub_rt_df = instr_rts[instr_rts[NUM_STATES_LABEL] == num_states]
no_opt_data = sub_rt_df[sub_rt_df[OPT_TYPE_LABEL] ==
OPT_TYPE_MAPPINGS[False]][RUN_TIME_LABEL]
opt_data = sub_rt_df[sub_rt_df[OPT_TYPE_LABEL] ==
OPT_TYPE_MAPPINGS[True]][RUN_TIME_LABEL]
mw = stats.mannwhitneyu(no_opt_data, opt_data)
print("%d States - Mann-Whitney:" % num_states)
print("\t%s vs. %s:" % (OPT_TYPE_MAPPINGS[False], OPT_TYPE_MAPPINGS[True]),
mw.pvalue)
print(green("Done."))
print(LINE_SEP)
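# Minimal usage sketch of the SciPy call used above (sample arrays made up):
#   stats.mannwhitneyu([1.0, 1.1, 0.9], [1.4, 1.5, 1.6]).pvalue
# A small p-value indicates the two runtime distributions differ.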
def plot_opt_strategies(instr_rts, fsopt_rts, plot_type="violin"):
print(yellow("Generating plots ..."))
LABEL_FONT_SIZE = 14
sns.set()
# HW fuzzing instrumentation levels
if plot_type == "violin":
ax1 = sns.violinplot(x=NUM_STATES_LABEL,
y=RUN_TIME_LABEL,
hue=INSTR_TYPE_LABEL,
data=instr_rts)
else:
ax1 = sns.stripplot(x=NUM_STATES_LABEL,
y=RUN_TIME_LABEL,
hue=INSTR_TYPE_LABEL,
data=instr_rts,
dodge=True,
jitter=0.3,
size=MARKER_SIZE)
ax1.axhline(y=1.0, color='r', linestyle='-')
ax1.set_ylim(0.5, 3)
ax1.set_xlabel(NUM_STATES_LABEL, fontsize=LABEL_FONT_SIZE)
ax1.set_ylabel(RUN_TIME_LABEL, fontsize=LABEL_FONT_SIZE)
ax1.tick_params("x", labelsize=LABEL_FONT_SIZE)
ax1.tick_params("y", labelsize=LABEL_FONT_SIZE)
plt.legend(title=INSTR_TYPE_LABEL,
fontsize=LABEL_FONT_SIZE,
title_fontsize=LABEL_FONT_SIZE)
plt.tight_layout()
# plt.savefig("hwf_instrumentation_levels.png", format="png")
plt.savefig("hwf_instrumentation_levels.pdf", format="pdf")
plt.close()
# HW fork server optimization
if plot_type == "violin":
ax2 = sns.violinplot(x=NUM_STATES_LABEL,
y=RUN_TIME_LABEL,
hue=OPT_TYPE_LABEL,
data=fsopt_rts)
else:
ax2 = sns.stripplot(x=NUM_STATES_LABEL,
y=RUN_TIME_LABEL,
hue=OPT_TYPE_LABEL,
data=fsopt_rts,
dodge=True,
jitter=0.3,
size=MARKER_SIZE)
ax2.axhline(y=1.0, color='r', linestyle='-')
ax2.set_ylim(0.5, 3)
ax2.set_xlabel(NUM_STATES_LABEL, fontsize=LABEL_FONT_SIZE)
ax2.set_ylabel(RUN_TIME_LABEL, fontsize=LABEL_FONT_SIZE)
ax2.tick_params("x", labelsize=LABEL_FONT_SIZE)
ax2.tick_params("y", labelsize=LABEL_FONT_SIZE)
plt.legend(title=OPT_TYPE_LABEL,
fontsize=LABEL_FONT_SIZE,
title_fontsize=LABEL_FONT_SIZE)
plt.tight_layout()
# plt.savefig("hwf_fs_opt.png", format="png")
plt.savefig("hwf_fs_opt.pdf", format="pdf")
print(green("Done."))
print(LINE_SEP)
def main(argv):
parser = argparse.ArgumentParser(description="Plotting script for exp. 004.")
parser.add_argument("data_root")
args = parser.parse_args()
# Load runtime data
exp2data = load_fuzzing_data(args.data_root)
instr_rts = build_instr_complex_rts_df(exp2data)
fsopt_rts = build_fs_opt_rts_df(exp2data)
# Compute stats
compute_instr_type_mann_whitney(instr_rts)
compute_fs_opt_mann_whitney(fsopt_rts)
# Plot the data
plot_opt_strategies(instr_rts, fsopt_rts)
if __name__ == "__main__":
main(sys.argv[1:])
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import PyQt5.QtCore as QtCore
from electroncash.i18n import _
from electroncash.network import DEFAULT_PORTS
from electroncash.network import serialize_server, deserialize_server
from util import *
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
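# Server strings are serialized as "host:port:protocol", where protocol is
# one of the letters above ('t' = TCP, 's' = SSL), e.g.
# "electrum.example.org:50002:s" (hostname here is illustrative).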
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 20)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
network.register_callback(self.on_network, ['updated', 'interfaces'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
# PyQt5 returns the stored Python value directly (no QVariant.toInt())
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
index = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(index))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network):
self.clear()
# with a single chain, hang items directly off the top level
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for k, items in chains.items():
b = network.blockchains[k]
name = b.get_name()
if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_checkpoint(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.checkpoint)
else:
x = self
for i in items:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
if n_chains > 1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class ServerListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.set_server(server))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
host, port, protocol = s.split(':')
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.set_server()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, servers, protocol, use_tor):
self.clear()
for _host, d in sorted(servers.items()):
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
x = QTreeWidgetItem([_host, port])
server = _host+':'+port+':'+protocol
x.setData(1, Qt.UserRole, server)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class NetworkChoiceLayout(object):
def __init__(self, network, config, wizard=False):
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(proxy_tab, _('Proxy'))
tabs.addTab(server_tab, _('Server'))
if wizard:
tabs.setCurrentIndex(2)
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.ssl_cb = QCheckBox(_('Use SSL'))
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.server_host.editingFinished.connect(self.set_server)
self.server_port.editingFinished.connect(self.set_server)
self.ssl_cb.clicked.connect(self.change_protocol)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_host, 1, 1, 1, 2)
grid.addWidget(self.server_port, 1, 3)
label = _('Server peers') if network.is_connected() else _('Default Servers')
grid.addWidget(QLabel(label), 2, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 3, 0, 1, 5)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['NONE', 'SOCKS4', 'SOCKS5', 'HTTP'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(60)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.check_disable_proxy()
self.proxy_mode.currentIndexChanged.connect(self.check_disable_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(QIcon(":icons/tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.ssl_cb, 0, 0, 1, 3)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(6, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 4, 0, 1, 3)
grid.addWidget(HelpButton(msg), 4, 4)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.update()
def check_disable_proxy(self, index=False):
if self.config.is_modifiable('proxy'):
for w in [self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(self.proxy_mode.currentText() != 'NONE')
else:
for w in [self.proxy_host, self.proxy_port, self.proxy_mode]:
w.setEnabled(False)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list.setEnabled(enabled)
self.tabs.setTabEnabled(2, enabled)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port, self.ssl_cb, self.servers_list]:
w.setEnabled(False)
def update(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
if not proxy_config:
proxy_config = { "mode":"none", "host":"localhost", "port":"9050"}
self.server_host.setText(host)
self.server_port.setText(port)
self.ssl_cb.setChecked(protocol=='s')
self.autoconnect_cb.setChecked(auto_connect)
host = self.network.interface.host if self.network.interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
self.servers_list.update(self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
# proxy tab
self.proxy_mode.setCurrentIndex(self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to %d nodes.")%n if n else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains)>1:
chain = self.network.blockchain()
checkpoint = chain.get_checkpoint()
name = chain.get_name()
msg = _('Chain split detected at block %d')%checkpoint + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = unicode(self.server_host.text())
pp = self.servers.get(host, DEFAULT_PORTS)
if p not in pp.keys():
p = pp.keys()[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, index):
self.network.follow_chain(index)
self.update()
def follow_server(self, server):
self.network.switch_to_interface(server)
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host, port, protocol = server.split(':')
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = pp.keys()[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
self.ssl_cb.setChecked(protocol=='s')
def accept(self):
pass
def set_server(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host = str(self.server_host.text())
port = str(self.server_port.text())
protocol = 's' if self.ssl_cb.isChecked() else 't'
auto_connect = self.autoconnect_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def set_proxy(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
if self.proxy_mode.currentText() != 'NONE':
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def suggest_proxy(self, found_proxy):
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if self.proxy_mode.currentIndex() == 2 \
and self.proxy_host.text() == "127.0.0.1" \
and self.proxy_port.text() == str(found_proxy[1]):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
# 2 = SOCKS5
if not use_it:
self.proxy_mode.setCurrentIndex(0)
self.tor_cb.setChecked(False)
else:
self.proxy_mode.setCurrentIndex(2)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
for p in ports:
if TorDetector.is_tor_port(p):
self.found_proxy.emit(("127.0.0.1", p))
return
@staticmethod
def is_tor_port(port):
try:
# use the public constructor; socket._socketobject is a private alias
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Tor responds uniquely to HTTP-like requests
s.send("GET\n")
if "Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
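# Minimal usage sketch (illustrative, kept as a comment so nothing runs on
# import): probing the usual Tor (9050) and Tor Browser (9150) ports
# without the Qt machinery:
#   for p in (9050, 9150):
#       if TorDetector.is_tor_port(p):
#           print "Tor listening on 127.0.0.1:%d" % p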
#!/usr/bin/python
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import pprint
def read_until(line, fd, end):
out = [line]
while True:
idx = line.find(end)
if idx < 0:
line = clean_line(fd.readline(), fd)
out.append(line)
else:
out.append(line[idx + len(end):])
return out
def remove_comment(line, fd):
out = []
while True:
idx = line.find('/*')
if idx < 0:
idx = line.find('//')
if idx < 0:
out.append(line)
else:
out.append(line[:idx])
return ' '.join(out)
out.append(line[:idx])
line = read_until(line[idx:], fd, '*/')[-1]
def clean_line(line, fd):
return remove_comment(line, fd).strip()
def parse_node_name(line):
line = line[:-1]
if '@' in line:
line, addr = line.split('@')
else:
addr = None
if ':' in line:
label, name = line.split(':')
else:
name = line
label = None
if addr is None:
return label, name.strip(), None
return label, name.strip(), int(addr, 16)
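# Worked examples (illustrative):
#   parse_node_name("uart0: serial@40002000 {") -> ("uart0", "serial", 0x40002000)
#   parse_node_name("chosen {")                 -> (None, "chosen", None)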
def parse_values_internal(value, start, end, separator):
out = []
inside = False
accum = []
for ch in value:
if not inside:
if ch == start:
inside = True
accum = []
else:
if ch == end:
inside = False
out.append(''.join(accum))
accum = []
else:
accum.append(ch)
if separator == ' ':
out = [v.split() for v in out]
if len(out) == 1:
return parse_value(out[0])
return [parse_value(v) for v in out]
def parse_values(value, start, end, separator):
out = parse_values_internal(value, start, end, separator)
if isinstance(out, list) and all(isinstance(v, str) and len(v) == 1 and not v.isalpha() for v in out):
return bytearray(out)
return out
def parse_value(value):
if value == '':
return value
if isinstance(value, list):
out = [parse_value(v) for v in value]
return out[0] if len(out) == 1 else out
if value[0] == '<':
return parse_values(value, '<', '>', ' ')
if value[0] == '"':
return parse_values(value, '"', '"', ',')
if value[0] == '[':
return parse_values(value, '[', ']', ' ')
if value[0] == '&':
return {'ref': value[1:]}
if value[0].isdigit():
if value.startswith("0x"):
return int(value, 16)
if value[0] == '0':
return int(value, 8)
return int(value, 10)
return value
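# Worked examples (illustrative) of parse_value:
#   '<1 2 3>'  -> [1, 2, 3]          (cell list)
#   '"a", "b"' -> ['a', 'b']         (string list)
#   '&gpio0'   -> {'ref': 'gpio0'}   (phandle reference)
#   '0x1000'   -> 4096, '010' -> 8, '42' -> 42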
def parse_property(property, fd):
if '=' in property:
key, value = property.split('=', 1)
value = ' '.join(read_until(value, fd, ';')).strip()
if not value.endswith(';'):
raise SyntaxError("parse_property: missing semicolon: %s" % value)
return key.strip(), parse_value(value[:-1])
property = property.strip()
if not property.endswith(';'):
raise SyntaxError("parse_property: missing semicolon: %s" % property)
return property[:-1].strip(), True
def build_node_name(name, addr):
if addr is None:
return name
return '%s@%x' % (name, addr)
def parse_node(line, fd):
label, name, addr = parse_node_name(line)
node = {
'label': label,
'type': 'node',
'addr': addr,
'children': {},
'props': {},
'name': build_node_name(name, addr)
}
while True:
line = fd.readline()
if not line:
raise SyntaxError("parse_node: Missing } while parsing node")
line = clean_line(line, fd)
if not line:
continue
if line == "};":
break
if line.endswith('{'):
new_node = parse_node(line, fd)
node['children'][new_node['name']] = new_node
else:
key, value = parse_property(line, fd)
node['props'][key] = value
return node
def parse_file(fd, ignore_dts_version=False):
nodes = {}
has_v1_tag = False
while True:
line = fd.readline()
if not line:
break
line = clean_line(line, fd)
if not line:
continue
if line.startswith('/include/ '):
tag, filename = line.split()
with open(filename.strip()[1:-1], "r") as new_fd:
nodes.update(parse_file(new_fd, True))
elif line == '/dts-v1/;':
has_v1_tag = True
elif line.startswith('/memreserve/ ') and line.endswith(';'):
tag, start, end = line.split()
start = int(start, 16)
end = int(end[:-1], 16)
label = "reserved_memory_0x%x_0x%x" % (start, end)
nodes[label] = {
'type': 'memory',
'reg': [start, end],
'label': label,
'addr': start,
            'name': build_node_name(label, start)  # fixed: `name` was undefined in this scope
}
elif line.endswith('{'):
if not has_v1_tag and not ignore_dts_version:
raise SyntaxError("parse_file: Missing /dts-v1/ tag")
new_node = parse_node(line, fd)
nodes[new_node['name']] = new_node
else:
raise SyntaxError("parse_file: Couldn't understand the line: %s" % line)
return nodes
def dump_refs(name, value, indent=0):
spaces = ' ' * indent
out = []
if isinstance(value, dict) and 'ref' in value:
out.append('%s\"%s\" -> \"%s\";' % (spaces, name, value['ref']))
elif isinstance(value, list):
for elem in value:
out.extend(dump_refs(name, elem, indent))
return out
def dump_all_refs(name, props, indent=0):
out = []
for key, value in props.items():
out.extend(dump_refs(name, value, indent))
return out
def next_subgraph(count=[0]):
count[0] += 1
return 'subgraph cluster_%d' % count[0]
def get_dot_node_name(node):
name = node['name']
return name[1:] if name[0] == '&' else name
def dump_to_dot(nodes, indent=0, start_string='digraph devicetree', name=None):
spaces = ' ' * indent
print("%s%s {" % (spaces, start_string))
if name is not None:
print("%slabel = \"%s\";" % (spaces, name))
print("%s\"%s\";" % (spaces, name))
ref_list = []
for key, value in nodes.items():
if value.get('children'):
refs = dump_to_dot(value['children'], indent + 1, next_subgraph(), get_dot_node_name(value))
ref_list.extend(refs)
else:
print("%s\"%s\";" % (spaces, get_dot_node_name(value)))
for key, value in nodes.items():
refs = dump_all_refs(get_dot_node_name(value), value.get('props', {}), indent)
ref_list.extend(refs)
if start_string.startswith("digraph"):
print("%s%s" % (spaces, '\n'.join(ref_list)))
print("%s}" % spaces)
return ref_list
def main(args):
if len(args) == 1:
print('Usage: %s filename.dts' % args[0])
return 1
if '--dot' in args:
formatter = dump_to_dot
args.remove('--dot')
else:
formatter = lambda nodes: pprint.pprint(nodes, indent=2)
with open(args[1], "r") as fd:
formatter(parse_file(fd))
return 0
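# Usage sketch (illustrative, assuming an in-memory buffer is acceptable
# wherever a file object is expected):
#   import io
#   dts = io.StringIO('/dts-v1/;\nuart0: serial@40002000 {\n'
#                     'compatible = "acme,uart";\n};\n')
#   pprint.pprint(parse_file(dts))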
if __name__ == '__main__':
sys.exit(main(sys.argv))
| |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Skia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import fnmatch
import os
import re
import sys
import traceback
REVERT_CL_SUBJECT_PREFIX = 'Revert '
SKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com'
# Please add the complete email address here (and not just 'xyz@' or 'xyz').
PUBLIC_API_OWNERS = (
'reed@chromium.org',
'reed@google.com',
'bsalomon@chromium.org',
'bsalomon@google.com',
'djsollen@chromium.org',
'djsollen@google.com',
)
AUTHORS_FILE_NAME = 'AUTHORS'
def _CheckChangeHasEol(input_api, output_api, source_file_filter=None):
"""Checks that files end with atleast one \n (LF)."""
eof_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
contents = input_api.ReadFile(f, 'rb')
        # Check that the file ends in at least one newline character.
if len(contents) > 1 and contents[-1:] != '\n':
eof_files.append(f.LocalPath())
if eof_files:
return [output_api.PresubmitPromptWarning(
'These files should end in a newline character:',
items=eof_files)]
return []
def _PythonChecks(input_api, output_api):
"""Run checks on any modified Python files."""
pylint_disabled_warnings = (
'F0401', # Unable to import.
'E0611', # No name in module.
'W0232', # Class has no __init__ method.
'E1002', # Use of super on an old style class.
'W0403', # Relative import used.
'R0201', # Method could be a function.
'E1003', # Using class name in super.
'W0613', # Unused argument.
)
# Run Pylint on only the modified python files. Unfortunately it still runs
# Pylint on the whole file instead of just the modified lines.
affected_python_files = []
for affected_file in input_api.AffectedSourceFiles(None):
affected_file_path = affected_file.LocalPath()
if affected_file_path.endswith('.py'):
affected_python_files.append(affected_file_path)
return input_api.canned_checks.RunPylint(
input_api, output_api,
disabled_warnings=pylint_disabled_warnings,
white_list=affected_python_files)
def _CommonChecks(input_api, output_api):
"""Presubmit checks common to upload and commit."""
results = []
sources = lambda x: (x.LocalPath().endswith('.h') or
x.LocalPath().endswith('.gypi') or
x.LocalPath().endswith('.gyp') or
x.LocalPath().endswith('.py') or
x.LocalPath().endswith('.sh') or
x.LocalPath().endswith('.cpp'))
results.extend(
_CheckChangeHasEol(
input_api, output_api, source_file_filter=sources))
results.extend(_PythonChecks(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
"""Presubmit checks for the change on upload.
The following are the presubmit checks:
* Check change has one and only one EOL.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def _CheckTreeStatus(input_api, output_api, json_url):
"""Check whether to allow commit.
Args:
input_api: input related apis.
output_api: output related apis.
json_url: url to download json style status.
"""
tree_status_results = input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api, json_url=json_url)
if not tree_status_results:
# Check for caution state only if tree is not closed.
connection = input_api.urllib2.urlopen(json_url)
status = input_api.json.loads(connection.read())
connection.close()
if ('caution' in status['message'].lower() and
os.isatty(sys.stdout.fileno())):
# Display a prompt only if we are in an interactive shell. Without this
# check the commit queue behaves incorrectly because it considers
# prompts to be failures.
short_text = 'Tree state is: ' + status['general_state']
long_text = status['message'] + '\n' + json_url
tree_status_results.append(
output_api.PresubmitPromptWarning(
message=short_text, long_text=long_text))
else:
# Tree status is closed. Put in message about contacting sheriff.
connection = input_api.urllib2.urlopen(
SKIA_TREE_STATUS_URL + '/current-sheriff')
sheriff_details = input_api.json.loads(connection.read())
if sheriff_details:
tree_status_results[0]._message += (
'\n\nPlease contact the current Skia sheriff (%s) if you are trying '
'to submit a build fix\nand do not know how to submit because the '
'tree is closed') % sheriff_details['username']
return tree_status_results
def _CheckOwnerIsInAuthorsFile(input_api, output_api):
results = []
issue = input_api.change.issue
if issue and input_api.rietveld:
issue_properties = input_api.rietveld.get_issue_properties(
issue=int(issue), messages=False)
owner_email = issue_properties['owner_email']
try:
authors_content = ''
for line in open(AUTHORS_FILE_NAME):
if not line.startswith('#'):
authors_content += line
email_fnmatches = re.findall('<(.*)>', authors_content)
for email_fnmatch in email_fnmatches:
if fnmatch.fnmatch(owner_email, email_fnmatch):
          # Found a match; the user is in the AUTHORS file. Break out of the loop.
break
else:
# TODO(rmistry): Remove the below CLA messaging once a CLA checker has
# been added to the CQ.
results.append(
output_api.PresubmitError(
'The email %s is not in Skia\'s AUTHORS file.\n'
'Issue owner, this CL must include an addition to the Skia AUTHORS '
'file.\n'
'Googler reviewers, please check that the AUTHORS entry '
'corresponds to an email address in http://goto/cla-signers. If it '
'does not then ask the issue owner to sign the CLA at '
'https://developers.google.com/open-source/cla/individual '
'(individual) or '
'https://developers.google.com/open-source/cla/corporate '
'(corporate).'
% owner_email))
except IOError:
# Do not fail if authors file cannot be found.
traceback.print_exc()
input_api.logging.error('AUTHORS file not found!')
return results
def _CheckLGTMsForPublicAPI(input_api, output_api):
"""Check LGTMs for public API changes.
For public API files make sure there is an LGTM from the list of owners in
PUBLIC_API_OWNERS.
"""
results = []
requires_owner_check = False
for affected_file in input_api.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, file_ext = os.path.splitext(affected_file_path)
# We only care about files that end in .h and are under the top-level
# include dir.
if file_ext == '.h' and 'include' == file_path.split(os.path.sep)[0]:
requires_owner_check = True
if not requires_owner_check:
return results
lgtm_from_owner = False
issue = input_api.change.issue
if issue and input_api.rietveld:
issue_properties = input_api.rietveld.get_issue_properties(
issue=int(issue), messages=True)
if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties['subject'], re.I):
# It is a revert CL, ignore the public api owners check.
return results
match = re.search(r'^TBR=(.*)$', issue_properties['description'], re.M)
if match:
tbr_entries = match.group(1).strip().split(',')
for owner in PUBLIC_API_OWNERS:
if owner in tbr_entries or owner.split('@')[0] in tbr_entries:
# If an owner is specified in the TBR= line then ignore the public
# api owners check.
return results
if issue_properties['owner_email'] in PUBLIC_API_OWNERS:
# An owner created the CL that is an automatic LGTM.
lgtm_from_owner = True
messages = issue_properties.get('messages')
if messages:
for message in messages:
if (message['sender'] in PUBLIC_API_OWNERS and
'lgtm' in message['text'].lower()):
# Found an lgtm in a message from an owner.
lgtm_from_owner = True
break
if not lgtm_from_owner:
results.append(
output_api.PresubmitError(
'Since the CL is editing public API, you must have an LGTM from '
'one of: %s' % str(PUBLIC_API_OWNERS)))
return results
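# Sketch of the TBR= matching used above (illustrative values only):
#   description = 'Fix include\nTBR=reed@google.com,bsalomon'
#   re.search(r'^TBR=(.*)$', description, re.M).group(1).strip().split(',')
#     -> ['reed@google.com', 'bsalomon']
# Either a full owner address or its local part ('reed') satisfies the check.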
def CheckChangeOnCommit(input_api, output_api):
"""Presubmit checks for the change on commit.
The following are the presubmit checks:
* Check change has one and only one EOL.
* Ensures that the Skia tree is open in
http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution'
state and an error if it is in 'Closed' state.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
_CheckTreeStatus(input_api, output_api, json_url=(
SKIA_TREE_STATUS_URL + '/banner-status?format=json')))
results.extend(_CheckLGTMsForPublicAPI(input_api, output_api))
results.extend(_CheckOwnerIsInAuthorsFile(input_api, output_api))
return results
| |
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner (modified by Florian Wilhelm)
* License: Public Domain
* Version: 0.8+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
modify your __init__.py to define __version__ (by calling a function
from _version.py)
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
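# LONG_VERSION_PY below is a %-format template: 'setup.py update_files'
# renders it with a mapping like {"DOLLAR": "$", "TAG_PREFIX": ..., ...}, so
# for example the line
#   git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
# becomes
#   git_refnames = "$Format:%d$"
# which git-archive then expands via the export-subst attribute.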
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_source,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
toplevel = run_command([GIT, "rev-parse", "--show-toplevel"],
hide_stderr=True)
root = (toplevel.strip() if toplevel else os.path.dirname(here))
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return rep_by_pep440(ver)
def git2pep440(ver_str):
try:
tag, commits, _ = ver_str.split('-', 2)
return ".post".join([tag, commits])
except ValueError:
return ver_str
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_source, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
toplevel = run_command([GIT, "rev-parse", "--show-toplevel"],
hide_stderr=True)
root = (toplevel.strip() if toplevel else os.path.dirname(here))
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import os.path
import sys
# os.path.relpath only appeared in Python 2.6; define it here for 2.5.
def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def do_vcs_install(versionfile_source, ipy):
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
files = [versionfile_source, ipy]
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os_path_relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command([GIT, "add", "--"] + files)
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.8+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
f.close()
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_file(versionfile)
if ver:
if verbose: print("got version from file %s %s" % (versionfile, ver))
return rep_by_pep440(ver)
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return rep_by_pep440(ver)
if verbose: print("got version from default %s" % ver)
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
def git2pep440(ver_str):
try:
tag, commits, _ = ver_str.split('-', 2)
return ".post".join([tag, commits])
except ValueError:
return ver_str
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
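# Example (illustrative): 'git describe' output such as '1.2.0-5-gabcdef'
# becomes the PEP 440 version '1.2.0.post5'; plain tags like '1.2.0' are
# returned unchanged (the split raises ValueError, which is caught above).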
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, VMware
#
import logging
import mock
from oslo.config import cfg
import webob.exc as webexc
import webtest
from neutron.api import extensions
from neutron.common import exceptions as q_exc
from neutron import context
from neutron.db import api as db_api
from neutron.db import servicetype_db as st_db
from neutron.extensions import servicetype
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests import base
from neutron.tests.unit import dummy_plugin as dp
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
LOG = logging.getLogger(__name__)
DEFAULT_SERVICE_DEFS = [{'service_class': constants.DUMMY,
'plugin': dp.DUMMY_PLUGIN_NAME}]
_uuid = test_api_v2._uuid
_get_path = test_api_v2._get_path
class ServiceTypeManagerTestCase(base.BaseTestCase):
def setUp(self):
super(ServiceTypeManagerTestCase, self).setUp()
st_db.ServiceTypeManager._instance = None
self.manager = st_db.ServiceTypeManager.get_instance()
self.ctx = context.get_admin_context()
def test_service_provider_driver_not_unique(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver'],
'service_providers')
prov = {'service_type': constants.LOADBALANCER,
'name': 'name2',
'driver': 'driver',
'default': False}
self.manager._load_conf()
self.assertRaises(
q_exc.Invalid, self.manager.conf.add_provider, prov)
def test_get_service_providers(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
ctx = context.get_admin_context()
provconf.parse_service_provider_opt()
self.manager._load_conf()
res = self.manager.get_service_providers(ctx)
self.assertEqual(len(res), 2)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.DUMMY])
)
self.assertEqual(len(res), 1)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.LOADBALANCER])
)
self.assertEqual(len(res), 1)
def test_multiple_default_providers_specified_for_service(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.LOADBALANCER +
':lbaas2:driver_path:default'],
'service_providers')
self.assertRaises(q_exc.Invalid, self.manager._load_conf)
def test_get_default_provider(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
# can pass None as a context
p = self.manager.get_default_service_provider(None,
constants.LOADBALANCER)
self.assertEqual(p, {'service_type': constants.LOADBALANCER,
'name': 'lbaas1',
'driver': 'driver_path',
'default': True})
self.assertRaises(
provconf.DefaultServiceProviderNotFound,
self.manager.get_default_service_provider,
None, constants.DUMMY
)
def test_add_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
ctx = context.get_admin_context()
self.manager.add_resource_association(ctx,
constants.LOADBALANCER,
'lbaas1', '123-123')
self.assertEqual(ctx.session.
query(st_db.ProviderResourceAssociation).count(),
1)
assoc = ctx.session.query(st_db.ProviderResourceAssociation).one()
ctx.session.delete(assoc)
def test_invalid_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
ctx = context.get_admin_context()
self.assertRaises(provconf.ServiceProviderNotFound,
self.manager.add_resource_association,
ctx, 'BLABLA_svc', 'name', '123-123')
class TestServiceTypeExtensionManager(object):
"""Mock extensions manager."""
def get_resources(self):
return (servicetype.Servicetype.get_resources() +
dp.Dummy.get_resources())
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
        # This is needed because otherwise a failure will occur due to a
        # nonexistent core_plugin
cfg.CONF.set_override('core_plugin', test_db_plugin.DB_PLUGIN_KLASS)
cfg.CONF.set_override('service_plugins',
["%s.%s" % (dp.__name__,
dp.DummyServicePlugin.__name__)])
self.addCleanup(cfg.CONF.reset)
# Make sure at each test a new instance of the plugin is returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
ext_mgr = TestServiceTypeExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_')
super(ServiceTypeExtensionTestCaseBase, self).setUp()
class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase):
def setUp(self):
self._patcher = mock.patch(
"neutron.db.servicetype_db.ServiceTypeManager",
autospec=True)
self.addCleanup(self._patcher.stop)
self.mock_mgr = self._patcher.start()
self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value
super(ServiceTypeExtensionTestCase, self).setUp()
def test_service_provider_list(self):
instance = self.mock_mgr.return_value
res = self.api.get(_get_path('service-providers', fmt=self.fmt))
instance.get_service_providers.assert_called_with(mock.ANY,
filters={},
fields=[])
self.assertEqual(res.status_int, webexc.HTTPOk.code)
class ServiceTypeExtensionTestCaseXML(ServiceTypeExtensionTestCase):
fmt = 'xml'
class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
"""Tests ServiceTypemanager as a public API."""
def setUp(self):
# Blank out service type manager instance
st_db.ServiceTypeManager._instance = None
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
self.addCleanup(db_api.clear_db)
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
return self.api.get(_get_path('service-providers', fmt=self.fmt))
def test_list_service_providers(self):
res = self._list_service_providers()
self.assertEqual(res.status_int, webexc.HTTPOk.code)
data = self.deserialize(res)
self.assertTrue('service_providers' in data)
self.assertEqual(len(data['service_providers']), 2)
class ServiceTypeManagerExtTestCaseXML(ServiceTypeManagerExtTestCase):
fmt = 'xml'
| |
import sys
import unittest
from contextlib import contextmanager
from StringIO import StringIO
from rulekey_diff2 import *
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class MockFile(object):
def __init__(self, lines):
self._index = -1
self._lines = lines
def readline(self):
self._index += 1
return self._lines[self._index]
def readlines(self):
return self._lines
def __iter__(self):
return iter(self._lines)
class TestRuleKeyDiff2(unittest.TestCase):
def test_gather(self):
self.assertEquals(
gather(range(10), lambda x: x % 3),
{0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]})
def test_ordinals(self):
self.assertEquals(
ordinals([2, 3, 5, 7, 11]),
{2: 0, 3: 1, 5: 2, 7: 3, 11: 4})
def test_tokenize_rulekey_line(self):
self.assertEquals(
tokenize_rulekey_line('type1(val1a:val1b):type2(val2):type3("abc:def"):'),
['type3("abc:def")', 'type2(val2)', 'type1(val1a:val1b)'])
def test_read_rulekeys_from_diag_file(self):
file1 = MockFile([
'rulekey1 type1(val1a:val1b):type2(val2):',
'rulekey2 type3(val3):type4(val4):type5(val5):',
])
self.assertDictEqual(
read_rulekeys(file1, '', ' '),
{
'rulekey1': ['type2(val2)', 'type1(val1a:val1b)'],
'rulekey2': ['type5(val5)', 'type4(val4)', 'type3(val3)'],
})
def test_read_rulekeys_from_bucklog_file(self):
file1 = MockFile([
'[12:34:56][blah blah ...][com.facebook.buck.rules.keys.RuleKeyBuilder] ' +
'RuleKey rulekey1=type1(val1a:val1b):type2(val2):',
'[12:37:11][blah blah ...][com.facebook.buck.rules.keys.RuleKeyFactory] ' +
'I am a rulekey factory! Blah! Blah! Ignore me!',
'[13:41:08][blah blah ...][com.facebook.buck.rules.keys.RuleKeyBuilder] ' +
'RuleKey rulekey2=type3(val3):type4(val4):type5(val5):',
'[12:37:11][blah blah ...][com.facebook.buck.parser.Parser] ' +
'I am a parser! Blah! Blah! Ignore me!',
])
self.assertDictEqual(
read_rulekeys(file1, '[com.facebook.buck.rules.keys.RuleKeyBuilder] RuleKey ', '='),
{
'rulekey1': ['type2(val2)', 'type1(val1a:val1b)'],
'rulekey2': ['type5(val5)', 'type4(val4)', 'type3(val3)'],
})
def test_token_type(self):
self.assertEquals(token_type('key(field1)'), 'key')
self.assertEquals(token_type('wrapper(OPTIONAL)'), 'wrapper')
self.assertEquals(token_type('container(LIST,len=5)'), 'container')
self.assertEquals(token_type('container(TUPLE,len=2)'), 'container')
self.assertEquals(token_type('number(42)'), 'number')
self.assertEquals(token_type('string("ab(c)")'), 'string')
def test_token_value(self):
self.assertEquals(token_value('key(field1)'), 'field1')
self.assertEquals(token_value('wrapper(OPTIONAL)'), 'OPTIONAL')
self.assertEquals(token_value('container(LIST,len=5)'), 'LIST,len=5')
self.assertEquals(token_value('container(TUPLE,len=2)'), 'TUPLE,len=2')
self.assertEquals(token_value('number(42)'), '42')
self.assertEquals(token_value('string("ab(c)")'), '"ab(c)"')
def test_token_length(self):
self.assertEquals(token_length('key(field1)'), 1)
self.assertEquals(token_length('wrapper(OPTIONAL)'), 1)
self.assertEquals(token_length('container(LIST,len=5)'), 5)
self.assertEquals(token_length('container(TUPLE,len=2)'), 2)
self.assertEquals(token_length('number(42)'), 0)
self.assertEquals(token_length('string("ab(c)")'), 0)
def test_print_rulekey(self):
with captured_output() as (out, err):
print_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals('\n'.join([
'key(field1)',
' container(TUPLE,len=2)',
' container(LIST,len=3)',
' string("s1")',
' string("s2")',
' string("s3")',
' wrapper(OPTIONAL)',
' string("s4")',
'key(field2)',
' number(42)',
''
]), out.getvalue())
def test_reconstruct_rulekey(self):
s = reconstruct_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals(s.token, 'root()')
self.assertEquals(len(s), 2)
self.assertEquals(s[0].token, 'key(field1)')
self.assertEquals(len(s[0]), 1)
self.assertEquals(s[0][0].token, 'container(TUPLE,len=2)')
self.assertEquals(len(s[0][0]), 2)
self.assertEquals(s[0][0][0].token, 'container(LIST,len=3)')
self.assertEquals(len(s[0][0][0]), 3)
self.assertEquals(s[0][0][0][0].token, 'string("s1")')
self.assertEquals(len(s[0][0][0][0]), 0)
self.assertEquals(s[0][0][0][1].token, 'string("s2")')
self.assertEquals(len(s[0][0][0][1]), 0)
self.assertEquals(s[0][0][0][2].token, 'string("s3")')
self.assertEquals(len(s[0][0][0][2]), 0)
self.assertEquals(s[0][0][1].token, 'wrapper(OPTIONAL)')
self.assertEquals(len(s[0][0][1]), 1)
self.assertEquals(s[0][0][1][0].token, 'string("s4")')
self.assertEquals(len(s[0][0][1][0]), 0)
self.assertEquals(s[1].token, 'key(field2)')
self.assertEquals(len(s[1]), 1)
self.assertEquals(s[1][0].token, 'number(42)')
self.assertEquals(len(s[1][0]), 0)
@staticmethod
def diff_rulekeys_result(s1, s2):
res = []
def visitor(p1, _s1, p2, _s2): res.append((p1, p2))
diff_rulekeys(s1, s2, visitor)
return res
def test_diff_rulekeys_insert_or_remove_element(self):
s1 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=2)', 'string("s1")', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s1")', 'string("s2")', 'string("s3")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different length
('/root():0/key(k1):0/container(LIST,len=2)',
'/root():0/key(k1):0/container(LIST,len=3)'),
# report 'None' on the left != 'string("s2")' on the right
('/root():0/key(k1):0/container(LIST,len=2):None',
'/root():0/key(k1):0/container(LIST,len=3):1/string("s2")')
])
def test_diff_rulekeys_change_element_order(self):
s1 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s1")', 'string("s2")', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'container(LIST,len=3)', 'string("s2")', 'string("s3")', 'string("s1")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different order
('/root():0/key(k1):0/container(LIST,len=3):order[0, 1, 2]',
'/root():0/key(k1):0/container(LIST,len=3):order[2, 0, 1]'),
])
def test_diff_rulekeys_insert_or_remove_key(self):
s1 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k3)', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k2)', 'string("s2")', 'key(k3)', 'string("s3")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report 'None' on the left != 'key(k2)' on the right
('/root():None',
'/root():1/key(k2)'),
])
def test_diff_rulekeys_change_key_order(self):
s1 = reconstruct_rulekey(
['key(k1)', 'string("s1")', 'key(k2)', 'string("s2")', 'key(k3)', 'string("s3")'])
s2 = reconstruct_rulekey(
['key(k2)', 'string("s2")', 'key(k3)', 'string("s3")', 'key(k1)', 'string("s1")'])
self.assertEquals(
self.diff_rulekeys_result(s1, s2),
[
# report different order
('/root():order[0, 1, 2]',
'/root():order[2, 0, 1]'),
])
def test_find_children(self):
s = reconstruct_rulekey([
'key(field1)',
'container(TUPLE,len=2)',
'container(LIST,len=3)',
'string("s1")',
'string("s2")',
'string("s3")',
'wrapper(OPTIONAL)',
'string("s4")',
'key(field2)',
'number(42)',
])
self.assertEquals(find_children(s, r'field1', 0), [])
self.assertEquals(find_children(s, r'field1', 1), [s[0]])
self.assertEquals(find_children(s, r'field2', 1), [s[1]])
t = s[0][0] # 'container(TUPLE,len=2)'
self.assertEquals(find_children(s, r'string'), [t[0][0], t[0][1], t[0][2], t[1][0]])
def get_keys1(self):
return {
'rulekey1': [
'key(deps)',
'container(LIST,len=2)',
'string("//fake:ruleB")',
'string("//fake:ruleC")',
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleA")',
],
'rulekey2': [
'key(.rule_key_type)',
'string("input")',
'key(.target_name)',
'string("//fake:ruleA")',
],
'rulekey3': [
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleB")',
],
}
def test_find_keys(self):
keys = self.get_keys1()
self.assertEquals(
sorted(find_keys(keys, [])),
[])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleA', None)])),
['rulekey1', 'rulekey2'])
self.assertEquals(
sorted(find_keys(
keys,
[(r'fake:ruleA', '.target_name'), (r'default', '.rule_key_type')])),
['rulekey1'])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleB', None)])),
['rulekey1', 'rulekey3'])
self.assertEquals(
sorted(find_keys(keys, [(r'fake:ruleB', '.target_name')])),
['rulekey3'])
def test_extract_target(self):
tokens = [
'key(.rule_key_type)',
'string("default")',
'key(.target_name)',
'string("//fake:ruleB")']
self.assertEquals(extract_target(reconstruct_rulekey(tokens)), '//fake:ruleB')
def test_build_targets_to_rulekeys_index(self):
keys = self.get_keys1()
self.assertDictEqual(
build_targets_to_rulekeys_index(keys),
{
'//fake:ruleA': ['rulekey1', 'rulekey2'],
'//fake:ruleB': ['rulekey3'],
})
if __name__ == '__main__':
unittest.main()
| |
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import datetime
import inspect
import json
import logging
import os
import re
import six
# IMPORTANT: Keep expensive imports out of this list. This module is
# used by several commands and any latency here will be automatically
# applied to those commands. If the import is used once or twice, move
# it into the applicable function(s). If it's used more than once or
# twice, move the command impl into a separate module (see
# publish_impl for example).
from guild import cli
from guild import cmd_impl_support
from guild import config
from guild import exit_code
from guild import flag_util
from guild import op_util
from guild import remote_run_support
from guild import run as runlib
from guild import run_util
from guild import util
from guild import var
from guild import yaml_util
from . import remote_impl_support
log = logging.getLogger("guild")
RUN_DETAIL = [
"id",
"operation",
"from",
"status",
"started",
"stopped",
"marked",
"label",
"sourcecode_digest",
"vcs_commit",
"run_dir",
"command",
"exit_status",
"pid",
]
ALL_RUNS_ARG = [":"]
LATEST_RUN_ARG = ["1"]
CORE_RUN_ATTRS = [
"cmd",
"comments",
"compare",
"deps",
"env",
"exit_status",
"flags",
"host",
"id",
"initialized",
"label",
"marked",
"objective",
"op",
"pip_freeze",
"platform",
"random_seed",
"run_params",
"sourcecode_digest",
"started",
"stopped",
"tags",
"user",
"user_flags",
"vcs_commit",
]
LEGACY_RUN_ATTRS = [
"resolved_deps",
"opdef",
]
RUNS_PER_GROUP = 20
FILTERABLE = [
("completed", "status_completed"),
("error", "status_error"),
("pending", "status_pending"),
("running", "status_running"),
("staged", "status_staged"),
("terminated", "status_terminated"),
]
if not os.getenv("SHELL"):
# Windows command prompt wants a space buffer to avoid wrapping.
STYLE_TABLE_WIDTH_ADJ = -1
else:
STYLE_TABLE_WIDTH_ADJ = 0
STOP_TIMEOUT = 30
CHILD_TERM_TIMEOUT = 5
def runs_for_args(args, ctx=None):
filtered = filtered_runs(args, ctx)
return select_runs(filtered, args.runs, ctx)
def filtered_runs(args, ctx=None):
if getattr(args, "remote", None):
return remote_impl_support.filtered_runs(args)
else:
return var.runs(
_runs_root_for_args(args),
sort=["-timestamp"],
filter=_runs_filter(args, ctx),
)
def _runs_root_for_args(args):
archive = getattr(args, "archive", None)
deleted = getattr(args, "deleted", False)
if archive and deleted:
cli.error("--archive and --deleted cannot both be used")
if archive:
return archive
else:
return var.runs_dir(deleted=deleted)
def _runs_filter(args, ctx):
filters = []
_apply_status_filter(args, filters)
_apply_ops_filter(args, filters)
_apply_labels_filter(args, filters)
_apply_tags_filter(args, filters)
_apply_comments_filter(args, filters)
_apply_marked_filter(args, filters)
_apply_started_filter(args, ctx, filters)
_apply_sourcecode_digest_filter(args, filters)
return var.run_filter("all", filters)
def _apply_status_filter(args, filters):
status_filters = [
var.run_filter("attr", "status", status)
for status, args_attr in FILTERABLE
if getattr(args, args_attr, False)
]
if status_filters:
filters.append(var.run_filter("any", status_filters))
def _apply_ops_filter(args, filters):
if args.filter_ops:
filters.append(_op_run_filter(args.filter_ops))
def _op_run_filter(op_refs):
def f(run):
opspec = run_util.format_operation(run, nowarn=True)
return any((_compare_op(ref, opspec) for ref in op_refs))
return f
def _compare_op(ref, opspec):
if ref.startswith("^") or ref.endswith("$"):
return _re_match(ref, opspec)
else:
return _opspec_match(ref, opspec)
def _re_match(pattern, target):
try:
return re.search(pattern, target)
except re.error:
return False
def _opspec_match(ref, opspec):
ref_parts = _split_opspec(ref)
opspec_parts = _split_opspec(opspec)
assert len(ref_parts) == 3 and len(opspec_parts) == 3, (ref_parts, opspec_parts)
for ref_part, opspec_part in zip(ref_parts, opspec_parts):
if not _opspec_part_match(ref_part, opspec_part):
return False
return True
def _split_opspec(opspec):
parsed = op_util.parse_opspec(opspec)
if parsed:
model, op = parsed
pkg, model = _split_model_pkg(model)
return pkg, model, op
return None, None, None
def _split_model_pkg(model):
if model:
parts = model.split("/", 1)
if len(parts) == 2:
return parts
return None, model
def _opspec_part_match(ref, part):
if not ref:
return True
if not part:
return False
if "*" in ref:
return _opspec_part_fnmatch(ref, part)
else:
return ref == part
def _opspec_part_fnmatch(ref, part):
from fnmatch import fnmatch
return fnmatch(part, ref)
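# Example (illustrative): _opspec_part_match('tr*', 'train') is True via
# fnmatch; an empty ref part matches anything, so _opspec_part_match('', x)
# is always True; parts without '*' must match exactly.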
def _apply_labels_filter(args, filters):
if args.filter_labels and args.filter_unlabeled:
cli.error("--label and --unlabeled cannot both be used")
if args.filter_labels:
filters.append(_labels_filter(args.filter_labels))
elif args.filter_unlabeled:
filters.append(_unlabeled_filter())
def _labels_filter(filter_vals):
def f(run):
run_label = str(run.get("label", "")).strip()
return any((_match_label(s, run_label) for s in filter_vals))
return f
def _match_label(s, run_label):
if s == "-":
return not run_label
return s in run_label
def _unlabeled_filter():
def f(run):
return not run.get("label", "").strip()
return f
def _apply_tags_filter(args, filters):
if args.filter_tags:
filters.append(_tags_filter(args.filter_tags))
def _tags_filter(tags):
def f(run):
run_tags = run.get("tags") or []
return any((t in run_tags for t in tags))
return f
def _apply_comments_filter(args, filters):
if args.filter_comments:
filters.append(_comments_filter(args.filter_comments))
def _comments_filter(filter_vals):
def f(run):
comment_text = _run_comments_text(run)
return any((_match_comments(s, comment_text) for s in filter_vals))
return f
def _run_comments_text(run):
comments = run.get("comments") or []
return "\n".join([_run_comment_filter_text(comment) for comment in comments])
def _run_comment_filter_text(comment):
return "\n".join(
[
(comment.get("user") or "").lower(),
(comment.get("host") or "").lower(),
(comment.get("body") or "").lower(),
]
)
def _match_comments(s, comment_text):
if s == "-":
return not comment_text
return s.lower() in comment_text
def _apply_marked_filter(args, filters):
if args.filter_marked and args.filter_unmarked:
cli.error("--marked and --unmarked cannot both be used")
if args.filter_marked:
filters.append(_marked_filter())
if args.filter_unmarked:
filters.append(_marked_filter(False))
def _marked_filter(test_for=True):
def f(run):
marked = bool(run.get("marked"))
return marked if test_for is True else not marked
return f
def _apply_started_filter(args, ctx, filters):
if args.filter_started:
start, end = _parse_timerange(args.filter_started, ctx)
log.debug("time range filter: %s to %s", start, end)
filters.append(_started_filter(start, end))
def _parse_timerange(spec, ctx):
from guild import timerange
try:
return timerange.parse_spec(spec)
except ValueError as e:
cli.error("invalid RANGE: %s%s" % (e, _range_help_suffix(ctx)))
def _apply_sourcecode_digest_filter(args, filters):
if args.filter_digest:
filters.append(_digest_filter(args.filter_digest))
def _digest_filter(prefix):
def f(run):
return run.get("sourcecode_digest", "").startswith(prefix)
return f
def _range_help_suffix(ctx):
if not ctx:
return ""
return "\nTry '%s --help' for help specifying time ranges." % ctx.command_path
def _started_filter(start, end):
def f(run):
started = run.timestamp
if not started:
log.debug("%s no timestamp, skipping", run.id)
return False
started = datetime.datetime.fromtimestamp(started // 1000000)
if start and started < start:
log.debug("%s timestamp %s < %s, skipping", run.id, started, start)
return False
if end and started >= end:
log.debug("%s timestamp %s >= %s, skipping", run.id, started, start)
return False
log.debug("%s timestamp %s in range", run.id, started)
return True
return f
def select_runs(runs, select_specs, ctx=None):
if not select_specs:
return runs
selected = []
for spec in select_specs:
try:
slice_start, slice_end = _parse_slice(spec)
except ValueError:
selected.append(_find_run_by_id(spec, runs, ctx))
else:
if _in_range(slice_start, slice_end, runs):
selected.extend(runs[slice_start:slice_end])
else:
selected.append(_find_run_by_id(spec, runs, ctx))
return selected
def _parse_slice(spec):
try:
index = int(spec)
except ValueError:
m = re.match("(\\d+)?:(\\d+)?", spec)
if m:
try:
return (_slice_part(m.group(1), decr=True), _slice_part(m.group(2)))
except ValueError:
pass
raise ValueError(spec)
else:
return index - 1, index
def _slice_part(s, decr=False):
if s is None:
return None
elif decr:
return int(s) - 1
else:
return int(s)
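# Examples (for illustration) of RUN spec parsing above - an integer
# selects a single run by 1-based position, a slice selects a range, and
# ":" selects all runs:
#
#     _parse_slice("1")    # -> (0, 1)        latest run
#     _parse_slice("2:5")  # -> (1, 5)        runs 2 through 5
#     _parse_slice(":")    # -> (None, None)  all runs
#     _parse_slice("abc")  # raises ValueError - treated as a run ID prefix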
def _find_run_by_id(id_part, runs, ctx):
matches = [run for run in runs if run.id.startswith(id_part)]
return cmd_impl_support.one_run(matches, id_part, ctx)
def _in_range(slice_start, slice_end, runs):
return (slice_start is None or slice_start >= 0) and (
slice_end is None or slice_end <= len(runs)
)
def list_runs(args, ctx=None):
if args.remote:
remote_impl_support.list_runs(args)
else:
_check_list_runs_args(args, ctx)
_list_runs(args, ctx)
def _check_list_runs_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("comments", "verbose"),
("comments", "json"),
("json", "verbose"),
("archive", "deleted"),
],
args,
ctx,
)
def _list_runs(args, ctx):
if args.archive and not os.path.exists(args.archive):
cli.error("%s does not exist" % args.archive)
runs = filtered_runs(args, ctx=ctx)
if args.comments:
_list_runs_comments(_limit_runs(runs, args), comment_index_format=False)
elif args.json:
if args.limit or args.more or args.all:
cli.note("--json option always shows all runs")
_list_runs_json(runs)
else:
_list_runs_(_limit_runs(runs, args), args)
def _list_runs_json(runs):
runs_data = [_listed_run_json_data(run) for run in runs]
cli.out(json.dumps(runs_data))
def _listed_run_json_data(run):
run_data = _run_data(
run,
(
"exit_status",
"cmd",
"comments",
"marked",
"label",
"started",
"status",
"stopped",
"tags",
),
)
_apply_batch_proto(run, run_data)
return run_data
def _run_data(run, attrs):
data = {
"id": run.id,
"run_dir": run.path,
"opref": str(run.opref) if run.opref else "",
}
data.update({name: _run_attr(run, name) for name in attrs})
return data
def _run_attr(run, name):
base_attrs = ("status",)
if name in base_attrs:
return getattr(run, name)
else:
return run.get(name)
def _apply_batch_proto(run, data):
proto_dir = run.guild_path("proto")
if os.path.exists(proto_dir):
proto = runlib.for_dir(proto_dir)
data["batch_proto"] = _listed_run_json_data(proto)
def _list_runs_(runs, args):
# The caller already limits runs via _limit_runs - format as-is.
formatted = format_runs(runs)
cols = [
"index",
"op_desc",
"started",
"status_with_remote",
"label",
]
detail = RUN_DETAIL if args.verbose else None
cli.table(formatted, cols=cols, detail=detail, max_width_adj=STYLE_TABLE_WIDTH_ADJ)
def _limit_runs(runs, args):
if args.all:
if args.limit is not None:
cli.error("--all and --limit cannot both be used")
return runs
if args.limit and args.limit > 0:
return runs[: args.limit]
limited = runs[: (args.more + 1) * RUNS_PER_GROUP]
if len(limited) < len(runs):
cli.note(
"Showing the first %i runs (%i total) - use --all "
"to show all or -m to show more" % (len(limited), len(runs))
)
return limited
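# Example (for illustration, assuming args.more counts the -m/--more
# options given): with 50 runs and no --limit/--all, the default shows the
# first RUNS_PER_GROUP (20) runs and each -m shows another group of 20:
#
#     runs[: (0 + 1) * RUNS_PER_GROUP]  # default     -> first 20
#     runs[: (1 + 1) * RUNS_PER_GROUP]  # with one -m -> first 40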
def format_runs(runs):
formatted = []
for i, run in enumerate(runs):
try:
formatted_run = run_util.format_run(run, i + 1)
except Exception:
log.exception("formatting run in %s", run.path)
else:
formatted.append(formatted_run)
_apply_op_desc(formatted)
return formatted
def _apply_op_desc(formatted):
for fmt_run in formatted:
op_desc = _op_desc_base(fmt_run)
marked_suffix = " [marked]" if fmt_run["marked"] == "yes" else ""
fmt_run["op_desc"] = op_desc + marked_suffix
def _op_desc_base(fmt_run, apply_style=True):
op = fmt_run["operation"]
op_dir = _run_op_dir(fmt_run["_run"])
if not op_dir:
return _empty_style(op, apply_style)
return "%s%s" % (op, _styled_op_dir_suffix(op_dir, apply_style))
def _run_op_dir(run):
run = run.batch_proto or run
opref = run.opref
if opref.pkg_type == "guildfile":
return os.path.dirname(opref.pkg_name)
elif opref.pkg_type == "script":
return opref.pkg_name
else:
return None
def _empty_style(s, apply_style):
# Pad a string with an empty style for alignment in tables.
if apply_style:
return s + cli.style("", dim=True)
return s
def _styled_op_dir_suffix(op_dir, apply_style):
cwd = os.path.abspath(config.cwd())
if util.compare_paths(op_dir, cwd):
return _empty_style("", apply_style)
shortened_op_dir = run_util.shorten_op_dir(op_dir, cwd)
return _dim_style(" (%s)" % shortened_op_dir, apply_style)
def _dim_style(s, apply_style):
if apply_style:
return cli.style(s, dim=True)
return s
def format_run(run):
formatted = format_runs([run])
if not formatted:
raise ValueError("error formatting %s" % run)
assert len(formatted) == 1, formatted
return formatted[0]
def _no_selected_runs_exit(help_msg=None):
help_msg = help_msg or (
"No matching runs\n"
"Try 'guild runs list' to list available runs."
)
cli.out(help_msg, err=True)
raise SystemExit(0)
def runs_op(
args,
ctx,
preview_msg,
confirm_prompt,
no_runs_help,
op_callback,
default_runs_arg=None,
confirm_default=False,
runs_callback=None,
):
get_selected = runs_callback or runs_op_selected
selected = get_selected(args, ctx, default_runs_arg)
if not selected:
_no_selected_runs_exit(no_runs_help)
formatted = None # expensive, lazily init as needed
if not args.yes:
formatted = format_runs(selected)
cli.out(preview_msg, err=True)
cols = [
"short_index",
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(formatted, cols=cols, indent=2, err=True)
fmt_confirm_prompt = confirm_prompt.format(count=len(selected))
if not args.yes and not cli.confirm(fmt_confirm_prompt, confirm_default):
raise SystemExit(exit_code.ABORTED)
# pylint: disable=deprecated-method
if len(inspect.getargspec(op_callback).args) == 2:
formatted = format_runs(selected)
op_callback(selected, formatted)
else:
op_callback(selected)
def runs_op_selected(args, ctx, default_runs_arg=None):
default_runs_arg = default_runs_arg or ALL_RUNS_ARG
runs_arg = _remove_duplicates(args.runs or default_runs_arg)
filtered = filtered_runs(args, ctx)
return select_runs(filtered, runs_arg, ctx)
def _remove_duplicates(vals):
deduped = []
for val in vals:
if val not in deduped:
deduped.append(val)
return deduped
def delete_runs(args, ctx=None):
if args.remote:
remote_impl_support.delete_runs(args)
else:
_delete_runs(args, ctx)
def _delete_runs(args, ctx):
if args.permanent:
preview = cmd_impl_support.format_warn(
"WARNING: You are about to permanently delete the following runs:"
)
confirm = "Permanently delete {count} run(s)?"
else:
preview = "You are about to delete the following runs:"
confirm = "Delete {count} run(s)?"
no_runs_help = "Nothing to delete."
def delete(selected):
stoppable = [
run for run in selected if run.status == "running" and not run.remote
]
if stoppable and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: One or more runs are still running "
"and will be stopped before being deleted."
),
err=True,
)
if not cli.confirm("Really delete these runs?"):
raise SystemExit(exit_code.ABORTED)
for run in stoppable:
_stop_run(run, no_wait=True)
var.delete_runs(selected, args.permanent)
if args.permanent:
cli.out("Permanently deleted %i run(s)" % len(selected), err=True)
else:
cli.out("Deleted %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs_help,
delete,
confirm_default=not args.permanent,
)
def purge_runs(args, ctx):
if args.remote:
remote_impl_support.purge_runs(args)
else:
_purge_runs(args, ctx)
def _purge_runs(args, ctx):
preview = cmd_impl_support.format_warn(
"WARNING: You are about to permanently delete the following runs:"
)
confirm = "Permanently delete {count} run(s)?"
no_runs_help = "Nothing to purge."
def purge(selected):
var.purge_runs(selected)
cli.out("Permanently deleted %i run(s)" % len(selected), err=True)
runs_op(args.copy(deleted=True), ctx, preview, confirm, no_runs_help, purge)
def restore_runs(args, ctx):
if args.remote:
remote_impl_support.restore_runs(args)
else:
_restore_runs(args, ctx)
def _restore_runs(args, ctx):
preview = "You are about to restore the following runs:"
confirm = "Restore {count} run(s)?"
no_runs_help = "Nothing to restore."
def restore(selected):
var.restore_runs(selected)
cli.out("Restored %i run(s)" % len(selected), err=True)
runs_op(
args.copy(deleted=True),
ctx,
preview,
confirm,
no_runs_help,
restore,
confirm_default=True,
)
def run_info(args, ctx):
if args.remote:
remote_impl_support.run_info(args)
else:
_run_info(args, ctx)
def _run_info(args, ctx):
run = one_run(args, ctx)
_print_run_info(run, args)
def one_run(args, ctx):
filtered = filtered_runs(args, ctx=ctx)
if not filtered:
cli.error("no matching runs")
runspec = args.run or "1"
selected = select_runs(filtered, [runspec], ctx)
return cmd_impl_support.one_run(selected, runspec, ctx)
def _print_run_info(run, args):
data = _run_info_data(run, args)
if args.json:
_print_run_info_json(data)
else:
_print_run_info_ordered(data)
def _run_info_data(run, args):
data = []
_append_attr_data(run, args.private_attrs, data)
data.append(("tags", run.get("tags") or []))
data.append(("flags", run.get("flags") or {}))
proto = run.batch_proto
if proto:
data.append(("proto-flags", proto.get("flags") or {}))
data.append(("scalars", _scalar_info(run, args)))
if args.comments:
data.append(("comments", _format_comments_for_run_info(run)))
if args.env:
data.append(("environment", run.get("env") or {}))
if args.deps:
data.append(("dependencies", run.get("deps") or {}))
if args.private_attrs and args.json:
_maybe_append_proto_data(run, data)
return data
def _format_comments_for_run_info(run):
return [
_format_comment_for_run_info(comment) for comment in (run.get("comments") or [])
]
def _format_comment_for_run_info(comment):
if not isinstance(comment, dict):
return repr(comment)
return {
"user": comment.get("user") or "",
"host": comment.get("host") or "",
"time": util.format_timestamp(comment.get("time")),
"body": (comment.get("body") or "").strip(),
}
def _append_attr_data(run, include_private, data):
fmt_run = format_run(run)
for name in RUN_DETAIL:
data.append((name, fmt_run[name]))
for name in other_attr_names(run, include_private):
data.append((name, run_util.format_attr(run.get(name))))
if include_private:
data.append(("opref", str(run.opref)))
data.append(("op", run.get("op")))
def other_attr_names(run, include_private=False):
core_attrs = CORE_RUN_ATTRS + LEGACY_RUN_ATTRS
if include_private:
include = lambda x: x not in core_attrs
else:
include = lambda x: x[0] != "_" and x not in core_attrs
return [name for name in sorted(run.attr_names()) if include(name)]
def _scalar_info(run, args):
try:
return _scalar_info_(run, args)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("get scalars")
return cmd_impl_support.format_warn("ERROR: %s" % e)
def _scalar_info_(run, args):
return {
key: val
for key, val in _iter_scalars(run, args)
if args.all_scalars or filter_default_scalar(key)
}
def filter_default_scalar(key):
_prefix, tag = _split_scalar_key(key)
return not tag.startswith("sys/")
def _split_scalar_key(key):
parts = key.split("#", 1)
return ("", parts[0]) if len(parts) == 1 else (parts[0], parts[1])
def _iter_scalars(run, args):
from guild import index as indexlib # expensive
for s in indexlib.iter_run_scalars(run):
key = run_util.run_scalar_key(s)
if args.all_scalars:
yield key, _scalar_vals(s, args)
else:
yield key, _scalar_last_val(s, args)
def _scalar_vals(s, args):
return {
"first": _scalar_val(s, "first_val", "first_step", args.json),
"last": _scalar_val(s, "last_val", "last_step", args.json),
"min": _scalar_val(s, "min_val", "min_step", args.json),
"max": _scalar_val(s, "max_val", "min_step", args.json),
"avg": _scalar_val(s, "avg_val", "count", args.json),
"total": _scalar_val(s, "total", "count", args.json),
}
def _scalar_last_val(s, args):
return _scalar_val(s, "last_val", "last_step", args.json)
def _scalar_val(s, val_key, step_key, format_json):
val = s[val_key]
step = s[step_key]
if format_json:
return val, step
else:
return _format_scalar_val(val, step)
def _format_scalar_val(val, step):
if isinstance(val, float):
return "%f (step %i)" % (val, step)
# Defensive here - val should be None but we don't assert because
# this is a summary op.
val = "nan" if val is None else val
return "%s (step %i)" % (val, step)
def _comments_info(run, args):
return [
_format_comment_info(comment, args) for comment in run.get("comments") or []
]
def _format_comment_info(comment, args):
if args.json:
return comment
return "%s %s\n%s" % (
_format_comment_user(comment),
util.format_timestamp(comment.get("time")),
comment.get("body") or "",
)
def _res_sources_paths(sources):
paths = []
for source_paths in sources.values():
paths.extend(source_paths)
return sorted(paths)
def _maybe_append_proto_data(run, data):
proto = run.batch_proto
if proto:
proto_data = []
_append_attr_data(proto, True, proto_data)
data.append(("proto-run", proto_data))
def _print_run_info_json(data):
data = _tuple_lists_to_dict(data)
cli.out(json.dumps(data))
def _tuple_lists_to_dict(data):
if isinstance(data, list):
if data and isinstance(data[0], tuple):
return {name: _tuple_lists_to_dict(val) for name, val in data}
else:
return [_tuple_lists_to_dict(val) for val in data]
else:
return data
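# Example (for illustration) - lists of (name, val) tuples are converted
# recursively to dicts for JSON output:
#
#     _tuple_lists_to_dict([("id", "abc"), ("flags", [("lr", 0.1)])])
#     # -> {"id": "abc", "flags": {"lr": 0.1}}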
def _print_run_info_ordered(data):
for name, val in data:
if isinstance(val, list):
_print_run_info_list(name, val)
elif isinstance(val, dict):
_print_run_info_dict(name, val)
else:
cli.out("%s: %s" % (name, val))
def _print_run_info_list(name, val):
cli.out("%s:" % name)
for item in val:
if isinstance(item, dict):
cli.out(" -")
for item_name, item_val in sorted(item.items()):
encoded = _fix_quoted_string(flag_util.encode_flag_val(item_val))
if "\n" in encoded:
cli.out(_indent("%s: |" % item_name, 4))
cli.out(_indent(_unindent(encoded), 6))
else:
cli.out(_indent("%s: %s" % (item_name, encoded), 4))
else:
cli.out(" - %s" % flag_util.encode_flag_val(item))
def _print_run_info_dict(name, val):
cli.out("%s:" % name)
for item_name, item_val in _sort_run_info_attr(name, val):
if isinstance(item_val, list):
cli.out(" %s:" % item_name)
for item_item in item_val:
cli.out(" - %s" % flag_util.encode_flag_val(item_item))
elif isinstance(item_val, dict):
cli.out(" %s:" % item_name)
# Use full YAML formatting for config blocks.
cli.out(_indent(yaml_util.encode_yaml(item_val), 4))
else:
cli.out(" %s: %s" % (item_name, flag_util.encode_flag_val(item_val)))
def _sort_run_info_attr(name, val):
if name == "scalars":
return _sort_run_info_scalars(val)
else:
return sorted(val.items())
def _sort_run_info_scalars(val):
key = lambda item: _split_scalar_key(item[0])
return sorted(val.items(), key=key)
def _indent(s, spaces):
prefix = " " * spaces
return "\n".join(["%s%s" % (prefix, line) for line in s.split("\n")])
def _fix_quoted_string(s):
if s.startswith("'") and s.endswith("'"):
return s[1:-1]
return s
def _unindent(s):
return "\n".join([line.strip() for line in s.split("\n")])
def label(args, ctx):
_check_label_args(args, ctx)
if args.remote:
remote_impl_support.label_runs(args)
else:
_set_labels(args, ctx)
def _check_label_args(args, ctx):
cmd_impl_support.check_required_args(
[
"set",
"append",
"prepend",
"remove",
"clear",
],
args,
ctx,
)
cmd_impl_support.check_incompatible_args(
[
("set", "append"),
("set", "prepend"),
("set", "remove"),
("set", "clear"),
("append", "prepend"),
("append", "clear"),
("append", "remove"),
("prepend", "clear"),
("prepend", "remove"),
],
args,
ctx,
)
def _set_labels(args, ctx):
preview = _set_labels_preview(args)
confirm = "Continue?"
no_runs = "No runs to modify."
def set_labels(selected):
for run in selected:
if args.clear:
run.del_attr("label")
else:
run.write_attr("label", _label_for_run(run, args).strip())
if args.clear:
cli.out("Cleared label for %i run(s)" % len(selected), err=True)
else:
cli.out("Labeled %i run(s)" % len(selected), err=True)
runs_op(args, ctx, preview, confirm, no_runs, set_labels, LATEST_RUN_ARG, True)
def _set_labels_preview(args):
if args.set:
return "You are about to label the following runs with '%s':" % args.set
elif args.prepend:
return (
"You are about to prepend '%s' to the label of the following runs:"
% args.prepend
)
elif args.append:
return (
"You are about to append '%s' to the label of the following runs:"
% args.append
)
elif args.remove:
return (
"You are about to remove '%s' from the label of the following runs:"
% args.remove
)
elif args.clear:
return "You are about to clear the label of the following runs:"
else:
assert False, args
def _label_for_run(run, args):
if args.set:
return format_run_label(args.set, run)
elif args.prepend:
return "%s %s" % (format_run_label(args.prepend, run), _run_label(run))
elif args.append:
return "%s %s" % (_run_label(run), format_run_label(args.append, run))
elif args.remove:
return _remove_label_parts(args.remove, _run_label(run))
def format_run_label(template, run):
fmt_params = run.get("flags") or {}
fmt_params["label"] = _run_label(run)
return op_util.run_label(template, fmt_params).strip()
def _run_label(run):
return run.get("label") or ""
def _remove_label_parts(parts, label):
for part in parts:
label = _remove_label_part(part, label)
return label
def _remove_label_part(part, label):
try:
split_parts = re.split(r"(^|\s)%s($|\s)" % part, label)
except Exception as e:
cli.error("cannot remove label part %r: %s" % e)
else:
return " ".join([s for s in [t.strip() for t in split_parts] if s])
def stop_runs(args, ctx=None):
if args.remote:
remote_impl_support.stop_runs(args)
else:
_stop_runs(args, ctx)
def _stop_runs(args, ctx):
preview = cmd_impl_support.format_warn("You are about to stop the following runs:")
confirm = "Stop {count} run(s)?"
no_runs_help = "Nothing to stop."
if not args.runs:
args.status_running = True
def stop_f(selected):
for run in selected:
_stop_run(run, args.no_wait)
def select_runs_f(args, ctx, default_runs_arg):
runs = runs_op_selected(args, ctx, default_runs_arg)
return [run for run in runs if not run.remote]
runs_op(
args,
ctx,
preview,
confirm,
no_runs_help,
stop_f,
None,
False,
select_runs_f,
)
def _stop_run(run, no_wait):
remote_lock = remote_run_support.lock_for_run(run)
if remote_lock:
_try_stop_remote_run(run, remote_lock, no_wait)
else:
_try_stop_local_run(run)
def _try_stop_remote_run(run, remote_lock, no_wait):
from guild import plugin as pluginlib # expensive
try:
plugin = pluginlib.for_name(remote_lock.plugin_name)
except LookupError:
log.warning(
"error syncing run '%s': plugin '%s' not available",
run.id,
remote_lock.plugin_name,
)
else:
cli.out("Stopping %s (remote)" % run.id, err=True)
plugin.stop_run(run, dict(no_wait=no_wait))
def _try_stop_local_run(run):
pid = run.pid
if pid and util.pid_exists(pid):
cli.out("Stopping %s (pid %i)" % (run.id, run.pid), err=True)
_gone, alive = util.kill_process_tree(
pid, timeout=STOP_TIMEOUT, child_term_timeout=CHILD_TERM_TIMEOUT
)
if alive:
_handle_non_stopped_pids(alive)
def _handle_non_stopped_pids(alive):
alive_desc = ", ".join(alive)
cli.out("The following processes did not stop as expected: %s" % alive_desc)
cli.error()
def export(args, ctx):
preview = "You are about to %s the following runs to '%s':" % (
args.move and "move" or "copy",
args.location,
)
confirm = "Continue?"
no_runs = "No runs to export."
def export_f(selected):
if args.copy_resources and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: You specified --copy-resources, which will "
"copy resources used by each run."
),
err=True,
)
if not cli.confirm("Really copy resources exported runs?"):
raise SystemExit(exit_code.ABORTED)
try:
exported = run_util.export_runs(
selected,
args.location,
move=args.move,
copy_resources=args.copy_resources,
)
except run_util.RunsExportError as e:
cli.error(e.args[0])
else:
cli.out(
"Exported %i run(s) to %s" % (len(exported), args.location), err=True
)
runs_op(args, ctx, preview, confirm, no_runs, export_f, ALL_RUNS_ARG, True)
def import_(args, ctx):
if not os.path.exists(args.archive):
cli.error("archive '%s' does not exist" % args.archive)
if _is_zip_archive(args.archive):
if args.move:
cli.error("'--move' cannot be used with zip archives")
elif os.path.isfile(args.archive):
cli.error(
"invalid archive %s - expected a directory or a zip file" % args.archive
)
preview = "You are about to import (%s) the following runs from '%s':" % (
args.move and "move" or "copy",
args.archive,
)
confirm = "Continue?"
no_runs = "No runs to import."
def import_f(selected):
if args.copy_resources and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: You specified --copy-resources, which will "
"copy resources used by each run."
),
err=True,
)
if not cli.confirm("Really copy resources exported runs?"):
raise SystemExit(exit_code.ABORTED)
try:
imported = run_util.import_runs(
selected,
move=args.move,
copy_resources=args.copy_resources,
)
except run_util.RunsImportError as e:
cli.error(e.args[0])
cli.out("Imported %i run(s) from %s" % (len(imported), args.archive), err=True)
runs_op(args, ctx, preview, confirm, no_runs, import_f, ALL_RUNS_ARG, True)
def _is_zip_archive(path):
return path.lower().endswith(".zip")
def push(args, ctx):
preview = "You are about to copy (push%s) the following runs to %s:" % (
_delete_clause(args),
args.remote,
)
confirm = "Continue?"
no_runs = "No runs to copy."
def push_f(runs):
remote_impl_support.push_runs(runs, args)
runs_op(
args.copy(remote=None),
ctx,
preview,
confirm,
no_runs,
push_f,
ALL_RUNS_ARG,
True,
)
def _delete_clause(args):
if args.delete:
return " with delete"
else:
return ""
def pull(args, ctx):
preview = "You are about to copy (pull%s) the following runs from %s:" % (
_delete_clause(args),
args.remote,
)
confirm = "Continue?"
no_runs = "No runs to copy."
def pull_f(runs):
remote_impl_support.pull_runs(runs, args)
def filtered_runs_f(args, _ctx, _default_runs_arg):
filtered = remote_impl_support.filtered_runs(args)
return select_runs(filtered, args.runs, ctx)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
pull_f,
ALL_RUNS_ARG,
True,
filtered_runs_f,
)
def mark(args, ctx=None):
if args.clear:
_clear_marked(args, ctx)
else:
_mark(args, ctx)
def _clear_marked(args, ctx):
preview = "You are about to unmark the following runs:"
confirm = "Continue?"
no_runs = "No runs to unmark."
def clear(selected):
for run in selected:
run.del_attr("marked")
cli.out("Unmarked %i run(s)" % len(selected), err=True)
if not args.runs:
args.filter_marked = True
runs_op(args, ctx, preview, confirm, no_runs, clear, ALL_RUNS_ARG, True)
def _mark(args, ctx):
preview = "You are about to mark the following runs:"
confirm = "Continue?"
no_runs = "No runs to mark."
def mark(selected):
for run in selected:
run.write_attr("marked", True)
cli.out("Marked %i run(s)" % len(selected), err=True)
if not args.runs:
args.filter_marked = True
runs_op(args, ctx, preview, confirm, no_runs, mark, LATEST_RUN_ARG, True)
def select(args, ctx):
_check_select_args(args, ctx)
_maybe_apply_select_all(args)
if args.all:
_print_all_selected_runs(args, ctx)
else:
_print_latest_selected_run(args, ctx)
def _check_select_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("short_id", "attr"),
("min", "max"),
],
args,
ctx,
)
def _maybe_apply_select_all(args):
if len(args.runs) > 1 and not args.min and not args.max:
args.all = True
elif args.min or args.max:
args.all = False
def _print_all_selected_runs(args, ctx):
for run in _select_runs(args, ctx):
_print_select_info(run, args)
def _print_latest_selected_run(args, ctx):
run = select_run(args, ctx)
_print_select_info(run, args)
def select_run(args, ctx=None):
_check_select_run_args(args, ctx)
if args.min:
return _select_min_run(args, ctx, args.min)
elif args.max:
return _select_min_run(args, ctx, args.max, reverse=True)
else:
args.run = args.runs[0] if args.runs else None
return one_run(args, ctx)
def _check_select_run_args(args, ctx):
cmd_impl_support.check_incompatible_args([("min", "max")], args, ctx)
def _select_min_run(args, ctx, colspec, reverse=False):
runs = _select_runs(args, ctx)
assert runs # _select_runs exits early if nothing matches.
return _sort_selected_runs(runs, colspec, reverse)[0]
def _select_runs(args, ctx):
return runs_for_args(args, ctx=ctx)
def _sort_selected_runs(runs, colspec, reverse):
from guild import index as indexlib # expensive
colspec_val_for_run = _colspec_val_f(colspec)
index = indexlib.RunIndex()
index.refresh(runs, ["scalar", "flag", "attr"])
def key(run):
val = colspec_val_for_run(run, index)
log.debug("got %r for '%s' for run %s", val, colspec, run.id)
return val
return util.natsorted(runs, key=key, reverse=reverse)
def _colspec_val_f(colspec):
from guild import query
try:
cols = query.parse_colspec(colspec).cols
except query.ParseError as e:
cli.error("invalid col spec '%s': %s" % (colspec, e))
else:
assert cols, colspec
if len(cols) > 1:
cli.error("invalid col spec '%s': multiple cols not supported" % colspec)
col = cols[0]
if isinstance(col, query.Scalar):
return _scalar_val_f(col)
elif isinstance(col, query.Flag):
return _flag_val_f(col)
elif isinstance(col, query.Attr):
return _attr_val_f(col)
def _scalar_val_f(col):
if col.named_as:
log.warning("ignoring 'as %s' in scalar", col.named_as)
prefix, tag = col.split_key()
def f(run, index):
return index.run_scalar(run, prefix, tag, col.qualifier, col.step)
return f
def _flag_val_f(col):
def f(run, index):
return index.run_flag(run, col.name)
return f
def _attr_val_f(col):
def f(run, index):
return index.run_attr(run, col.name)
return f
def _print_select_info(run, args):
if args.attr:
_print_run_attr(run, args.attr)
elif args.short_id:
print(run.short_id)
elif args.path:
print(run.dir)
else:
print(run.id)
def _print_run_attr(run, attr_name):
util.try_apply(
[
lambda: _try_print_formatted_run_attr(run, attr_name),
lambda: _try_print_raw_run_attr(run, attr_name),
lambda: _no_such_run_attr_error(attr_name),
]
)
def _try_print_formatted_run_attr(run, attr_name):
formatted = run_util.format_run(run)
try:
val = formatted[attr_name]
except KeyError:
raise util.TryFailed()
else:
print(val)
def _try_print_raw_run_attr(run, attr_name):
try:
val = run[attr_name]
except KeyError:
raise util.TryFailed()
else:
print(yaml_util.encode_yaml(val))
def _no_such_run_attr_error(attr_name):
cli.error("no such run attribute '%s'" % attr_name)
def tag(args, ctx):
_check_tag_args(args, ctx)
if args.remote:
remote_impl_support.tag_runs(args)
else:
_set_tags(args, ctx)
def _check_tag_args(args, ctx):
cmd_impl_support.check_required_args(
[
"add",
"delete",
"clear",
],
args,
ctx,
)
def _set_tags(args, ctx):
preview = _set_tags_preview(args)
confirm = "Continue?"
no_runs = "No runs to modify."
def set_tags(selected):
for run in selected:
old_tags = run.get("tags")
run.write_attr("tags", _tags_for_run(old_tags, args))
if args.sync_labels:
run.write_attr("label", _synced_label_for_tags(run, old_tags, args))
cli.out("Modified tags for %i run(s)" % len(selected), err=True)
runs_op(args, ctx, preview, confirm, no_runs, set_tags, LATEST_RUN_ARG, True)
def _set_tags_preview(args):
lines = ["You are about to modify tags for the following runs:"]
if args.sync_labels:
lines.append(
cmd_impl_support.format_warn(
"Labels are updated to reflect the latest tags."
)
)
else:
lines.append(
cmd_impl_support.format_warn(
"Labels are not updated - use --sync-labels to "
"apply changes run labels."
)
)
return "\n".join(lines)
def _tags_for_run(old_tags, args):
tags = set(old_tags or [])
tags.difference_update((old_tags or []) if args.clear else args.delete)
tags.update(args.add)
return sorted(tags)
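# Example (for illustration, assuming args provides add/delete/clear as
# used above): with old_tags=["a", "b"], args.delete=["b"], args.add=["c"]
# and args.clear=False, _tags_for_run returns ["a", "c"].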
def _synced_label_for_tags(run, old_tags, args):
tags_to_delete = set((old_tags or []) if args.clear else args.delete)
old_label = run.get("label") or ""
new_label = _remove_label_parts(tags_to_delete, old_label)
tags_to_prepend = _tags_not_in_label(args.add, old_label)
if tags_to_prepend:
new_label = "%s %s" % (" ".join(tags_to_prepend), new_label)
return new_label
def _tags_not_in_label(tags, label):
if not tags:
return []
label_parts = util.shlex_split(label)
return [tag for tag in tags if tag not in label_parts]
def comment(args, ctx):
if args.remote:
_check_comment_args_for_remote(args, ctx)
remote_impl_support.comment_runs(args)
else:
_check_comment_args(args, ctx)
_comment(args, ctx)
def _check_comment_args_for_remote(args, ctx):
_check_comment_args(args, ctx)
cmd_impl_support.check_incompatible_args(
[
("remote", "edit"),
],
args,
ctx,
)
cmd_impl_support.check_required_args(
[
"list",
"add",
"delete",
"clear",
],
args,
ctx,
msg_template="--remote option required on of: %s",
)
def _check_comment_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("list", "add"),
("list", "delete"),
("list", "clear"),
("add", "delete"),
("add", "clear"),
("edit", "delete"),
("edit", "clear"),
("delete", "clear"),
],
args,
ctx,
)
def _comment(args, ctx):
if args.list:
_list_comments(args, ctx)
elif args.delete:
_delete_comment(args.delete, args, ctx)
elif args.clear:
_clear_comments(args, ctx)
else:
_add_comment(args, ctx)
def _list_comments(args, ctx):
_list_runs_comments(runs_op_selected(args, ctx, LATEST_RUN_ARG))
def _list_runs_comments(runs, comment_index_format=True):
formatted_runs = format_runs(runs)
cols = [
_col1_for_comments_header(comment_index_format),
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(
formatted_runs,
cols,
detail=["_run"],
detail_cb=_run_comments_detail_cb(comment_index_format),
max_width_adj=STYLE_TABLE_WIDTH_ADJ,
fg=_fg_for_comments_header(comment_index_format),
)
def _col1_for_comments_header(comment_index_format):
if comment_index_format:
return "short_id"
else:
return "index"
def _fg_for_comments_header(comment_index_format):
if comment_index_format:
return "yellow"
else:
return None
def _run_comments_detail_cb(comment_index_format):
def f(formatted_run):
run = formatted_run["_run"]
comments = run.get("comments")
if comments:
for index, comment in enumerate(comments, start=1):
_print_comment(index, comment, comment_index_format)
else:
_print_no_comments(comment_index_format)
return f
def _print_comment(index, comment, comment_index_format):
from guild import help
out = help.ConsoleFormatter()
out.write_text(_format_comment_header(index, comment, comment_index_format))
out.write_paragraph()
if comment_index_format:
out.indent()
else:
out.indent()
out.indent()
out.write_text(_format_comment_body(comment))
cli.out("".join(out.buffer))
def _format_comment_header(index, comment, comment_index_format):
user = _format_comment_user(comment)
time = _format_comment_time(comment)
if comment_index_format:
return "[%i] %s %s" % (index, user, time)
else:
return " %s %s" % (user, time)
def _format_comment_user(comment):
user = comment.get("user") or ""
host = comment.get("host") or ""
if not host:
return user
return "%s@%s" % (user, host)
def _format_comment_time(comment):
time_attr = comment.get("time")
try:
return util.format_timestamp(time_attr)
except (ValueError, TypeError):
return str(time_attr)
def _format_comment_body(comment):
return comment.get("body") or ""
def _print_no_comments(comment_index_format):
if comment_index_format:
cli.out(" <no comments>")
def _delete_comment(comment_index, args, ctx):
preview = (
"You are about to delete comment %i from the following runs:" % comment_index
)
confirm = "Continue?"
no_runs = "No runs to modify."
def delete_comments(selected):
for run in selected:
new_comments = _delete_run_comment(run, comment_index)
run.write_attr("comments", new_comments)
cli.out("Deleted comment for %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
delete_comments,
LATEST_RUN_ARG,
True,
)
def _delete_run_comment(run, comment_index):
comments = run.get("comments")
try:
del comments[comment_index - 1]
except IndexError:
pass
return comments
def _clear_comments(args, ctx):
preview = cmd_impl_support.format_warn(
"WARNING: You are about to delete ALL comments from the following runs:"
)
confirm = "Continue?"
no_runs = "No runs to modify."
def clear_comments(selected):
for run in selected:
run.del_attr("comments")
cli.out("Deleted all comments for %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
clear_comments,
LATEST_RUN_ARG,
)
def _add_comment(args, ctx):
runs = runs_op_selected(args, ctx, LATEST_RUN_ARG)
comment, edited = _comment_for_args(args, runs)
if not comment:
cli.out("Aborting due to an empty comment.", err=True)
cli.error()
def add_comment(selected):
for run in selected:
new_comments = _add_run_comment(run, comment, args.user)
run.write_attr("comments", new_comments)
cli.out("Added comment to %i run(s)" % len(selected), err=True)
if edited:
# Skip prompt below because the editor serves as a prompt.
add_comment(runs)
return
preview = "You are about to add a comment to the following runs:"
confirm = "Continue?"
no_runs = "No runs to modify."
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
add_comment,
LATEST_RUN_ARG,
True,
lambda *_args: runs,
)
def _comment_for_args(args, runs):
comment = args.add
edited = False
if not comment or args.edit:
comment = _get_comment_with_editor(comment, runs)
edited = True
return comment.strip(), edited
def _get_comment_with_editor(initial_comment, runs):
msg_lines = [
initial_comment or "",
"# Type a comment for the runs below. Lines starting with '#' are ",
"# ignored. An empty comment aborts the command.",
"#",
"# Runs:",
]
formatted_runs = _format_runs_for_comment_msg(runs)
msg_lines.extend(["# %s" % line for line in formatted_runs.split("\n")])
return util.edit(
"\n".join(msg_lines),
extension=".GUILD_COMMENT",
strip_comment_lines=True,
)
def _format_runs_for_comment_msg(runs):
out = six.StringIO()
formatted = format_runs(runs)
cols = [
"short_index",
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(formatted, cols=cols, indent=2, file=out)
return out.getvalue().strip()
def _add_run_comment(run, comment, user):
from . import run_impl
comments = run.get("comments") or []
if user:
user, host = _split_comment_user(user)
if not host:
host = util.hostname()
else:
user = util.user()
host = util.hostname()
comments.append(
{
"body": comment,
"user": user,
"host": host,
"time": run_impl.comment_timestamp(),
}
)
return comments
def _split_comment_user(user):
parts = user.split("@", 1)
if len(parts) == 2:
return parts
return parts[0], None
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reflection module.
.. versionadded:: 1.1
"""
import inspect
import logging
import operator
import types
try:
_TYPE_TYPE = types.TypeType
except AttributeError:
_TYPE_TYPE = type
# See: https://docs.python.org/2/library/__builtin__.html#module-__builtin__
# and see https://docs.python.org/2/reference/executionmodel.html (and likely
# others)...
_BUILTIN_MODULES = ('builtins', '__builtin__', '__builtins__', 'exceptions')
LOG = logging.getLogger(__name__)
Parameter = inspect.Parameter
Signature = inspect.Signature
get_signature = inspect.signature
def get_members(obj, exclude_hidden=True):
"""Yields the members of an object, filtering by hidden/not hidden.
.. versionadded:: 2.3
"""
for (name, value) in inspect.getmembers(obj):
if name.startswith("_") and exclude_hidden:
continue
yield (name, value)
def get_member_names(obj, exclude_hidden=True):
"""Get all the member names for a object."""
return [name for (name, _obj) in
get_members(obj, exclude_hidden=exclude_hidden)]
def get_class_name(obj, fully_qualified=True, truncate_builtins=True):
"""Get class name for object.
If object is a type, returns name of the type. If object is a bound
method or a class method, returns its ``self`` object's class name.
If object is an instance of class, returns instance's class name.
Else, name of the type of the object is returned. If fully_qualified
is True, returns fully qualified name of the type. For builtin types,
just name is returned. TypeError is raised if can't get class name from
object.
"""
if inspect.isfunction(obj):
raise TypeError("Can't get class name.")
if inspect.ismethod(obj):
obj = get_method_self(obj)
if not isinstance(obj, type):
obj = type(obj)
if truncate_builtins:
try:
built_in = obj.__module__ in _BUILTIN_MODULES
except AttributeError: # nosec
pass
else:
if built_in:
return obj.__name__
if fully_qualified and hasattr(obj, '__module__'):
return '%s.%s' % (obj.__module__, obj.__name__)
else:
return obj.__name__
def get_all_class_names(obj, up_to=object,
fully_qualified=True, truncate_builtins=True):
"""Get class names of object parent classes.
Iterate over all class names object is instance or subclass of,
in order of method resolution (mro). If up_to parameter is provided,
only name of classes that are sublcasses to that class are returned.
"""
if not isinstance(obj, type):
obj = type(obj)
for cls in obj.mro():
if issubclass(cls, up_to):
yield get_class_name(cls,
fully_qualified=fully_qualified,
truncate_builtins=truncate_builtins)
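# Examples (for illustration):
#
#     list(get_all_class_names(int))
#     # -> ['int', 'object']  (builtin names are truncated by default)
#
#     list(get_all_class_names(ValueError, up_to=Exception))
#     # -> ['ValueError', 'Exception']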
def get_callable_name(function):
"""Generate a name from callable.
Tries to do the best to guess fully qualified callable name.
"""
method_self = get_method_self(function)
if method_self is not None:
# This is a bound method.
if isinstance(method_self, type):
# This is a bound class method.
im_class = method_self
else:
im_class = type(method_self)
try:
parts = (im_class.__module__, function.__qualname__)
except AttributeError:
parts = (im_class.__module__, im_class.__name__, function.__name__)
elif inspect.ismethod(function) or inspect.isfunction(function):
# This could be a function, a static method, an unbound method...
try:
parts = (function.__module__, function.__qualname__)
except AttributeError:
if hasattr(function, 'im_class'):
# This is an unbound method, which exists only in Python 2.x
im_class = function.im_class
parts = (im_class.__module__,
im_class.__name__, function.__name__)
else:
parts = (function.__module__, function.__name__)
else:
im_class = type(function)
if im_class is _TYPE_TYPE:
im_class = function
try:
parts = (im_class.__module__, im_class.__qualname__)
except AttributeError:
parts = (im_class.__module__, im_class.__name__)
return '.'.join(parts)
def get_method_self(method):
"""Gets the ``self`` object attached to this method (or none)."""
if not inspect.ismethod(method):
return None
try:
return operator.attrgetter("__self__")(method)
except AttributeError:
return None
def is_same_callback(callback1, callback2, strict=True):
"""Returns if the two callbacks are the same.
'strict' arg has no meaning for python 3.8 onwards and will
always return the equality of both callback based on 'self'
comparison only.
"""
if callback1 is callback2:
# This happens when plain methods are given (or static/non-bound
# methods).
return True
if callback1 == callback2:
# NOTE(gmann): From Python 3.8 onward, comparison of bound methods
# changed. Equality is no longer decided based on the equality of the
# bound objects; instead the identity of their '__self__' is checked.
# So from Python 3.8 onward, two different bound methods are no longer
# equal even if their __eq__ methods return True. In other terms, the
# 'strict' arg has no meaning from Python 3.8 onwards: the condition
# above is never satisfied if the callbacks are bound to two different
# objects.
# For backward compatibility with Python <3.8 we keep the 'strict' arg
# and the 'self' comparison below; once the minimum supported Python
# version is 3.8 both can be removed, because from 3.8 onward the ==
# operator itself checks the identity of '__self__'.
# Ref bug: https://bugs.launchpad.net/oslo.utils/+bug/1841072
if not strict:
LOG.warning('"strict" arg is deprecated because it no '
'longer works for Python 3.8 onwards')
return True
# Until python 3.7, two bound methods are equal if functions
# themselves are equal and objects they are applied to are equal.
# This means that a bound method could be the same bound method on
# another object if the objects have __eq__ methods that return true
# (when in fact it is a different bound method). Python u so crazy!
try:
self1 = operator.attrgetter("__self__")(callback1)
self2 = operator.attrgetter("__self__")(callback2)
return self1 is self2
except AttributeError: # nosec
pass
return False
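# Example (for illustration) of why the '__self__' identity check matters:
# two instances that compare equal still own distinct bound methods.
#
#     class C(object):
#         def __eq__(self, other):
#             return True
#         def m(self):
#             pass
#
#     a, b = C(), C()
#     is_same_callback(a.m, a.m)  # -> True
#     is_same_callback(a.m, b.m)  # -> False (different '__self__')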
def is_bound_method(method):
"""Returns if the given method is bound to an object."""
return get_method_self(method) is not None
def is_subclass(obj, cls):
"""Returns if the object is class and it is subclass of a given class."""
return inspect.isclass(obj) and issubclass(obj, cls)
def get_callable_args(function, required_only=False):
"""Get names of callable arguments.
Special arguments (like ``*args`` and ``**kwargs``) are not included into
output.
If required_only is True, optional arguments (with default values)
are not included into output.
"""
sig = get_signature(function)
function_args = list(sig.parameters.keys())
for param_name, p in sig.parameters.items():
if (p.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD) or
(required_only and p.default is not Parameter.empty)):
function_args.remove(param_name)
return function_args
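# Example (for illustration):
#
#     def f(a, b=1, *args, **kwargs):
#         pass
#
#     get_callable_args(f)                      # -> ['a', 'b']
#     get_callable_args(f, required_only=True)  # -> ['a']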
def accepts_kwargs(function):
"""Returns ``True`` if function accepts kwargs otherwise ``False``."""
sig = get_signature(function)
return any(
p.kind == Parameter.VAR_KEYWORD for p in sig.parameters.values()
)
| |
#!/usr/bin/env python
''' Access will always be in control values (maybe value?) '''
import pickle
from matplotlib import use
use('TkAgg')
from matplotlib import rcParams
rcParams['ps.useafm'] = True
rcParams['pdf.use14corefonts'] = True
from matplotlib.pyplot import gca, grid, subplots_adjust, figure, xlabel, ylabel, title, savefig, show, legend, subplot, boxplot, axes, hist, xlim, ylim, plot, hist2d, axis, close as figclose
from matplotlib.colors import LogNorm
from numpy import median, arange, zeros, searchsorted, genfromtxt, argsort, diff, sqrt, where, cos, sin, fmod, mean, array, pi, isnan, vstack, hstack
# Assumed sources for names used in errors() below: interp1d comes from
# scipy.interpolate, and scipy.signal.correlate (full mode by default)
# matches the arange(1-n, n) lag indexing used there.
from scipy.interpolate import interp1d
from scipy.signal import correlate
from sys import argv
from time import time as clock
from math import sin, cos, sqrt, asin, isnan, isinf, pi  # shadows the numpy versions imported above; used on scalars only
import dpkt
import datetime
import socket
import cStringIO
import string
from re import search
from operator import itemgetter
from sys import exc_info
# Reference trajectory parameters for each axis
ykernel = 'butterfly2'
yamplitude = 2.0
yfrequency = 1.0/100.00
psikernel = 'butterfly1'
psiamplitude = 0*pi/180
psifrequency = 1.0/125.0
xkernel = 'butterfly1'
xamplitude = 2.0
xfrequency = 1.0/100.00
#Reference function -- gets reference at a time t
def ref(time,kernel,amplitude,frequency):
if kernel == 'butterfly1':
val = amplitude*cos(2*pi*time*frequency)*sin(4*pi*time*frequency);
elif kernel == 'butterfly2':
val = amplitude*cos(2*pi*time*frequency)*sin(2*pi*time*frequency);
elif kernel == 'square':
if sin(frequency*2*pi*time) < 0:
val = -amplitude;
else:
val = amplitude;
elif kernel == 'sin':
val = amplitude*sin(frequency*2*pi*time);
elif kernel == 'cos':
val = amplitude*cos(frequency*2*pi*time);
else:
raise ValueError("unknown kernel %r" % kernel)
return val
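# Example (for illustration): a 'cos' kernel with amplitude 2.0 evaluated
# at t=0 returns the amplitude itself, since cos(0) == 1:
#
#     ref(0.0, 'cos', 2.0, 1.0/100.0)  # -> 2.0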
def process(filename):
data = genfromtxt(filename, skip_header=1, skip_footer=1,invalid_raise=False)
sorted_indices = argsort(data[:,5])
data = data[sorted_indices,:]
#d0max=searchsorted(data[:,0],[00,7100], side='left')
#data = data[d0max[0]+1:d0max[1],:]
#data = array([i for i in data if not isnan(i[1])])
return data
broker_ip = "128.110.152.120"
host_ip = "192.168.0.6"
def ip_to_str(address):
"""Print out an IP address given a string
Args:
address: the string representation of a MAC address
Returns:
printable IP address
"""
return socket.inet_ntop(socket.AF_INET, address)
# def isclose(a, b, allowed_error = .001):
# return abs(a - b) <= allowed_error
def remove_duplicates(seq):
final_list = []
seen = set()
seen_add = seen.add
for item in seq:
time = round(item[0], 2)
if time not in seen:
final_list.append(item)
seen_add(time)
sorted_indices = argsort([i[0] for i in final_list])
final_list = [final_list[i] for i in sorted_indices]
return final_list
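# Example (for illustration): entries are de-duplicated on their timestamp
# rounded to 2 decimals, then re-sorted by time:
#
#     remove_duplicates([[1.001, 'a'], [1.004, 'b'], [2.0, 'c']])
#     # -> [[1.001, 'a'], [2.0, 'c']]  (1.004 rounds to the same key as 1.001)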
def analyzetcp(capturefile, remote='130.127.48.67'):
## custom code to recognize data to and from 130.127.48.67
f = open(capturefile)
pcap = dpkt.pcap.Reader(f)
state_list = []
control_list = []
times = []
t0 = None
for ts, buf in pcap:
eth = dpkt.ethernet.Ethernet(buf)
if t0 is None: t0 = datetime.datetime.utcfromtimestamp(ts) ;
# IP object includes information such as IP Address
ip = eth.data;
if type(ip.data) is dpkt.tcp.TCP:
#print ts, len(buf), ip.data.data, socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst)
try:
# This includes TCP header information
tcp = ip.data;
# if "]" in tcp.data and "value" not in tcp.data and " " in tcp.data:
if socket.inet_ntoa(ip.src) in remote and "value" in tcp.data and "&t=" in tcp.data:
#print tcp.data
#print 'Timestamp: ', str(datetime.datetime.utcfromtimestamp(ts)) , 'IP: %s -> %s (len=%d, syn=%d, ack=%d)' % (ip_to_str(ip.src), ip_to_str(ip.dst), ip.len, syn_flag, ack_flag)
#print tcp.data
time = tcp.data.rsplit("t=")[1].rsplit("&")[0]
control_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
elif socket.inet_ntoa(ip.src) in remote and "value" in tcp.data and "&time=" in tcp.data:
#print tcp.data
#print 'Timestamp: ', str(datetime.datetime.utcfromtimestamp(ts)) , 'IP: %s -> %s (len=%d, syn=%d, ack=%d)' % (ip_to_str(ip.src), ip_to_str(ip.dst), ip.len, syn_flag, ack_flag)
#print tcp.data
time = tcp.data.rsplit("time=")[1].rsplit("&")[0];
control_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
elif socket.inet_ntoa(ip.dst) in remote and len(tcp.data)> 20 and ']' in tcp.data and "value" not in tcp.data :
m = search('\[(.+?) (.+?) (.+?) (.+?) (.+?)\](.+?)\[(.+?) (.+?) (.+?) (.+?) (.+?)\]', tcp.data)
if m:
if len(m.groups()) == 11:
time = m.group(5).split()[-1]
state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
time = m.group(11).split()[-1]
state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
else:
print "should be long", tcp.data
else:
m = search('\[(.+?) (.+?) (.+?) (.+?) (.+?)\]', tcp.data)
if m:
time = m.groups()[-1].split()[-1];
state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
#else:
# print "should be short", tcp.data
#else:
#print "don't know", tcp.data, socket.inet_ntoa(ip.dst)
except Exception:
print tcp.data, "error!", exc_info(),"\n";
else: #packet is not a TCP packet
pass
'''
figure(101)
mydat=array([ (i[1]-control_list[0][1]).total_seconds() for i in control_list])
latency = mydat- arange(0,round(len(mydat)*.2,2),.2)
hist(latency - min(latency), 100, log=True);xlim([-.1,.6]);ylim([1,37000]);grid("on",which="both");
xlabel('Relative latency in control request (s)')
savefig(capturefile.replace("/","_")[3:-3]+"eps" , format='eps')
figclose(101)
'''
state_list = remove_duplicates(state_list)
control_list = remove_duplicates(control_list)
state_count, control_count = 0,0
while state_count < len(state_list) and control_count < len(control_list):
#print state_list[state_count][0], control_list[control_count][0]
if state_list[state_count][0] == control_list[control_count][0]:
delta = state_list[state_count][1] - control_list[control_count][1]
times.append([delta,state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
state_count+=1
control_count+=1
elif state_list[state_count][0] > control_list[control_count][0]:
times.append([float("inf"),state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
control_count+=1
elif state_list[state_count][0] < control_list[control_count][0]:
times.append([float("inf"),state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
state_count+=1
f.close()
return times;
def errors(data):
number=16
# Cross-correlate the actual and target y-axis trajectories (columns 2
# and 7) to estimate the time offset, in 10 ms samples, between them.
q = array([item for item in data[0:2000,:] if not isnan(item[1])])
xnew = arange(q[0,0]+.01, q[-1,0], .01)
ius = interp1d(q[:,0], q[:,2], kind='linear', axis=-1, copy=True, bounds_error=True)
ynew1 = ius(xnew)
ius = interp1d(q[:,0], q[:,7], kind='linear', axis=-1, copy=True, bounds_error=True)
ynew2 = ius(xnew)
xcorr = correlate(ynew1, ynew2)
datadt = arange(1-xnew.shape[0], xnew.shape[0])[xcorr.argmax()]
e_data0 = array([[item[0]-datadt*.01,item[1] - ref(item[0]-datadt*.01,xkernel ,xamplitude ,xfrequency) ,item[2] - ref(item[0]-datadt*.01,ykernel ,yamplitude ,yfrequency)] for item in data if not isnan(item[1])]);
# e_data1 recomputes the errors using the alternate time column (index 14);
# it is required by the min() below.
e_data1 = array([[item[14]-datadt*.01,item[1] - ref(item[14]-datadt*.01,xkernel ,xamplitude ,xfrequency) ,item[2] - ref(item[14]-datadt*.01,ykernel ,yamplitude ,yfrequency)] for item in data if not isnan(item[1])]);
errors = [None]*number;
counts = [None]*number;
for count in range(1,number+1):
#get the range of indices that are relevant for this loop
d0max=searchsorted(e_data0[:,0],[.1+100*count,.1+100*count+100], side='left');
'''since the controller can get delayed, it's possible that the time is off by a cycle.. and instead of the 0 index, the 14 index has the correct time.
To work around this issue we take the minimum error of both time calculations.
'''
#errors[count-1] = sum(abs(data[d0max[0]:d0max[1],1]))/diff(d0max)+sum(abs(data[d0max[0]:d0max[1],2]))/diff(d0max); #old value not correct for some cases
errors[count-1] = min(sum(abs(e_data0[d0max[0]:d0max[1],1]))/diff(d0max)+sum(abs(e_data0[d0max[0]:d0max[1],2]))/diff(d0max),sum(abs(e_data1[d0max[0]:d0max[1],1]))/diff(d0max)+sum(abs(e_data1[d0max[0]:d0max[1],2]))/diff(d0max))
counts[count-1] = d0max[1]-d0max[0];
e_data=errors;
e_datac=counts;
return e_data, e_datac
if __name__ == "__main__":
path = argv[1]
controller1_l5=process(path+"/mqtt_bar_control_action__1__plant_state__1.txt");
controller1_l4=analyzetcp(path+'/Controller.pcap','10.0.0.3');
hist([i[0].total_seconds() for i in controller1_l4 if type(i[0]) is not float],100);
grid("on",which="both");#xlim([0,.8]);ylim([0,1e5])
title("Histogram of the Transport Layer RTT n="+str(len(controller1_l4)))
figure();plot(controller1_l5[:,0],controller1_l5[:,1]);plot(controller1_l5[:,0],controller1_l5[:,6]);title("x-axis");legend(["actual","target"])
figure();plot(controller1_l5[:,0],controller1_l5[:,2]);plot(controller1_l5[:,0],controller1_l5[:,7]);title("y-axis");legend(["actual","target"])
show()
| |
#!/usr/bin/python
#
# wikify.py - Convert from wikitext to HTML
# Based on large portions of JeremyRuston's TiddlyWiki JS Wikifier
# Changed to GoogleCode wiki syntax, python by Michael Crawford <mike@dataunity.com>
""" Convert wikitext to HTML """
# Jeremy's license:
# Copyright (c) UnaMesa Association 2004-2007
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the UnaMesa Association nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# My license:
# Copyright (c) Data Unity 2007
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the Data Unity nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re, os, os.path, htmlentitydefs, urllib
class _HTML:
""" An HTML node factory factory. """
class Node:
""" An HTML element. """
def __init__(self, parent, tagname, text="", attribs={}, empty=False, **kwargs):
self.tagname = tagname
self.attribs = dict(attribs)
self.children = list()
self.empty = empty
if text != "":
self.appendText(text)
if parent is not None:
parent.children.append(self)
self.parent = parent
def appendText(self, text):
if text == "": return
_HTML.Text(self, text)
def __str__(self):
attrs = " ".join([ '%s="%s"' % i for i in self.attribs.iteritems() ])
if attrs: attrs = " " + attrs
if self.empty:
return "<%s%s/>" % (self.tagname, attrs)
children = "".join([str(c) for c in self.children])
return "<%s%s>%s</%s>" % (self.tagname, attrs, children, self.tagname)
def isInside(self, tagname):
k = self
while k is not None:
if k.tagname == tagname:
return True
k = k.parent
return False
class Text:
""" Simple text node. """
entities = [ (k,v)
for k,v in htmlentitydefs.entitydefs.iteritems()
if k != "amp" and k[0] != "#" ]
def __init__(self, parent, text=""):
self.text = self._clean(text)
if parent is not None:
parent.children.append(self)
def _clean(self, text):
            text = text.replace("&", "&amp;")
for k,v in self.entities:
text = text.replace(v, "&%s;" % k)
return text
def __str__(self):
return self.text
def __getattr__(self, attr):
""" Return an element constructor using the attribute as the tagname """
def factory(parent=None, **kwargs):
return self.Node(parent, attr, **kwargs)
return factory
HTML = _HTML()
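# Minimal usage sketch of the factory (hedged; only names defined above):
#   root = HTML.p(None)        # a parentless <p> node
#   HTML.b(root, text="bold")  # appends <b>bold</b> as a child
#   str(root)                  # -> '<p><b>bold</b></p>'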
URLSTR = r"(?:file|http|https|mailto|ftp|irc|news|data):[^\s'\"]+(?:/|\b)"
URL = re.compile(URLSTR, re.M)
IMGURLSTR = r".+((\.[Pp][Nn][Gg])|(\.[Gg][Ii][Ff])|(\.[Jj][Pp][Ee]?[Gg]))"
IMGURL = re.compile(IMGURLSTR, re.M)
YOUTUBESTR = r"http://www.youtube.com/watch\?v=([A-Za-z0-9_-]+)"
YOUTUBEURL = re.compile(YOUTUBESTR, re.M)
YOUTUBEREPL = r'<object width="425" height="355"><param name="movie" value="http://www.youtube.com/v/%(vid)s&rel=1"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/%(vid)s&rel=1" type="application/x-shockwave-flash" wmode="transparent" width="425" height="355"></embed></object>'
VIDEOURLSTR = r".+((\.[Aa][Vv][Ii])|(\.[Mm][Oo][Vv])|(\.[Mm][Pp][Ee]?[Gg]))"
VIDEOURL = re.compile(VIDEOURLSTR, re.M)
VIDEOREPL = r'<embed src = "%s" width="400" height="350" hidden=false autostart=true loop=1>'
CODEURLSTR = r"http://([^\.]+).googlecode.com/svn/trunk/([^#]+)#((?:(?:(?:[\d]+)?\-)?[\d]+)|(?:[\d]+\-?))((?:\:(?:[\:]|[^\W])+))?"
CODEURL = re.compile(CODEURLSTR, re.M)
CODEREPL = r'<a href="%(url)s">svn://%(site)s/trunk/%(file)s</a><pre name="code" class="%(class)s">%(lines)s</pre>'
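# Example of the code-inclusion syntax CODEURL matches (a sketch; the project
# name, path, and line range here are hypothetical):
#   http://myproj.googlecode.com/svn/trunk/src/main.py#10-20:py
# renders lines 10..20 of src/main.py from the myproj trunk in a <pre> block
# whose highlighting class is "py".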
def GoogleCode_ReadSVNFile(wikifier, domain, path, start, end):
""" Try to read a file from subversion for inclusion in the wiki. """
gcurl = "http://%s.googlecode.com/svn/trunk/%s" % (domain,path)
fdata = urllib.urlopen(gcurl).readlines()
return gcurl, fdata[start-1:end]
def GoogleCode_IsExternalLink(wikifier, link):
""" See if the link points outside of the wiki. """
if GoogleCode_Exists(wikifier, link):
        return False
if URL.match(link):
return True
if '.' in link or '\\' in link or '/' in link or '#' in link:
return True
return False
def GoogleCode_Exists(wikifier, wikipage):
""" See if a wiki page exists inside this wiki. """
path = os.path.join(wikifier.srcdir, "%s.wiki" % wikipage)
if os.path.exists(path):
return True
return False
def GoogleCode_Heading(wikifier, termRegExp=None, **kwargs):
termMatch = termRegExp.search(wikifier.source, wikifier.nextMatch)
if termMatch is None: return
if (len(wikifier.output.children) and
"br" == getattr(wikifier.output.children[-1], 'tagname', '')):
wikifier.output.children.pop(-1)
if (len(wikifier.output.children) and
"br" == getattr(wikifier.output.children[-1], 'tagname', '')):
wikifier.output.children.pop(-1)
output = HTML.Node(wikifier.output, "h%i" % wikifier.matchLength)
wikifier.outputText(output, wikifier.nextMatch, termMatch.start())
wikifier.nextMatch = termMatch.end()
def GoogleCode_SimpleElement(wikifier, termRegExp=None, tagName=None, **kwargs):
if wikifier.output.isInside(tagName):
wikifier.outputText(wikifier.output, wikifier.matchStart, wikifier.nextMatch)
return
elif wikifier.source[wikifier.nextMatch-1] == "_":
wikifier.outputText(wikifier.output, wikifier.matchStart, wikifier.nextMatch-1)
if termRegExp.search(wikifier.source, wikifier.nextMatch) is None: return
output = HTML.Node(wikifier.output, tagName, **kwargs)
wikifier.subWikifyTerm(output, termRegExp)
    #if wikifier.source[wikifier.nextMatch-2] == "_":
    #    wikifier.nextMatch -= 1
def GoogleCode_Blockquote(wikifier, termRegExp=None, **kwargs):
sibs = wikifier.output.children
if len(sibs) and getattr(sibs[-1], 'tagname', None) == "blockquote":
wikifier.subWikifyTerm(sibs[-1], termRegExp)
else:
output = HTML.blockquote(wikifier.output, **kwargs)
wikifier.subWikifyTerm(output, termRegExp)
def GoogleCode_Codeblock(wikifier, tagName=None, termRegExp=None, initRegExp=None, **kwargs):
if 'attribs' not in kwargs:
kwargs['attribs'] = {}
kwargs['attribs']['name'] = 'code'
if 'class' not in kwargs['attribs']:
kwargs['attribs']['class'] = wikifier.defaultHiLang.lower()
else:
kwargs['attribs']['class'] += " " + wikifier.defaultHiLang.lower()
output = HTML.Node(wikifier.output, tagName, **kwargs)
tcount = 1
matchStart = wikifier.nextMatch
# Find the matching terminator
while tcount > 0:
nextTermMatch = termRegExp.search(wikifier.source, wikifier.nextMatch)
nextInitMatch = initRegExp.search(wikifier.source, wikifier.nextMatch)
if not nextTermMatch:
# No terminator. Syntax error, just ignore it.
matchEnd = matchStart
tcount = 0
break
elif not nextInitMatch or nextTermMatch.start() <= nextInitMatch.start():
# Terminator goes first.
nextMatch = nextTermMatch
tcount -= 1
if tcount > 0:
matchEnd = nextMatch.end()
else:
matchEnd = nextMatch.start()
else:
nextMatch = nextInitMatch
tcount += 1
matchEnd = nextMatch.end()
wikifier.nextMatch = nextMatch.end()
# Copy the content
wikifier.outputText(output, matchStart, matchEnd)
if "\n" not in wikifier.source[matchStart:matchEnd]:
output.tagname = "code"
def GoogleCode_WikiWord(wikifier, **kwargs):
if wikifier.matchStart > 0:
# Make sure we're at the start of a word?
preRegExp = re.compile("[!A-Za-z0-9]", re.M)
preMatch = preRegExp.search(wikifier.source, wikifier.matchStart-1)
if (preMatch is not None and
preMatch.start() == wikifier.matchStart-1):
wikifier.outputText(wikifier.output,wikifier.matchStart,wikifier.nextMatch)
return
if wikifier.source[wikifier.matchStart] == "!":
wikifier.outputText(wikifier.output,wikifier.matchStart+1,wikifier.nextMatch)
elif GoogleCode_Exists(wikifier, wikifier.matchText):
# Full link, everybody sees it
HTML.a(wikifier.output, text=wikifier.matchText, attribs={"href": wikifier.matchText + wikifier.suffix})
elif wikifier.autolink:
# Partial link - only authorized users
wikifier.outputText(wikifier.output,wikifier.matchStart,wikifier.nextMatch)
        HTML.a(wikifier.output, text="?", attribs={"href": wikifier.matchText + wikifier.suffix})
else:
wikifier.outputText(wikifier.output,wikifier.matchStart,wikifier.nextMatch)
def GoogleCode_LineBreak(wikifier, **kwargs):
sibs = wikifier.output.children
if wikifier.multibreak:
HTML.p(wikifier.output, **kwargs)
elif len(sibs) and (not hasattr(sibs[-1], 'tagname') or
sibs[-1].tagname == "img"):
# Only after an inline or header block.
HTML.p(wikifier.output, **kwargs)
HTML.p(wikifier.output, **kwargs)
def GoogleCode_PrettyLink(wikifier, lookaheadRegExp=None, **kwargs):
lookMatch = lookaheadRegExp.search(wikifier.source, wikifier.matchStart)
if lookMatch and lookMatch.start() == wikifier.matchStart:
text = lookMatch.group(1)
if lookMatch.group(2):
            # Pretty bracketed link
link = text
text = lookMatch.group(2)
if GoogleCode_IsExternalLink(wikifier, link):
# External link
attribs={"href":link, "target": "_blank" }
else:
# Internal link
attribs={"href":link + wikifier.suffix}
e = HTML.a(wikifier.output, attribs=attribs)
if URL.match(text):
HTML.img(e, attribs={'src':text,
'border': '0'})
HTML.br(wikifier.output)
else:
HTML.Text(e, text)
else:
if GoogleCode_IsExternalLink(wikifier, text):
# External link
attribs={"href":link, "target": "_blank" }
else:
# Internal link
attribs={"href":text + wikifier.suffix}
            # Simple bracketed link
e = HTML.a(wikifier.output, text=text, attribs=attribs)
wikifier.nextMatch = lookMatch.end()
def GoogleCode_UrlLink(wikifier, **kwargs):
attribs = {"href": wikifier.matchText}
if GoogleCode_IsExternalLink(wikifier, wikifier.matchText):
attribs["target"] = "_blank"
if IMGURL.match(wikifier.matchText):
HTML.img(wikifier.output, attribs={'src':wikifier.matchText})
HTML.br(wikifier.output)
elif YOUTUBEURL.match(wikifier.matchText):
match = YOUTUBEURL.match(wikifier.matchText)
# Raw html ;)
        wikifier.output.children.append(YOUTUBEREPL % {'vid': match.group(1)})
elif VIDEOURL.match(wikifier.matchText):
# Raw html ;)
wikifier.output.children.append(VIDEOREPL % wikifier.matchText)
elif CODEURL.match(wikifier.matchText):
# Raw html ;)
# http://([^\.]+).googlecode.com/svn/trunk/([^\#]+)#([^\:]+)(?:\:([^\W]+))?
codeMatch = CODEURL.match(wikifier.matchText)
parts = { "class": (codeMatch.group(4) or "").lower()[1:],
"file": codeMatch.group(2),
"site": codeMatch.group(1)}
lines = codeMatch.group(3)
if '-' in lines:
lines = lines.split('-')
lines[0] = int(lines[0])
lines[1] = int(lines[1])
else:
lines = [int(lines), int(lines)]
parts['class'] += ":firstline[%i]" % lines[0]
url, parts['lines'] = GoogleCode_ReadSVNFile(wikifier, parts['site'],
parts['file'], *lines)
parts['url'] = url
parts['lines'] = "".join(parts['lines'])
wikifier.output.children.append(CODEREPL % parts)
else:
HTML.a(wikifier.output, text=wikifier.matchText, attribs=attribs)
def GoogleCode_Table(wikifier, sepRegExp=None, termRegExp=None, **kwargs):
sibs = wikifier.output.children
if len(sibs) and getattr(sibs[-1], 'tagname', None) == "table":
table = sibs[-1]
else:
table = HTML.table(wikifier.output)
row = HTML.tr(table)
termMatch = termRegExp.search(wikifier.source, wikifier.matchStart)
if termMatch is None:
termEnd = termStart = len(wikifier.source)
else:
termStart, termEnd = termMatch.start(), termMatch.end()
# Skip over the leading separator
sepMatch = sepRegExp.search(wikifier.source, wikifier.matchStart)
wikifier.nextMatch = wikifier.matchStart = sepMatch.end()
sepMatch = sepRegExp.search(wikifier.source, wikifier.matchStart)
attribs = { "style": "border: 1px solid #aaa; padding: 5px;" }
while sepMatch and sepMatch.end() <= termStart:
cell = HTML.td(row, attribs=attribs)
wikifier.subWikifyTerm(cell, sepRegExp)
wikifier.nextMatch = sepMatch.end()
sepMatch = sepRegExp.search(wikifier.source, wikifier.nextMatch)
wikifier.nextMatch = termEnd
def GoogleCode_List(wikifier, lookaheadRegExp=None, termRegExp=None, **kwargs):
currLevel = 0
currType = None
stack = [wikifier.output]
indents = [currLevel]
wikifier.nextMatch = wikifier.matchStart
lookMatch = lookaheadRegExp.search(wikifier.source, wikifier.nextMatch)
while lookMatch and lookMatch.start() == wikifier.nextMatch:
# See what kind of list it is
if lookMatch.group(1):
listType = "ul"
itemType = "li"
elif lookMatch.group(2):
listType = "ol"
itemType = "li"
listLevel = len(lookMatch.group(0))
wikifier.nextMatch += len(lookMatch.group(0))
# Check for any changes in list type or indentation
if listLevel > currLevel:
# Indent further
indents.append(listLevel)
if currLevel == 0:
target = stack[-1]
else:
target = stack[-1].children[-1]
stack.append(HTML.Node(target, listType))
elif listLevel < currLevel:
# Indent less
while indents[-1] > listLevel:
stack.pop(-1)
indents.pop(-1)
elif listLevel == currLevel and listType != currType:
# Same level, different kind of list
stack.pop(-1)
stack.append(HTML.Node(stack[-1].children[-1], listType))
currLevel = listLevel
currType = listType
# Output the item
output = HTML.Node(stack[-1],itemType)
wikifier.subWikifyTerm(output,termRegExp)
# Roll again
lookMatch = lookaheadRegExp.search(wikifier.source, wikifier.nextMatch)
GoogleCodeWikiFormat = [
{
"name": "tablerow",
"match": r"^(?:\|\|.+\|\|)",
"termRegExp": re.compile(r"(\n)", re.M),
"sepRegExp": re.compile(r"(\|\|)", re.M),
"handler": GoogleCode_Table
},
{ "name": "heading",
"match": r"^={1,6}",
"termRegExp": re.compile(r"([=]+)", re.M),
"handler": GoogleCode_Heading
},
{ "name": "list",
"match": r"^(?:[ ]+)(?:[\*#])",
"lookaheadRegExp": re.compile(r"^(?:[ ]+)(?:(\*)|(#))",re.M),
"termRegExp": re.compile(r"(\n)", re.M),
"handler": GoogleCode_List
},
{ "name": "blockquote",
"match": r"^(?:[ ]+)",
"termRegExp": re.compile(r"(\n)", re.M),
"handler": GoogleCode_Blockquote,
"tagName": "blockquote"
},
{ "name": "codeword",
"match": r"\`",
"initRegExp": re.compile(r"(\`)", re.M),
"termRegExp": re.compile(r"(\`)", re.M),
"handler": GoogleCode_Codeblock,
"tagName": "code"
},
{ "name": "codeblock",
"match": r"\{\{\{",
"initRegExp": re.compile(r"(\{\{\{)", re.M),
"termRegExp": re.compile(r"(\}\}\})", re.M),
"handler": GoogleCode_Codeblock,
"tagName": "pre",
"attribs": { "class": "codeblock" }
},
{ "name": "bold",
"match": r"[\*]",
"termRegExp": re.compile(r"([\*])", re.M),
"handler": GoogleCode_SimpleElement,
"tagName": "b"
},
{ "name": "italic",
"match": r"(?:[^\w\b]|^)[\_]",
"termRegExp": re.compile(r"([\_])[^\w\b]", re.M),
"handler": GoogleCode_SimpleElement,
"tagName": "i"
},
{ "name": "strike",
"match": r"\~\~",
"termRegExp": re.compile(r"(\~\~)", re.M),
"handler": GoogleCode_SimpleElement,
"tagName": "strike"
},
{ "name": "superscript",
"match": r"\^",
"termRegExp": re.compile(r"(\^)", re.M),
"handler": GoogleCode_SimpleElement,
"tagName": "sup"
},
{ "name": "subscript",
"match": r",,",
"termRegExp": re.compile(r"(,,)", re.M),
"handler": GoogleCode_SimpleElement,
"tagName": "sub"
},
{ "name": "prettyLink",
"match": r"\[(?:(?:[A-Za-z][A-Za-z0-9\_\-]+)|(?:(?:file|http|https|mailto|ftp|irc|news|data):[^\s'\"]+(?:/|\b)))(?: .*?)?\]",
"lookaheadRegExp": re.compile(r'\[(.*?)(?: (.*?))?\]', re.M),
"handler": GoogleCode_PrettyLink
},
{ "name": "wikiword",
"match": r"(?:\!?(?:[A-Z]+[a-z]+[A-Z][A-Za-z]*)|(?:[A-Z]{2,}[a-z]+))",
"handler": GoogleCode_WikiWord
},
{ "name": "urlLink",
"match": URLSTR,
"handler": GoogleCode_UrlLink
},
{ "name": "linebreak",
"match": r"\n\n",
"handler": GoogleCode_LineBreak,
"empty": True
},
]
class Wikifier:
def __init__(self, formatters, autolink=False, srcdir=os.getcwd(),
multibreak=False, tabwidth=8, suffix=".html",
hiLang="Python"):
# Create the master regex
forms = [ "(%s)" % r['match'] for r in formatters ]
self.formatterRegExp = re.compile("|".join(forms), re.M)
# Save the individual format handlers
self.formatters = formatters
self.autolink = autolink
self.srcdir = srcdir
        self.multibreak = bool(multibreak)
self.tabwidth = tabwidth
self.suffix = suffix
self.defaultHiLang = hiLang
def _clean(self, text):
text = text.replace("\r\n", "\n")
# Out, out, damned tabs
text = text.replace("\t", " " * self.tabwidth)
if not self.multibreak:
# Remove redundant line breaks
tlen = len(text) + 1
while tlen > len(text):
tlen = len(text)
text = text.replace("\n\n\n", "\n\n")
while text.startswith("#"):
# Process any wiki-headers
line, text = text.split("\n", 1)
self._header(line)
return text
def _header(self, line):
tagname, content = line.split(" ", 1)
if tagname == "#summary":
self.summary = content
elif tagname == "#labels":
self.labels = tuple(content.split(","))
def wikify(self, source, labels=None, summary=None):
self.labels = labels
self.summary = summary
# Clean up the content
self.source = self._clean(source)
self.nextMatch = 0
# Do it
self.output = HTML.div(None)
self.subWikifyUnterm()
return "".join([str(c) for c in self.output.children])
def findMatch(self, source, start):
return self.formatterRegExp.search(source, start)
def subWikifyUnterm(self, output=None):
oldOutput = self.output
if output is not None:
self.output = output
match = self.findMatch(self.source, self.nextMatch)
while match:
# Output any text before the match
if match.start() > self.nextMatch:
self.outputText(self.output, self.nextMatch, match.start())
# Set the match parameters for the handler
self.matchStart = match.start()
self.matchLength = len(match.group(0))
self.matchText = match.group(0)
self.nextMatch = match.end()
# Figure out which sub-group matched (zero-indexed)
t,submatch = [ (t,s) for t, s in enumerate(match.groups()) if s ][0]
# Handle it
self.formatters[t]['handler'](self, **self.formatters[t])
# Go back for more matches
match = self.findMatch(self.source, self.nextMatch)
if self.nextMatch < len(self.source):
self.outputText(self.output, self.nextMatch, len(self.source))
self.nextMatch = len(self.source)
# Restore the destination node
self.output = oldOutput
def subWikifyTerm(self, output, termRegExp):
oldOutput = self.output
if output is not None:
self.output = output
# Get the first matches for the formatter and terminator RegExps
termMatch = termRegExp.search(self.source, self.nextMatch)
if termMatch:
match = self.findMatch(self.source[:termMatch.start()], self.nextMatch)
else:
match = self.findMatch(self.source, self.nextMatch)
while termMatch or match:
# If the terminator comes before the next formatter match, we're done
if termMatch and (not match or termMatch.start() <= match.start()):
if termMatch.start() > self.nextMatch:
self.outputText(self.output,self.nextMatch,termMatch.start())
self.matchText = termMatch.group(1)
self.matchLength = len(self.matchText)
self.matchStart = termMatch.start()
self.nextMatch = self.matchStart + self.matchLength
self.output = oldOutput
return
# Output any text before the match
if match.start() > self.nextMatch:
self.outputText(self.output, self.nextMatch, match.start())
# Set the match parameters for the handler
self.matchStart = match.start()
self.matchLength = len(match.group(0))
self.matchText = match.group(0)
self.nextMatch = match.end()
# Figure out which sub-group matched (zero-indexed)
t,submatch = [ (t,s) for t, s in enumerate(match.groups()) if s ][0]
# Handle it
self.formatters[t]['handler'](self, **self.formatters[t])
termMatch = termRegExp.search(self.source, self.nextMatch)
if termMatch:
match = self.findMatch(self.source[:termMatch.start()], self.nextMatch)
else:
match = self.findMatch(self.source, self.nextMatch)
if self.nextMatch < len(self.source):
self.outputText(self.output, self.nextMatch,len(self.source))
self.nextMatch = len(self.source)
self.output = oldOutput
def outputText(self, output, startPos, endPos):
HTML.Text(output, self.source[startPos:endPos])
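# Minimal usage sketch (hedged; srcdir is only consulted for WikiWord
# existence checks):
#   w = Wikifier(GoogleCodeWikiFormat, srcdir="/path/to/wiki/sources")
#   html = w.wikify("= Heading =\nSome *bold* and _italic_ text.")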
DEFAULT_TEMPLATE = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head>
</head>
<body>
<div id="page">
<div id='header'>
<br style="clear: both" /><br/>
</div>
<div id="pagecontent">
<div class="index">
<!-- This is a (PRE) block. Make sure it's left aligned or your toc title will be off. -->
%(toc)s
</div>
<i>%(title)s</i>
<div class="summary">
%(summary)s
</div>
<div class="narrow">
%(wiki)s
</div>
</div>
</div>
</body>
</html>
'''
# This second, simpler template overrides the full-page template above.
DEFAULT_TEMPLATE = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head>
</head>
<body>
<div class="summary">
%(summary)s
</div>
<div class="narrow">
%(wiki)s
</div>
</body>
</html>
'''
def wikify(pages, options=None):
# See options definition below.
# Pass any object with those (potential) attributes
srcdir = getattr(options, 'srcdir', os.getcwd())
destdir = getattr(options, 'destdir', None)
# Find all requested files
onlyStale = False
if getattr(options, 'all', False):
pages = [ k for k in os.listdir(srcdir)
if k.endswith(".wiki") ]
onlyStale = True
if destdir is None:
destdir = os.getcwd()
# Create the magic 8-ball
w = Wikifier(GoogleCodeWikiFormat,
autolink=getattr(options, 'autolink', False),
tabwidth=getattr(options, 'tabwidth', 8),
multibreak=getattr(options, 'multibreak', False),
srcdir=srcdir,
suffix=".html")
rets = []
for wikiname in pages:
# Clean up the page name
if wikiname.endswith(".wiki"):
wikiname = wikiname[:-5]
wikifilename = os.path.join(srcdir, "%s.wiki" % wikiname)
if onlyStale:
# See if the output is fresh, and if so, skip it
wikidestname = os.path.join(destdir, "%s.html" % wikiname)
try:
sstat = os.stat(wikifilename)
            except OSError:
continue
try:
dstat = os.stat(wikidestname)
            except OSError:
pass
else:
if dstat.st_mtime > sstat.st_mtime:
continue
# Load the wiki content
wikifilename = os.path.join(srcdir, "%s.wiki" % wikiname)
wikisrc = file(wikifilename).read()
# Ask a question
wikified = w.wikify(wikisrc)
reFind = re.compile(r'<h(\d)>\s*([^\<]*[\S])\s*</h\d>')
strRepl = r'<h\g<1>><a name="\g<2>">\g<2></a></h\g<1>>'
# Number the sections
if getattr(options, 'number', True):
sectstack = []
matches = []
curLevel = 0
match = reFind.search(wikified)
while match is not None:
level = int(match.group(1))
while level > len(sectstack):
sectstack.append(1)
while len(sectstack) > level:
sectstack.pop(-1)
if curLevel >= level:
sectstack[-1] += 1
curLevel = len(sectstack)
sectnum = ".".join([str(n) for n in sectstack]) + "."
matches.append((sectnum, match))
match = reFind.search(wikified, match.end())
matches.reverse()
for sectnum, match in matches:
wikified = wikified[:match.start()+4] + sectnum + " " + wikified[match.start()+4:]
# Generate the TOC
if getattr(options, 'toc', True):
matches = [ '<b>%s: Contents</b>' % wikiname ]
for match in reFind.findall(wikified):
if int(match[0]) > getattr(options, 'levels', 3): continue
indent = " " * ((int(match[0])) * 2)
href = "#" + match[1]
anchor = '%s<a href="%s">%s</a>' % (indent, href, match[1])
matches.append(anchor)
toc = "<br>".join(matches)
else:
toc = "" #-e -d /home/adam/src/CSpaceWiki/
# Generate the body links
if getattr(options, 'links', True):
wikified = reFind.sub(strRepl, wikified)
# Find a summary
summary = ""
if w.summary is not None:
summary = w.summary
if not getattr(options, 'raw', False):
# Fill the template
wikified = options.template % {
"toc": toc,
"title": wikiname,
"wiki": wikified,
"summary": summary }
# Save it or write it
if destdir is not None:
outputname = os.path.join(destdir, "%s.html" % wikiname)
file(outputname,"w").write(wikified)
mainpage = getattr(options, 'mainpage', 'MainPage')
if wikiname == mainpage:
rets.append((wikiname, outputname))
outputname = os.path.join(destdir, "index.html")
file(outputname,"w").write(wikified)
wikified = outputname
rets.append((wikiname, wikified))
return rets
if __name__ == "__main__":
from optparse import OptionParser
import sys
parser = OptionParser()
# Output format options
parser.add_option("-t", "--template", dest="template",
help="use TPLTFILE to wrap wiki output", metavar="TPLTFILE")
parser.add_option("-n", "--number", dest="number", metavar="NUMSTART",
help="number the headings in the body and table of contents starting with level NUMSTART")
parser.add_option("-l", "--levels", dest="levels", type="int",
help="create toc to depth LEVELS", metavar="LEVELS")
parser.add_option("-c", "--skiptoc", dest="toc", action="store_false",
help="leave toc out, even if template has slot")
parser.add_option("-u", "--unlink", dest="links", action="store_false",
help="don't create named anchors for toc links")
parser.add_option("-a", "--autolink", dest="autolink", action="store_false",
help="autolink wiki words that don't exist")
parser.add_option("-w", "--tabwidth", dest="tabwidth", type="int",
help="replace tabs by WIDTH spaces", metavar="WIDTH")
parser.add_option("-m", "--multibreak", dest="multibreak", action="store_true",
help="don't collapse multiple line breaks")
parser.add_option("-r", "--raw", dest="raw", action="store_true",
help="raw wiki translation -- no wrapping, no toc, no links")
parser.add_option("-p", "--mainpage", dest="mainpage", metavar="PAGENAME",
help="set main page to PAGENAME")
# Batch / Location options
parser.add_option("-s", "--srcdir", dest="srcdir",
help="wiki format sources in SRCDIR", metavar="SRCDIR")
parser.add_option("-d", "--destdir", dest="destdir",
help="write html output into DESTDIR", metavar="DESTDIR")
parser.add_option("-e", "--stale", dest="all", action="store_true",
help="convert all wiki files that are stale or missing from DESTDIR")
parser.set_default('toc', True)
parser.set_default('links', True)
parser.set_default('template', None)
parser.set_default('number', False)
parser.set_default('levels', 3)
parser.set_default('tabwidth', 8)
parser.set_default('multibreak', False)
parser.set_default('mainpage', "MainPage") # Identity of index
parser.set_default('srcdir', os.getcwd())
parser.set_default('destdir', None)
parser.set_default('all', False)
# Parse the command line
(options, args) = parser.parse_args()
if options.template is None:
options.template = DEFAULT_TEMPLATE
elif os.path.exists(options.template):
options.template = file(options.template).read()
else:
print "Template not found: %s" % options.template
parser.print_usage()
sys.exit()
for wikiname, htmldata in wikify(args, options):
        if options.destdir:
            if htmldata is None:
                print "Complete."
elif htmldata is not None:
print htmldata
| |
import datetime
import json
import logging
import os
import random
import subprocess
import sys
import time
try:
from cloghandler import (
ConcurrentRotatingFileHandler as RotatingFileHandler
)
except ImportError:
from logging.handlers import RotatingFileHandler
import requests
from requests.exceptions import ConnectionError
from util.cygwin import regularize_path
class MultyvacError(Exception):
pass
class RequestError(MultyvacError):
"""Exception class for errors when making web requests to the
Multyvac API."""
def __init__(self, http_status_code, code, message, hint=None,
retry=False):
Exception.__init__(self, http_status_code, code, message, hint, retry)
self.http_status_code = http_status_code
self.code = code
self.message = message
self.hint = hint
self.retry = retry
def __str__(self):
return '%s (Code: %s Hint: %s)' % (self.message, self.code, self.hint)
def __repr__(self):
return 'RequestError({code}, "{message}", {hint})'.format(
code=self.code,
message=self.message,
hint=self.hint,
)
class SyncError(MultyvacError):
"""Encapsulates errors when making rsync requests to Multyvac."""
def __init__(self, exit_status, message):
Exception.__init__(self, exit_status, message)
self.exit_status = exit_status
self.message = message
def __repr__(self):
return 'SyncError({exit_status}, "{message}")'.format(
exit_status=self.exit_status,
message=self.message,
)
class Multyvac(object):
"""
Multyvac
The primary object for interacting with the Multyvac API.
All Multyvac modules are exposed through this.
"""
_ASK_GET = 'GET'
_ASK_POST = 'POST'
_ASK_PUT = 'PUT'
_ASK_PATCH = 'PATCH'
def __init__(self, api_key=None, api_secret_key=None, api_url=None):
self._session = requests.session()
from .config import ConfigModule
# Note: At this time, the rest of the Multyvac modules have not been
# initialized. So the constructor should not do anything that requires
        # any other modules (i.e., do not use the ApiKey module).
self.config = ConfigModule(self, api_key, api_secret_key, api_url)
if os.name == 'nt':
self._rsync_bin = os.path.join(self.config.get_multyvac_path(),
'bin/rsync.exe')
self._ssh_bin = os.path.join(self.config.get_multyvac_path(),
'bin/ssh.exe')
else:
self._rsync_bin = 'rsync'
self._ssh_bin = 'ssh'
# Must be after config
self._setup_logger()
from .job import JobModule
self.job = JobModule(self)
from .layer import LayerModule
self.layer = LayerModule(self)
from .volume import VolumeModule
self.volume = VolumeModule(self)
from .cluster import ClusterModule
self.cluster = ClusterModule(self)
from .api_key import ApiKeyModule
self.api_key = ApiKeyModule(self)
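    # Usage sketch (hedged; the keys below are placeholders):
    #   mv = Multyvac(api_key='MY_KEY', api_secret_key='MY_SECRET')
    #   mv.job, mv.layer, mv.volume, mv.cluster, and mv.api_key expose the
    #   corresponding API modules.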
def _setup_logger(self):
"""
Sets up a rotating file logger.
TODO: Have config option for printing to screen.
"""
logs_path = os.path.join(self.config.get_multyvac_path(), 'log')
if not os.path.exists(logs_path):
self.config._create_path_ignore_existing(logs_path)
log_path = os.path.join(logs_path, 'multyvac.log')
self._logger = logging.getLogger('multyvac')
self._logger.setLevel(logging.ERROR)
if os.name == 'nt':
from logging import FileHandler
            try:
                handler = FileHandler(log_path, 'a')
            except Exception as e:
                print >> sys.stderr, 'Could not open logging file handler:', e
                # Bail out: continuing without a handler would raise a
                # NameError at handler.setFormatter() below.
                return
else:
try:
handler = RotatingFileHandler(log_path, 'a', 1024*1024, 10)
except OSError as e:
if e.errno == 13:
# Assume the permission denied is because the log is owned by
# a different user (probably due to sudo). Use a different log
# file with the user's uid embedded.
# TODO: send_log_to_support() does not handle this case.
log_path = os.path.join(logs_path,
'multyvac.%s.log' % os.getuid())
handler = RotatingFileHandler(log_path, 'a', 1024*1024, 10)
# TODO: Fixing the permissions here only helps so much. If the user ran
# this as sudo, the rotated logs will be owned by sudo's target user.
# Our only option is to modify RotatingFileHandler to try to set the
# calling user as the owner.
self.config._fix_permission(log_path)
lock_path = os.path.join(logs_path, 'multyvac.lock')
self.config._fix_permission(lock_path)
formatter = logging.Formatter(
'[%(asctime)s] - [%(levelname)s] - %(name)s: %(message)s'
)
handler.setFormatter(formatter)
self._logger.addHandler(handler)
def _get_session_method(self, method):
"""
Returns a function that can be used to make an API request.
:param method: The HTTP verb to be used by the request.
"""
if method == self._ASK_POST:
return self._session.post
elif method == self._ASK_GET:
return self._session.get
elif method == self._ASK_PUT:
return self._session.put
elif method == self._ASK_PATCH:
return self._session.patch
else:
raise KeyError('Unknown method "%s"' % method)
def _log_ask(self, method, uri, params, data, headers, files):
"""Use this to log a request. It only logs params and data elements
that are not overly large to prevent filling up the log."""
self._logger.info('%s request to %s with params %r data %r files %r',
method,
uri,
self._log_ask_element(params),
self._log_ask_element(data),
[path for path, _ in files.values()] if files else None)
def _log_ask_element(self, ele):
"""Recurses into dict and list objects replacing elements that are too
large for a log file. This way we still see small elements, but filter
        out large things like stdin."""
max_element_byte_size = 150
ele_size = sys.getsizeof(ele)
if isinstance(ele, dict):
d = {}
for k, v in ele.items():
d[k] = self._log_ask_element(v)
return d
elif isinstance(ele, (tuple, list)):
return [self._log_ask_element(v) for v in ele]
elif ele_size > max_element_byte_size:
return 'Too large to log: %s bytes' % ele_size
else:
return ele
def _ask(self, method, uri, auth=None, params=None, data=None,
headers=None, files=None, content_type_json=False):
"""
Makes an HTTP request to Multyvac.
:param method: HTTP Verb.
:param uri: Resource path.
:param auth: Authentication override. If not specified, falls back to
any credentials available in the MultyvacConfigModule.
:param params: Query string parameters specified as a dict.
:param data: Specify as dict. If not a JSON request, the dict is
            form encoded and put into the body (typical POST). If it's a JSON
request, then the data is serialized JSON in the request body.
        :param files: Dict mapping form field names to (filename, content)
            tuples for files that should be uploaded as part of a multipart
            request.
:param content_type_json: Whether the request body should be encoded as
JSON, along with the appropriate content-type header. If False,
regular form encoding is used.
"""
if content_type_json:
headers = headers or {}
headers['content-type'] = 'application/json'
final_data = json.dumps(data)
else:
final_data = data
attempt = 0
max_attempts = 5
while True:
self._log_ask(method, uri, params, data, headers, files)
try:
r = self._ask_helper(method,
uri,
auth=auth,
params=params,
data=final_data,
headers=headers,
files=files)
return r
except (RequestError, ConnectionError) as e:
attempt += 1
min_delay = 1.0
if isinstance(e, RequestError) and e.http_status_code == 429:
# Add another attempt if error was due to rate limiting.
# This also increases the range of exponential backoff.
max_attempts += 1
min_delay = 5.0
if ((isinstance(e, ConnectionError) or e.retry)
and attempt < max_attempts):
delay = max(2**attempt * random.random(), min_delay)
self._logger.info('Request failed. Retrying in %.1fs',
delay)
time.sleep(delay)
continue
else:
raise
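    # Backoff sketch: delay = max(2**attempt * random(), min_delay), so the
    # waits grow roughly as up-to-2s, 4s, 8s, 16s across the default five
    # attempts (floored at 1s, or at 5s once rate limiting is seen).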
def _ask_helper(self, method, uri, auth, params, data, headers, files):
"""See _ask()."""
if not auth:
auth = self.config.get_auth()
r = self._get_session_method(method)(
self.config.api_url + uri,
auth=auth,
params=params,
data=data,
headers=headers,
files=files,
)
try:
obj = r.json()
except ValueError:
if r.status_code >= 500:
# Retry on 5** error codes returning non-JSON (probably HTML)
# Bad gateway is a common example where we want to do this.
raise RequestError(r.status_code,
None,
r.text,
retry=True)
else:
# Unexpected error
raise RequestError(r.status_code,
None,
'Could not parse body',
hint=r.text)
if 'error' in obj:
raise RequestError(r.status_code,
obj['error']['code'],
obj['error']['message'],
obj['error'].get('hint'),
obj['error'].get('retry'))
return obj
def _sync_up(self, local_path, remote_address, remote_path, port):
"""Sync from local path to Multyvac."""
dest = 'multyvac@{address}:{path}'.format(address=remote_address,
path=remote_path)
return self._sync(local_path, dest, port)
def _sync_down(self, remote_address, remote_path, port, local_path):
"""Sync from Multyvac to local path."""
src = 'multyvac@{address}:{path}'.format(address=remote_address,
path=remote_path)
return self._sync(src, local_path, port)
def _sync(self, src, dest, port):
"""Sync from source to destination using rsync."""
attempt = 0
max_attempts = 5
while True:
try:
return self._sync_helper(src, dest, port)
except SyncError as e:
attempt += 1
# connection refused errors return 255
if e.exit_status == 255 and attempt < max_attempts:
delay = 2**attempt * random.random()
self._logger.info('Sync failed. Retrying in %.1fs',
delay)
time.sleep(delay)
continue
else:
raise
def _sync_helper(self, src, dest, port):
"""The port might apply to either the src or the dest, depending on
which one is remote."""
on_windows = os.name == 'nt'
cmd = ('{rsync_bin} -avz -L -e "{ssh_bin} -o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no -p {port} -i {key_path}" {chmod} '
'{src} {dest}'.format(
rsync_bin=self._rsync_bin,
ssh_bin=self._ssh_bin,
port=port,
key_path=regularize_path(self.config.path_to_private_key()),
chmod='--chmod=u+rwx' if on_windows else '',
src=src,
dest=dest)
)
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# close_fds not supported on Windows with stdout/stderr redirection
close_fds=not on_windows,
shell=True,
)
_, stderr = p.communicate()
if p.poll() != 0:
self._logger.info('Sync had error:\n%s',
stderr)
raise SyncError(p.poll(), stderr)
def on_multyvac(self):
"""Returns True if this process is currently running on Multyvac."""
return os.getenv('ON_MULTYVAC') == 'true'
def send_log_to_support(self):
"""Sends this machine's log file to Multyvac support."""
log_path = os.path.join(self.config.get_multyvac_path(),
'log/multyvac.log')
if os.path.exists(log_path):
with open(log_path) as f:
files = {'file': ('multyvac.log', f.read())}
self._ask(self._ASK_POST,
'/report/client_log/',
files=files)
return True
else:
return False
class MultyvacModule(object):
"""All modules should extend this class."""
def __init__(self, multyvac):
self.multyvac = multyvac
logger_name = self.__class__.__name__.lower()[:-len('module')]
self._logger = logging.getLogger('multyvac.%s' % logger_name)
@staticmethod
def clear_null_entries(d):
for k, v in d.items():
if v is None:
del d[k]
@staticmethod
def convert_str_to_datetime(s):
try:
return datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
# FIXME
return datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
@staticmethod
def check_success(r):
return r['status'] == 'ok'
@staticmethod
def is_iterable_list(obj):
return hasattr(obj, '__iter__')
@staticmethod
def list_chunker(seq, size):
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
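    # e.g. list(MultyvacModule.list_chunker([0, 1, 2, 3, 4], 2))
    #   -> [[0, 1], [2, 3], [4]]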
class MultyvacModel(object):
def __init__(self, multyvac=None, **kwargs):
if multyvac:
self.multyvac = multyvac
else:
raise Exception('Needs multyvac object for now')
def __str__(self):
return repr(self)
| |
import time
from django.http import HttpResponseNotAllowed, HttpResponseForbidden, HttpResponse, HttpResponseBadRequest
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django import get_version as django_version
from django.core.mail import send_mail, mail_admins
from django.conf import settings
from django.utils.translation import ugettext as _
from django.template import loader, TemplateDoesNotExist
from django.contrib.sites.models import Site
from decorator import decorator
from datetime import datetime, timedelta
__version__ = '0.2.3rc1'
def get_version():
return __version__
def format_error(error):
return u"Piston/%s (Django %s) crash report:\n\n%s" % \
(get_version(), django_version(), error)
class rc_factory(object):
"""
Status codes.
"""
CODES = dict(ALL_OK = ('OK', 200),
CREATED = ('Created', 201),
DELETED = ('', 204), # 204 says "Don't send a body!"
BAD_REQUEST = ('Bad Request', 400),
FORBIDDEN = ('Forbidden', 401),
NOT_FOUND = ('Not Found', 404),
DUPLICATE_ENTRY = ('Conflict/Duplicate', 409),
NOT_HERE = ('Gone', 410),
INTERNAL_ERROR = ('Internal Error', 500),
NOT_IMPLEMENTED = ('Not Implemented', 501),
THROTTLED = ('Throttled', 503))
def __getattr__(self, attr):
"""
Returns a fresh `HttpResponse` when getting
an "attribute". This is backwards compatible
with 0.2, which is important.
"""
try:
(r, c) = self.CODES.get(attr)
except TypeError:
raise AttributeError(attr)
return HttpResponse(r, content_type='text/plain', status=c)
rc = rc_factory()
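# e.g. a handler method can simply `return rc.CREATED` to answer with an
# HTTP 201 whose plain-text body is "Created".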
class FormValidationError(Exception):
def __init__(self, form):
self.form = form
class HttpStatusCode(Exception):
def __init__(self, response):
self.response = response
def validate(v_form, operation='POST'):
@decorator
def wrap(f, self, request, *a, **kwa):
form = v_form(getattr(request, operation))
if form.is_valid():
return f(self, request, *a, **kwa)
else:
raise FormValidationError(form)
return wrap
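# Usage sketch (hedged; MyForm and the handler class are hypothetical):
#   class MyHandler(BaseHandler):
#       @validate(MyForm, 'POST')
#       def create(self, request, *args, **kwargs):
#           ...  # runs only if MyForm(request.POST) validates;
#                # otherwise FormValidationError(form) is raised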
def throttle(max_requests, timeout=60*60, extra=''):
"""
Simple throttling decorator, caches
the amount of requests made in cache.
If used on a view where users are required to
log in, the username is used, otherwise the
IP address of the originating request is used.
Parameters::
- `max_requests`: The maximum number of requests
- `timeout`: The timeout for the cache entry (default: 1 hour)
"""
@decorator
def wrap(f, self, request, *args, **kwargs):
if request.user.is_authenticated():
ident = request.user.username
else:
ident = request.META.get('REMOTE_ADDR', None)
if hasattr(request, 'throttle_extra'):
"""
Since we want to be able to throttle on a per-
application basis, it's important that we realize
that `throttle_extra` might be set on the request
object. If so, append the identifier name with it.
"""
ident += ':%s' % str(request.throttle_extra)
if ident:
"""
            Preferably we'd use incr/decr here, since they're
atomic in memcached, but it's in django-trunk so we
can't use it yet. If someone sees this after it's in
stable, you can change it here.
"""
ident += ':%s' % extra
now = time.time()
count, expiration = cache.get(ident, (1, None))
if expiration is None:
expiration = now + timeout
if count >= max_requests and expiration > now:
t = rc.THROTTLED
wait = int(expiration - now)
t.content = 'Throttled, wait %d seconds.' % wait
t['Retry-After'] = wait
return t
cache.set(ident, (count+1, expiration), (expiration - now))
return f(self, request, *args, **kwargs)
return wrap
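# Usage sketch (hedged): allow at most 20 requests per hour per user (or
# per IP for anonymous callers); excess requests get 503 plus Retry-After.
#   class MyHandler(BaseHandler):
#       @throttle(20, timeout=60 * 60)
#       def read(self, request):
#           ...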
def coerce_put_post(request):
"""
Django doesn't particularly understand REST.
In case we send data over PUT, Django won't
actually look at the data and load it. We need
to twist its arm here.
    The try/except abomination here is due to a bug
in mod_python. This should fix it.
"""
if request.method == "PUT":
try:
request.method = "POST"
request._load_post_and_files()
request.method = "PUT"
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST
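# Typical use (a sketch): call coerce_put_post(request) at the top of a view
# that accepts PUT so request.PUT is populated before form data is read.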
class MimerDataException(Exception):
"""
Raised if the content_type and data don't match
"""
pass
class Mimer(object):
TYPES = dict()
def __init__(self, request):
self.request = request
def is_multipart(self):
content_type = self.content_type()
if content_type is not None:
return content_type.lstrip().startswith('multipart')
return False
def loader_for_type(self, ctype):
"""
Gets a function ref to deserialize content
for a certain mimetype.
"""
for loadee, mimes in Mimer.TYPES.iteritems():
for mime in mimes:
if ctype.startswith(mime):
return loadee
def content_type(self):
"""
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
"""
type_formencoded = "application/x-www-form-urlencoded"
ctype = self.request.META.get('CONTENT_TYPE', type_formencoded)
if ctype.startswith(type_formencoded):
return None
return ctype
def translate(self):
"""
Will look at the `Content-type` sent by the client, and maybe
deserialize the contents into the format they sent. This will
work for JSON, YAML, XML and Pickle. Since the data is not just
key-value (and maybe just a list), the data will be placed on
`request.data` instead, and the handler will have to read from
there.
It will also set `request.content_type` so the handler has an easy
way to tell what's going on. `request.content_type` will always be
None for form-encoded and/or multipart form data (what your browser sends.)
"""
ctype = self.content_type()
self.request.content_type = ctype
if not self.is_multipart() and ctype:
loadee = self.loader_for_type(ctype)
if loadee:
try:
self.request.data = loadee(self.request.raw_post_data)
# Reset both POST and PUT from request, as its
# misleading having their presence around.
self.request.POST = self.request.PUT = dict()
                except (TypeError, ValueError):
                    # The loader could not parse the body as this mimetype.
                    raise MimerDataException
else:
self.request.data = None
return self.request
@classmethod
def register(cls, loadee, types):
cls.TYPES[loadee] = types
@classmethod
def unregister(cls, loadee):
return cls.TYPES.pop(loadee)
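# Registration sketch (hedged): make Mimer deserialize JSON request bodies.
#   import json
#   Mimer.register(json.loads, ('application/json',))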
def translate_mime(request):
request = Mimer(request).translate()
def require_mime(*mimes):
"""
Decorator requiring a certain mimetype. There's a nifty
helper called `require_extended` below which requires everything
we support except for post-data via form.
"""
@decorator
def wrap(f, self, request, *args, **kwargs):
m = Mimer(request)
realmimes = set()
rewrite = { 'json': 'application/json',
'yaml': 'application/x-yaml',
'xml': 'text/xml',
'pickle': 'application/python-pickle' }
for idx, mime in enumerate(mimes):
realmimes.add(rewrite.get(mime, mime))
if not m.content_type() in realmimes:
return rc.BAD_REQUEST
return f(self, request, *args, **kwargs)
return wrap
require_extended = require_mime('json', 'yaml', 'xml', 'pickle')
def send_consumer_mail(consumer):
"""
Send a consumer an email depending on what their status is.
"""
try:
subject = settings.PISTON_OAUTH_EMAIL_SUBJECTS[consumer.status]
except AttributeError:
subject = "Your API Consumer for %s " % Site.objects.get_current().name
if consumer.status == "accepted":
subject += "was accepted!"
elif consumer.status == "canceled":
subject += "has been canceled."
elif consumer.status == "rejected":
subject += "has been rejected."
else:
subject += "is awaiting approval."
template = "piston/mails/consumer_%s.txt" % consumer.status
try:
body = loader.render_to_string(template,
{ 'consumer' : consumer, 'user' : consumer.user })
except TemplateDoesNotExist:
"""
They haven't set up the templates, which means they might not want
these emails sent.
"""
return
try:
sender = settings.PISTON_FROM_EMAIL
except AttributeError:
sender = settings.DEFAULT_FROM_EMAIL
if consumer.user:
send_mail(_(subject), body, sender, [consumer.user.email], fail_silently=True)
if consumer.status == 'pending' and len(settings.ADMINS):
mail_admins(_(subject), body, fail_silently=True)
if settings.DEBUG and consumer.user:
print "Mail being sent, to=%s" % consumer.user.email
print "Subject: %s" % _(subject)
print body
| |
#! /usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
from errno import EEXIST
from itertools import islice
from operator import itemgetter
from os import mkdir
from os.path import basename, abspath, dirname, exists, join as pathjoin
from sys import argv as sys_argv, exit, stderr, stdout
from textwrap import wrap
from time import time
from datetime import timedelta
import optparse
import math
from six.moves import zip as izip
from six.moves import input
from swift.common import exceptions
from swift.common.ring import RingBuilder, Ring, RingData
from swift.common.ring.builder import MAX_BALANCE
from swift.common.ring.utils import validate_args, \
validate_and_normalize_ip, build_dev_from_opts, \
parse_builder_ring_filename_args, parse_search_value, \
parse_search_values_from_opts, parse_change_values_from_opts, \
dispersion_report, parse_add_value
from swift.common.utils import lock_parent_directory
MAJOR_VERSION = 1
MINOR_VERSION = 3
EXIT_SUCCESS = 0
EXIT_WARNING = 1
EXIT_ERROR = 2
global argv, backup_dir, builder, builder_file, ring_file
argv = backup_dir = builder = builder_file = ring_file = None
def format_device(dev):
"""
Format a device for display.
"""
copy_dev = dev.copy()
for key in ('ip', 'replication_ip'):
if ':' in copy_dev[key]:
copy_dev[key] = '[' + copy_dev[key] + ']'
return ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)sR'
'%(replication_ip)s:%(replication_port)s/%(device)s_'
'"%(meta)s"' % copy_dev)
def _parse_search_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
search_values = {}
if len(args) > 0:
if new_cmd_format or len(args) != 1:
print(Commands.search.__doc__.strip())
exit(EXIT_ERROR)
search_values = parse_search_value(args[0])
else:
search_values = parse_search_values_from_opts(opts)
return search_values
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _find_parts(devs):
devs = [d['id'] for d in devs]
if not devs or not builder._replica2part2dev:
return None
partition_count = {}
for replica in builder._replica2part2dev:
for partition, device in enumerate(replica):
if device in devs:
if partition not in partition_count:
partition_count[partition] = 0
partition_count[partition] += 1
# Sort by number of found replicas to keep the output format
sorted_partition_count = sorted(
partition_count.items(), key=itemgetter(1), reverse=True)
return sorted_partition_count
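# e.g. for devices with ids {1, 4}, a partition replicated on both yields the
# entry (partition, 2); entries are returned sorted by match count, descending.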
def _parse_list_parts_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print(Commands.list_parts.__doc__.strip())
exit(EXIT_ERROR)
for arg in args:
devs.extend(
builder.search_devs(parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
return devs
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _parse_add_values(argvish):
"""
Parse devices to add as specified on the command line.
Will exit on error and spew warnings.
:returns: array of device dicts
"""
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
parsed_devs = []
if len(args) > 0:
if new_cmd_format or len(args) % 2 != 0:
print(Commands.add.__doc__.strip())
exit(EXIT_ERROR)
devs_and_weights = izip(islice(args, 0, len(args), 2),
islice(args, 1, len(args), 2))
for devstr, weightstr in devs_and_weights:
dev_dict = parse_add_value(devstr)
if dev_dict['region'] is None:
stderr.write('WARNING: No region specified for %s. '
'Defaulting to region 1.\n' % devstr)
dev_dict['region'] = 1
if dev_dict['replication_ip'] is None:
dev_dict['replication_ip'] = dev_dict['ip']
if dev_dict['replication_port'] is None:
dev_dict['replication_port'] = dev_dict['port']
weight = float(weightstr)
if weight < 0:
raise ValueError('Invalid weight value: %s' % devstr)
dev_dict['weight'] = weight
parsed_devs.append(dev_dict)
else:
parsed_devs.append(build_dev_from_opts(opts))
return parsed_devs
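# Example of the positional add format parsed above (a sketch):
#   swift-ring-builder object.builder add r1z2-10.0.0.1:6000/sdb1 100.0
# i.e. alternating <add-value> <weight> pairs; a missing region defaults
# to 1 with a warning.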
def _set_weight_values(devs, weight):
if not devs:
print('Search value matched 0 devices.\n'
'The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
if len(devs) > 1:
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
if input('Are you sure you want to update the weight for '
'these %s devices? (y/N) ' % len(devs)) != 'y':
print('Aborting device modifications')
exit(EXIT_ERROR)
for dev in devs:
builder.set_dev_weight(dev['id'], weight)
print('%s weight set to %s' % (format_device(dev),
dev['weight']))
def _parse_set_weight_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.set_weight.__doc__.strip())
exit(EXIT_ERROR)
devs_and_weights = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, weightstr in devs_and_weights:
devs.extend(builder.search_devs(
parse_search_value(devstr)) or [])
weight = float(weightstr)
_set_weight_values(devs, weight)
else:
if len(args) != 1:
print(Commands.set_weight.__doc__.strip())
exit(EXIT_ERROR)
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
weight = float(args[0])
_set_weight_values(devs, weight)
except ValueError as e:
print(e)
exit(EXIT_ERROR)
def _set_info_values(devs, change):
if not devs:
print("Search value matched 0 devices.\n"
"The on-disk ring builder is unchanged.")
exit(EXIT_ERROR)
if len(devs) > 1:
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
if input('Are you sure you want to update the info for '
'these %s devices? (y/N) ' % len(devs)) != 'y':
print('Aborting device modifications')
exit(EXIT_ERROR)
for dev in devs:
orig_dev_string = format_device(dev)
test_dev = dict(dev)
for key in change:
test_dev[key] = change[key]
for check_dev in builder.devs:
if not check_dev or check_dev['id'] == test_dev['id']:
continue
if check_dev['ip'] == test_dev['ip'] and \
check_dev['port'] == test_dev['port'] and \
check_dev['device'] == test_dev['device']:
print('Device %d already uses %s:%d/%s.' %
(check_dev['id'], check_dev['ip'],
check_dev['port'], check_dev['device']))
exit(EXIT_ERROR)
for key in change:
dev[key] = change[key]
print('Device %s is now %s' % (orig_dev_string,
format_device(dev)))
def _parse_set_info_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.search.__doc__.strip())
exit(EXIT_ERROR)
searches_and_changes = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for search_value, change_value in searches_and_changes:
devs = builder.search_devs(parse_search_value(search_value))
change = {}
ip = ''
if change_value and change_value[0].isdigit():
i = 1
while (i < len(change_value) and
change_value[i] in '0123456789.'):
i += 1
ip = change_value[:i]
change_value = change_value[i:]
elif change_value and change_value.startswith('['):
i = 1
while i < len(change_value) and change_value[i] != ']':
i += 1
i += 1
ip = change_value[:i].lstrip('[').rstrip(']')
change_value = change_value[i:]
if ip:
change['ip'] = validate_and_normalize_ip(ip)
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
i += 1
change['port'] = int(change_value[1:i])
change_value = change_value[i:]
if change_value.startswith('R'):
change_value = change_value[1:]
replication_ip = ''
if change_value and change_value[0].isdigit():
i = 1
while (i < len(change_value) and
change_value[i] in '0123456789.'):
i += 1
replication_ip = change_value[:i]
change_value = change_value[i:]
elif change_value and change_value.startswith('['):
i = 1
while i < len(change_value) and change_value[i] != ']':
i += 1
i += 1
replication_ip = \
change_value[:i].lstrip('[').rstrip(']')
change_value = change_value[i:]
if replication_ip:
change['replication_ip'] = \
validate_and_normalize_ip(replication_ip)
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
i += 1
change['replication_port'] = int(change_value[1:i])
change_value = change_value[i:]
if change_value.startswith('/'):
i = 1
while i < len(change_value) and change_value[i] != '_':
i += 1
change['device'] = change_value[1:i]
change_value = change_value[i:]
if change_value.startswith('_'):
change['meta'] = change_value[1:]
change_value = ''
if change_value or not change:
raise ValueError('Invalid set info change value: %s' %
repr(argvish[1]))
_set_info_values(devs, change)
else:
devs = builder.search_devs(parse_search_values_from_opts(opts))
change = parse_change_values_from_opts(opts)
_set_info_values(devs, change)
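# Example change value for the positional form above (a sketch):
#   10.0.0.2:6001R10.0.0.2:6011/sdb3_newmeta
# sets ip:port, replication ip:port, device name, and meta, in that order;
# leading components may be omitted.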
def _parse_remove_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print(Commands.remove.__doc__.strip())
exit(EXIT_ERROR)
for arg in args:
devs.extend(builder.search_devs(
parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)))
return devs
except ValueError as e:
print(e)
exit(EXIT_ERROR)
class Commands(object):
def unknown():
print('Unknown command: %s' % argv[2])
exit(EXIT_ERROR)
def create():
"""
swift-ring-builder <builder_file> create <part_power> <replicas>
<min_part_hours>
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
<min_part_hours> is number of hours to restrict moving a partition more
than once.
"""
if len(argv) < 6:
print(Commands.create.__doc__.strip())
exit(EXIT_ERROR)
builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
backup_dir = pathjoin(dirname(builder_file), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
builder.save(pathjoin(backup_dir,
'%d.' % time() + basename(builder_file)))
builder.save(builder_file)
exit(EXIT_SUCCESS)
def default():
"""
swift-ring-builder <builder_file>
Shows information about the ring and the devices within.
Flags:
DEL - marked for removal and will be removed next rebalance.
"""
print('%s, build version %d' % (builder_file, builder.version))
regions = 0
zones = 0
balance = 0
dev_count = 0
if builder.devs:
regions = len(set(d['region'] for d in builder.devs
if d is not None))
zones = len(set((d['region'], d['zone']) for d in builder.devs
if d is not None))
dev_count = len([dev for dev in builder.devs
if dev is not None])
balance = builder.get_balance()
dispersion_trailer = '' if builder.dispersion is None else (
', %.02f dispersion' % (builder.dispersion))
print('%d partitions, %.6f replicas, %d regions, %d zones, '
'%d devices, %.02f balance%s' % (
builder.parts, builder.replicas, regions, zones, dev_count,
balance, dispersion_trailer))
print('The minimum number of hours before a partition can be '
'reassigned is %s (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
print('The overload factor is %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload))
# compare ring file against builder file
if not exists(ring_file):
print('Ring file %s not found, '
'probably it hasn\'t been written yet' % ring_file)
else:
builder_dict = builder.get_ring().to_dict()
try:
ring_dict = RingData.load(ring_file).to_dict()
except Exception as exc:
print('Ring file %s is invalid: %r' % (ring_file, exc))
else:
if builder_dict == ring_dict:
print('Ring file %s is up-to-date' % ring_file)
else:
print('Ring file %s is obsolete' % ring_file)
if builder.devs:
balance_per_dev = builder._build_balance_per_dev()
print('Devices: id region zone ip address port '
'replication ip replication port name '
'weight partitions balance flags meta')
for dev in builder._iter_devs():
flags = 'DEL' if dev in builder._remove_devs else ''
print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f '
'%10s %7.02f %5s %s' %
(dev['id'], dev['region'], dev['zone'], dev['ip'],
dev['port'], dev['replication_ip'],
dev['replication_port'], dev['device'], dev['weight'],
dev['parts'], balance_per_dev[dev['id']], flags,
dev['meta']))
exit(EXIT_SUCCESS)
def search():
"""
swift-ring-builder <builder_file> search <search-value>
or
swift-ring-builder <builder_file> search
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Shows information about matching devices.
"""
if len(argv) < 4:
print(Commands.search.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
devs = builder.search_devs(_parse_search_values(argv[3:]))
if not devs:
print('No matching devices found')
exit(EXIT_ERROR)
print('Devices: id region zone ip address port '
'replication ip replication port name weight partitions '
'balance meta')
weighted_parts = builder.parts * builder.replicas / \
sum(d['weight'] for d in builder.devs if d is not None)
for dev in devs:
if not dev['weight']:
if dev['parts']:
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / \
(dev['weight'] * weighted_parts) - 100.0
print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f %10s '
'%7.02f %s' %
(dev['id'], dev['region'], dev['zone'], dev['ip'],
dev['port'], dev['replication_ip'], dev['replication_port'],
dev['device'], dev['weight'], dev['parts'], balance,
dev['meta']))
exit(EXIT_SUCCESS)
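# Worked example of the balance math above (numbers assumed): with 1024
# partitions, 3 replicas and a total weight of 300, weighted_parts =
# 1024 * 3 / 300 = 10.24. A device of weight 100 holding 1100 parts has
# balance = 100.0 * 1100 / (100 * 10.24) - 100.0 ~= 7.42, i.e. it holds
# about 7.42% more partitions than its weight alone would warrant.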
def list_parts():
"""
swift-ring-builder <builder_file> list_parts <search-value> [<search-value>] ...
or
swift-ring-builder <builder_file> list_parts
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Returns a 2 column list of all the partitions that are assigned to any
of the devices matching the search values given. The first column is
the assigned partition number and the second column is the number of
device matches for that partition. The list is ordered from the most
matches to the least. If there are a lot of devices to match against,
this command could take a while to run.
"""
if len(argv) < 4:
print(Commands.list_parts.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % builder_file)
exit(EXIT_ERROR)
devs = _parse_list_parts_values(argv[3:])
if not devs:
print('No matching devices found')
exit(EXIT_ERROR)
sorted_partition_count = _find_parts(devs)
if not sorted_partition_count:
print('No matching devices found')
exit(EXIT_ERROR)
print('Partition Matches')
for partition, count in sorted_partition_count:
print('%9d %7d' % (partition, count))
exit(EXIT_SUCCESS)
def add():
"""
swift-ring-builder <builder_file> add
[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>
[[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>] ...
Where <r_ip> and <r_port> are replication ip and port.
or
swift-ring-builder <builder_file> add
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
[--replication-ip <r_ip or r_hostname>] [--replication-port <r_port>]
--device <device_name> --weight <weight>
[--meta <meta>]
Adds devices to the ring with the given information. No partitions will be
assigned to the new device until after running 'rebalance'. This is so you
can make multiple device changes and rebalance them all just once.
"""
if len(argv) < 5:
print(Commands.add.__doc__.strip())
exit(EXIT_ERROR)
try:
for new_dev in _parse_add_values(argv[3:]):
for dev in builder.devs:
if dev is None:
continue
if dev['ip'] == new_dev['ip'] and \
dev['port'] == new_dev['port'] and \
dev['device'] == new_dev['device']:
print('Device %d already uses %s:%d/%s.' %
(dev['id'], dev['ip'],
dev['port'], dev['device']))
print("The on-disk ring builder is unchanged.\n")
exit(EXIT_ERROR)
dev_id = builder.add_dev(new_dev)
print('Device %s with %s weight got id %s' %
(format_device(new_dev), new_dev['weight'], dev_id))
except ValueError as err:
print(err)
print('The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
builder.save(builder_file)
exit(EXIT_SUCCESS)
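# Example invocation (illustrative values):
#     swift-ring-builder object.builder add r1z2-10.0.0.1:6200/sdb1 100
# adds device sdb1 on 10.0.0.1:6200 in region 1, zone 2 with weight 100;
# the device receives no partitions until the next 'rebalance'.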
def set_weight():
"""
swift-ring-builder <builder_file> set_weight <search-value> <weight>
[<search-value> <weight>] ...
or
swift-ring-builder <builder_file> set_weight
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Resets the devices' weights. No partitions will be reassigned to or from
the device until after running 'rebalance'. This is so you can make
multiple device changes and rebalance them all just once.
"""
if len(argv) < 5:
print(Commands.set_weight.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
_parse_set_weight_values(argv[3:])
builder.save(builder_file)
exit(EXIT_SUCCESS)
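# Example invocation (illustrative): 'set_weight d74 0' zeroes the
# weight of the device with id 74, the usual first step when draining a
# device before removing it (see Commands.remove below).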
def set_info():
"""
swift-ring-builder <builder_file> set_info
<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
[<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>] ...
or
swift-ring-builder <builder_file> set_info
--ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta>
--change-ip <ip or hostname> --change-port <port>
--change-replication-ip <r_ip or r_hostname>
--change-replication-port <r_port>
--change-device <device_name>
--change-meta <meta>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
For each search-value, resets the matched device's information.
This information isn't used to assign partitions, so you can use
'write_ring' afterward to rewrite the current ring with the newer
device information. Any of the parts are optional in the final
<ip>:<port>/<device_name>_<meta> parameter; just give what you
want to change. For instance, set_info d74 _"snet: 5.6.7.8" would
just update the metadata for device id 74.
"""
if len(argv) < 5:
print(Commands.set_info.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
try:
_parse_set_info_values(argv[3:])
except ValueError as err:
print(err)
exit(EXIT_ERROR)
builder.save(builder_file)
exit(EXIT_SUCCESS)
def remove():
"""
swift-ring-builder <builder_file> remove <search-value> [search-value ...]
or
swift-ring-builder <builder_file> remove
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Removes the device(s) from the ring. This should normally just be used for
a device that has failed. For a device you wish to decommission, it's best
to set its weight to 0, wait for it to drain all its data, then use this
remove command. This will not take effect until after running 'rebalance'.
This is so you can make multiple device changes and rebalance them all just
once.
"""
if len(argv) < 4:
print(Commands.remove.__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
exit(EXIT_ERROR)
devs = _parse_remove_values(argv[3:])
if not devs:
print('Search value matched 0 devices.\n'
'The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
if len(devs) > 1:
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
if input('Are you sure you want to remove these %s '
'devices? (y/N) ' % len(devs)) != 'y':
print('Aborting device removals')
exit(EXIT_ERROR)
for dev in devs:
try:
builder.remove_dev(dev['id'])
except exceptions.RingBuilderError as e:
print('-' * 79)
print(
'An error occurred while removing device with id %d\n'
'This usually means that you attempted to remove\n'
'the last device in a ring. If this is the case,\n'
'consider creating a new ring instead.\n'
'The on-disk ring builder is unchanged.\n'
'Original exception message: %s' %
(dev['id'], e))
print('-' * 79)
exit(EXIT_ERROR)
print('%s marked for removal and will '
'be removed next rebalance.' % format_device(dev))
builder.save(builder_file)
exit(EXIT_SUCCESS)
def rebalance():
"""
swift-ring-builder <builder_file> rebalance [options]
Attempts to rebalance the ring by reassigning partitions that haven't been
recently reassigned.
"""
usage = Commands.rebalance.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('-f', '--force', action='store_true',
help='Force a rebalanced ring to save even '
'if < 1% of parts changed')
parser.add_option('-s', '--seed', help="seed to use for rebalance")
parser.add_option('-d', '--debug', action='store_true',
help="print debug information")
options, args = parser.parse_args(argv)
def get_seed(index):
if options.seed:
return options.seed
try:
return args[index]
except IndexError:
pass
if options.debug:
logger = logging.getLogger("swift.ring.builder")
logger.disabled = False
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(stdout)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
if builder.min_part_seconds_left > 0 and not options.force:
print('No partitions could be reassigned.')
print('The time between rebalances must be at least '
'min_part_hours: %s hours (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
exit(EXIT_WARNING)
devs_changed = builder.devs_changed
try:
last_balance = builder.get_balance()
parts, balance, removed_devs = builder.rebalance(seed=get_seed(3))
except exceptions.RingBuilderError as e:
print('-' * 79)
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print('-' * 79)
exit(EXIT_ERROR)
if not (parts or options.force or removed_devs):
print('No partitions could be reassigned.')
print('There is no need to do so at this time')
exit(EXIT_WARNING)
# If a device's weight is set to zero, its balance is reported as the
# special value MAX_BALANCE until the zero-weighted device has given up
# all of its partitions, so we cannot rely on the balance having
# changed. We must also check whether balance or last_balance is that
# special value.
if not options.force and \
not devs_changed and abs(last_balance - balance) < 1 and \
not (last_balance == MAX_BALANCE and balance == MAX_BALANCE):
print('Cowardly refusing to save rebalance as it did not change '
'at least 1%.')
exit(EXIT_WARNING)
try:
builder.validate()
except exceptions.RingValidationError as e:
print('-' * 79)
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print('-' * 79)
exit(EXIT_ERROR)
print('Reassigned %d (%.02f%%) partitions. '
'Balance is now %.02f. '
'Dispersion is now %.02f' % (
parts, 100.0 * parts / builder.parts,
balance,
builder.dispersion))
status = EXIT_SUCCESS
if builder.dispersion > 0:
print('-' * 79)
print(
'NOTE: Dispersion of %.06f indicates some parts are not\n'
' optimally dispersed.\n\n'
' You may want to adjust some device weights, increase\n'
' the overload or review the dispersion report.' %
builder.dispersion)
status = EXIT_WARNING
print('-' * 79)
elif balance > 5 and balance / 100.0 > builder.overload:
print('-' * 79)
print('NOTE: Balance of %.02f indicates you should push this ' %
balance)
print(' ring, wait at least %d hours, and rebalance/repush.'
% builder.min_part_hours)
print('-' * 79)
status = EXIT_WARNING
ts = time()
builder.get_ring().save(
pathjoin(backup_dir, '%d.' % ts + basename(ring_file)))
builder.save(pathjoin(backup_dir, '%d.' % ts + basename(builder_file)))
builder.get_ring().save(ring_file)
builder.save(builder_file)
exit(status)
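# Illustrative note on the save threshold above (numbers assumed): with
# last_balance 12.00 and a new balance of 11.50, abs(12.00 - 11.50) =
# 0.50 < 1, so without --force (and with no device changes) the
# rebalance result is not saved.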
def dispersion():
"""
swift-ring-builder <builder_file> dispersion <search_filter> [options]
Output report on dispersion.
--verbose option will display dispersion graph broken down by tier
You can filter which tiers are evaluated to drill down using a regex
in the optional search_filter argument, e.g.
swift-ring-builder <builder_file> dispersion "r\d+z\d+$" -v
... would only display rows for the zone tiers
swift-ring-builder <builder_file> dispersion ".*\-[^/]*$" -v
... would only display rows for the server tiers
The report's columns are:
Tier : the name of the tier
parts : the total number of partitions with assignment in the tier
% : the percentage of parts in the tier with replicas over assigned
max : maximum replicas a part should have assigned at the tier
0 - N : the number of parts with that many replicas assigned
e.g.
Tier: parts % max 0 1 2 3
r1z1 1022 79.45 1 2 210 784 28
r1z1 has 1022 total parts assigned, 79% of them have more than the
recommended max replica count of 1 assigned. Only 2 parts in the ring
are *not* assigned in this tier (0 replica count), 210 parts have
the recommended replica count of 1, 784 have 2 replicas, and 28 sadly
have all three replicas in this tier.
"""
status = EXIT_SUCCESS
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % builder_file)
exit(EXIT_ERROR)
usage = Commands.dispersion.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('-v', '--verbose', action='store_true',
help='Display dispersion report for tiers')
options, args = parser.parse_args(argv)
if args[3:]:
search_filter = args[3]
else:
search_filter = None
report = dispersion_report(builder, search_filter=search_filter,
verbose=options.verbose)
print('Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % (
builder.dispersion, builder.get_balance(), builder.overload * 100))
print('Required overload is %.6f%%' % (
builder.get_required_overload() * 100))
if report['worst_tier']:
status = EXIT_WARNING
print('Worst tier is %.06f (%s)' % (report['max_dispersion'],
report['worst_tier']))
if report['graph']:
replica_range = range(int(math.ceil(builder.replicas + 1)))
part_count_width = '%%%ds' % max(len(str(builder.parts)), 5)
replica_counts_tmpl = ' '.join(part_count_width for i in
replica_range)
tiers = (tier for tier, _junk in report['graph'])
tier_width = max(max(map(len, tiers)), 30)
header_line = ('%-' + str(tier_width) +
's ' + part_count_width +
' %6s %6s ' + replica_counts_tmpl) % tuple(
['Tier', 'Parts', '%', 'Max'] + list(replica_range))
underline = '-' * len(header_line)
print(underline)
print(header_line)
print(underline)
for tier_name, dispersion in report['graph']:
replica_counts_repr = replica_counts_tmpl % tuple(
dispersion['replicas'])
template = ''.join([
'%-', str(tier_width), 's ',
part_count_width,
' %6.02f %6d %s',
])
args = (
tier_name,
dispersion['placed_parts'],
dispersion['dispersion'],
dispersion['max_replicas'],
replica_counts_repr,
)
print(template % args)
exit(status)
def validate():
"""
swift-ring-builder <builder_file> validate
Just runs the validation routines on the ring.
"""
builder.validate()
exit(EXIT_SUCCESS)
def write_ring():
"""
swift-ring-builder <builder_file> write_ring
Just rewrites the distributable ring file. This is done automatically after
a successful rebalance, so really this is only useful after one or more
'set_info' calls when no rebalance is needed but you want to send out the
new device information.
"""
ring_data = builder.get_ring()
if not ring_data._replica2part2dev_id:
if ring_data.devs:
print('Warning: Writing a ring with no partition '
'assignments but with devices; did you forget to run '
'"rebalance"?')
else:
print('Warning: Writing an empty ring')
ring_data.save(
pathjoin(backup_dir, '%d.' % time() + basename(ring_file)))
ring_data.save(ring_file)
exit(EXIT_SUCCESS)
def write_builder():
"""
swift-ring-builder <ring_file> write_builder [min_part_hours]
Recreate a builder from a ring file (lossy) if you lost your builder
backups. (Protip: don't lose your builder backups).
[min_part_hours] is one of those numbers lost to the builder,
you can change it with set_min_part_hours.
"""
if exists(builder_file):
print('Cowardly refusing to overwrite existing '
'Ring Builder file: %s' % builder_file)
exit(EXIT_ERROR)
if len(argv) > 3:
min_part_hours = int(argv[3])
else:
stderr.write("WARNING: default min_part_hours may not match "
"the value in the lost builder.\n")
min_part_hours = 24
ring = Ring(ring_file)
for dev in ring.devs:
if dev is None:
continue
dev.update({
'parts': 0,
'parts_wanted': 0,
})
builder_dict = {
'part_power': 32 - ring._part_shift,
'replicas': float(ring.replica_count),
'min_part_hours': min_part_hours,
'parts': ring.partition_count,
'devs': ring.devs,
'devs_changed': False,
'version': 0,
'_replica2part2dev': ring._replica2part2dev_id,
'_last_part_moves_epoch': None,
'_last_part_moves': None,
'_last_part_gather_start': 0,
'_remove_devs': [],
}
builder = RingBuilder.from_dict(builder_dict)
for parts in builder._replica2part2dev:
for dev_id in parts:
builder.devs[dev_id]['parts'] += 1
builder.save(builder_file)
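# Illustrative note on the reconstruction above (numbers assumed): a
# ring with _part_shift = 22 yields part_power = 32 - 22 = 10, i.e.
# 2^10 = 1024 partitions; each device's 'parts' count is then rebuilt
# by walking _replica2part2dev.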
def pretend_min_part_hours_passed():
"""
swift-ring-builder <builder_file> pretend_min_part_hours_passed
Resets the clock on the last time a rebalance happened, thus
circumventing the min_part_hours check.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you run this command and deploy rebalanced rings before a replication
pass completes, you may introduce unavailability in your cluster. This
has an end-user impact.
"""
builder.pretend_min_part_hours_passed()
builder.save(builder_file)
exit(EXIT_SUCCESS)
def set_min_part_hours():
"""
swift-ring-builder <builder_file> set_min_part_hours <hours>
Changes the <min_part_hours> to the given <hours>. This should be set to
however long a full replication/update cycle takes. We're working on a way
to determine this more easily than scanning logs.
"""
if len(argv) < 4:
print(Commands.set_min_part_hours.__doc__.strip())
exit(EXIT_ERROR)
builder.change_min_part_hours(int(argv[3]))
print('The minimum number of hours before a partition can be '
'reassigned is now set to %s' % argv[3])
builder.save(builder_file)
exit(EXIT_SUCCESS)
def set_replicas():
"""
swift-ring-builder <builder_file> set_replicas <replicas>
Changes the replica count to the given <replicas>. <replicas> may
be a floating-point value, in which case some partitions will have
floor(<replicas>) replicas and some will have ceiling(<replicas>)
in the correct proportions.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print(Commands.set_replicas.__doc__.strip())
exit(EXIT_ERROR)
new_replicas = argv[3]
try:
new_replicas = float(new_replicas)
except ValueError:
print(Commands.set_replicas.__doc__.strip())
print("\"%s\" is not a valid number." % new_replicas)
exit(EXIT_ERROR)
if new_replicas < 1:
print("Replica count must be at least 1.")
exit(EXIT_ERROR)
builder.set_replicas(new_replicas)
print('The replica count is now %.6f.' % builder.replicas)
print('The change will take effect after the next rebalance.')
builder.save(builder_file)
exit(EXIT_SUCCESS)
def set_overload():
"""
swift-ring-builder <builder_file> set_overload <overload>[%]
Changes the overload factor to the given <overload>.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print(Commands.set_overload.__doc__.strip())
exit(EXIT_ERROR)
new_overload = argv[3]
if new_overload.endswith('%'):
percent = True
new_overload = new_overload.rstrip('%')
else:
percent = False
try:
new_overload = float(new_overload)
except ValueError:
print(Commands.set_overload.__doc__.strip())
print("%r is not a valid number." % new_overload)
exit(EXIT_ERROR)
if percent:
new_overload *= 0.01
if new_overload < 0:
print("Overload must be non-negative.")
exit(EXIT_ERROR)
if new_overload > 1 and not percent:
print("!?! Warning overload is greater than 100% !?!")
status = EXIT_WARNING
else:
status = EXIT_SUCCESS
builder.set_overload(new_overload)
print('The overload factor is now %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload))
print('The change will take effect after the next rebalance.')
builder.save(builder_file)
exit(status)
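# Example invocation (illustrative): 'set_overload 10%' stores an
# overload factor of 0.10 and reports it back as '10.00% (0.100000)';
# the bare form 'set_overload 0.1' is equivalent.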
def main(arguments=None):
global argv, backup_dir, builder, builder_file, ring_file
if arguments:
argv = arguments
else:
argv = sys_argv
if len(argv) < 2:
print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" %
globals())
print(Commands.default.__doc__.strip())
print()
cmds = [c for c, f in Commands.__dict__.items()
if f.__doc__ and not c.startswith('_') and c != 'default']
cmds.sort()
for cmd in cmds:
print(Commands.__dict__[cmd].__doc__.strip())
print()
print(parse_search_value.__doc__.strip())
print()
for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
subsequent_indent=' '):
print(line)
print('Exit codes: 0 = operation successful\n'
' 1 = operation completed with warnings\n'
' 2 = error')
exit(EXIT_SUCCESS)
builder_file, ring_file = parse_builder_ring_filename_args(argv)
if builder_file != argv[1]:
print('Note: using %s instead of %s as builder file' % (
builder_file, argv[1]))
try:
builder = RingBuilder.load(builder_file)
except exceptions.UnPicklingError as e:
print(e)
exit(EXIT_ERROR)
except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
print(e)
exit(EXIT_ERROR)
except Exception as e:
print('Problem occurred while reading builder file: %s. %s' %
(builder_file, e))
exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(builder_file), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
if len(argv) == 2:
command = "default"
else:
command = argv[2]
if argv[0].endswith('-safe'):
try:
with lock_parent_directory(abspath(builder_file), 15):
Commands.__dict__.get(command, Commands.unknown.__func__)()
except exceptions.LockTimeout:
print("Ring/builder dir currently locked.")
exit(2)
else:
Commands.__dict__.get(command, Commands.unknown.__func__)()
if __name__ == '__main__':
main()
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import os
import mimetypes
import six
if six.PY2:
from urllib import urlencode
elif six.PY3:
from urllib.parse import urlencode
from tweepy.binder import bind_api
from tweepy.error import TweepError
from tweepy.parsers import ModelParser, Parser, RawParser
from tweepy.utils import list_to_csv
IMAGE_MIMETYPES = ('image/gif', 'image/jpeg', 'image/png', 'image/webp')
CHUNKED_MIMETYPES = ('image/gif', 'image/jpeg', 'image/png', 'image/webp', 'video/mp4')
class API(object):
"""Twitter API"""
max_size_standard = 5120 # standard uploads must be less than 5 MB (size in KB)
max_size_chunked = 15360 # chunked uploads must be less than 15 MB (size in KB)
def __init__(self, auth_handler=None,
host='api.twitter.com', search_host='search.twitter.com',
upload_host='upload.twitter.com', cache=None, api_root='/1.1',
search_root='', upload_root='/1.1', retry_count=0,
retry_delay=0, retry_errors=None, timeout=60, parser=None,
compression=False, wait_on_rate_limit=False,
wait_on_rate_limit_notify=False, proxy=''):
""" Api instance Constructor
:param auth_handler:
:param host: url of the server of the rest api, default:'api.twitter.com'
:param search_host: url of the search server, default:'search.twitter.com'
:param upload_host: url of the upload server, default:'upload.twitter.com'
:param cache: Cache to query if a GET method is used, default:None
:param api_root: suffix of the api version, default:'/1.1'
:param search_root: suffix of the search version, default:''
:param upload_root: suffix of the upload version, default:'/1.1'
:param retry_count: number of allowed retries, default:0
:param retry_delay: delay in second between retries, default:0
:param retry_errors: default:None
:param timeout: delay before to consider the request as timed out in seconds, default:60
:param parser: ModelParser instance to parse the responses, default:None
:param compression: If the response is compressed, default:False
:param wait_on_rate_limit: If the api wait when it hits the rate limit, default:False
:param wait_on_rate_limit_notify: If the api print a notification when the rate limit is hit, default:False
:param proxy: Url to use as proxy during the HTTP request, default:''
:raise TypeError: If the given parser is not a ModelParser instance.
"""
self.auth = auth_handler
self.host = host
self.search_host = search_host
self.upload_host = upload_host
self.api_root = api_root
self.search_root = search_root
self.upload_root = upload_root
self.cache = cache
self.compression = compression
self.retry_count = retry_count
self.retry_delay = retry_delay
self.retry_errors = retry_errors
self.timeout = timeout
self.wait_on_rate_limit = wait_on_rate_limit
self.wait_on_rate_limit_notify = wait_on_rate_limit_notify
self.parser = parser or ModelParser()
self.proxy = {}
if proxy:
self.proxy['https'] = proxy
# Attempt to explain more clearly the parser argument requirements
# https://github.com/tweepy/tweepy/issues/421
parser_type = Parser
if not isinstance(self.parser, parser_type):
raise TypeError(
'"parser" argument has to be an instance of "{required}".'
' It is currently a {actual}.'.format(
required=parser_type.__name__,
actual=type(self.parser)
)
)
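# Minimal usage sketch (illustrative; assumes OAuth credentials are
# available -- the placeholder names below are hypothetical):
#
#     from tweepy import OAuthHandler
#     auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
#     auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
#     api = API(auth, retry_count=3, retry_delay=5,
#               wait_on_rate_limit=True)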
@property
def home_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/home_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/home_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
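# Note on the pattern above: properties such as home_timeline return the
# callable produced by bind_api, so they are invoked like methods, e.g.
# (illustrative) api.home_timeline(count=20, since_id=123456).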
def statuses_lookup(self, id_, include_entities=None,
trim_user=None, map_=None):
return self._statuses_lookup(list_to_csv(id_), include_entities,
trim_user, map_)
@property
def _statuses_lookup(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/lookup
:allowed_param:'id', 'include_entities', 'trim_user', 'map'
"""
return bind_api(
api=self,
path='/statuses/lookup.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'include_entities', 'trim_user', 'map'],
require_auth=True
)
@property
def user_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/user_timeline
:allowed_param:'id', 'user_id', 'screen_name', 'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/statuses/user_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'since_id',
'max_id', 'count', 'include_rts']
)
@property
def mentions_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/mentions_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/mentions_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def related_results(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/related_results/show/%3id.format
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/related_results/show/{id}.json',
payload_type='relation', payload_list=True,
allowed_param=['id'],
require_auth=False
)
@property
def retweets_of_me(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweets_of_me
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets_of_me.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def get_status(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/show/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/show.json',
payload_type='status',
allowed_param=['id']
)
def update_status(self, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/update
:allowed_param:'status', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'auto_populate_reply_metadata', 'lat', 'long', 'source', 'place_id', 'display_coordinates', 'media_ids'
"""
post_data = {}
media_ids = kwargs.pop("media_ids", None)
if media_ids is not None:
post_data["media_ids"] = list_to_csv(media_ids)
return bind_api(
api=self,
path='/statuses/update.json',
method='POST',
payload_type='status',
allowed_param=['status', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'auto_populate_reply_metadata', 'lat', 'long', 'source', 'place_id', 'display_coordinates'],
require_auth=True
)(post_data=post_data, *args, **kwargs)
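# Illustrative call (placeholder id): api.update_status(
#     status='hello world', media_ids=[710511363345354753])
# sends media_ids as the CSV string '710511363345354753' in the POST
# body via list_to_csv.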
def media_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:reference: https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
mime, _ = mimetypes.guess_type(filename)
try:
size = os.path.getsize(filename)
except OSError:
f.seek(0, 2)
size = f.tell()
f.seek(0)
# max_size_standard is expressed in KB; getsize()/tell() return bytes
if mime in IMAGE_MIMETYPES and size < self.max_size_standard * 1024:
return self.image_upload(filename, f=f, *args, **kwargs)
elif mime in CHUNKED_MIMETYPES:
return self.upload_chunked(filename, f=f, *args, **kwargs)
else:
raise TweepError("Can't upload media with mime type %s" % mime)
def image_upload(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/media/upload
:allowed_param:
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, self.max_size_standard, form_field='media', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
def upload_chunked(self, filename, *args, **kwargs):
""" :reference https://dev.twitter.com/rest/reference/post/media/upload-chunked
:allowed_param:
"""
f = kwargs.pop('file', None)
# Media category is dependent on whether media is attached to a tweet
# or to a direct message. Assume tweet by default.
is_direct_message = kwargs.pop('is_direct_message', False)
# Initialize upload (Twitter cannot handle videos > 15 MB)
headers, post_data, fp = API._chunk_media('init', filename, self.max_size_chunked, form_field='media', f=f, is_direct_message=is_direct_message)
kwargs.update({ 'headers': headers, 'post_data': post_data })
# Send the INIT request
media_info = bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# If a media ID has been generated, we can send the file
if media_info.media_id:
# The default chunk size is 1 MB and can be overridden with the
# chunk_size keyword argument. The minimum chunk size is 16 KB, which
# keeps the maximum number of chunks under 999.
chunk_size = kwargs.pop('chunk_size', 1024 * 1024)
chunk_size = max(chunk_size, 16 * 1024)
fsize = os.path.getsize(filename)
nloops = int(fsize / chunk_size) + (1 if fsize % chunk_size > 0 else 0)
for i in range(nloops):
headers, post_data, fp = API._chunk_media('append', filename, self.max_size_chunked, chunk_size=chunk_size, f=fp, media_id=media_info.media_id, segment_index=i, is_direct_message=is_direct_message)
kwargs.update({ 'headers': headers, 'post_data': post_data, 'parser': RawParser() })
# The APPEND command returns an empty response body
bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
# When all chunks have been sent, we can finalize.
headers, post_data, fp = API._chunk_media('finalize', filename, self.max_size_chunked, media_id=media_info.media_id, is_direct_message=is_direct_message)
kwargs = {'headers': headers, 'post_data': post_data}
# The FINALIZE command returns media information
return bind_api(
api=self,
path='/media/upload.json',
method='POST',
payload_type='media',
allowed_param=[],
require_auth=True,
upload_api=True
)(*args, **kwargs)
else:
return media_info
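# Illustrative arithmetic for the APPEND loop above (file size assumed):
# a 10 MB video with the default 1 MB chunk_size gives
# nloops = int(10485760 / 1048576) + 0 = 10 APPEND requests between the
# INIT and FINALIZE commands.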
def update_with_media(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/update_with_media
:allowed_param:'status', 'possibly_sensitive', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'auto_populate_reply_metadata', 'lat', 'long', 'place_id', 'display_coordinates'
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, 3072, form_field='media[]', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/statuses/update_with_media.json',
method='POST',
payload_type='status',
allowed_param=[
'status', 'possibly_sensitive', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
'auto_populate_reply_metadata', 'lat', 'long', 'place_id', 'display_coordinates'
],
require_auth=True
)(*args, **kwargs)
@property
def destroy_status(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/destroy/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/destroy/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweet(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/retweet/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/retweet/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def unretweet(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/unretweet/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/unretweet/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweets(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweets/%3Aid
:allowed_param:'id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets/{id}.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'count'],
require_auth=True
)
@property
def retweeters(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweeters/ids
:allowed_param:'id', 'cursor', 'stringify_ids'
"""
return bind_api(
api=self,
path='/statuses/retweeters/ids.json',
payload_type='ids',
allowed_param=['id', 'cursor', 'stringify_ids']
)
@property
def get_user(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/show
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/show.json',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name']
)
@property
def get_oembed(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/oembed
:allowed_param:'id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang'
"""
return bind_api(
api=self,
path='/statuses/oembed.json',
payload_type='json',
allowed_param=['id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang']
)
def lookup_users(self, user_ids=None, screen_names=None, include_entities=None):
""" Perform bulk look up of users from user ID or screenname """
post_data = {}
if include_entities is not None:
include_entities = 'true' if include_entities else 'false'
post_data['include_entities'] = include_entities
if user_ids:
post_data['user_id'] = list_to_csv(user_ids)
if screen_names:
post_data['screen_name'] = list_to_csv(screen_names)
return self._lookup_users(post_data=post_data)
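# Illustrative call (placeholder ids): api.lookup_users(
#     user_ids=[783214, 6253282])
# posts user_id='783214,6253282' (via list_to_csv) to /users/lookup.json.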
@property
def _lookup_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/lookup
allowed_param='user_id', 'screen_name', 'include_entities'
"""
return bind_api(
api=self,
path='/users/lookup.json',
payload_type='user', payload_list=True,
method='POST',
allowed_param=['user_id', 'screen_name', 'include_entities']
)
def me(self):
""" Get the authenticated user """
return self.get_user(screen_name=self.auth.get_username())
@property
def search_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/search
:allowed_param:'q', 'count', 'page'
"""
return bind_api(
api=self,
path='/users/search.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['q', 'count', 'page']
)
@property
def suggested_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions/%3Aslug
:allowed_param:'slug', 'lang'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['slug', 'lang']
)
@property
def suggested_categories(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions
:allowed_param:'lang'
"""
return bind_api(
api=self,
path='/users/suggestions.json',
payload_type='category', payload_list=True,
allowed_param=['lang'],
require_auth=True
)
@property
def suggested_users_tweets(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions/%3Aslug/members
:allowed_param:'slug'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}/members.json',
payload_type='status', payload_list=True,
allowed_param=['slug'],
require_auth=True
)
@property
def direct_messages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages
:allowed_param:'since_id', 'max_id', 'count', 'full_text'
"""
return bind_api(
api=self,
path='/direct_messages.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count', 'full_text'],
require_auth=True
)
@property
def get_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages/show
:allowed_param:'id', 'full_text'
"""
return bind_api(
api=self,
path='/direct_messages/show/{id}.json',
payload_type='direct_message',
allowed_param=['id', 'full_text'],
require_auth=True
)
@property
def sent_direct_messages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages/sent
:allowed_param:'since_id', 'max_id', 'count', 'page', 'full_text'
"""
return bind_api(
api=self,
path='/direct_messages/sent.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count', 'page', 'full_text'],
require_auth=True
)
@property
def send_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/post/direct_messages/new
:allowed_param:'user', 'screen_name', 'user_id', 'text'
"""
return bind_api(
api=self,
path='/direct_messages/new.json',
method='POST',
payload_type='direct_message',
allowed_param=['user', 'screen_name', 'user_id', 'text'],
require_auth=True
)
@property
def destroy_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/post/direct_messages/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/direct_messages/destroy.json',
method='POST',
payload_type='direct_message',
allowed_param=['id'],
require_auth=True
)
@property
def create_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/post/friendships/create
:allowed_param:'id', 'user_id', 'screen_name', 'follow'
"""
return bind_api(
api=self,
path='/friendships/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name', 'follow'],
require_auth=True
)
@property
def destroy_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/post/friendships/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def show_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/show
:allowed_param:'source_id', 'source_screen_name', 'target_id', 'target_screen_name'
"""
return bind_api(
api=self,
path='/friendships/show.json',
payload_type='friendship',
allowed_param=['source_id', 'source_screen_name',
'target_id', 'target_screen_name']
)
def lookup_friendships(self, user_ids=None, screen_names=None):
""" Perform bulk look up of friendships from user ID or screenname """
return self._lookup_friendships(list_to_csv(user_ids), list_to_csv(screen_names))
@property
def _lookup_friendships(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/lookup
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/lookup.json',
payload_type='relationship', payload_list=True,
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def friends_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friends/ids
:allowed_param:'id', 'user_id', 'screen_name', 'cursor'
"""
return bind_api(
api=self,
path='/friends/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor']
)
@property
def friends(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friends/list
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'skip_status', 'include_user_entities'
"""
return bind_api(
api=self,
path='/friends/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'skip_status', 'include_user_entities']
)
@property
def friendships_incoming(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/incoming
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/incoming.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def friendships_outgoing(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/outgoing
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/outgoing.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def followers_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/followers/ids
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count'
"""
return bind_api(
api=self,
path='/followers/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count']
)
@property
def followers(self):
""" :reference: https://dev.twitter.com/rest/reference/get/followers/list
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count', 'skip_status', 'include_user_entities'
"""
return bind_api(
api=self,
path='/followers/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count',
'skip_status', 'include_user_entities']
)
@property
def get_settings(self):
""" :reference: https://dev.twitter.com/rest/reference/get/account/settings
"""
return bind_api(
api=self,
path='/account/settings.json',
payload_type='json',
use_cache=False
)
@property
def set_settings(self):
""" :reference: https://dev.twitter.com/rest/reference/post/account/settings
:allowed_param:'sleep_time_enabled', 'start_sleep_time',
'end_sleep_time', 'time_zone', 'trend_location_woeid',
'allow_contributor_request', 'lang'
"""
return bind_api(
api=self,
path='/account/settings.json',
method='POST',
payload_type='json',
allowed_param=['sleep_time_enabled', 'start_sleep_time',
'end_sleep_time', 'time_zone',
'trend_location_woeid', 'allow_contributor_request',
'lang'],
use_cache=False
)
def verify_credentials(self, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/get/account/verify_credentials
:allowed_param:'include_entities', 'skip_status', 'include_email'
"""
try:
return bind_api(
api=self,
path='/account/verify_credentials.json',
payload_type='user',
require_auth=True,
allowed_param=['include_entities', 'skip_status', 'include_email'],
)(**kargs)
except TweepError as e:
if e.response and e.response.status == 401:
return False
raise
@property
def rate_limit_status(self):
""" :reference: https://dev.twitter.com/rest/reference/get/application/rate_limit_status
:allowed_param:'resources'
"""
return bind_api(
api=self,
path='/application/rate_limit_status.json',
payload_type='json',
allowed_param=['resources'],
use_cache=False
)
@property
def set_delivery_device(self):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_delivery_device
:allowed_param:'device'
"""
return bind_api(
api=self,
path='/account/update_delivery_device.json',
method='POST',
allowed_param=['device'],
payload_type='user',
require_auth=True
)
def update_profile_image(self, filename, file_=None):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_image
:allowed_param:'include_entities', 'skip_status'
"""
headers, post_data = API._pack_image(filename, 700, f=file_)
return bind_api(
api=self,
path='/account/update_profile_image.json',
method='POST',
payload_type='user',
allowed_param=['include_entities', 'skip_status'],
require_auth=True
)(self, post_data=post_data, headers=headers)
def update_profile_background_image(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_background_image
:allowed_param:'tile', 'include_entities', 'skip_status', 'use'
"""
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 800, f=f)
bind_api(
api=self,
path='/account/update_profile_background_image.json',
method='POST',
payload_type='user',
allowed_param=['tile', 'include_entities', 'skip_status', 'use'],
require_auth=True
)(post_data=post_data, headers=headers)
def update_profile_banner(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
:allowed_param:'width', 'height', 'offset_left', 'offset_right'
"""
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 700, form_field="banner", f=f)
bind_api(
api=self,
path='/account/update_profile_banner.json',
method='POST',
allowed_param=['width', 'height', 'offset_left', 'offset_right'],
require_auth=True
)(post_data=post_data, headers=headers)
@property
def update_profile(self):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile
:allowed_param:'name', 'url', 'location', 'description', 'profile_link_color'
"""
return bind_api(
api=self,
path='/account/update_profile.json',
method='POST',
payload_type='user',
allowed_param=['name', 'url', 'location', 'description', 'profile_link_color'],
require_auth=True
)
@property
def favorites(self):
""" :reference: https://dev.twitter.com/rest/reference/get/favorites/list
:allowed_param:'screen_name', 'user_id', 'max_id', 'count', 'since_id'
"""
return bind_api(
api=self,
path='/favorites/list.json',
payload_type='status', payload_list=True,
allowed_param=['screen_name', 'user_id', 'max_id', 'count', 'since_id']
)
@property
def create_favorite(self):
""" :reference:https://dev.twitter.com/rest/reference/post/favorites/create
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/create.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def destroy_favorite(self):
""" :reference: https://dev.twitter.com/rest/reference/post/favorites/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/destroy.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def create_block(self):
""" :reference: https://dev.twitter.com/rest/reference/post/blocks/create
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def destroy_block(self):
""" :reference: https://dev.twitter.com/rest/reference/post/blocks/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def blocks(self):
""" :reference: https://dev.twitter.com/rest/reference/get/blocks/list
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/blocks/list.json',
payload_type='user', payload_list=True,
allowed_param=['cursor'],
require_auth=True
)
@property
def blocks_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/blocks/ids """
return bind_api(
api=self,
path='/blocks/ids.json',
payload_type='json',
require_auth=True
)
@property
def report_spam(self):
""" :reference: https://dev.twitter.com/rest/reference/post/users/report_spam
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/report_spam.json',
method='POST',
payload_type='user',
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def saved_searches(self):
""" :reference: https://dev.twitter.com/rest/reference/get/saved_searches/show/%3Aid """
return bind_api(
api=self,
path='/saved_searches/list.json',
payload_type='saved_search', payload_list=True,
require_auth=True
)
@property
def get_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/get/saved_searches/show/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/show/{id}.json',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/post/saved_searches/create
:allowed_param:'query'
"""
return bind_api(
api=self,
path='/saved_searches/create.json',
method='POST',
payload_type='saved_search',
allowed_param=['query'],
require_auth=True
)
@property
def destroy_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/post/saved_searches/destroy/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/destroy/{id}.json',
method='POST',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/create
:allowed_param:'name', 'mode', 'description'
"""
return bind_api(
api=self,
path='/lists/create.json',
method='POST',
payload_type='list',
allowed_param=['name', 'mode', 'description'],
require_auth=True
)
@property
def destroy_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/destroy
:allowed_param:'owner_screen_name', 'owner_id', 'list_id', 'slug'
"""
return bind_api(
api=self,
path='/lists/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'list_id', 'slug'],
require_auth=True
)
@property
def update_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/update
:allowed_param:'list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/update.json',
method='POST',
payload_type='list',
allowed_param=['list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'],
require_auth=True
)
@property
def lists_all(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/list
:allowed_param:'screen_name', 'user_id'
"""
return bind_api(
api=self,
path='/lists/list.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id'],
require_auth=True
)
@property
def lists_memberships(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/memberships
:allowed_param:'screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'
"""
return bind_api(
api=self,
path='/lists/memberships.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'],
require_auth=True
)
@property
def lists_subscriptions(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/subscriptions
:allowed_param:'screen_name', 'user_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscriptions.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'cursor'],
require_auth=True
)
@property
def list_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/statuses
:allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id',
'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/lists/statuses.json',
payload_type='status', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id', 'since_id', 'max_id', 'count',
'include_rts']
)
@property
def get_list(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/show
:allowed_param:'owner_screen_name', 'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/show.json',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'slug', 'list_id']
)
@property
def add_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create
:allowed_param:'screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/create.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'],
require_auth=True
)
@property
def remove_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy
:allowed_param:'screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/destroy.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'],
require_auth=True
)
def add_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk add of list members from user ID or screenname """
return self._add_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _add_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create_all
:allowed_param:'screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/create_all.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'],
require_auth=True
)
def remove_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk remove of list members from user ID or screenname """
return self._remove_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _remove_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy_all
:allowed_param:'screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/destroy_all.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'],
require_auth=True
)
@property
def list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members
:allowed_param:'owner_screen_name', 'slug', 'list_id',
'owner_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/members.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'list_id',
'owner_id', 'cursor']
)
@property
def show_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members/show
:allowed_param:'list_id', 'slug', 'user_id', 'screen_name',
'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/members/show.json',
payload_type='user',
allowed_param=['list_id', 'slug', 'user_id', 'screen_name',
'owner_screen_name', 'owner_id']
)
@property
def subscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/create
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/create.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id'],
require_auth=True
)
@property
def unsubscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/destroy
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id'],
require_auth=True
)
@property
def list_subscribers(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
                         'list_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscribers.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id', 'cursor']
)
@property
def show_list_subscriber(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers/show
:allowed_param:'owner_screen_name', 'slug', 'screen_name',
                         'owner_id', 'list_id', 'user_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/show.json',
payload_type='user',
allowed_param=['owner_screen_name', 'slug', 'screen_name',
'owner_id', 'list_id', 'user_id']
)
@property
def trends_available(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/available """
return bind_api(
api=self,
path='/trends/available.json',
payload_type='json'
)
@property
def trends_place(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/place
:allowed_param:'id', 'exclude'
"""
return bind_api(
api=self,
path='/trends/place.json',
payload_type='json',
allowed_param=['id', 'exclude']
)
@property
def trends_closest(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/closest
:allowed_param:'lat', 'long'
"""
return bind_api(
api=self,
path='/trends/closest.json',
payload_type='json',
allowed_param=['lat', 'long']
)
@property
def search(self):
""" :reference: https://dev.twitter.com/rest/reference/get/search/tweets
:allowed_param:'q', 'lang', 'locale', 'since_id', 'geocode',
'max_id', 'since', 'until', 'result_type', 'count',
'include_entities', 'from', 'to', 'source'
"""
return bind_api(
api=self,
path='/search/tweets.json',
payload_type='search_results',
allowed_param=['q', 'lang', 'locale', 'since_id', 'geocode',
'max_id', 'since', 'until', 'result_type',
'count', 'include_entities', 'from',
'to', 'source']
)
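    # Example call for the search binding above (query and counts are
    # illustrative); pagination with tweepy's Cursor is the usual pattern:
    #
    #     results = api.search(q='python', lang='en', count=100)
    #     for tweet in tweepy.Cursor(api.search, q='python').items(200):
    #         print(tweet.text)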
@property
def reverse_geocode(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/reverse_geocode
:allowed_param:'lat', 'long', 'accuracy', 'granularity', 'max_results'
"""
return bind_api(
api=self,
path='/geo/reverse_geocode.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'accuracy', 'granularity',
'max_results']
)
@property
def geo_id(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/id/%3Aplace_id
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/geo/id/{id}.json',
payload_type='place',
allowed_param=['id']
)
@property
def geo_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/search
:allowed_param:'lat', 'long', 'query', 'ip', 'granularity',
                         'accuracy', 'max_results', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/search.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'query', 'ip', 'granularity',
'accuracy', 'max_results', 'contained_within']
)
@property
def geo_similar_places(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/similar_places
:allowed_param:'lat', 'long', 'name', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/similar_places.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'name', 'contained_within']
)
@property
def supported_languages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/help/languages """
return bind_api(
api=self,
path='/help/languages.json',
payload_type='json',
require_auth=True
)
@property
def configuration(self):
""" :reference: https://dev.twitter.com/rest/reference/get/help/configuration """
return bind_api(
api=self,
path='/help/configuration.json',
payload_type='json',
require_auth=True
)
""" Internal use only """
@staticmethod
def _pack_image(filename, max_size, form_field="image", f=None):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 5MB in size
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
                # build the multipart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type, _ = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in IMAGE_MIMETYPES:
raise TweepError('Invalid file type for image: %s' % file_type)
        # Format the header with a text filename, then encode; encoding the
        # filename first would embed a b'...' repr on Python 3.
        if isinstance(filename, six.binary_type):
            filename = filename.decode("utf-8")
        BOUNDARY = b'Tw3ePy'
        body = list()
        body.append(b'--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="{0}";'
                    ' filename="{1}"'.format(form_field, filename)
                    .encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body
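    # Sketch of how the headers/body pair from _pack_image is consumed;
    # update_profile_image is one caller, and the variable names here are
    # illustrative:
    #
    #     headers, post_data = API._pack_image('avatar.png', 700,
    #                                          form_field='image')
    #     # ...then forwarded to bind_api as post_data/headers kwargs.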
@staticmethod
    def _chunk_media(command, filename, max_size, form_field="media",
                     chunk_size=4096, f=None, media_id=None,
                     segment_index=0, is_direct_message=False):
fp = None
if command == 'init':
if f is None:
file_size = os.path.getsize(filename)
try:
if file_size > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
                # build the multipart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
file_size = f.tell()
if file_size > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
elif command != 'finalize':
if f is not None:
fp = f
else:
raise TweepError('File input for APPEND is mandatory.')
# video must be mp4
file_type, _ = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in CHUNKED_MIMETYPES:
raise TweepError('Invalid file type for video: %s' % file_type)
BOUNDARY = b'Tw3ePy'
body = list()
if command == 'init':
query = {
'command': 'INIT',
'media_type': file_type,
'total_bytes': file_size,
'media_category': API._get_media_category(
is_direct_message, file_type)
}
body.append(urlencode(query).encode('utf-8'))
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
elif command == 'append':
if media_id is None:
raise TweepError('Media ID is required for APPEND command.')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="command"'.encode('utf-8'))
body.append(b'')
body.append(b'APPEND')
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="media_id"'.encode('utf-8'))
body.append(b'')
body.append(str(media_id).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="segment_index"'.encode('utf-8'))
body.append(b'')
body.append(str(segment_index).encode('utf-8'))
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, os.path.basename(filename)).encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read(chunk_size))
body.append(b'--' + BOUNDARY + b'--')
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy'
}
elif command == 'finalize':
if media_id is None:
raise TweepError('Media ID is required for FINALIZE command.')
body.append(
urlencode({
'command': 'FINALIZE',
'media_id': media_id
}).encode('utf-8')
)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
body = b'\r\n'.join(body)
# build headers
headers['Content-Length'] = str(len(body))
return headers, body, fp
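    # The three-phase chunked upload flow this helper serves, as a hedged
    # sketch (the orchestrating media_upload logic lives elsewhere; sizes
    # and filenames are illustrative):
    #
    #     headers, body, fp = API._chunk_media('init', 'clip.mp4', 15360)
    #     # POST INIT -> returns media_id
    #     headers, body, fp = API._chunk_media('append', 'clip.mp4', 15360,
    #                                          f=fp, media_id=media_id,
    #                                          segment_index=0)
    #     # ...one APPEND per chunk, then:
    #     headers, body, fp = API._chunk_media('finalize', 'clip.mp4', 15360,
    #                                          media_id=media_id)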
@staticmethod
def _get_media_category(is_direct_message, file_type):
""" :reference: https://developer.twitter.com/en/docs/direct-messages/message-attachments/guides/attaching-media
:allowed_param:
"""
if is_direct_message:
prefix = 'dm'
else:
prefix = 'tweet'
if file_type in IMAGE_MIMETYPES:
if file_type == 'image/gif':
return prefix + '_gif'
else:
return prefix + '_image'
elif file_type == 'video/mp4':
return prefix + '_video'
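    # For reference, the mapping implemented above:
    #     _get_media_category(False, 'image/png') -> 'tweet_image'
    #     _get_media_category(False, 'image/gif') -> 'tweet_gif'
    #     _get_media_category(True,  'video/mp4') -> 'dm_video'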
import machine
class CharLCDPlate():
MCP23017_IOCON_BANK0 = 0x0A
MCP23017_IOCON_BANK1 = 0x15
MCP23017_GPIOA = 0x09
MCP23017_IODIRB = 0x10
MCP23017_GPIOB = 0x19
SELECT = 0
RIGHT = 1
DOWN = 2
UP = 3
LEFT = 4
OFF = 0x00
RED = 0x01
GREEN = 0x02
BLUE = 0x04
YELLOW = RED + GREEN
TEAL = GREEN + BLUE
VIOLET = RED + BLUE
WHITE = RED + GREEN + BLUE
ON = RED + GREEN + BLUE
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
    def __init__(self, busnum, addr):
        # machine.I2C signatures vary by port; pin/frequency arguments may
        # be required here (older ports used machine.I2C(busnum, I2C.MASTER)).
        self.i2c = machine.I2C(busnum)
        self.address = addr
        self.porta, self.portb, self.ddrb = 0, 0, 0b00010000
        # Writing 0 to IOCON (at its BANK=1 address) forces the MCP23017
        # into BANK=0 sequential-addressing mode regardless of prior state.
        self.i2c.writeto_mem(self.address, self.MCP23017_IOCON_BANK1, b'\x00')
        # Reload all 22 registers in one sequential block write starting
        # at register 0 (IODIRA in BANK=0 addressing).
        registers = [0b00111111, self.ddrb, 0b00111111, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
                     0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00111111, 0b00000000,
                     0b00000000, 0b00000000, 0b00000000, 0b00000000, self.porta, self.portb, self.porta,
                     self.portb]
        self.i2c.writeto_mem(self.address, 0, bytes(registers))
        # Switch back to BANK=1 mode with sequential operation disabled.
        self.i2c.writeto_mem(self.address, self.MCP23017_IOCON_BANK0, bytes([0b10100000]))
self.displayshift = (self.LCD_CURSORMOVE | self.LCD_MOVERIGHT)
self.displaymode = (self.LCD_ENTRYLEFT | self.LCD_ENTRYSHIFTDECREMENT)
self.displaycontrol = (self.LCD_DISPLAYON | self.LCD_CURSOROFF | self.LCD_BLINKOFF)
self.write(0x33)
self.write(0x32)
self.write(0x28)
self.write(self.LCD_CLEARDISPLAY)
self.write(self.LCD_CURSORSHIFT | self.displayshift)
self.write(self.LCD_ENTRYMODESET | self.displaymode)
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
self.write(self.LCD_RETURNHOME)
flip = ( 0b00000000, 0b00010000, 0b00001000, 0b00011000,
0b00000100, 0b00010100, 0b00001100, 0b00011100,
0b00000010, 0b00010010, 0b00001010, 0b00011010,
0b00000110, 0b00010110, 0b00001110, 0b00011110 )
def out4(self, bitmask, value):
hi = bitmask | self.flip[value >> 4]
lo = bitmask | self.flip[value & 0x0F]
return [hi | 0b00100000, hi, lo | 0b00100000, lo]
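    # Worked example: a value of 0x48 with a zero bitmask splits into
    # nibbles 0x4 and 0x8; flip[] mirrors each nibble onto the reversed
    # data lines, and each nibble is emitted twice, first with the E
    # strobe bit (0b00100000) set and then cleared:
    #
    #     out4(0, 0x48) -> [flip[4] | 0x20, flip[4], flip[8] | 0x20, flip[8]]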
pollables = ( LCD_CLEARDISPLAY, LCD_RETURNHOME )
    def write(self, value, char_mode=False):
        """Send a command (or character data, if char_mode) to the LCD."""
        # If pin D7 is currently an input, poll the LCD busy flag until clear.
        if self.ddrb & 0b00010000:
            lo = (self.portb & 0b00000001) | 0b01000000
            hi = lo | 0b00100000  # E strobe high
            self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB, bytes([lo]))
            while True:
                self.i2c.writeto(self.address, bytes([hi]))  # strobe high
                bits = self.i2c.readfrom(self.address, 1)    # high nibble carries busy flag
                self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB,
                                     bytes([lo, hi, lo]))    # strobe low nibble
                if (bits[0] & 0b00000010) == 0:
                    break  # busy flag cleared
            self.portb = lo
            # Polling complete; return D7 to output.
            self.ddrb &= 0b11101111
            self.i2c.writeto_mem(self.address, self.MCP23017_IODIRB, bytes([self.ddrb]))
        bitmask = self.portb & 0b00000001  # mask out PORTB LCD control bits
        if char_mode: bitmask |= 0b10000000  # set data bit if not a command
        if isinstance(value, str):
            # String: issue each character, flushing in <=32-byte bursts.
            last = len(value) - 1
            data = []
            for i, v in enumerate(value):
                data.extend(self.out4(bitmask, ord(v)))
                if (len(data) >= 32) or (i == last):
                    self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB, bytes(data))
                    self.portb = data[-1]
                    data = []
        elif isinstance(value, list):
            # List of integers: commands or CGRAM bitmap rows.
            last = len(value) - 1
            data = []
            for i, v in enumerate(value):
                data.extend(self.out4(bitmask, v))
                if (len(data) >= 32) or (i == last):
                    self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB, bytes(data))
                    self.portb = data[-1]
                    data = []
        else:
            # Single byte.
            data = self.out4(bitmask, value)
            self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB, bytes(data))
            self.portb = data[-1]
        # Slow commands (clear, home) need busy-flag polling before the
        # next write: reconfigure D7 as an input.
        if (not char_mode) and (value in self.pollables):
            self.ddrb |= 0b00010000
            self.i2c.writeto_mem(self.address, self.MCP23017_IODIRB, bytes([self.ddrb]))
def begin(self, cols, lines):
self.currline = 0
self.numlines = lines
self.clear()
def clear(self):
self.write(self.LCD_CLEARDISPLAY)
def home(self):
self.write(self.LCD_RETURNHOME)
row_offsets = ( 0x00, 0x40, 0x14, 0x54 )
def setCursor(self, col, row):
        if row >= self.numlines: row = self.numlines - 1  # clamp to last line
elif row < 0: row = 0
self.write(self.LCD_SETDDRAMADDR | (col + self.row_offsets[row]))
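    # Example: on a 16x2 panel, setCursor(3, 1) issues DDRAM address
    # 0x80 | (3 + 0x40) = 0xC3, i.e. column 3 of the second row.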
def display(self):
self.displaycontrol |= self.LCD_DISPLAYON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noDisplay(self):
self.displaycontrol &= ~self.LCD_DISPLAYON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def cursor(self):
self.displaycontrol |= self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noCursor(self):
self.displaycontrol &= ~self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def ToggleCursor(self):
self.displaycontrol ^= self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def blink(self):
self.displaycontrol |= self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noBlink(self):
self.displaycontrol &= ~self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def ToggleBlink(self):
self.displaycontrol ^= self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def scrollDisplayLeft(self):
self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVELEFT
self.write(self.LCD_CURSORSHIFT | self.displayshift)
def scrollDisplayRight(self):
self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT
self.write(self.LCD_CURSORSHIFT | self.displayshift)
def leftToRight(self):
self.displaymode |= self.LCD_ENTRYLEFT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def rightToLeft(self):
self.displaymode &= ~self.LCD_ENTRYLEFT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def autoscroll(self):
self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def noAutoscroll(self):
self.displaymode &= ~self.LCD_ENTRYSHIFTINCREMENT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def createChar(self, location, bitmap):
self.write(self.LCD_SETCGRAMADDR | ((location & 7) << 3))
self.write(bitmap, True)
self.write(self.LCD_SETDDRAMADDR)
def message(self, text):
lines = str(text).split('\n')
for i, line in enumerate(lines):
if i > 0:
self.write(0xC0)
self.write(line, True)
    def backlight(self, color):
        c = ~color  # backlight pins are active-low
        self.porta = (self.porta & 0b00111111) | ((c & 0b011) << 6)
        self.portb = (self.portb & 0b11111110) | ((c & 0b100) >> 2)
        self.i2c.writeto_mem(self.address, self.MCP23017_GPIOA, bytes([self.porta]))
        self.i2c.writeto_mem(self.address, self.MCP23017_GPIOB, bytes([self.portb]))
    def buttonPressed(self, b):
        return (self.i2c.readfrom_mem(self.address, self.MCP23017_GPIOA, 1)[0] >> b) & 1
    def buttons(self):
        return self.i2c.readfrom_mem(self.address, self.MCP23017_GPIOA, 1)[0] & 0b11111
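
# Hedged usage sketch: the bus number, I2C address (0x20 is the MCP23017
# power-on default) and panel geometry are board-dependent assumptions.
if __name__ == "__main__":
    plate = CharLCDPlate(1, 0x20)
    plate.begin(16, 2)
    plate.backlight(CharLCDPlate.GREEN)
    plate.message("Hello\nWorld")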