gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from autumn.db.query import Query
from autumn.db import escape
from autumn.db.connection import autumn_db, Database
from autumn.validators import ValidatorChain
class ModelCache(object):
    """Registry of Model subclasses, keyed by class name.

    ``models`` is deliberately a class-level dict, so every ModelCache
    instance shares one registry.
    """
    models = {}

    def add(self, model):
        """Register *model* under its class name."""
        self.models.update({model.__name__: model})

    def get(self, model_name):
        """Return the model previously registered as *model_name*."""
        return self.models[model_name]
# Module-level singleton registry; ModelBase adds every new model class here.
cache = ModelCache()

class Empty:
    # Bare placeholder used as a default ``Meta`` container when a model
    # class does not declare its own ``Meta``.
    pass
class ModelBase(type):
    '''
    Metaclass for Model.

    Sets up the default table name and primary key, introspects the table's
    columns (stored on ``_fields``), and wraps iterable validations in
    ValidatorChain objects.
    '''
    def __new__(cls, name, bases, attrs):
        # The abstract ``Model`` base class itself gets no table setup.
        if name == 'Model':
            return super(ModelBase, cls).__new__(cls, name, bases, attrs)
        new_class = type.__new__(cls, name, bases, attrs)
        # Guarantee a Meta container even when the model declared none.
        if not getattr(new_class, 'Meta', None):
            new_class.Meta = Empty
        # Default table name is the lower-cased class name.
        if not getattr(new_class.Meta, 'table', None):
            new_class.Meta.table = name.lower()
        new_class.Meta.table_safe = escape(new_class.Meta.table)
        # Assume id is the default
        if not getattr(new_class.Meta, 'pk', None):
            new_class.Meta.pk = 'id'
        # Create function to loop over iterable validations: a list/tuple of
        # validators collapses into a single ValidatorChain callable.
        for k, v in getattr(new_class.Meta, 'validations', {}).iteritems():
            if isinstance(v, (list, tuple)):
                new_class.Meta.validations[k] = ValidatorChain(*v)
        # See cursor.description
        # http://www.python.org/dev/peps/pep-0249/
        if not hasattr(new_class, "db"):
            new_class.db = autumn_db
        db = new_class.db
        # Column discovery happens at class-definition time, so a live DB
        # connection is required when the model module is imported.
        q = Query.raw_sql('SELECT * FROM %s LIMIT 1' % new_class.Meta.table_safe, db=new_class.db)
        new_class._fields = [f[0] for f in q.description]
        cache.add(new_class)
        return new_class
class Model(object):
    '''
    Allows for automatic attributes based on table columns.

    Syntax::

        from autumn.model import Model

        class MyModel(Model):
            class Meta:
                # If field is blank, this sets a default value on save
                defaults = {'field': 1}
                # Each validation must be callable
                # You may also place validations in a list or tuple which is
                # automatically converted into a ValidatorChain
                validations = {'field': lambda v: v > 0}
                # Table name is lower-case model name by default
                # Or we can set the table name
                table = 'mytable'

        # Create new instance using args based on the order of columns
        m = MyModel(1, 'A string')
        # Or using kwargs
        m = MyModel(field=1, text='A string')

        # Saving inserts into the database (assuming it validates [see below])
        m.save()
        # Updating attributes
        m.field = 123
        # Updates database record
        m.save()
        # Deleting removes from the database
        m.delete()

        # Saving with an improper value, checked against
        # Model.Meta.validations[field_name], will raise Model.ValidationError
        m = MyModel(field=0)
        # 'ValidationError: Improper value "0" for "field"'
        m.save()
        # Or before saving we can check if it's valid
        if m.is_valid():
            m.save()
        else:
            pass  # Do something to fix it here

        # Retrieval is simple using Model.get
        # Returns a Query object that can be sliced
        MyModel.get()
        # Returns a MyModel object with an id of 7
        m = MyModel.get(7)
        # Limits the query results using SQL's LIMIT clause
        # Returns a list of MyModel objects
        m = MyModel.get()[:5]   # LIMIT 0, 5
        m = MyModel.get()[10:15]  # LIMIT 10, 5
        # We can get all objects by slicing, using list, or iterating
        m = MyModel.get()[:]
        m = list(MyModel.get())
        for m in MyModel.get():
            pass  # do something here...
        # We can filter our Query
        m = MyModel.get(field=1)
        m = m.filter(another_field=2)
        # This is the same as
        m = MyModel.get(field=1, another_field=2)
        # Set the order by clause
        m = MyModel.get(field=1).order_by('field', 'DESC')
        # Removing the second argument defaults the order to ASC
    '''
    __metaclass__ = ModelBase
    # NOTE(review): ``debug`` is never read inside this class; presumably
    # consulted by the query layer -- confirm before removing.
    debug = False

    def __init__(self, *args, **kwargs):
        'Allows setting of fields using positional args and kwargs'
        # Write through __dict__ so __setattr__ does not mark the pk changed.
        self.__dict__[self.Meta.pk] = None
        self._new_record = True
        # Positional args map onto columns in table order.
        [setattr(self, self._fields[i], arg) for i, arg in enumerate(args)]
        [setattr(self, k, v) for k, v in kwargs.iteritems()]
        # Created last on purpose: assignments above must not be tracked.
        self._changed = set()

    def __setattr__(self, name, value):
        'Records when fields have changed'
        # Only track real table columns, and only after __init__ finished
        # (i.e. once _changed exists).
        if name != '_changed' and name in self._fields and hasattr(self, '_changed'):
            self._changed.add(name)
        self.__dict__[name] = value

    def _get_pk(self):
        'Returns the current value of the primary key'
        return getattr(self, self.Meta.pk, None)

    def _set_pk(self, value):
        'Sets the primary key'
        return setattr(self, self.Meta.pk, value)

    def _update(self):
        'Uses SQL UPDATE to update record'
        # Only columns recorded in _changed are written back.
        query = 'UPDATE %s SET ' % self.Meta.table_safe
        query += ', '.join(['%s = %s' % (escape(f), self.db.conn.placeholder) for f in self._changed])
        query += ' WHERE %s = %s ' % (escape(self.Meta.pk), self.db.conn.placeholder)
        values = [getattr(self, f) for f in self._changed]
        values.append(self._get_pk())
        cursor = Query.raw_sql(query, values, self.db)

    def _new_save(self):
        'Uses SQL INSERT to create new record'
        # if pk field is set, we want to insert it too
        # if pk field is None, we want to auto-create it from lastrowid
        auto_pk = 1 and (self._get_pk() is None) or 0
        fields=[
            escape(f) for f in self._fields
            if f != self.Meta.pk or not auto_pk
        ]
        query = 'INSERT INTO %s (%s) VALUES (%s)' % (
               self.Meta.table_safe,
               ', '.join(fields),
               ', '.join([self.db.conn.placeholder] * len(fields) )
        )
        values = [getattr(self, f, None) for f in self._fields
               if f != self.Meta.pk or not auto_pk]
        cursor = Query.raw_sql(query, values, self.db)
        # Adopt the DB-generated id when the pk was auto-created.
        if self._get_pk() is None:
            self._set_pk(cursor.lastrowid)
        return True

    def _get_defaults(self):
        'Sets attribute defaults based on ``defaults`` dict'
        for k, v in getattr(self.Meta, 'defaults', {}).iteritems():
            # Any falsy value (None, 0, '') is replaced by the default.
            if not getattr(self, k, None):
                if callable(v):
                    v = v()
                setattr(self, k, v)

    def delete(self):
        'Deletes record from database'
        query = 'DELETE FROM %s WHERE %s = %s' % (self.Meta.table_safe, self.Meta.pk, self.db.conn.placeholder)
        values = [getattr(self, self.Meta.pk)]
        Query.raw_sql(query, values, self.db)
        return True

    def is_valid(self):
        'Returns boolean on whether all ``validations`` pass'
        try:
            self._validate()
            return True
        except Model.ValidationError:
            return False

    def _validate(self):
        'Tests all ``validations``, raises ``Model.ValidationError``'
        for k, v in getattr(self.Meta, 'validations', {}).iteritems():
            assert callable(v), 'The validator must be callable'
            value = getattr(self, k)
            if not v(value):
                raise Model.ValidationError, 'Improper value "%s" for "%s"' % (value, k)

    def save(self):
        'Sets defaults, validates and inserts into or updates database'
        self._get_defaults()
        self._validate()
        # INSERT for brand-new records, UPDATE for existing ones.
        if self._new_record:
            self._new_save()
            self._new_record = False
            return True
        else:
            return self._update()

    @classmethod
    def get(cls, _obj_pk=None, **kwargs):
        'Returns Query object, or a single instance when a pk is given'
        if _obj_pk is not None:
            return cls.get(**{cls.Meta.pk: _obj_pk})[0]
        return Query(model=cls, conditions=kwargs)

    class ValidationError(Exception):
        # Raised by _validate()/save() when a validator rejects a value.
        pass
| |
# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:
#
# Copyright (c) 2012, Jorge A Gallegos <kad@blegh.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import argparse
import boto
from dateutil import parser
import grp
import json
import logging
import threading
import os
import pwd
import socket
from queue import Queue
import re
from datetime import datetime
# Module-level logger writing to stderr; setting TDEBUG in the environment
# switches on debug-level output.
log = logging.getLogger('tableslurp')
stderr = logging.StreamHandler()
stderr.setFormatter(logging.Formatter(
    '%(name)s [%(asctime)s] %(levelname)s %(message)s'))
log.addHandler(stderr)
if os.environ.get('TDEBUG', False):
    log.setLevel(logging.DEBUG)
else:
    log.setLevel(logging.INFO)

# Matches commitlog file names like ``CommitLog-6-1496954415483.log``;
# group(1) captures the trailing (millisecond) timestamp segment.
clog_regex = re.compile(r'CommitLog-\d-(\d+).log')
def parse_clog(clog):
    """Return the timestamp embedded in a commitlog file name as an int.

    The caller compares this value against an integer millisecond timestamp
    with ``>``; the original returned ``group(1)`` as a *string*, which
    raises TypeError on Python 3 and compares by type (always True) on
    Python 2.  Raises AttributeError if *clog* does not match clog_regex.
    """
    return int(clog_regex.search(clog).group(1))
def get_bucket(region, key, secret, token, bucket_name):
    """Connect to S3 in *region* and return the bucket named *bucket_name*.

    A fresh connection is made on every call (see note below).  *token* may
    be None/empty when no temporary security token is in play.
    """
    # unsure if boto is thread-safe, will reconnect every time
    log.debug('Connecting to s3')
    # The original if/else had two byte-identical branches (both passed
    # security_token=token); a single call is equivalent, since boto treats
    # a falsy security_token as "no token".
    conn = boto.s3.connect_to_region(region,
        aws_access_key_id=key,
        aws_secret_access_key=secret,
        security_token=token)
    bucket = conn.get_bucket(bucket_name)
    log.debug('Connected to s3')
    return bucket
def find_latest_listdirjson_for_every_table(bucket, prefix):
    """Map each <keyspace>/<table> under *prefix* to its newest listdir.json key.

    Walks the bucket two directory levels deep (keyspace, then table) and,
    per table folder, keeps the ``-listdir.json`` key with the most recent
    ``last_modified``.  Keys of the result dict are table paths relative to
    *prefix*.
    """
    latest_listdirjsons = {}
    # Keyspace folders sit directly under the prefix,
    # e.g. /var/lib/cassandra/<ks>
    ks_dirs = [entry.name for entry in
               bucket.list(prefix='%s/' % prefix, delimiter='/')]
    for ks_dir in ks_dirs:
        # Table folders sit directly under each keyspace folder,
        # e.g. /var/lib/cassandra/<ks>/<tbl>
        tbl_dirs = [entry.name for entry in
                    bucket.list(prefix=ks_dir, delimiter='/')]
        for tbl_dir in tbl_dirs:
            # All listdir.json manifests inside this table folder,
            # e.g. .../<tbl>/mc-1...-Data.db-listdir.json
            manifests = [entry for entry in
                         bucket.list(prefix=tbl_dir, delimiter='/')
                         if entry.name.endswith('-listdir.json')]
            if not manifests:
                continue
            manifests.sort(key=lambda l: parser.parse(l.last_modified))
            newest = manifests.pop()
            table = tbl_dir.replace('%s/' % prefix, '').rstrip('/')
            latest_listdirjsons[table] = newest
    return latest_listdirjsons
def build_recursive_fileset(bucket, prefix):
    """Return {table: list-of-sstable-files} for every table under *prefix*.

    For each table the newest listdir.json manifest is downloaded and its
    single recorded fileset is extracted.
    """
    table_to_latest_listdirjson_mapping = find_latest_listdirjson_for_every_table(bucket, prefix)
    table_to_files_mapping = {}
    for table, listdirjson in table_to_latest_listdirjson_mapping.items():
        json_data = json.loads(listdirjson.get_contents_as_string())
        # There should only ever be one latest listdir.json and hence, one
        # set of (latest) SSTables.  next(iter(...)) replaces the
        # Python-2-only itervalues().next() -- this module already imports
        # the Python 3 ``queue`` module, where itervalues() would crash.
        sstables = next(iter(json_data.values()))
        table_to_files_mapping[table] = sstables
        log.info("Found %s with %d files", table, len(sstables))
    return table_to_files_mapping
def build_clog_fileset(bucket, prefix, clog_prefix):
    """Return commitlog names (relative to *clog_prefix*) newer than the
    oldest table snapshot under *prefix*.

    Commitlogs are scanned newest-first and collection stops at the first
    one whose embedded timestamp is not newer than the oldest snapshot.
    """
    fileset = []
    table_to_latest_listdirjson_mapping = find_latest_listdirjson_for_every_table(bucket, prefix)
    # dict.values() is a view in Python 3 and has no .sort(); materialize it
    # into a list first.
    listdirjsons = list(table_to_latest_listdirjson_mapping.values())
    listdirjsons.sort(key=lambda l: parser.parse(l.last_modified), reverse=True)
    oldest_key = listdirjsons.pop()
    # S3 last_modified is UTC ("...Z"); compute the epoch explicitly instead
    # of strftime('%s'), which is a non-portable glibc extension and applies
    # the *local* timezone via mktime.
    oldest_dt = datetime.strptime(oldest_key.last_modified, "%Y-%m-%dT%H:%M:%S.%fZ")
    oldest_timestamp = int((oldest_dt - datetime(1970, 1, 1)).total_seconds())
    oldest_timestamp_ms = oldest_timestamp * 1000
    clogs = [_.name for _ in bucket.list(prefix='%s/' % clog_prefix)]
    clogs.sort(reverse=True)
    for clog in clogs:
        # Force int so the comparison is numeric, not str-vs-int.
        if int(parse_clog(clog)) > oldest_timestamp_ms:
            fileset.append(clog.replace('%s/' % clog_prefix, '').rstrip('/'))
        else:
            break
    return fileset
def build_single_fileset(bucket, prefix, origin, target_file):
    """Return the file list recorded for *origin* in one listdir.json.

    When *target_file* is given, its own manifest is used; otherwise the most
    recently modified manifest directly under *prefix* is chosen.  Raises
    LookupError when no manifest can be found.
    """
    key = None
    if target_file:
        # Restore the specific fileset this file belongs to.
        key = bucket.get_key('%s/%s-listdir.json' %
            (prefix, target_file))
    else:
        # Only manifests directly below the prefix (same slash depth) count.
        depth = prefix.count('/') + 1
        manifests = [entry for entry in bucket.list(prefix='%s/' % (prefix,))
                     if entry.name.endswith('-listdir.json')
                     and entry.name.count('/') == depth]
        if manifests:
            manifests.sort(key=lambda l: parser.parse(l.last_modified))
            key = manifests.pop()
    if not key:
        raise LookupError('Cannot find anything to restore from %s:%s/%s' %
            (bucket.name, prefix, target_file or ''))
    json_data = json.loads(key.get_contents_as_string())
    fileset = json_data[origin]
    log.info('Fileset contains %d files to download' % (len(fileset)))
    return fileset
class DownloadCounter(object):
    """Bookkeeping for one queued file: its name and how many download
    attempts have been made so far."""
    filename = None
    attemptcount = 0

    def __init__(self, filename):
        self.filename = filename

    def increment(self):
        """Record one more (failed) download attempt."""
        self.attemptcount = self.attemptcount + 1
class DownloadHandler(object):
    """Downloads a fileset from an S3 bucket into a local directory using a
    pool of daemon worker threads.

    Ownership of downloaded files is taken from the backup's stored ``stat``
    metadata when ``--preserve`` is set, otherwise from the -o/-g arguments.
    """

    def __init__(self, args=None, target=None, prefix=None, fileset=None):
        """Capture connection/ownership settings from *args* and validate
        the requested owner/group immediately (fail sooner than later).

        Raises OSError when the owner/group pair cannot be resolved.
        """
        self.target = target
        self.origin = args.origin[0]
        self.preserve = args.preserve
        self.key = args.aws_key
        self.secret = args.aws_secret
        self.region = args.aws_region
        self.token = args.token
        self.bucket_name = args.bucket[0]
        self.num_threads = args.threads
        self.force = args.force
        # Fall back to the local FQDN unless an explicit name was given.
        self.name = args.name if args.name else socket.getfqdn()
        self.prefix = prefix
        self.fileset = fileset if fileset is not None else []
        # Per-instance queue/thread state: these used to be *class-level*
        # attributes, so every DownloadHandler instance (e.g. one per table
        # in --recursive mode) shared a single Queue and threads dict.
        self.queue = Queue()
        self.threads = {}
        self.owner = None
        self.group = None
        # It may be a bit sub-optimal, but I rather fail sooner than later
        (owner, group) = self._check_metadata()
        if not self.preserve:
            owner = args.owner
            group = args.group
        try:
            self.owner = pwd.getpwnam(owner).pw_uid
            self.group = grp.getgrnam(group).gr_gid
        except Exception as e:
            log.error(e)
            raise OSError('User/Group pair %s:%s does not exist' % (owner, group))

    def _get_bucket(self):
        # Fresh connection per call; see get_bucket() for the rationale.
        return get_bucket(self.region, self.key, self.secret, self.token, self.bucket_name)

    def _check_metadata(self):
        """Recover (owner, group) names from the ``stat`` metadata of the
        first fileset entry present in the bucket; (None, None) when the
        metadata is missing or unparseable."""
        bucket = self._get_bucket()
        k = None
        for fileset in self.fileset:
            k = bucket.get_key('%s/%s' % (self.prefix, fileset))
            if k is not None:
                break
        # The librato branch introduced this
        owner = None
        group = None
        if k is None:
            # log.warn is a deprecated alias of log.warning
            log.warning('Can not fetch metadata information')
            return (owner, group)
        meta = k.get_metadata('stat')
        log.debug('Metadata is %s' % (meta,))
        if meta:
            try:
                json_data = json.loads(meta)
                owner = json_data['user']
                group = json_data['group']
            except TypeError as te:
                log.debug(te)
                log.warning('Could not parse stat metadata for %s' % (k.name,))
            except KeyError as ke:
                log.debug(ke)
                log.warning('Incomplete stat metadata for %s, will ignore' %
                    (k.name,))
        return (owner, group)

    def _test_permissions(self):
        """Create the target directory if needed and chown it, so permission
        problems surface before any download starts."""
        log.info('Will now try to test writing to the target dir %s' %
            (self.target,))
        try:
            if not os.path.isdir(self.target):
                log.debug('Creating temp file in %s' % (self.target,))
                os.makedirs(self.target)
            log.debug('Changing owner:group for %s to %s:%s' %
                (self.target, self.owner, self.group,))
            os.chown(self.target, self.owner, self.group)
        except Exception as e:
            log.debug(e)
            log.exception('%s exists' % (self.target,))
        log.info('Will write to %s' % (self.target,))

    def _worker(self, idx, queue):
        """Thread body: pop DownloadCounter items off *queue* and download
        each file, re-queueing failures up to 5 attempts."""
        log.info('Thread #%d processing items' % (idx,))
        bucket = self._get_bucket()
        while not queue.empty():
            queueddownload = queue.get()
            fname = queueddownload.filename
            keypath = '%s/%s' % (self.prefix, fname,)
            destfile = os.path.join(self.target, os.path.basename(fname))
            log.debug('Checking if we need to download %s to %s' %
                (keypath, destfile,))
            if queueddownload.attemptcount < 5:
                download = False
                # Retry downloading until we succeed
                try:
                    key = bucket.get_key(keypath)
                    log.debug('Key object is %s' % key)
                    if os.path.isfile(destfile):
                        # Existing local file: re-download only on --force
                        # or size mismatch.
                        stat = os.stat(destfile)
                        if self.force:
                            download = True
                        elif stat.st_size != key.size:
                            log.info('%s and %s size differs, will '
                                're-download' % (key.name, destfile,))
                            download = True
                    else:
                        download = True
                    if download and key:
                        log.info('Downloading %s from %s to %s' %
                            (key.name, bucket.name, destfile))
                        key.get_contents_to_filename(destfile)
                        log.debug('Changing owner:group for %s to %s:%s' %
                            (destfile, self.owner, self.group,))
                        os.chown(destfile, self.owner, self.group)
                except Exception as e:
                    log.debug(e)
                    log.exception('Failed to download `%s` retrying' %
                        (fname,))
                    # We can't download, try again
                    queueddownload.increment()
                    queue.put(queueddownload)
            else:
                log.info('Tried to download %s too many times. Giving up' %
                    fname)
            # Pop the task regardless of state. If it fails we've put it back
            queue.task_done()
        log.info('Thread #%d finished processing' % (idx,))

    def run(self):
        """Queue the fileset and drain it with ``num_threads`` workers."""
        self._test_permissions()
        log.info('Running')
        # queue up the filesets
        for filename in self.fileset:
            log.info('Pushing file %s onto queue' % filename)
            self.queue.put(DownloadCounter(filename))
        # launch the worker threads
        for idx in range(0, self.num_threads):
            t = threading.Thread(target=self._worker,
                kwargs={'idx': idx, 'queue': self.queue})
            # daemon=True (Thread.setDaemon is deprecated) so stuck workers
            # cannot block interpreter exit.
            t.daemon = True
            self.threads[idx] = t
            t.start()
        # Wait for everything to finish downloading
        self.queue.join()
        log.info('My job is done.')
def main():
    """CLI entry point: parse arguments, build the fileset(s), download them."""
    # Default ownership for downloaded files: the invoking user and their
    # primary group.
    p = pwd.getpwnam(os.environ['USER'])
    owner = p.pw_name
    group = [_.gr_name for _ in grp.getgrall() if _.gr_gid == p.pw_gid][0]
    ap = argparse.ArgumentParser(
        description='This is the companion script to the `tablesnap` program '
        'which you can use to restore files from an Amazon S3 bucket to any '
        'given local directory which you have write-permissions on. While the '
        'code is straightforward, the program assumes the files you are '
        'restoring got previously backed up with `tablesnap`',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-k', '--aws-key',
        default=os.environ.get('AWS_ACCESS_KEY_ID'),
        help='Amazon S3 Key (default from AWS_ACCESS_KEY_ID in environment)')
    ap.add_argument('-s', '--aws-secret',
        default=os.environ.get('AWS_SECRET_ACCESS_KEY'),
        help='Amazon S3 Secret (default from AWS_SECRET_ACCESS_KEY in environment)')
    ap.add_argument('--token',
        default=os.environ.get('AWS_SECURITY_TOKEN'),
        help='Amazon S3 Token (default from AWS_SECURITY_TOKEN in environment)')
    ap.add_argument('--aws-region',
        default='us-east-1',
        choices=[region.name for region in boto.s3.regions()],
        help='AWS region to connect to.')
    ap.add_argument('-p', '--preserve', default=False, action='store_true',
        help='Preserve the permissions (if they exist) from the source. '
        'This overrides -o and -g')
    ap.add_argument('-o', '--owner', default=owner,
        help='After download, chown files to this user.')
    ap.add_argument('-g', '--group', default=group,
        help='After download, chgrp files to this group.')
    ap.add_argument('-t', '--threads', type=int, default=4,
        help='Split the download between this many threads')
    # -f / -r / --commitlogs select mutually exclusive restore modes.
    include_group = ap.add_mutually_exclusive_group()
    include_group.add_argument('-f', '--file',
        help='If specified, will download the file-set this file belongs to '
        'instead of the latest one available.')
    include_group.add_argument('-r', '--recursive', default=False, action='store_true',
        help='Recursively download the files at the given origin (path), using latest ones available.')
    include_group.add_argument('--commitlogs',
        help='Download commitlogs, assuming up to the most recent batch possible. \n'
        'If your bucket structure is <fqdn or name>/var/lib/cassandra/commitlog \n'
        'provide /var/lib/cassandra/commitlog. \n')
    ap.add_argument('--force', default=False, action='store_true',
        help='Force download files even if they exist')
    ap.add_argument('-n', '--name', default=socket.getfqdn(),
        help='Use this name instead of the FQDN to prefix the bucket dir')
    ap.add_argument('bucket', nargs=1,
        help='S3 bucket to download files from')
    ap.add_argument('origin', nargs=1,
        help='Path inside the bucket to the directory you want to download '
        'files from. '
        'E.g if your bucket structure is <fqdn or name>/var/lib/cassandra/data/<ks>/<tbl>/<files>. \n'
        'With --recursive or --commitlogs, you should provide /var/lib/cassandra/data. \n'
        'Otherwise, provide /var/lib/cassandra/data/<ks>/<tbl> if you want to restore just one table.')
    ap.add_argument('target', nargs=1,
        help='Path in the local FS where files should be downloaded to')
    args = ap.parse_args()
    prefix = '%s:%s' % (args.name, args.origin[0])
    bucket = get_bucket(args.aws_region, args.aws_key, args.aws_secret, args.token, args.bucket[0])
    log.info('Building fileset')
    if args.recursive:
        table_to_files_mapping = build_recursive_fileset(bucket, prefix)
        # .items() instead of the Python-2-only .iteritems(): this module
        # already imports the Python 3 ``queue`` module.
        for table, fileset in table_to_files_mapping.items():
            dh = DownloadHandler(args, target=os.path.join(args.target[0], table),
                prefix=os.path.join(prefix, table), fileset=fileset)
            dh.run()
    elif args.commitlogs:
        clog_prefix = '%s:%s' % (args.name, args.commitlogs)
        fileset = build_clog_fileset(bucket, prefix, clog_prefix)
        dh = DownloadHandler(args, target=os.path.join(args.target[0]), prefix=clog_prefix,
            fileset=fileset)
        dh.run()
    else:
        fileset = build_single_fileset(bucket, prefix, args.origin[0], args.file)
        dh = DownloadHandler(args, target=args.target[0], prefix=prefix,
            fileset=fileset)
        dh.run()
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslotest import mockpatch
import testtools
from tempest import config
from tempest import exceptions
from tempest import test
from tempest.tests import fake_config
from tempest.tests.lib import base
class BaseDecoratorsTest(base.TestCase):
    # Shared setup for the decorator tests: installs a fake tempest config
    # so decorators that consult CONF see predictable values.
    def setUp(self):
        super(BaseDecoratorsTest, self).setUp()
        self.config_fixture = self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
class TestAttrDecorator(BaseDecoratorsTest):
    """Checks that test.attr() records attributes via testtools."""

    def _test_attr_helper(self, expected_attrs, **decorator_args):
        @test.attr(**decorator_args)
        def foo():
            pass

        # Our test.attr decorator sets __testtools_attrs only when a 'type'
        # argument is supplied, so there is nothing to verify otherwise.
        if 'type' not in decorator_args:
            return
        # this is what testtools sets
        self.assertEqual(set(expected_attrs),
                         getattr(foo, '__testtools_attrs'))

    def test_attr_without_type(self):
        # No 'type' kwarg: no attributes should be recorded.
        self._test_attr_helper(expected_attrs='baz', bar='baz')

    def test_attr_decorator_with_list_type(self):
        # if type is 'smoke' we'll get the original list of types
        self._test_attr_helper(expected_attrs=['smoke', 'foo'],
                               type=['smoke', 'foo'])

    def test_attr_decorator_with_unknown_type(self):
        self._test_attr_helper(expected_attrs=['foo'], type='foo')

    def test_attr_decorator_with_duplicated_type(self):
        # Duplicates collapse because the attributes are stored as a set.
        self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
class TestIdempotentIdDecorator(BaseDecoratorsTest):
    """Checks test.idempotent_id(): id validation plus docstring rewriting."""

    def _test_helper(self, _id, **decorator_args):
        # Decorated function *with* a docstring.
        @test.idempotent_id(_id)
        def foo():
            """Docstring"""
            pass
        return foo

    def _test_helper_without_doc(self, _id, **decorator_args):
        # Decorated function *without* a docstring.
        @test.idempotent_id(_id)
        def foo():
            pass
        return foo

    def test_positive(self):
        _id = str(uuid.uuid4())
        decorated = self._test_helper(_id)
        # The id is recorded as a testtools attribute and prepended to the
        # docstring.
        self.assertIn('id-%s' % _id, getattr(decorated, '__testtools_attrs'))
        self.assertTrue(
            decorated.__doc__.startswith('Test idempotent id: %s' % _id))

    def test_positive_without_doc(self):
        _id = str(uuid.uuid4())
        decorated = self._test_helper_without_doc(_id)
        self.assertTrue(
            decorated.__doc__.startswith('Test idempotent id: %s' % _id))

    def test_idempotent_id_not_str(self):
        # Non-string ids are rejected outright.
        self.assertRaises(TypeError, self._test_helper, 42)

    def test_idempotent_id_not_valid_uuid(self):
        # Strings that are not valid UUIDs are rejected too.
        self.assertRaises(ValueError, self._test_helper, '42')
class TestServicesDecorator(BaseDecoratorsTest):
    # Tests for test.services(): the decorator must record service tags as
    # testtools attributes, validate tags, and skip when a tagged service
    # is configured as unavailable.
    def _test_services_helper(self, *decorator_args):
        class TestFoo(test.BaseTestCase):
            @test.services(*decorator_args)
            def test_bar(self):
                return 0

        t = TestFoo('test_bar')
        # Tags end up as a set, so duplicates collapse.
        self.assertEqual(set(decorator_args), getattr(t.test_bar,
                         '__testtools_attrs'))
        # The wrapped method still runs and returns its original value.
        self.assertEqual(t.test_bar(), 0)

    def test_services_decorator_with_single_service(self):
        self._test_services_helper('compute')

    def test_services_decorator_with_multiple_services(self):
        self._test_services_helper('compute', 'network')

    def test_services_decorator_with_duplicated_service(self):
        self._test_services_helper('compute', 'compute')

    def test_services_decorator_with_invalid_service(self):
        self.assertRaises(exceptions.InvalidServiceTag,
                          self._test_services_helper, 'compute',
                          'bad_service')

    def test_services_decorator_with_service_valid_and_unavailable(self):
        # 'volume' maps to cinder; with cinder disabled the test must skip.
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                        'cinder', False))
        self.assertRaises(testtools.TestCase.skipException,
                          self._test_services_helper, 'compute',
                          'volume')

    def test_services_list(self):
        # Every tag in get_service_list() must be accepted by the decorator.
        service_list = test.get_service_list()
        for service in service_list:
            try:
                self._test_services_helper(service)
            except exceptions.InvalidServiceTag:
                self.fail('%s is not listed in the valid service tag list'
                          % service)
            except KeyError:
                # NOTE(mtreinish): This condition is to test for an entry in
                # the outer decorator list but not in the service_list dict.
                # However, because we're looping over the service_list dict
                # it's unlikely we'll trigger this. So manual review is still
                # need for the list in the outer decorator.
                self.fail('%s is in the list of valid service tags but there '
                          'is no corresponding entry in the dict returned from'
                          ' get_service_list()' % service)
            except testtools.TestCase.skipException:
                # Test didn't raise an exception because of an incorrect list
                # entry so move onto the next entry
                continue
class TestStressDecorator(BaseDecoratorsTest):
    """Checks test.stresstest(): frequency/inheritance flags plus the
    'stress' testtools attribute."""

    def _test_stresstest_helper(self, expected_frequency='process',
                                expected_inheritance=False,
                                **decorator_args):
        @test.stresstest(**decorator_args)
        def foo():
            pass

        # The decorator stores its knobs on the function itself.
        self.assertEqual(expected_frequency,
                         getattr(foo, 'st_class_setup_per'))
        self.assertEqual(expected_inheritance,
                         getattr(foo, 'st_allow_inheritance'))
        # And it always tags the function with the 'stress' attribute.
        self.assertEqual(set(['stress']), getattr(foo, '__testtools_attrs'))

    def test_stresstest_decorator_default(self):
        self._test_stresstest_helper()

    def test_stresstest_decorator_class_setup_frequency(self):
        self._test_stresstest_helper('process', class_setup_per='process')

    def test_stresstest_decorator_class_setup_frequency_non_default(self):
        self._test_stresstest_helper(expected_frequency='application',
                                     class_setup_per='application')

    def test_stresstest_decorator_set_frequency_and_inheritance(self):
        self._test_stresstest_helper(expected_frequency='application',
                                     expected_inheritance=True,
                                     class_setup_per='application',
                                     allow_inheritance=True)
class TestRequiresExtDecorator(BaseDecoratorsTest):
    # Tests for test.requires_ext(): the decorated test must skip unless the
    # named extension is enabled for the named service in CONF.
    def setUp(self):
        super(TestRequiresExtDecorator, self).setUp()
        # Enable a known set of compute extensions for all tests below.
        cfg.CONF.set_default('api_extensions', ['enabled_ext', 'another_ext'],
                             'compute-feature-enabled')

    def _test_requires_ext_helper(self, expected_to_skip=True,
                                  **decorator_args):
        class TestFoo(test.BaseTestCase):
            @test.requires_ext(**decorator_args)
            def test_bar(self):
                return 0

        t = TestFoo('test_bar')
        if expected_to_skip:
            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
        else:
            try:
                self.assertEqual(t.test_bar(), 0)
            except testtools.TestCase.skipException:
                # We caught a skipException but we didn't expect to skip
                # this test so raise a hard test failure instead.
                raise testtools.TestCase.failureException(
                    "Not supposed to skip")

    def test_requires_ext_decorator(self):
        self._test_requires_ext_helper(expected_to_skip=False,
                                       extension='enabled_ext',
                                       service='compute')

    def test_requires_ext_decorator_disabled_ext(self):
        self._test_requires_ext_helper(extension='disabled_ext',
                                       service='compute')

    def test_requires_ext_decorator_with_all_ext_enabled(self):
        # 'all' in api_extensions means every extension counts as enabled.
        cfg.CONF.set_default('api_extensions', ['all'],
                             group='compute-feature-enabled')
        self._test_requires_ext_helper(expected_to_skip=False,
                                       extension='random_ext',
                                       service='compute')

    def test_requires_ext_decorator_bad_service(self):
        # Unknown service names surface as a KeyError from the decorator.
        self.assertRaises(KeyError,
                          self._test_requires_ext_helper,
                          extension='enabled_ext',
                          service='bad_service')
class TestSimpleNegativeDecorator(BaseDecoratorsTest):
    # SimpleNegativeAutoTest should auto-generate a test_fake_negative
    # method on the decorated NegativeAutoTest subclass.
    @test.SimpleNegativeAutoTest
    class FakeNegativeJSONTest(test.NegativeAutoTest):
        _schema = {}

    def test_testfunc_exist(self):
        self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))

    @mock.patch('tempest.test.NegativeAutoTest.execute')
    def test_testfunc_calls_execute(self, mock):
        # NOTE(review): the parameter name shadows the ``mock`` module; it is
        # the patched execute() here.  The generated test method must
        # delegate to execute() with the class schema.
        obj = self.FakeNegativeJSONTest("test_fake_negative")
        self.assertIn("test_fake_negative", dir(obj))
        obj.test_fake_negative()
        mock.assert_called_once_with(self.FakeNegativeJSONTest._schema)
| |
import re
import sqlite3
import xlrd
import win32com.client
import fnmatch
import os
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1):
    """Read a rectangular region from one sheet of a workbook.

    Returns ``(header, rows)``: the first row joined into a comma-separated
    string, and the remaining rows as lists of cell values.

    ``worksheet`` is a 0-based sheet index.  (The previous default of ""
    always crashed inside xlrd's sheet_by_index, so defaulting to 0 is a
    strictly compatible fix.)  ``row_end``/``col_end`` of -1 mean "to the
    end of the sheet"; otherwise the bounds are inclusive.
    """
    wb = xlrd.open_workbook(xlsx_file)
    ws = wb.sheet_by_index(worksheet)
    row_end = ws.nrows if row_end == -1 else row_end + 1
    col_end = ws.ncols if col_end == -1 else col_end + 1
    arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end)
           for row in range(row_start, row_end)]
    header = arr[0]
    return ','.join(header), arr[1:]
def arr_to_xlsx(filename, header, arr, offset_row=0):
    """Write *header* (comma-separated string, may be "") and *arr* (rows of
    cells) into a new Excel workbook saved as *filename* via COM.

    Cells whose text begins with '=' are written as formulas.
    """
    xl = win32com.client.Dispatch('Excel.Application')
    xl.ScreenUpdating = False
    wb = xl.Workbooks.Add()
    ws = wb.Worksheets(1)
    if header != "":
        for i, cell in enumerate(header.split(',')):
            ws.Cells(offset_row + 1, i + 1).Value = cell
    for i, row in enumerate(arr):
        for j, cell in enumerate(row):
            # startswith instead of str(cell)[0]: indexing raised IndexError
            # for empty cells.
            if str(cell).startswith('='):
                ws.Cells(offset_row + i + 2, j + 1).Formula = cell
            else:
                ws.Cells(offset_row + i + 2, j + 1).Value = cell
    ws.Columns.AutoFit()
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def question_marks(st):
    """Return a comma-separated SQL placeholder list, one '?' per
    comma-separated field in *st*.

    >>> question_marks('id,name,age')
    '?,?,?'
    """
    # join over a repeated list replaces the manual accumulation loop (which
    # also shadowed the function's own name with a local variable).
    return ','.join(['?'] * len(st.split(',')))
def db_cur(source=":memory:"):
    """Open (or create) a sqlite database and return (connection, cursor).

    Declared-type detection is enabled; defaults to an in-memory database.
    """
    connection = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
    # connection.row_factory = sqlite3.Row  # (kept disabled, as before)
    return connection, connection.cursor()
def create_tbl(cur, tbl_name, header, arr=None, index_arr=None):
    """Create table *tbl_name* if missing, with columns taken from the
    comma-separated *header* (an 'id' column becomes the PRIMARY KEY),
    optionally creating indexes (*index_arr*) and bulk-inserting *arr*.

    NOTE: identifiers (table/column names) cannot be bound as SQL
    parameters, so callers must not pass untrusted values for *tbl_name*,
    *header* or *index_arr*.
    """
    # Bind the table name as a parameter instead of %-interpolating it into
    # the sqlite_master query.
    cur.execute(
        "select count(*) FROM sqlite_master WHERE type='table' AND name = ?",
        (tbl_name,))
    tbl_exists = cur.fetchone()
    if tbl_exists[0] == 0:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        if index_arr is not None:
            for index in index_arr:
                cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
        # NOTE(review): rows are inserted only when the table was just
        # created, so repeated calls do not duplicate data.
        if arr is not None:
            placeholders = ','.join(['?'] * len(header.split(',')))
            cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + placeholders + ")", arr)
    return
def xlsx_template_process(cur, template_file, borrow=True):
    """Generate one ledger workbook per registered counterparty.

    For every distinct counterparty in reg+sbl with a valid SBA agreement,
    opens the Excel template, fills the agreement header cells, merge-joins
    borrow ('Settle Sec Loan') rows against return ('Return Security') rows
    by reference number, writes the merged rows, and saves a per-counterparty
    copy (template path with 'input' replaced by 'output').

    Args:
        cur: sqlite3 cursor over the 'reg', 'sbl' and 'ref' tables.
        template_file: path to the .xlsx template.
        borrow: True for the borrowing ledger, False for the lending ledger;
            flips which party appears as borrower/lender and which activity
            Type codes are selected.

    NOTE(review): the Excel COM application is never quit here and
    DisplayAlerts is left False if an exception escapes — confirm cleanup is
    handled by the caller or process exit.
    """
    stock_ledger_output = "stock_borrowing_ledger.xlsx"  # NOTE(review): unused
    # Template cell addresses of the agreement header block.
    date_of_agreement_addr = "C3"
    registration_no_addr = "C5"
    name_of_borrower_addr = "I3"
    name_of_lender_addr = "I5"
    xl = win32com.client.Dispatch('Excel.Application')
    xl.DisplayAlerts = False
    # date('1899-12-30','+'||n||' day') converts an Excel serial date to ISO.
    cur.execute("""
    select distinct CounterpartyCode, date('1899-12-30','+'||RegistrationDate||' day') as RegistrationDate, SBAAgreementNo, Counterparty, GMSLAGMRA from reg join sbl
    where SBAAgreementNo <> 'n/a' and SBAAgreementNo <> '' and CounterpartyCode <> '' and CounterpartyCode is not null
    order by CounterpartyCode """)
    reg_rows = cur.fetchall()
    for reg_row in reg_rows:
        cpty_code = reg_row[0]
        # GMSLA agreements are stock-borrow/loan; anything else is treated as
        # collateral (CLR/CLD) activity below.
        is_sbl = True if reg_row[4] == "GMSLA" else False
        cpty_arr = []
        wb = xl.Workbooks.Open(template_file)
        ws = wb.Worksheets(1)
        # Header cells: for the lending ledger the agreement date/number are
        # blanked and the borrower/lender names are swapped.
        ws.Range(date_of_agreement_addr).Value = reg_row[1] if borrow else ""
        ws.Range(registration_no_addr).Value = reg_row[2].rjust(7, '0') if borrow else ""
        ws.Range(name_of_borrower_addr).Value = "Haitong International Securities Company Limited" if borrow else reg_row[3]
        ws.Range(name_of_lender_addr).Value = reg_row[3] if borrow else "Haitong International Securities Company Limited"
        if is_sbl:
            query = " Type = 'B' " if borrow else " (Type = 'L' or Type = 'LP') "
        else:
            query = " Type = 'CLR' " if borrow else " Type = 'CLD' "
        # cur.execute("""
        # select loan.ActivityEffectiveDate, loan.Reference, loan.Security, loan.SecurityDescription, loan.ActivityQuantity, return.ActivityEffectiveDate, return.Reference, return.Security, return.SecurityDescription, return.ActivityQuantity
        # from
        # (
        # select UniqueKey, date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        # where cpty = ?
        # and Activity = 'Return Security'
        # and date('1899-12-30','+'||Settled||' day') >= Date('2016-07-01')
        # ) return
        # left join
        # (
        # select UniqueKey, date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        # where cpty = ?
        # and Activity = 'Settle Sec Loan'
        # and date('1899-12-30','+'||Settled||' day') >= Date('2016-07-01')
        # ) loan
        # on loan.Reference = return.Reference
        # union
        # select loan.ActivityEffectiveDate, loan.Reference, loan.Security, loan.SecurityDescription, loan.ActivityQuantity, return.ActivityEffectiveDate, return.Reference, return.Security, return.SecurityDescription, return.ActivityQuantity
        # from
        # (
        # select UniqueKey, date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        # where cpty = ?
        # and Activity = 'Settle Sec Loan'
        # and date('1899-12-30','+'||Settled||' day') >= Date('2016-07-01')
        # ) loan
        # left join
        # (
        # select UniqueKey, date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        # where cpty = ?
        # and Activity = 'Return Security'
        # and date('1899-12-30','+'||Settled||' day') >= Date('2016-07-01')
        # ) return
        # on loan.Reference = return.Reference
        # order by loan.Reference""", (cpty_code,cpty_code,cpty_code,cpty_code,))
        # sbl_arr = cur.fetchall()
        # Rebuild the scratch 'ref' table with this counterparty's references
        # effective on or after 2016-07-01.
        cur.execute("DELETE FROM ref")
        cur.execute("""
        INSERT INTO ref
        select distinct Reference from sbl where cpty = ? and date('1899-12-30','+'||ActivityEffectiveDate||' day') >= Date('2016-07-01')
        """,(cpty_code,) )
        cur.execute("""
        select date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        join ref on sbl.Reference = ref.ref_no
        where cpty = ? and Activity = 'Return Security' and """ + query +"""
        order by Reference
        """,(cpty_code,))
        return_arr = cur.fetchall()
        cur.execute("""
        select date('1899-12-30','+'||ActivityEffectiveDate||' day') as ActivityEffectiveDate, Reference, Security, SecurityDescription, ActivityQuantity from sbl
        join ref on sbl.Reference = ref.ref_no
        where cpty = ? and Activity = 'Settle Sec Loan' and """ + query +"""
        order by Reference
        """,(cpty_code,))
        borrow_arr = cur.fetchall()
        # Merge-join the two Reference-sorted result sets on the numeric part
        # of the reference (leading 'C' stripped); unmatched rows on either
        # side are padded with a row of Nones.
        sbl_arr = []
        none_list = [None, None, None, None, None]  # read-only padding row
        i = 0
        j = 0
        while i < len(borrow_arr) and j < len(return_arr):
            if int(borrow_arr[i][1].replace("C","")) == int(return_arr[j][1].replace("C","")):
                sbl_arr.append(list(borrow_arr[i]) + list(return_arr[j]))
                i = i + 1
                j = j + 1
            elif int(borrow_arr[i][1].replace("C","")) > int(return_arr[j][1].replace("C","")):
                sbl_arr.append(none_list + list(return_arr[j]))
                j = j + 1
            else:
                sbl_arr.append(list(borrow_arr[i]) + none_list)
                i = i + 1
        # Drain whichever side still has rows left.
        if i == len(borrow_arr):
            while j < len(return_arr):
                sbl_arr.append(none_list + list(return_arr[j]))
                j = j + 1
        if j == len(return_arr):
            while i < len(borrow_arr):
                sbl_arr.append(list(borrow_arr[i]) + none_list)
                i = i + 1
        # Data rows start under the template's 8-row header block.
        offset_row = 8
        for i, row in enumerate(sbl_arr):
            for j, cell in enumerate(row):
                # NOTE(review): str(cell)[0] raises IndexError if a cell is an
                # empty string — confirm upstream data never contains one.
                if str(cell)[0] == '=':
                    ws.Cells(offset_row+i+1,j+1).Formula = cell
                else:
                    ws.Cells(offset_row+i+1,j+1).Value = cell
        wb.SaveAs(template_file.replace(".xlsx", "_" + cpty_code + ".xlsx").replace("input", "output"))
        wb.Close(True)
    return
def main():
    """Load the SBL activity report and registration record into SQLite,
    then generate per-counterparty borrowing and lending ledgers."""
    stock_borrow_ledger_template = r"S:\SBL\SBL Ledger\input\stock_borrowing_ledger_template.xlsx"
    stock_lend_ledger_template = r"S:\SBL\SBL Ledger\input\stock_lending_ledger_template.xlsx"
    stock_ledger_input = r"\\p7fs0003\nd\3014-GlobalOne\MFYU_Activity_Report_By_Cpty_Currency_Security.xlsx"
    sbl_reg_rec = r"S:\SBL\SBL Ledger\input\SBL Registration Record.xlsx"
    conn, cur = db_cur()
    # Activity report: sheet 0, data starts at row 3; registration record:
    # sheet 0, first six columns only.
    sbl_header, sbl_arr = xlsx_to_arr(stock_ledger_input, 0, 3)
    reg_header, reg_arr = xlsx_to_arr(sbl_reg_rec, 0, col_end=5)
    # Strip characters that would be illegal in SQLite column names.
    reg_header = re.sub(r"[\*\.#/\$%\d\" ]", "", reg_header)
    sbl_header = re.sub(r"[\*\.#/\$%\d\" ]", "", sbl_header)
    print(reg_header)
    print(sbl_header)
    create_tbl(cur, "ref", "ref_no")
    create_tbl(cur, "sbl", sbl_header, sbl_arr)
    create_tbl(cur, "reg", reg_header, reg_arr)
    xlsx_template_process(cur, stock_borrow_ledger_template, True)
    xlsx_template_process(cur, stock_lend_ledger_template, False)
    return

if __name__ == "__main__":
    # Guard the entry point so importing this module for its helpers does not
    # kick off a full ledger run (the old bare main() call always executed).
    main()
| |
"""AuthZ Adapter implementations of relationship sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ...abstract_osid.relationship import sessions as abc_relationship_sessions
from ..osid.osid_errors import NotFound
from ..osid.osid_errors import PermissionDenied, NullArgument, Unimplemented
from ..osid.osid_errors import Unsupported
from ..primitives import Id
from ..utilities import QueryWrapper
from dlkit.authz_adapter.osid import sessions as osid_sessions
class RelationshipLookupSession(abc_relationship_sessions.RelationshipLookupSession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipLookupSession methods with authorization checks.

    When direct lookup is denied, methods fall back to a query-based path
    (_try_harder) that excludes families the user is not authorized for.
    """
    def __init__(self, provider_session, authz_session, proxy=None, **kwargs):
        """Optionally accepts hierarchy_session/query_session kwargs that
        enable the unauthorized-family filtering fallback."""
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        if 'hierarchy_session' in kwargs:
            self._hierarchy_session = kwargs['hierarchy_session']
        else:
            self._hierarchy_session = None
        if 'query_session' in kwargs:
            self._query_session = kwargs['query_session']
        else:
            self._query_session = None
        self._qualifier_id = provider_session.get_family_id()
        self._id_namespace = 'relationship.Relationship'
        # Defaults: federated family view, comparative relationship view.
        self.use_federated_family_view()
        self.use_comparative_relationship_view()
    def _get_unauth_family_ids(self, family_id):
        """Recursively collect ids of families (and their descendants) the
        user may NOT look up. NOTE(review): assumes self._hierarchy_session
        is not None — callers must guard, as _try_harder does."""
        if self._can('lookup', family_id):
            return []  # Don't go further - assumes authorizations inherited
        else:
            unauth_list = [str(family_id)]
            if self._hierarchy_session.has_child_families(family_id):
                for child_family_id in self._hierarchy_session.get_child_family_ids(family_id):
                    unauth_list = unauth_list + self._get_unauth_family_ids(child_family_id)
            return unauth_list
    def _try_harder(self, query):
        """Run *query* through the query session with unauthorized families
        excluded; raises PermissionDenied if the fallback is unavailable."""
        if self._hierarchy_session is None or self._query_session is None:
            # Should probably try to return empty result instead
            # perhaps through a query.match_any(match = None)?
            raise PermissionDenied()
        for family_id in self._get_unauth_family_ids(self._qualifier_id):
            query.match_family_id(family_id, match=False)
        return self._query_session.get_relationships_by_query(query)
    def get_family_id(self):
        """Return the Id of the Family associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_family_id()
    family_id = property(fget=get_family_id)
    def get_family(self):
        """Return the Family associated with this session (requires lookup authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family()
    family = property(fget=get_family)
    def can_lookup_relationships(self):
        """Test if this user can perform relationship lookups."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.can_lookup_resources_template
        return self._can('lookup')
    def use_comparative_relationship_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_comparative_resource_view_template
        self._use_comparative_object_view()
        self._provider_session.use_comparative_relationship_view()
    def use_plenary_relationship_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_plenary_resource_view_template
        self._use_plenary_object_view()
        self._provider_session.use_plenary_relationship_view()
    def use_federated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_family_view()
    def use_isolated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_family_view()
    def use_effective_relationship_view(self):
        raise Unimplemented()
    def use_any_effective_relationship_view(self):
        raise Unimplemented()
    def get_relationship(self, relationship_id):
        """Return the relationship for *relationship_id*; falls back to a
        filtered query in federated/comparative view when lookup is denied."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resource_template
        if self._can('lookup'):
            return self._provider_session.get_relationship(relationship_id)
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            query.match_id(relationship_id, match=True)
            results = self._try_harder(query)
            if results.available() > 0:
                return results.next()
            else:
                raise NotFound()
    def get_relationships_by_ids(self, relationship_ids):
        """Return relationships for the given list of Ids, with fallback."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_ids_template
        if self._can('lookup'):
            return self._provider_session.get_relationships_by_ids(relationship_ids)
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            for relationship_id in (relationship_ids):
                query.match_id(relationship_id, match=True)
            return self._try_harder(query)
    def get_relationships_by_genus_type(self, relationship_genus_type):
        """Return relationships of the given genus type, with fallback."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
        if self._can('lookup'):
            return self._provider_session.get_relationships_by_genus_type(relationship_genus_type)
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            query.match_genus_type(relationship_genus_type, match=True)
            return self._try_harder(query)
    def get_relationships_by_parent_genus_type(self, relationship_genus_type):
        """Return relationships whose genus derives from the given type."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
        if self._can('lookup'):
            return self._provider_session.get_relationships_by_parent_genus_type(relationship_genus_type)
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            query.match_parent_genus_type(relationship_genus_type, match=True)
            return self._try_harder(query)
    def get_relationships_by_record_type(self, relationship_record_type):
        """Return relationships containing the given record type."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_record_type_template
        if self._can('lookup'):
            return self._provider_session.get_relationships_by_record_type(relationship_record_type)
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            query.match_record_type(relationship_record_type, match=True)
            return self._try_harder(query)
    def get_relationships_on_date(self, from_, to):
        raise Unimplemented()
    def get_relationships_for_source(self, source_id):
        """Pass through to the provider (requires lookup authz; no fallback)."""
        # Implemented from azosid template for -
        # osid.resource.RelationshipLookupSession.get_relationships_for_source_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_relationships_for_source(source_id)
    def get_relationships_for_source_on_date(self, source_id, from_, to):
        """Pass through to provider RelationshipLookupSession.get_relationships_for_source_on_date"""
        # Implemented from azosid template for -
        # osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_relationships_for_source_on_date(source_id, from_, to)
    def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):
        raise Unimplemented()
    def get_relationships_by_genus_type_for_source_on_date(self, source_id, relationship_genus_type, from_, to):
        raise Unimplemented()
    def get_relationships_for_destination(self, destination_id):
        raise Unimplemented()
    def get_relationships_for_destination_on_date(self, destination_id, from_, to):
        raise Unimplemented()
    def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):
        raise Unimplemented()
    def get_relationships_by_genus_type_for_destination_on_date(self, destination_id, relationship_genus_type, from_, to):
        raise Unimplemented()
    def get_relationships_for_peers(self, source_id, destination_id):
        raise Unimplemented()
    def get_relationships_for_peers_on_date(self, source_id, destination_id, from_, to):
        raise Unimplemented()
    def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):
        raise Unimplemented()
    def get_relationships_by_genus_type_for_peers_on_date(self, source_id, destination_id, relationship_genus_type, from_, to):
        raise Unimplemented()
    def get_relationships(self):
        """Return all relationships visible to this user, with fallback."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_template
        if self._can('lookup'):
            return self._provider_session.get_relationships()
        elif self._is_isolated_catalog_view() or self._is_plenary_object_view():
            raise PermissionDenied()
        else:
            query = self._query_session.get_relationship_query()
            query.match_any(match=True)
            return self._try_harder(query)
    relationships = property(fget=get_relationships)
class RelationshipQuerySession(abc_relationship_sessions.RelationshipQuerySession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipQuerySession methods with authorization checks.

    Queries handed out by this session are wrapped so that family-id match
    terms are deferred and re-checked against the user's authorizations when
    the query is executed.
    """
    def __init__(self, provider_session, authz_session, proxy=None, **kwargs):
        """Optionally accepts hierarchy_session/query_session kwargs used by
        the unauthorized-family filtering fallback."""
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        if 'hierarchy_session' in kwargs:
            self._hierarchy_session = kwargs['hierarchy_session']
        else:
            self._hierarchy_session = None
        if 'query_session' in kwargs:
            self._query_session = kwargs['query_session']
        else:
            self._query_session = None
        self._qualifier_id = provider_session.get_family_id()
        self._id_namespace = 'relationship.Relationship'
    def _get_unauth_family_ids(self, family_id):
        """Recursively collect ids of families the user may NOT search.
        NOTE(review): assumes self._hierarchy_session is not None — callers
        must guard, as _try_harder does."""
        if self._can('search', family_id):
            return []  # Don't go further - assumes authorizations inherited
        else:
            unauth_list = [str(family_id)]
            if self._hierarchy_session.has_child_families(family_id):
                for child_family_id in self._hierarchy_session.get_child_family_ids(family_id):
                    unauth_list = unauth_list + self._get_unauth_family_ids(child_family_id)
            return unauth_list
    def _try_harder(self, query):
        """Execute *query* with unauthorized families excluded at the
        provider level; raises PermissionDenied without a hierarchy session."""
        if self._hierarchy_session is None:
            # Should probably try to return empty result instead
            # perhaps through a query.match_any(match = None)?
            raise PermissionDenied()
        for family_id in self._get_unauth_family_ids(self._qualifier_id):
            query._provider_query.match_family_id(family_id, match=False)
        return self._provider_session.get_relationships_by_query(query)
    class RelationshipQueryWrapper(QueryWrapper):
        """Wrapper for RelationshipQueries to override match_family_id method"""
        def match_family_id(self, family_id, match=True):
            # Defer the term; get_relationships_by_query applies it only for
            # families the user is authorized to search.
            self.cat_id_args_list.append({'family_id': family_id, 'match': match})
    def get_family_id(self):
        """Return the Id of the Family associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_family_id()
    family_id = property(fget=get_family_id)
    def get_family(self):
        """Return the Family associated with this session (requires lookup authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family()
    family = property(fget=get_family)
    def use_federated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_family_view()
    def use_isolated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_family_view()
    def can_search_relationships(self):
        """Test if this user can perform relationship searches."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.can_search_resources_template
        return self._can('search')
    def get_relationship_query(self):
        """Return a wrapped relationship query whose family-id terms are
        authorization-checked at execution time."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resource_query_template
        if (not self._can('search') and
                self._is_isolated_catalog_view()):
            raise PermissionDenied()
        else:
            return self.RelationshipQueryWrapper(self._provider_session.get_relationship_query())
    relationship_query = property(fget=get_relationship_query)
    def get_relationships_by_query(self, relationship_query):
        """Execute a query obtained from this session, applying only the
        family-id terms the user is authorized for, with fallback filtering."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resources_by_query_template
        if not hasattr(relationship_query, 'cat_id_args_list'):
            raise Unsupported('relationship_query not from this session')
        for kwargs in relationship_query.cat_id_args_list:
            if self._can('search', kwargs['family_id']):
                relationship_query._provider_query.match_family_id(**kwargs)
        if self._can('search'):
            return self._provider_session.get_relationships_by_query(relationship_query)
        elif self._is_isolated_catalog_view():
            raise PermissionDenied()
        else:
            result = self._try_harder(relationship_query)
            # Clear the family terms added by _try_harder so the query can
            # be reused.
            relationship_query._provider_query.clear_family_terms()
            return result
class RelationshipSearchSession(abc_relationship_sessions.RelationshipSearchSession, RelationshipQuerySession):
    """Adapts underlying RelationshipSearchSession methods with authorization checks."""
    def get_relationship_search(self):
        """Pass through to provider RelationshipSearchSession.get_relationship_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resource_search_template
        if self._can('search'):
            return self._provider_session.get_relationship_search()
        raise PermissionDenied()
    relationship_search = property(fget=get_relationship_search)
    def get_relationship_search_order(self):
        raise Unimplemented()
    relationship_search_order = property(fget=get_relationship_search_order)
    def get_relationships_by_search(self, relationship_query, relationship_search):
        """Pass through to provider RelationshipSearchSession.get_relationships_by_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resources_by_search_template
        if self._can('search'):
            return self._provider_session.get_relationships_by_search(relationship_query, relationship_search)
        raise PermissionDenied()
    def get_relationship_query_from_inspector(self, relationship_query_inspector):
        raise Unimplemented()
class RelationshipAdminSession(abc_relationship_sessions.RelationshipAdminSession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipAdminSession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        """Authorize against the provider session's family qualifier."""
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        self._qualifier_id = provider_session.get_family_id()
        self._id_namespace = 'relationship.Relationship'
    def get_family_id(self):
        """Return the Id of the Family associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_family_id()
    family_id = property(fget=get_family_id)
    def get_family(self):
        """Return the Family associated with this session (requires lookup authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family()
    family = property(fget=get_family)
    def can_create_relationships(self):
        """Test if this user can create relationships."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('create')
    def can_create_relationship_with_record_types(self, relationship_record_types):
        """Test if this user can create a relationship with the given records."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resource_with_record_types
        # This would like to be a real implementation someday:
        if relationship_record_types is None:  # identity check (was '== None')
            raise NullArgument()  # Just 'cause the spec says to :)
        else:
            return self._can('create')
    def get_relationship_form_for_create(self, source_id, destination_id, relationship_record_types):
        """Return the form for creating a relationship (requires create authz)."""
        # Implemented from azosid template for -
        # osid.resource.RelationshipAdminSession.get_relationship_form_for_create_template
        if not self._can('create'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_relationship_form_for_create(source_id, destination_id, relationship_record_types)
    def create_relationship(self, relationship_form):
        """Create a new relationship (requires create authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.create_resource
        if not self._can('create'):
            raise PermissionDenied()
        else:
            return self._provider_session.create_relationship(relationship_form)
    def can_update_relationships(self):
        """Test if this user can update relationships."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('update')
    def get_relationship_form_for_update(self, relationship_id):
        """Return the form for updating a relationship (requires update authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.get_resource_form_for_update
        if not self._can('update'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_relationship_form_for_update(relationship_id)
    def duplicate_relationship(self, relationship_id):
        """Duplicate an existing relationship (requires update authz)."""
        if not self._can('update'):
            raise PermissionDenied()
        else:
            return self._provider_session.duplicate_relationship(relationship_id)
    def update_relationship(self, relationship_form):
        """Update an existing relationship (requires update authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.update_resource
        if not self._can('update'):
            raise PermissionDenied()
        else:
            return self._provider_session.update_relationship(relationship_form)
    def can_delete_relationships(self):
        """Test if this user can delete relationships."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('delete')
    def delete_relationship(self, relationship_id):
        """Delete a relationship (requires delete authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.delete_resource
        if not self._can('delete'):
            raise PermissionDenied()
        else:
            return self._provider_session.delete_relationship(relationship_id)
    def can_manage_relationship_aliases(self):
        raise Unimplemented()
    def alias_relationship(self, relationship_id, alias_id):
        """Add an alias Id to a relationship (requires alias authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.alias_resources
        if not self._can('alias'):
            raise PermissionDenied()
        else:
            return self._provider_session.alias_relationship(relationship_id, alias_id)
class RelationshipNotificationSession(abc_relationship_sessions.RelationshipNotificationSession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipNotificationSession methods with authorization checks.

    Fixes over the previous version: the duplicate trailing definitions of
    reliable_relationship_notifications / unreliable_relationship_notifications /
    acknowledge_relationship_notification were removed (the later definitions
    silently shadowed the earlier ones), and the register_for_new_* variants
    now forward their source/destination/genus-type argument to the provider
    instead of dropping it.
    """
    def get_family_id(self):
        """Return the Id of the Family associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_family_id()
    family_id = property(fget=get_family_id)
    def get_family(self):
        """Return the Family associated with this session (requires lookup authz)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family()
    family = property(fget=get_family)
    def can_register_for_relationship_notifications(self):
        """Test if this user can register for relationship notifications."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
        return self._can('register')
    def use_federated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_family_view()
    def use_isolated_family_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_family_view()
    def reliable_relationship_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.reliable_relationship_notifications()
    def unreliable_relationship_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.unreliable_relationship_notifications()
    def acknowledge_relationship_notification(self, notification_id):
        raise Unimplemented()
    def register_for_new_relationships(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_new_relationships()
    def register_for_new_relationships_for_source(self, source_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            # Bug fix: source_id was previously dropped from the provider call.
            self._provider_session.register_for_new_relationships_for_source(source_id)
    def register_for_new_relationships_for_destination(self, destination_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            # Bug fix: destination_id was previously dropped from the provider call.
            self._provider_session.register_for_new_relationships_for_destination(destination_id)
    def register_for_new_relationships_by_genus_type(self, relationship_genus_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            # Bug fix: relationship_genus_type was previously dropped from the provider call.
            self._provider_session.register_for_new_relationships_by_genus_type(relationship_genus_type)
    def register_for_changed_relationships(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_relationships()
    def register_for_changed_relationships_for_source(self, source_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_relationships_for_source(source_id)
    def register_for_changed_relationships_for_destination(self, destination_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_relationships_for_destination(destination_id)
    def register_for_changed_relationships_by_genus_type(self, relationship_genus_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_relationships_by_genus_type(relationship_genus_type)
    def register_for_changed_relationship(self, relationship_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_relationship(relationship_id)
    def register_for_deleted_relationships(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_deleted_relationships()
    def register_for_deleted_relationships_for_source(self, source_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_deleted_relationships_for_source(source_id)
    def register_for_deleted_relationships_for_destination(self, destination_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_deleted_relationships_for_destination(destination_id)
    def register_for_deleted_relationships_by_genus_type(self, relationship_genus_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_deleted_relationships_by_genus_type(relationship_genus_type)
    def register_for_deleted_relationship(self, relationship_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_deleted_relationship(relationship_id)
class RelationshipFamilySession(abc_relationship_sessions.RelationshipFamilySession, osid_sessions.OsidSession):
"""Adapts underlying RelationshipFamilySession methodswith authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        """Wrap the provider family session; authorizes against the ROOT qualifier."""
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        self._qualifier_id = Id('authorization.Qualifier%3AROOT%40ODL.MIT.EDU')  # This needs to be done right
        self._id_namespace = 'relationship.RelationshipFamily'
    def can_lookup_relationship_family_mappings(self):
        """Test if this user can look up relationship/family mappings."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
        return self._can('lookup')
def use_comparative_family_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_comparative_bin_view_template
self._provider_session.use_comparative_family_view()
def use_plenary_family_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_plenary_bin_view_template
self._provider_session.use_plenary_family_view()
def get_relationship_ids_by_family(self, family_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_relationship_ids_by_family(family_id)
def get_relationships_by_family(self, family_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_relationship_ids_by_family(family_id)
def get_relationship_ids_by_families(self, family_ids):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_relationship_ids_by_families(family_ids)
def get_relationships_by_families(self, family_ids):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bins
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_relationships_ids_by_families(family_ids)
def get_family_ids_by_relationship(self, relationship_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_family_ids_by_relationship(relationship_id)
def get_families_by_relationship(self, relationship_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_bins_by_resource
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_families_by_relationship(relationship_id)
class RelationshipFamilyAssignmentSession(abc_relationship_sessions.RelationshipFamilyAssignmentSession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipFamilyAssignmentSession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        self._qualifier_id = Id('authorization.Qualifier%3AROOT%40ODL.MIT.EDU')  # This needs to be done right
        self._id_namespace = 'relationship.RelationshipFamily'
    def can_assign_relationships(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources
        return self._can('assign')
    def can_assign_relationships_to_family(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
        # Fixed: previously referenced undefined name 'bin_id' (NameError);
        # the qualifier is the family being assigned into.
        return self._can('assign', qualifier_id=family_id)
    def get_assignable_family_ids(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
        # NOTE(review): family_id is accepted but not forwarded to the
        # provider session — confirm against the provider's signature.
        if not self._can('assign'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_assignable_family_ids()
    def get_assignable_family_ids_for_relationship(self, family_id, relationship_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
        # NOTE(review): family_id is accepted but not forwarded — confirm
        # against the provider's signature.
        if not self._can('assign'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_assignable_family_ids_for_relationship(relationship_id)
    def assign_relationship_to_family(self, relationship_id, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if not self._can('assign'):
            raise PermissionDenied()
        else:
            return self._provider_session.assign_relationship_to_family(relationship_id, family_id)
    def unassign_relationship_from_family(self, relationship_id, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if not self._can('assign'):
            raise PermissionDenied()
        else:
            return self._provider_session.unassign_relationship_from_family(relationship_id, family_id)
    def reassign_relationship_to_family(self, relationship_id, from_family_id, to_family_id):
        raise Unimplemented()
class RelationshipSmartFamilySession(abc_relationship_sessions.RelationshipSmartFamilySession, osid_sessions.OsidSession):
    """Adapts underlying RelationshipSmartFamilySession methods with authorization checks.

    Most smart-family operations are not supported by this adapter and raise
    ``Unimplemented``; only the family accessors delegate to the provider.
    """
    def get_family_id(self):
        """Return the Id of the family in use (no authorization required)."""
        # azosid template: osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_family_id()
    family_id = property(fget=get_family_id)
    def get_family(self):
        """Return the family in use, subject to a 'lookup' authorization check."""
        # azosid template: osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_family()
    family = property(fget=get_family)
    def can_manage_smart_families(self):
        raise Unimplemented()
    def get_relationship_query(self):
        raise Unimplemented()
    relationship_query = property(fget=get_relationship_query)
    def get_relationship_search_order(self):
        raise Unimplemented()
    relationship_search_order = property(fget=get_relationship_search_order)
    def apply_relationship_query(self, relationship_query):
        raise Unimplemented()
    def inspect_relationship_query(self):
        raise Unimplemented()
    def apply_relationship_sequencing(self, relationship_search_order):
        raise Unimplemented()
    def get_relationship_query_from_inspector(self, relationship_query_inspector):
        raise Unimplemented()
class FamilyLookupSession(abc_relationship_sessions.FamilyLookupSession, osid_sessions.OsidSession):
    """Adapts underlying FamilyLookupSession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        # This needs to be done right
        # Build from authority in config
        self._qualifier_id = Id('relationship.Family%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'relationship.Family'
    def can_lookup_families(self):
        """Return True if the agent may look up families."""
        # azosid template: osid.resource.ResourceLookupSession.can_lookup_resources_template
        return self._can('lookup')
    def use_comparative_family_view(self):
        """Forward the comparative-view hint to the provider session."""
        # azosid template: osid.resource.BinLookupSession.use_comparative_bin_view_template
        self._provider_session.use_comparative_family_view()
    def use_plenary_family_view(self):
        """Forward the plenary-view hint to the provider session."""
        # azosid template: osid.resource.BinLookupSession.use_plenary_bin_view_template
        self._provider_session.use_plenary_family_view()
    def get_family(self, family_id):
        """Return the family with the given Id, subject to a 'lookup' check."""
        # azosid template: osid.resource.BinLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_family(family_id)
    def get_families_by_ids(self, family_ids):
        """Return the families with the given Ids, subject to a 'lookup' check."""
        # azosid template: osid.resource.BinLookupSession.get_bins_by_ids_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_families_by_ids(family_ids)
    def get_families_by_genus_type(self, family_genus_type):
        raise Unimplemented()
    def get_families_by_parent_genus_type(self, family_genus_type):
        raise Unimplemented()
    def get_families_by_record_type(self, family_record_type):
        raise Unimplemented()
    def get_families_by_provider(self, resource_id):
        raise Unimplemented()
    def get_families(self):
        """Return all families visible to this session, subject to a 'lookup' check."""
        # azosid template: osid.resource.BinLookupSession.get_bins_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_families()
    families = property(fget=get_families)
class FamilyQuerySession(abc_relationship_sessions.FamilyQuerySession, osid_sessions.OsidSession):
    """Adapts underlying FamilyQuerySession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        # This needs to be done right
        # Build from authority in config
        self._qualifier_id = Id('relationship.Family%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'relationship.Family'
    def can_search_families(self):
        """Return True if the agent may search families."""
        # azosid template: osid.resource.ResourceQuerySession.can_search_resources_template
        return self._can('search')
    def get_family_query(self):
        """Return a family query object, subject to a 'search' check."""
        # azosid template: osid.resource.BinQuerySession.get_bin_query_template
        if not self._can('search'):
            raise PermissionDenied()
        return self._provider_session.get_family_query()
    family_query = property(fget=get_family_query)
    def get_families_by_query(self, family_query):
        """Return families matching the query, subject to a 'search' check."""
        # azosid template: osid.resource.BinQuerySession.get_bins_by_query_template
        if not self._can('search'):
            raise PermissionDenied()
        return self._provider_session.get_families_by_query(family_query)
class FamilySearchSession(abc_relationship_sessions.FamilySearchSession, FamilyQuerySession):
    """Adapts underlying FamilySearchSession methods with authorization checks.

    All search-specific operations are currently unsupported; query behavior
    is inherited from FamilyQuerySession.
    """
    def get_family_search(self):
        raise Unimplemented()
    family_search = property(fget=get_family_search)
    def get_family_search_order(self):
        raise Unimplemented()
    family_search_order = property(fget=get_family_search_order)
    def get_families_by_search(self, family_query, family_search):
        raise Unimplemented()
    def get_family_query_from_inspector(self, family_query_inspector):
        raise Unimplemented()
class FamilyAdminSession(abc_relationship_sessions.FamilyAdminSession, osid_sessions.OsidSession):
    """Adapts underlying FamilyAdminSession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        # This needs to be done right
        # Build from authority in config
        self._qualifier_id = Id('relationship.Family%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'relationship.Family'
    def can_create_families(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('create')
    def can_create_family_with_record_types(self, family_record_types):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resource_with_record_types
        # This would like to be a real implementation someday:
        if family_record_types is None:  # 'is None', not '== None'
            raise NullArgument()  # Just 'cause the spec says to :)
        else:
            return self._can('create')
    def get_family_form_for_create(self, family_record_types):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.get_bin_form_for_create_template
        if not self._can('create'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family_form_for_create(family_record_types)
    def create_family(self, family_form):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.create_bin_template
        if not self._can('create'):
            raise PermissionDenied()
        else:
            return self._provider_session.create_family(family_form)
    def can_update_families(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('update')
    def get_family_form_for_update(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.get_bin_form_for_update_template
        if not self._can('update'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_family_form_for_update(family_id)
    def update_family(self, family_form):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.update_bin_template
        if not self._can('update'):
            raise PermissionDenied()
        else:
            return self._provider_session.update_family(family_form)
    def can_delete_families(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('delete')
    def delete_family(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.delete_bin_template
        if not self._can('delete'):
            raise PermissionDenied()
        else:
            return self._provider_session.delete_family(family_id)
    def can_manage_family_aliases(self):
        raise Unimplemented()
    def alias_family(self, family_id, alias_id):
        # Implemented from azosid template for -
        # osid.resource.BinAdminSession.alias_bin_template
        if not self._can('alias'):
            raise PermissionDenied()
        else:
            # Fixed: alias_id was previously dropped when delegating, which
            # made the provider call fail (missing required argument).
            return self._provider_session.alias_family(family_id, alias_id)
class FamilyNotificationSession(abc_relationship_sessions.FamilyNotificationSession, osid_sessions.OsidSession):
    """Adapts underlying FamilyNotificationSession methods with authorization checks.

    Note: this class previously defined reliable_family_notifications,
    unreliable_family_notifications and acknowledge_family_notification twice
    with identical bodies; the duplicates have been removed (the later
    definitions silently shadowed the earlier ones).
    """
    def can_register_for_family_notifications(self):
        raise Unimplemented()
    def reliable_family_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.reliable_family_notifications()
    def unreliable_family_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.unreliable_family_notifications()
    def acknowledge_family_notification(self, notification_id):
        raise Unimplemented()
    def register_for_new_families(self):
        raise Unimplemented()
    def register_for_changed_families(self):
        raise Unimplemented()
    def register_for_changed_family(self, family_id):
        raise Unimplemented()
    def register_for_deleted_families(self):
        raise Unimplemented()
    def register_for_deleted_family(self, family_id):
        raise Unimplemented()
    def register_for_changed_family_hierarchy(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resources
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_family_hierarchy()
    def register_for_changed_family_hierarchy_for_ancestors(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_family_hierarchy_for_ancestors(family_id)
    def register_for_changed_family_hierarchy_for_descendants(self, family_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        else:
            self._provider_session.register_for_changed_family_hierarchy_for_descendants(family_id)
class FamilyHierarchySession(abc_relationship_sessions.FamilyHierarchySession, osid_sessions.OsidSession):
    """Adapts underlying FamilyHierarchySession methods with authorization checks.

    Every hierarchy traversal delegates to the provider session after an
    'access' authorization check (azosid BinHierarchySession templates).
    """
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        # This needs to be done right
        # Build from authority in config
        self._qualifier_id = Id('relationship.Family%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'relationship.Family'
    def _check_access(self):
        """Raise PermissionDenied unless the agent holds the 'access' function."""
        if not self._can('access'):
            raise PermissionDenied()
    def get_family_hierarchy_id(self):
        """Return the hierarchy Id (no authorization required)."""
        return self._provider_session.get_family_hierarchy_id()
    family_hierarchy_id = property(fget=get_family_hierarchy_id)
    def get_family_hierarchy(self):
        """Return the hierarchy, subject to an 'access' check."""
        self._check_access()
        return self._provider_session.get_family_hierarchy()
    family_hierarchy = property(fget=get_family_hierarchy)
    def can_access_family_hierarchy(self):
        """Return True if the agent may traverse the family hierarchy."""
        return self._can('access')
    def use_comparative_family_view(self):
        """Forward the comparative-view hint to the provider session."""
        self._provider_session.use_comparative_family_view()
    def use_plenary_family_view(self):
        """Forward the plenary-view hint to the provider session."""
        self._provider_session.use_plenary_family_view()
    def get_root_family_ids(self):
        """Return the Ids of the hierarchy roots."""
        self._check_access()
        return self._provider_session.get_root_family_ids()
    root_family_ids = property(fget=get_root_family_ids)
    def get_root_families(self):
        """Return the root families of the hierarchy."""
        self._check_access()
        return self._provider_session.get_root_families()
    root_families = property(fget=get_root_families)
    def has_parent_families(self, family_id):
        """Return True if the given family has any parents."""
        self._check_access()
        return self._provider_session.has_parent_families(family_id)
    def is_parent_of_family(self, id_, family_id):
        """Return True if ``id_`` is a direct parent of ``family_id``."""
        self._check_access()
        return self._provider_session.is_parent_of_family(id_, family_id)
    def get_parent_family_ids(self, family_id):
        """Return the Ids of the given family's parents."""
        self._check_access()
        return self._provider_session.get_parent_family_ids(family_id)
    def get_parent_families(self, family_id):
        """Return the given family's parents."""
        self._check_access()
        return self._provider_session.get_parent_families(family_id)
    def is_ancestor_of_family(self, id_, family_id):
        """Return True if ``id_`` is an ancestor of ``family_id``."""
        self._check_access()
        return self._provider_session.is_ancestor_of_family(id_, family_id)
    def has_child_families(self, family_id):
        """Return True if the given family has any children."""
        self._check_access()
        return self._provider_session.has_child_families(family_id)
    def is_child_of_family(self, id_, family_id):
        """Return True if ``id_`` is a direct child of ``family_id``."""
        self._check_access()
        return self._provider_session.is_child_of_family(id_, family_id)
    def get_child_family_ids(self, family_id):
        """Return the Ids of the given family's children."""
        self._check_access()
        return self._provider_session.get_child_family_ids(family_id)
    def get_child_families(self, family_id):
        """Return the given family's children."""
        self._check_access()
        return self._provider_session.get_child_families(family_id)
    def is_descendant_of_family(self, id_, family_id):
        """Return True if ``id_`` is a descendant of ``family_id``."""
        self._check_access()
        return self._provider_session.is_descendant_of_family(id_, family_id)
    def get_family_node_ids(self, family_id, ancestor_levels, descendant_levels, include_siblings):
        """Return an Id-only node subtree around the given family."""
        self._check_access()
        return self._provider_session.get_family_node_ids(
            family_id,
            ancestor_levels,
            descendant_levels,
            include_siblings)
    def get_family_nodes(self, family_id, ancestor_levels, descendant_levels, include_siblings):
        """Return a node subtree around the given family."""
        self._check_access()
        return self._provider_session.get_family_nodes(
            family_id,
            ancestor_levels,
            descendant_levels,
            include_siblings)
class FamilyHierarchyDesignSession(abc_relationship_sessions.FamilyHierarchyDesignSession, osid_sessions.OsidSession):
    """Adapts underlying FamilyHierarchyDesignSession methods with authorization checks."""
    def __init__(self, provider_session, authz_session, proxy=None):
        osid_sessions.OsidSession.__init__(self, provider_session, authz_session, proxy)
        # This needs to be done right
        # Build from authority in config
        self._qualifier_id = Id('relationship.Family%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'relationship.Family' # should this be 'relationship.FamilyHierarchy' ?
    def get_family_hierarchy_id(self):
        """Return the hierarchy Id (no authorization required)."""
        # azosid template: osid.resource.BinHierarchySession.get_bin_hierarchy_id
        return self._provider_session.get_family_hierarchy_id()
    family_hierarchy_id = property(fget=get_family_hierarchy_id)
    def get_family_hierarchy(self):
        """Return the hierarchy, subject to an 'access' check."""
        # azosid template: osid.resource.BinHierarchySession.get_bin_hierarchy
        if not self._can('access'):
            raise PermissionDenied()
        return self._provider_session.get_family_hierarchy()
    family_hierarchy = property(fget=get_family_hierarchy)
    def can_modify_family_hierarchy(self):
        """Return True if the agent may restructure the hierarchy."""
        # azosid template: osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy
        return self._can('modify')
    def add_root_family(self, family_id):
        """Add a root node to the hierarchy, subject to a 'modify' check."""
        # azosid template: osid.resource.BinHierarchyDesignSession.add_root_bin_template
        if not self._can('modify'):
            raise PermissionDenied()
        return self._provider_session.add_root_family(family_id)
    def remove_root_family(self, family_id):
        """Remove a root node from the hierarchy, subject to a 'modify' check."""
        if not self._can('modify'):
            raise PermissionDenied()
        return self._provider_session.remove_root_family(family_id)
    def add_child_family(self, family_id, child_id):
        """Attach a child to a family, subject to a 'modify' check."""
        if not self._can('modify'):
            raise PermissionDenied()
        return self._provider_session.add_child_family(family_id, child_id)
    def remove_child_family(self, family_id, child_id):
        """Detach one child from a family, subject to a 'modify' check."""
        if not self._can('modify'):
            raise PermissionDenied()
        return self._provider_session.remove_child_family(family_id, child_id)
    def remove_child_families(self, family_id):
        """Detach all children from a family, subject to a 'modify' check."""
        if not self._can('modify'):
            raise PermissionDenied()
        return self._provider_session.remove_child_families(family_id)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_site
short_description: Manage gslb site entities in Netscaler.
description:
- Manage gslb site entities in Netscaler.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
sitename:
description:
- >-
Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the virtual server is created.
- "Minimum length = 1"
sitetype:
choices:
- 'REMOTE'
- 'LOCAL'
description:
- >-
Type of site to create. If the type is not specified, the appliance automatically detects and sets
the type on the basis of the IP address being assigned to the site. If the specified site IP address
is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site.
Otherwise, it is a remote site.
siteipaddress:
description:
- >-
IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB
sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or
MIP address, or the IP address of the ADNS service).
- "Minimum length = 1"
publicip:
description:
- >-
Public IP address for the local site. Required only if the appliance is deployed in a private address
space and the site has a public IP address hosted on an external firewall or a NAT device.
- "Minimum length = 1"
metricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The
appliances in the GSLB setup exchange health information once every second.
- >-
If you disable metrics exchange, you can use only static load balancing methods (such as round robin,
static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load
balancing method (such as least connection) is in operation, the appliance falls back to round robin.
Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB
services. Otherwise, the service is marked as DOWN.
nwmetricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from
communications with various local DNS (LDNS) servers used by clients. RTT information is used in the
dynamic RTT load balancing method, and is exchanged every 5 seconds.
sessionexchange:
choices:
- 'enabled'
- 'disabled'
description:
- "Exchange persistent session entries with other GSLB sites every five seconds."
triggermonitor:
choices:
- 'ALWAYS'
- 'MEPDOWN'
- 'MEPDOWN_SVCDOWN'
description:
- >-
Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound.
Available settings function as follows:
- "* C(ALWAYS) - Monitor the GSLB service at all times."
- >-
* C(MEPDOWN) - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange
Protocol (MEP) is disabled.
- "C(MEPDOWN_SVCDOWN) - Monitor the service in either of the following situations:"
- "* The exchange of metrics through MEP is disabled."
- >-
* The exchange of metrics through MEP is enabled but the status of the service, learned through
metrics exchange, is DOWN.
parentsite:
description:
- "Parent site of the GSLB site, in a parent-child topology."
clip:
description:
- >-
Cluster IP address. Specify this parameter to connect to the remote cluster site for GSLB auto-sync.
Note: The cluster IP address is defined when creating the cluster.
publicclip:
description:
- >-
IP address to be used to globally access the remote cluster when it is deployed behind a NAT. It can
be same as the normal cluster IP address.
naptrreplacementsuffix:
description:
- >-
The naptr replacement suffix configured here will be used to construct the naptr replacement field in
NAPTR record.
- "Minimum length = 1"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup gslb site
delegate_to: localhost
netscaler_gslb_site:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
sitename: gslb-site-1
siteipaddress: 192.168.1.1
sitetype: LOCAL
publicip: 192.168.1.1
metricexchange: enabled
nwmetricexchange: enabled
sessionexchange: enabled
triggermonitor: ALWAYS
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
    type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
    type: dict
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite import gslbsite
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection,
)
def gslb_site_exists(client, module):
    """Return True if a GSLB site with the configured sitename exists on the target."""
    match_count = gslbsite.count_filtered(client, 'sitename:%s' % module.params['sitename'])
    return match_count > 0
def gslb_site_identical(client, module, gslb_site_proxy):
    """Return True if the configured site matches the one on the target exactly.

    An empty diff between the proxy and the fetched site means identical.
    """
    fetched = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
    return not gslb_site_proxy.diff_object(fetched[0])
def diff_list(client, module, gslb_site_proxy):
    """Return the attribute differences between the module config and the target site."""
    fetched = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
    return gslb_site_proxy.diff_object(fetched[0])
def main():
    """Module entry point: ensure the requested GSLB site state on the Netscaler.

    Builds the argument spec, logs in to the NITRO API, constructs a
    ConfigProxy for the gslbsite resource, then converges toward
    state=present (create/update) or state=absent (delete), with sanity
    checks after each action when not in check mode.
    """
    # Arguments specific to this module; mirrors the gslbsite NITRO attributes.
    module_specific_arguments = dict(
        sitename=dict(type='str'),
        sitetype=dict(
            type='str',
            choices=[
                'REMOTE',
                'LOCAL',
            ]
        ),
        siteipaddress=dict(type='str'),
        publicip=dict(type='str'),
        metricexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        nwmetricexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        sessionexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        triggermonitor=dict(
            type='str',
            choices=[
                'ALWAYS',
                'MEPDOWN',
                'MEPDOWN_SVCDOWN',
            ]
        ),
        parentsite=dict(type='str'),
        clip=dict(type='str'),
        publicclip=dict(type='str'),
        naptrreplacementsuffix=dict(type='str'),
    )
    # Placeholder for arguments not derived from the NITRO schema.
    hand_inserted_arguments = dict(
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # Distinguish common transport failures by class name to avoid a hard
        # dependency on the requests package at import time.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes the module may set on the gslbsite resource.
    readwrite_attrs = [
        'sitename',
        'sitetype',
        'siteipaddress',
        'publicip',
        'metricexchange',
        'nwmetricexchange',
        'sessionexchange',
        'triggermonitor',
        'parentsite',
        'clip',
        'publicclip',
        'naptrreplacementsuffix',
    ]
    # Attributes reported by NITRO but never written by the module.
    readonly_attrs = [
        'status',
        'persistencemepstatus',
        'version',
        '__count',
    ]
    # Attributes that cannot be changed after site creation; updates touching
    # these fail with an explicit error below.
    immutable_attrs = [
        'sitename',
        'sitetype',
        'siteipaddress',
        'publicip',
        'parentsite',
        'clip',
        'publicclip',
    ]
    # NITRO expects uppercase ENABLED/DISABLED; module accepts lowercase.
    transforms = {
        'metricexchange': [lambda v: v.upper()],
        'nwmetricexchange': [lambda v: v.upper()],
        'sessionexchange': [lambda v: v.upper()],
    }
    # Instantiate config proxy
    gslb_site_proxy = ConfigProxy(
        actual=gslbsite(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        ensure_feature_is_enabled(client, 'GSLB')
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not gslb_site_exists(client, module):
                if not module.check_mode:
                    gslb_site_proxy.add()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not gslb_site_identical(client, module, gslb_site_proxy):
                # Check if we try to change value of immutable attributes
                immutables_changed = get_immutables_intersection(gslb_site_proxy, diff_list(client, module, gslb_site_proxy).keys())
                if immutables_changed != []:
                    module.fail_json(
                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
                        diff=diff_list(client, module, gslb_site_proxy),
                        **module_result
                    )
                if not module.check_mode:
                    gslb_site_proxy.update()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not gslb_site_exists(client, module):
                    module.fail_json(msg='GSLB site does not exist', **module_result)
                if not gslb_site_identical(client, module, gslb_site_proxy):
                    module.fail_json(msg='GSLB site differs from configured', diff=diff_list(client, module, gslb_site_proxy), **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if gslb_site_exists(client, module):
                if not module.check_mode:
                    gslb_site_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if gslb_site_exists(client, module):
                    module.fail_json(msg='GSLB site still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
main()
| |
import re
import unittest
from external.wip import work_in_progress
from rmgpy.species import Species
from .adjlist import ConsistencyChecker
from .molecule import Molecule
from .util import retrieveElementCount
from .inchi import compose_aug_inchi, P_LAYER_PREFIX, P_LAYER_SEPARATOR, U_LAYER_PREFIX, U_LAYER_SEPARATOR
from .parser import *
class InChIParsingTest(unittest.TestCase):
    """Round-trip tests for (augmented) InChI parsing.

    Each test parses an InChI — optionally augmented with a u-layer
    (unpaired electrons) and/or p-layer (lone pairs) — into a Molecule,
    runs consistency checks on the parsed structure, and verifies that the
    augmented InChI regenerated from the resulting Species matches the input.
    """

    def compare(self, inchi, u_indices=None, p_indices=None):
        """Parse ``inchi`` with optional u-/p-layers and check the round trip.

        Args:
            inchi: InChI string, with or without the 'InChI=1S/' prefix.
            u_indices: atom indices bearing unpaired electrons (u-layer).
            p_indices: atom indices bearing lone pairs (p-layer).

        Returns:
            The parsed Molecule.
        """
        u_layer = U_LAYER_PREFIX + U_LAYER_SEPARATOR.join(map(str, u_indices)) if u_indices else None
        p_layer = P_LAYER_PREFIX + P_LAYER_SEPARATOR.join(map(str, p_indices)) if p_indices else None

        aug_inchi = compose_aug_inchi(inchi, u_layer, p_layer)

        mol = fromAugmentedInChI(Molecule(), aug_inchi)
        ConsistencyChecker.check_multiplicity(mol.getRadicalCount(), mol.multiplicity)
        for at in mol.atoms:
            ConsistencyChecker.check_partial_charge(at)

        spc = Species(molecule=[mol])
        spc.generateResonanceIsomers()

        # Strip the 'InChI=1(S)/' prefix so that standard and non-standard
        # identifiers compare uniformly.
        ignore_prefix = r"(InChI=1+)(S*)/"
        aug_inchi_expected = re.split(ignore_prefix, aug_inchi)[-1]
        aug_inchi_computed = re.split(ignore_prefix, spc.getAugmentedInChI())[-1]
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(aug_inchi_expected, aug_inchi_computed)

        return mol

    def test_Ethane_parsing(self):
        inchi = 'C2H6/c1-2/h1-2H3'
        self.compare(inchi)

    def test_Ethyl_parsing(self):
        inchi = 'C2H5/c1-2/h1H2,2H3'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def test_CH3_parsing(self):
        inchi = 'CH3/h1H3'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def test_H2_parsing(self):
        inchi = 'H2/h1H'
        self.compare(inchi)

    def test_C2H4_biradical_parsing(self):
        inchi = 'C2H4/c1-2/h1-2H2'
        u_indices = [1, 2]
        self.compare(inchi, u_indices)

    def test_C2H3_triradical_parsing(self):
        inchi = 'C2H3/c1-2/h1H,2H2'
        u_indices = [1, 1, 2]
        self.compare(inchi, u_indices)

    def test_C3H6_biradical_parsing(self):
        inchi = 'C3H6/c1-3-2/h1-3H2'
        u_indices = [1, 2]
        self.compare(inchi, u_indices)

    def testC2H3O3(self):
        # Reference adjacency list of the expected structure (informational
        # only; the assertion is made via the InChI round trip below).
        adjlist = '''
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {5,S}
3 O u1 p2 c0 {2,S}
4 O u0 p2 c0 {5,S} {8,S}
5 O u0 p2 c0 {2,S} {4,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {4,S}
        '''
        inchi = 'C2H3O3/c1-2(3)5-4/h4H,1H2'
        u_indices = [1]
        self.compare(inchi, u_indices)

    def testC2H2(self):
        inchi = 'C2H2/c1-2/h1-2H'
        u_indices = [1, 2]
        self.compare(inchi, u_indices)

    def testO2(self):
        inchi = 'O2/c1-2'
        u_indices = [1, 2]
        self.compare(inchi, u_indices)

    def testTriRadicalZwitterMult4(self):
        inchi = 'C6H11/c1-3-5-6-4-2/h5H,1-4,6H2'
        u_indices = [1, 2, 5]
        self.compare(inchi, u_indices)

    def testTriRadicalDoubleBondMult4(self):
        inchi = 'C4H7/c1-3-4-2/h3H,1-2,4H2'
        u_indices = [1, 2, 3]
        self.compare(inchi, u_indices)

    def testTriRadical2DoubleBondMult4(self):
        inchi = 'C6H9/c1-4-6(3)5-2/h1,4-6H,2H2,3H3'
        u_indices = [1, 2, 5]
        self.compare(inchi, u_indices)

    def testQuadriRadicalDoubleBondZwitterMult5(self):
        inchi = 'C8H14/c1-4-6-7-8(3)5-2/h5-6,8H,1-2,4,7H2,3H3'
        u_indices = [1, 2, 5, 6]
        self.compare(inchi, u_indices)

    def testQuadri2DoubleBondMult5(self):
        inchi = 'C8H14/c1-5-7(3)8(4)6-2/h5-8H,1-2H2,3-4H3'
        u_indices = [1, 2, 5, 6]
        self.compare(inchi, u_indices)

    def testC5H6O(self):
        inchi = 'C5H6O/c6-5-3-1-2-4-5/h1-3,5H,4H2'
        u_indices = [2, 6]
        self.compare(inchi, u_indices)

    def testC5H6O_2(self):
        inchi = 'C5H6O/c1-5-3-2-4-6-5/h2-5H,1H2'
        u_indices = [1, 3]
        self.compare(inchi, u_indices)

    def testC5H6O_3(self):
        inchi = 'C5H6O/c1-5-3-2-4-6-5/h2-5H,1H2'
        u_indices = [1, 2, 3, 4]
        self.compare(inchi, u_indices)

    @work_in_progress
    def testCO(self):
        inchi = 'CO/c1-2'
        p_indices = [1, 2]
        mol = self.compare(inchi, [], p_indices)
        assert mol.atoms[1].lonePairs == 1  # Oxygen
        assert mol.atoms[0].charge == -1
        assert mol.atoms[1].charge == +1

    def testTripletMethylene(self):
        inchi = 'CH2/h1H2'
        u_indices = [1, 1]
        self.compare(inchi, u_indices)

    def testSingletMethylene(self):
        inchi = 'CH2/h1H2'
        p_indices = [1]
        self.compare(inchi, u_indices=[], p_indices=p_indices)

    def testC4H6O(self):
        inchi = 'C4H6O/c1-2-3-4-5/h2H,3H2,1H3'
        u_indices = [2, 4]
        mol = self.compare(inchi, u_indices)
        for at in mol.atoms:
            if at.isOxygen():
                self.assertTrue(at.lonePairs == 2)

    def testC6H6(self):
        inchi = 'C6H6/c1-3-5-6-4-2/h1,6H,2,5H2'
        u_indices = [1, 3]
        self.compare(inchi, u_indices)

    def testC4H6O_2(self):
        inchi = 'C4H6O/c1-2-3-4-5/h2,4H,1,3H2'
        u_indices = [4, 5]
        self.compare(inchi, u_indices)

    def test_CO_triplet(self):
        adjlist = """
        multiplicity 3
        1 C u2 p0 c0 {2,D}
        2 O u0 p2 c0 {1,D}
        """
        spc = Species(molecule=[Molecule().fromAdjacencyList(adjlist)])
        aug_inchi = spc.getAugmentedInChI()
        self.assertEqual(Species(molecule=[Molecule().fromAugmentedInChI(aug_inchi)]).isIsomorphic(spc), True)

    def test_CCCO_triplet(self):
        adjlist = """
        multiplicity 3
1 C u0 p0 c0 {2,D} {5,S} {6,S}
2 C u0 p0 c0 {1,D} {3,S} {7,S}
3 C u1 p0 c0 {2,S} {4,S} {8,S}
4 O u1 p2 c0 {3,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
        """
        mol = Molecule().fromAdjacencyList(adjlist)
        spc = Species(molecule=[mol])
        spc.generateResonanceIsomers()
        aug_inchi = spc.getAugmentedInChI()
        self.assertEqual(Species(molecule=[Molecule().fromAugmentedInChI(aug_inchi)]).isIsomorphic(spc), True)

    def testC3H4(self):
        inchi = 'C3H4/c1-3-2/h1,3H,2H2'
        u_indices = [1, 1]
        self.compare(inchi, u_indices)

    def test_C6H8O2(self):
        inchi = 'C6H8O2/c1-3-5(7)6(8)4-2/h3-6H,1-2H2'
        u_indices = [7, 8]
        self.compare(inchi, u_indices)

    def test_C3H3O3(self):
        inchi = 'C3H3O3/c1-2-5-3-6-4/h1-3H'
        u_indices = [1, 3, 4]
        self.compare(inchi, u_indices)

    def test_CH2O2(self):
        inchi = 'CH2O2/c2-1-3/h1H,(H,2,3)'
        u_indices = [1, 2]
        self.compare(inchi, u_indices)

    def test_C2H2O3(self):
        inchi = 'C2H2O3/c1-5-2(3)4/h1H2'
        u_indices = [1, 3]
        self.compare(inchi, u_indices)

    def test_C3H4O4(self):
        inchi = 'C3H4O4/c4-3(5)1-2-7-6/h1-3,6H'
        u_indices = [4, 5]
        self.compare(inchi, u_indices)

    def test_C6H6O4(self):
        inchi = 'InChI=1S/C6H6O4/c1-2-4-9-6(7)3-5-10-8/h2-3H,1,5H2'
        u_indices = [1, 3, 4, 8]
        self.compare(inchi, u_indices)

    def test_C3H2O3(self):
        inchi = 'InChI=1S/C3H2O3/c1-2-3(4)6-5/h1H2'
        u_indices = [2, 5]
        self.compare(inchi, u_indices)

    def test_C6H6O6(self):
        inchi = 'C6H6O6/c7-6(2-5-12-9)10-3-1-4-11-8/h1,7H,4-5H2'
        u_indices = [2, 3, 8, 9]
        self.compare(inchi, u_indices)

    def test_C3H2(self):
        inchi = 'C3H2/c1-3-2/h1-2H'
        u_indices = [1, 1]
        self.compare(inchi, u_indices)

    def test_C3H4(self):
        inchi = 'InChI=1S/C3H4/c1-3-2/h1,3H,2H2'
        u_indices = [1, 1]
        self.compare(inchi, u_indices)

    def test_C6H8(self):
        inchi = 'InChI=1S/C6H8/c1-3-5-6-4-2/h1,4H,2,5-6H2'
        u_indices = [1, 1, 3, 3]
        self.compare(inchi, u_indices)

    def test_C6H10(self):
        inchi = 'InChI=1S/C6H10/c1-3-5-6-4-2/h3-4H,1-2,5-6H2'
        u_indices = [1, 3]
        self.compare(inchi, u_indices)

    def test_ammonia(self):
        inchi = 'InChI=1S/H3N/h1H3'
        self.compare(inchi)

    @work_in_progress
    def test_ammonium(self):
        """
        has same inchi as ammonia but gets a proton layer: /p+1
        """
        inchi = 'InChI=1S/H3N/h1H3/p+1'
        self.compare(inchi)

    def test_H2S(self):
        inchi = 'InChI=1S/H2S/h1H2'
        self.compare(inchi)

    def test_pyridine(self):
        inchi = 'InChI=1S/C5H5N/c1-2-4-6-5-3-1/h1-5H'
        self.compare(inchi)

    def test_pyrimidine(self):
        inchi = 'InChI=1S/C4H4N2/c1-2-5-4-6-3-1/h1-4H'
        self.compare(inchi)

    @work_in_progress
    def test_nitrate(self):
        """
        - Mobile H spread over oxygen 2, 3, 4
        - Negative charge (3 lone pairs) spread out over oxygen 2, 3, 4
        - Nitrogen 1 positively charged
        """
        inchi = 'InChI=1S/HNO3/c2-1(3)4/h(H,2,3,4)'
        p_indices = [-1, 3, 3, 3]  # ???
        self.compare(inchi, [], p_indices)

    def test_NO(self):
        inchi = 'InChI=1S/NO/c1-2'
        u_indices = [1]
        self.compare(inchi, u_indices)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.
import uuid
import mox
import unittest2 as unittest
from quantum.agent.linux import ovs_lib, utils
class OVS_Lib_Test(unittest.TestCase):
    """
    A test suite to exercise the OVS libraries shared by Quantum agents.

    Note: these tests do not actually execute ovs-* utilities, and thus
    can run on any system. That does, however, limit their scope.
    """

    def setUp(self):
        """Stub out utils.execute so no ovs-* commands are actually run."""
        self.BR_NAME = "br-int"
        self.TO = "--timeout=2"
        self.mox = mox.Mox()
        self.root_helper = 'sudo'
        self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
        self.mox.StubOutWithMock(utils, "execute")

    def tearDown(self):
        self.mox.UnsetStubs()

    def test_vifport(self):
        """create and stringify vif port, confirm no exceptions"""
        self.mox.ReplayAll()

        pname = "vif1.0"
        ofport = 5
        vif_id = str(uuid.uuid4())
        mac = "ca:fe:de:ad:be:ef"

        # test __init__
        port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
        self.assertEqual(port.port_name, pname)
        self.assertEqual(port.ofport, ofport)
        self.assertEqual(port.vif_id, vif_id)
        self.assertEqual(port.vif_mac, mac)
        self.assertEqual(port.switch.br_name, self.BR_NAME)

        # test __str__ (result unused; only checking it does not raise)
        foo = str(port)

        self.mox.VerifyAll()

    def test_reset_bridge(self):
        """reset_bridge deletes (if present) and re-adds the bridge."""
        utils.execute(["ovs-vsctl", self.TO, "--",
                       "--if-exists", "del-br", self.BR_NAME],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.reset_bridge()
        self.mox.VerifyAll()

    def test_delete_port(self):
        """delete_port issues a del-port guarded by --if-exists."""
        pname = "tap5"
        utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
                       "del-port", self.BR_NAME, pname],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_port(pname)
        self.mox.VerifyAll()

    def test_add_flow(self):
        """add_flow builds the expected ovs-ofctl add-flow invocations."""
        ofport = "99"
        vid = 4000
        lsw_id = 18
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,dl_src=ca:fe:de:ad:be:ef"
                       ",actions=strip_vlan,output:0"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=1,actions=normal"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,actions=drop"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,in_port=%s,actions=drop" % ofport],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=4,in_port=%s,dl_vlan=%s,"
                       "actions=strip_vlan,set_tunnel:%s,normal"
                       % (ofport, vid, lsw_id)],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=3,tun_id=%s,actions="
                       "mod_vlan_vid:%s,output:%s"
                       % (lsw_id, vid, ofport)], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
                         actions="strip_vlan,output:0")
        self.br.add_flow(priority=1, actions="normal")
        self.br.add_flow(priority=2, actions="drop")
        self.br.add_flow(priority=2, in_port=ofport, actions="drop")
        self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
                         actions="strip_vlan,set_tunnel:%s,normal" %
                         (lsw_id))
        self.br.add_flow(priority=3, tun_id=lsw_id,
                         actions="mod_vlan_vid:%s,output:%s" %
                         (vid, ofport))
        self.mox.VerifyAll()

    def test_get_port_ofport(self):
        """get_port_ofport returns the 'ofport' column of the Interface."""
        pname = "tap99"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_port_ofport(pname), ofport)
        self.mox.VerifyAll()

    def test_get_datapath_id(self):
        """get_datapath_id strips the surrounding quotes from the column."""
        datapath_id = '"0000b67f4fbcc149"'
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Bridge", self.BR_NAME, "datapath_id"],
                      root_helper=self.root_helper).AndReturn(datapath_id)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
        self.mox.VerifyAll()

    def test_count_flows(self):
        utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn('ignore'
                                                             '\nflow-1\n')
        self.mox.ReplayAll()
        # counts the number of flows as total lines of output - 2
        self.assertEqual(self.br.count_flows(), 1)
        self.mox.VerifyAll()

    def test_delete_flow(self):
        """delete_flows matches flows by in_port, tun_id or dl_vlan."""
        ofport = "5"
        lsw_id = 40
        vid = 39
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "in_port=" + ofport], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "tun_id=%s" % lsw_id], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "dl_vlan=%s" % vid], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_flows(in_port=ofport)
        self.br.delete_flows(tun_id=lsw_id)
        self.br.delete_flows(dl_vlan=vid)
        self.mox.VerifyAll()

    def test_add_tunnel_port(self):
        """add_gre_tunnel_port configures a GRE interface and returns ofport."""
        pname = "tap99"
        ip = "9.9.9.9"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=gre"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:remote_ip=" + ip],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:in_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:out_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_gre_tunnel_port(pname, ip), ofport)
        self.mox.VerifyAll()

    def test_add_patch_port(self):
        """add_patch_port configures a patch interface and returns ofport."""
        pname = "tap99"
        peer = "bar10"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=patch"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set",
                       "Interface", pname, "options:peer=" + peer],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
        self.mox.VerifyAll()

    def _test_get_vif_ports(self, is_xen=False):
        """Common scenario for get_vif_ports; is_xen switches external_ids."""
        pname = "tap99"
        ofport = "6"
        vif_id = str(uuid.uuid4())
        mac = "ca:fe:de:ad:be:ef"

        utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn("%s\n" % pname)

        if is_xen:
            external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
                            % (vif_id, mac))
        else:
            external_ids = ('{iface-id="%s", attached-mac="%s"}'
                            % (vif_id, mac))

        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "external_ids"],
                      root_helper=self.root_helper).AndReturn(external_ids)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        if is_xen:
            utils.execute(["xe", "vif-param-get", "param-name=other-config",
                           "param-key=nicira-iface-id", "uuid=" + vif_id],
                          root_helper=self.root_helper).AndReturn(vif_id)
        self.mox.ReplayAll()

        ports = self.br.get_vif_ports()
        self.assertEqual(1, len(ports))
        self.assertEqual(ports[0].port_name, pname)
        self.assertEqual(ports[0].ofport, ofport)
        self.assertEqual(ports[0].vif_id, vif_id)
        self.assertEqual(ports[0].vif_mac, mac)
        self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
        self.mox.VerifyAll()

    def test_get_vif_ports_nonxen(self):
        self._test_get_vif_ports(False)

    def test_get_vif_ports_xen(self):
        self._test_get_vif_ports(True)

    def test_clear_db_attribute(self):
        pname = "tap77"
        utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
                       pname, "tag"], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.clear_db_attribute("Port", pname, "tag")
        self.mox.VerifyAll()

    def test_port_id_regex(self):
        """re_id extracts mac, vif id, port name and ofport from a listing."""
        result = ('external_ids        : {attached-mac="fa:16:3e:23:5b:f2",'
                  ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
                  ' iface-status=active}\nname                :'
                  ' "dhc5c1321a7-c7"\nofport              : 2\n')
        match = self.br.re_id.search(result)
        vif_mac = match.group('vif_mac')
        vif_id = match.group('vif_id')
        port_name = match.group('port_name')
        ofport = int(match.group('ofport'))
        self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
        self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
        self.assertEqual(port_name, 'dhc5c1321a7-c7')
        self.assertEqual(ofport, 2)

    def test_iface_to_br(self):
        """Success path: iface-to-br output is returned as the bridge name."""
        iface = 'tap0'
        br = 'br-int'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndReturn('br-int')
        self.mox.ReplayAll()
        self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
        self.mox.VerifyAll()

    # Renamed from a second `test_iface_to_br` definition, which silently
    # shadowed the success-path test above so it was never run.
    def test_iface_to_br_handles_error(self):
        """Failure path: an ovs-vsctl error yields None instead of raising."""
        iface = 'tap0'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndRaise(Exception)
        self.mox.ReplayAll()
        self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
        self.mox.VerifyAll()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class Head.
Contains Class prediction head classes for different meta architectures.
All the class prediction heads have a predict function that receives the
`features` as the first argument and returns class predictions with background.
"""
import functools
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from object_detection.predictors.heads import head
slim = contrib_slim
class MaskRCNNClassHead(head.Head):
  """Mask RCNN class prediction head.
  Please refer to Mask RCNN paper:
  https://arxiv.org/abs/1703.06870
  """
  def __init__(self,
               is_training,
               num_class_slots,
               fc_hyperparams_fn,
               use_dropout,
               dropout_keep_prob,
               scope='ClassPredictor'):
    """Constructor.
    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_class_slots: number of class slots. Note that num_class_slots may or
        may not include an implicit background category.
      fc_hyperparams_fn: A function to generate tf-slim arg_scope with
        hyperparameters for fully connected ops.
      use_dropout: Option to use dropout or not.  Note that a single dropout
        op is applied here prior to both box and class predictions, which stands
        in contrast to the ConvolutionalBoxPredictor below.
      dropout_keep_prob: Keep probability for dropout.
        This is only used if use_dropout is True.
      scope: Scope name for the convolution operation.
    """
    super(MaskRCNNClassHead, self).__init__()
    self._is_training = is_training
    self._num_class_slots = num_class_slots
    self._fc_hyperparams_fn = fc_hyperparams_fn
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._scope = scope
  def predict(self, features, num_predictions_per_location=1):
    """Predicts boxes and class scores.
    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing features for a batch of images.
      num_predictions_per_location: Int containing number of predictions per
        location.
    Returns:
      class_predictions_with_background: A float tensor of shape
        [batch_size, 1, num_class_slots] representing the class predictions for
        the proposals.
    Raises:
      ValueError: If num_predictions_per_location is not 1.
    """
    if num_predictions_per_location != 1:
      raise ValueError('Only num_predictions_per_location=1 is supported')
    # Average-pool over the spatial dimensions (axes 1, 2); keep_dims=True
    # (the TF1 spelling of keepdims) preserves the tensor rank for flattening.
    spatial_averaged_roi_pooled_features = tf.reduce_mean(
        features, [1, 2], keep_dims=True, name='AvgPool')
    flattened_roi_pooled_features = slim.flatten(
        spatial_averaged_roi_pooled_features)
    # Dropout is applied only in this head's configuration, before the
    # fully connected projection.
    if self._use_dropout:
      flattened_roi_pooled_features = slim.dropout(
          flattened_roi_pooled_features,
          keep_prob=self._dropout_keep_prob,
          is_training=self._is_training)
    # A single linear (no activation) fully connected layer produces the
    # per-class logits under the configured slim hyperparameters.
    with slim.arg_scope(self._fc_hyperparams_fn()):
      class_predictions_with_background = slim.fully_connected(
          flattened_roi_pooled_features,
          self._num_class_slots,
          activation_fn=None,
          scope=self._scope)
    class_predictions_with_background = tf.reshape(
        class_predictions_with_background,
        [-1, 1, self._num_class_slots])
    return class_predictions_with_background
class ConvolutionalClassHead(head.Head):
  """Convolutional class prediction head."""
  def __init__(self,
               is_training,
               num_class_slots,
               use_dropout,
               dropout_keep_prob,
               kernel_size,
               apply_sigmoid_to_scores=False,
               class_prediction_bias_init=0.0,
               use_depthwise=False,
               scope='ClassPredictor'):
    """Constructor.
    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_class_slots: number of class slots. Note that num_class_slots may or
        may not include an implicit background category.
      use_dropout: Option to use dropout or not.  Note that a single dropout
        op is applied here prior to both box and class predictions, which stands
        in contrast to the ConvolutionalBoxPredictor below.
      dropout_keep_prob: Keep probability for dropout.
        This is only used if use_dropout is True.
      kernel_size: Size of final convolution kernel.  If the
        spatial resolution of the feature map is smaller than the kernel size,
        then the kernel size is automatically set to be
        min(feature_width, feature_height).
      apply_sigmoid_to_scores: if True, apply the sigmoid on the output
        class_predictions.
      class_prediction_bias_init: constant value to initialize bias of the last
        conv2d layer before class prediction.
      use_depthwise: Whether to use depthwise convolutions for prediction
        steps. Default is False.
      scope: Scope name for the convolution operation.
    Raises:
      ValueError: if min_depth > max_depth.
      ValueError: if use_depthwise is True and kernel_size is 1.
    """
    if use_depthwise and (kernel_size == 1):
      raise ValueError('Should not use 1x1 kernel when using depthwise conv')
    super(ConvolutionalClassHead, self).__init__()
    self._is_training = is_training
    self._num_class_slots = num_class_slots
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._kernel_size = kernel_size
    self._apply_sigmoid_to_scores = apply_sigmoid_to_scores
    self._class_prediction_bias_init = class_prediction_bias_init
    self._use_depthwise = use_depthwise
    self._scope = scope
  def predict(self, features, num_predictions_per_location):
    """Predicts boxes.
    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions to be made per
        spatial location.
    Returns:
      class_predictions_with_background: A float tensors of shape
        [batch_size, num_anchors, num_class_slots] representing the class
        predictions for the proposals.
    """
    net = features
    if self._use_dropout:
      net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
    if self._use_depthwise:
      # Depthwise spatial convolution followed by a 1x1 pointwise conv2d
      # replaces the single dense convolution in the else-branch.
      depthwise_scope = self._scope + '_depthwise'
      class_predictions_with_background = slim.separable_conv2d(
          net, None, [self._kernel_size, self._kernel_size],
          padding='SAME', depth_multiplier=1, stride=1,
          rate=1, scope=depthwise_scope)
      class_predictions_with_background = slim.conv2d(
          class_predictions_with_background,
          num_predictions_per_location * self._num_class_slots, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope=self._scope)
    else:
      class_predictions_with_background = slim.conv2d(
          net,
          num_predictions_per_location * self._num_class_slots,
          [self._kernel_size, self._kernel_size],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope=self._scope,
          biases_initializer=tf.constant_initializer(
              self._class_prediction_bias_init))
    if self._apply_sigmoid_to_scores:
      class_predictions_with_background = tf.sigmoid(
          class_predictions_with_background)
    # Use the statically known batch size when available; otherwise fall
    # back to the dynamic shape at graph-execution time.
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    class_predictions_with_background = tf.reshape(
        class_predictions_with_background,
        [batch_size, -1, self._num_class_slots])
    return class_predictions_with_background
# TODO(alirezafathi): See if possible to unify Weight Shared with regular
# convolutional class head.
class WeightSharedConvolutionalClassHead(head.Head):
  """Weight shared convolutional class prediction head.
  This head allows sharing the same set of parameters (weights) when called more
  then once on different feature maps.
  """
  def __init__(self,
               num_class_slots,
               kernel_size=3,
               class_prediction_bias_init=0.0,
               use_dropout=False,
               dropout_keep_prob=0.8,
               use_depthwise=False,
               score_converter_fn=tf.identity,
               return_flat_predictions=True,
               scope='ClassPredictor'):
    """Constructor.
    Args:
      num_class_slots: number of class slots. Note that num_class_slots may or
        may not include an implicit background category.
      kernel_size: Size of final convolution kernel.
      class_prediction_bias_init: constant value to initialize bias of the last
        conv2d layer before class prediction.
      use_dropout: Whether to apply dropout to class prediction head.
      dropout_keep_prob: Probability of keeping activiations.
      use_depthwise: Whether to use depthwise convolutions for prediction
        steps. Default is False.
      score_converter_fn: Callable elementwise nonlinearity (that takes tensors
        as inputs and returns tensors).
      return_flat_predictions: If true, returns flattened prediction tensor
        of shape [batch, height * width * num_predictions_per_location,
        box_coder]. Otherwise returns the prediction tensor before reshaping,
        whose shape is [batch, height, width, num_predictions_per_location *
        num_class_slots].
      scope: Scope name for the convolution operation.
    Raises:
      ValueError: if use_depthwise is True and kernel_size is 1.
    """
    if use_depthwise and (kernel_size == 1):
      raise ValueError('Should not use 1x1 kernel when using depthwise conv')
    super(WeightSharedConvolutionalClassHead, self).__init__()
    self._num_class_slots = num_class_slots
    self._kernel_size = kernel_size
    self._class_prediction_bias_init = class_prediction_bias_init
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._use_depthwise = use_depthwise
    self._score_converter_fn = score_converter_fn
    self._return_flat_predictions = return_flat_predictions
    self._scope = scope
  def predict(self, features, num_predictions_per_location):
    """Predicts boxes.
    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions to be made per
        spatial location.
    Returns:
      class_predictions_with_background: A tensor of shape
        [batch_size, num_anchors, num_class_slots] representing the class
        predictions for the proposals, or a tensor of shape [batch, height,
        width, num_predictions_per_location * num_class_slots] representing
        class predictions before reshaping if self._return_flat_predictions is
        False.
    """
    class_predictions_net = features
    if self._use_dropout:
      class_predictions_net = slim.dropout(
          class_predictions_net, keep_prob=self._dropout_keep_prob)
    # Select the convolution op once; both branches take identical keyword
    # arguments, so the call below is shared.
    if self._use_depthwise:
      conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
    else:
      conv_op = slim.conv2d
    class_predictions_with_background = conv_op(
        class_predictions_net,
        num_predictions_per_location * self._num_class_slots,
        [self._kernel_size, self._kernel_size],
        activation_fn=None, stride=1, padding='SAME',
        normalizer_fn=None,
        biases_initializer=tf.constant_initializer(
            self._class_prediction_bias_init),
        scope=self._scope)
    # Use the statically known batch size when available; otherwise fall
    # back to the dynamic shape.
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    class_predictions_with_background = self._score_converter_fn(
        class_predictions_with_background)
    if self._return_flat_predictions:
      class_predictions_with_background = tf.reshape(
          class_predictions_with_background,
          [batch_size, -1, self._num_class_slots])
    return class_predictions_with_background
| |
"""
The RPyC protocol
"""
import sys
import weakref
import itertools
import socket
import time
from threading import Lock
from rpyc.lib.compat import pickle, next, is_py3k, maxint, select_error
from rpyc.lib.colls import WeakValueDict, RefCountingColl
from rpyc.core import consts, brine, vinegar, netref
from rpyc.core.async import AsyncResult
class PingError(Exception):
    """Raised when a :func:`Connection.ping` round-trip does not succeed."""
# Base protocol configuration. Each Connection takes a copy of this dict and
# applies the caller-supplied overrides on top of it; the parameter table in
# the module-level documentation string below describes each entry.
DEFAULT_CONFIG = dict(
    # ATTRIBUTES
    allow_safe_attrs = True,
    allow_exposed_attrs = True,
    allow_public_attrs = False,
    allow_all_attrs = False,
    # Dunder methods considered safe to access remotely when
    # allow_safe_attrs is enabled.
    safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__',
        '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__',
        '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
        '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
        '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__',
        '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
        '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
        '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
        '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__',
        '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__',
        '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__',
        '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
        '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__',
        '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__',
        '__exit__', '__next__',]),
    exposed_prefix = "exposed_",
    allow_getattr = True,
    allow_setattr = False,
    allow_delattr = False,
    # EXCEPTIONS
    include_local_traceback = True,
    instantiate_custom_exceptions = False,
    import_custom_exceptions = False,
    instantiate_oldstyle_exceptions = False, # which don't derive from Exception
    propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party
    propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party
    log_exceptions = True,
    # MISC
    allow_pickle = False,
    connid = None,
    credentials = None,
    endpoints = None,
    logger = None,
)
"""
The default configuration dictionary of the protocol. You can override these parameters
by passing a different configuration dict to the :class:`Connection` class.
.. note::
You only need to override the parameters you want to change. There's no need
to repeat parameters whose values remain unchanged.
======================================= ================ =====================================================
Parameter Default value Description
======================================= ================ =====================================================
``allow_safe_attrs`` ``True`` Whether to allow the use of *safe* attributes
(only those listed as ``safe_attrs``)
``allow_exposed_attrs`` ``True`` Whether to allow exposed attributes
(attributes that start with the ``exposed_prefix``)
``allow_public_attrs`` ``False`` Whether to allow public attributes
(attributes that don't start with ``_``)
``allow_all_attrs`` ``False`` Whether to allow all attributes (including private)
``safe_attrs`` ``set([...])`` The set of attributes considered safe
``exposed_prefix`` ``"exposed_"`` The prefix of exposed attributes
``allow_getattr`` ``True`` Whether to allow getting of attributes (``getattr``)
``allow_setattr`` ``False`` Whether to allow setting of attributes (``setattr``)
``allow_delattr`` ``False`` Whether to allow deletion of attributes (``delattr``)
``allow_pickle`` ``False`` Whether to allow the use of ``pickle``
``include_local_traceback`` ``True`` Whether to include the local traceback
in the remote exception
``instantiate_custom_exceptions`` ``False`` Whether to allow instantiation of
custom exceptions (not the built in ones)
``import_custom_exceptions`` ``False`` Whether to allow importing of
exceptions from not-yet-imported modules
``instantiate_oldstyle_exceptions`` ``False`` Whether to allow instantiation of exceptions
which don't derive from ``Exception``. This
is not applicable for Python 3 and later.
``propagate_SystemExit_locally`` ``False`` Whether to propagate ``SystemExit``
locally (kill the server) or to the other
party (kill the client)
``propagate_KeyboardInterrupt_locally`` ``True``         Whether to propagate ``KeyboardInterrupt``
locally (kill the server) or to the other
party (kill the client)
``logger`` ``None`` The logger instance to use to log exceptions
(before they are sent to the other party)
and other events. If ``None``, no logging takes place.
``connid`` ``None`` **Runtime**: the RPyC connection ID (used
mainly for debugging purposes)
``credentials``                         ``None``         **Runtime**: the credentials object that was returned
by the server's :ref:`authenticator <api-authenticators>`
or ``None``
``endpoints`` ``None`` **Runtime**: The connection's endpoints. This is a tuple
made of the local socket endpoint (``getsockname``) and the
remote one (``getpeername``). This is set by the server
upon accepting a connection; client side connections
do no have this configuration option set.
======================================= ================ =====================================================
"""
# Module-level counter yielding unique, monotonically increasing ids; used to
# build the default "connid" configuration value for each new Connection.
_connection_id_generator = itertools.count(1)
class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param service: the :class:`Service <rpyc.core.service.Service>` to expose
    :param channel: the :class:`Channel <rpyc.core.channel.Channel>` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    :param _lazy: whether or not to initialize the service with the creation of
                  the connection. Default is True. If set to False, you will
                  need to call :func:`_init_service` manually later
    """
    def __init__(self, service, channel, config = {}, _lazy = False):
        # _closed is set before anything that may fail, so __del__ -> close()
        # is always safe. ``config`` is only read (copied into _config), never
        # mutated, so the mutable default argument is harmless here.
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (next(_connection_id_generator),)
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._sync_replies = {}      # seq -> (is_exception, result) for sync waiters
        self._async_callbacks = {}   # seq -> callback(is_exception, result)
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        # weakref.proxy avoids a strong reference cycle between the service
        # instance and the connection
        self._local_root = service(weakref.proxy(self))
        if not _lazy:
            self._init_service()
        self._closed = False
    def _init_service(self):
        """Notifies the local service that the connection is established"""
        self._local_root.on_connect()
    def __del__(self):
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, t, v, tb):
        self.close()
    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)
    #
    # IO
    #
    def _cleanup(self, _anyway = True):
        """Releases all resources held by the connection; a no-op on an
        already-closed connection unless *_anyway* is set"""
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect()
        self._sync_replies.clear()
        self._async_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        #self._seqcounter = None
        #self._config.clear()
    def close(self, _catchall = True):
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            # politely notify the other party before tearing down
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway = True)
    @property
    def closed(self):
        """Indicates whether the connection has been closed or not"""
        return self._closed
    def fileno(self):
        """Returns the connection's underlying file descriptor"""
        return self._channel.fileno()
    def ping(self, data = None, timeout = 3):
        """
        Asserts that the other party is functioning properly, by making sure
        the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout = timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")
    def _send(self, msg, seq, args):
        """Serializes (msg, seq, args) with brine and writes it to the
        channel; _sendlock serializes concurrent writers"""
        data = brine.dump((msg, seq, args))
        self._sendlock.acquire()
        try:
            self._channel.send(data)
        finally:
            self._sendlock.release()
    def _send_request(self, handler, args):
        """Sends a request message and returns its sequence number"""
        seq = next(self._seqcounter)
        self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))
        return seq
    def _send_reply(self, seq, obj):
        """Sends the (boxed) result of request *seq* back to the peer"""
        self._send(consts.MSG_REPLY, seq, self._box(obj))
    def _send_exception(self, seq, exctype, excval, exctb):
        """Sends a serialized exception as the reply for request *seq*"""
        exc = vinegar.dump(exctype, excval, exctb,
            include_local_traceback = self._config["include_local_traceback"])
        self._send(consts.MSG_EXCEPTION, seq, exc)
    #
    # boxing
    #
    def _box(self, obj):
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self:
            # a proxy that points back at one of our own objects - unwrap it
            return consts.LABEL_LOCAL_REF, obj.____oid__
        else:
            self._local_objects.add(obj)
            try:
                cls = obj.__class__
            except Exception:
                # see issue #16
                cls = type(obj)
            if not isinstance(cls, type):
                cls = type(obj)
            return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__)
    def _unbox(self, package):
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            oid, clsname, modname = value
            if oid in self._proxy_cache:
                return self._proxy_cache[oid]
            proxy = self._netref_factory(oid, clsname, modname)
            self._proxy_cache[oid] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label,))
    def _netref_factory(self, oid, clsname, modname):
        """Creates a netref (proxy) for remote object *oid*, reusing a cached
        proxy class for (clsname, modname) when available"""
        typeinfo = (clsname, modname)
        if typeinfo in self._netref_classes_cache:
            cls = self._netref_classes_cache[typeinfo]
        elif typeinfo in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[typeinfo]
        else:
            info = self.sync_request(consts.HANDLE_INSPECT, oid)
            cls = netref.class_factory(clsname, modname, info)
            self._netref_classes_cache[typeinfo] = cls
        return cls(weakref.ref(self), oid)
    #
    # dispatching
    #
    def _dispatch_request(self, seq, raw_args):
        """Invokes the requested handler and sends back either the reply or
        the exception it raised"""
        try:
            handler, args = raw_args
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except:
            # need to catch old style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            if self._config["logger"] and t is not StopIteration:
                self._config["logger"].debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]:
                raise
            self._send_exception(seq, t, v, tb)
        else:
            self._send_reply(seq, res)
    def _dispatch_reply(self, seq, raw):
        """Routes an incoming reply to its async callback or sync waiter"""
        obj = self._unbox(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(False, obj)
        else:
            self._sync_replies[seq] = (False, obj)
    def _dispatch_exception(self, seq, raw):
        """Routes an incoming remote exception to its async callback or
        sync waiter"""
        obj = vinegar.load(raw,
            import_custom_exceptions = self._config["import_custom_exceptions"],
            instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"],
            instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"])
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(True, obj)
        else:
            self._sync_replies[seq] = (True, obj)
    #
    # serving
    #
    def _recv(self, timeout, wait_for_lock):
        """Receives one raw message from the channel; returns None when
        nothing arrives within *timeout* (or the recv lock is busy and
        *wait_for_lock* is False)"""
        if not self._recvlock.acquire(wait_for_lock):
            return None
        try:
            if self._channel.poll(timeout):
                data = self._channel.recv()
            else:
                data = None
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
        return data
    def _dispatch(self, data):
        """Deserializes one message and dispatches it by message type"""
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            self._dispatch_reply(seq, args)
        elif msg == consts.MSG_EXCEPTION:
            self._dispatch_exception(seq, args)
        else:
            raise ValueError("invalid message type: %r" % (msg,))
    def poll(self, timeout = 0):
        """Serves a single transaction, should one arrive in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        data = self._recv(timeout, wait_for_lock = False)
        if not data:
            return False
        self._dispatch(data)
        return True
    def serve(self, timeout = 1):
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply were received, ``False``
                  otherwise.
        """
        data = self._recv(timeout, wait_for_lock = True)
        if not data:
            return False
        self._dispatch(data)
        return True
    def serve_all(self):
        """Serves all requests and replies for as long as the connection is
        alive."""
        try:
            while True:
                self.serve(0.1)
        except (socket.error, select_error, IOError):
            # transport errors are expected once we initiated the close
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()
    def poll_all(self, timeout = 0):
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        t0 = time.time()
        duration = timeout
        try:
            while True:
                if self.poll(duration):
                    at_least_once = True
                if timeout is not None:
                    duration = t0 + timeout - time.time()
                    if duration < 0:
                        break
        except EOFError:
            pass
        return at_least_once
    #
    # requests
    #
    def sync_request(self, handler, *args):
        """Sends a synchronous request (waits for the reply to arrive)

        :raises: any exception raised by the remote request

        :returns: the result of the request
        """
        seq = self._send_request(handler, args)
        # keep serving the connection until our reply shows up; nested
        # requests from the peer are dispatched meanwhile
        while seq not in self._sync_replies:
            self.serve(0.1)
        isexc, obj = self._sync_replies.pop(seq)
        if isexc:
            raise obj
        else:
            return obj
    def _async_request(self, handler, args = (), callback = (lambda a, b: None)):
        """Sends a request and registers *callback* to be invoked with
        (is_exception, result) when the reply arrives"""
        seq = self._send_request(handler, args)
        self._async_callbacks[seq] = callback
    def async_request(self, handler, *args, **kwargs):
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),))
        res = AsyncResult(weakref.proxy(self))
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res
    @property
    def root(self):
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root
    #
    # attribute access
    #
    def _check_attr(self, obj, name):
        """Applies the configured attribute-access policy: returns the
        (possibly prefixed) attribute name to use, or False if denied"""
        if self._config["allow_exposed_attrs"]:
            if name.startswith(self._config["exposed_prefix"]):
                name2 = name
            else:
                name2 = self._config["exposed_prefix"] + name
            if hasattr(obj, name2):
                return name2
        if self._config["allow_all_attrs"]:
            return name
        if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]:
            return name
        if self._config["allow_public_attrs"] and not name.startswith("_"):
            return name
        return False
    def _access_attr(self, oid, name, args, overrider, param, default):
        """Common implementation of the get/set/delattr handlers: validates
        *name* against the policy and honors a per-type override method
        (e.g. ``_rpyc_getattr``) when the object's class defines one"""
        if is_py3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):
                raise TypeError("name must be a string")
            name = str(name) # IronPython issue #10 + py3k issue
        obj = self._local_objects[oid]
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            name2 = self._check_attr(obj, name)
            if not self._config[param] or not name2:
                raise AttributeError("cannot access %r" % (name,))
            accessor = default
            name = name2
        return accessor(obj, name, *args)
    #
    # request handlers
    #
    def _handle_ping(self, data):
        return data
    def _handle_close(self):
        self._cleanup()
    def _handle_getroot(self):
        return self._local_root
    def _handle_del(self, oid):
        self._local_objects.decref(oid)
    def _handle_repr(self, oid):
        return repr(self._local_objects[oid])
    def _handle_str(self, oid):
        return str(self._local_objects[oid])
    def _handle_cmp(self, oid, other):
        # cmp() might enter recursive resonance... yet another workaround
        #return cmp(self._local_objects[oid], other)
        obj = self._local_objects[oid]
        try:
            return type(obj).__cmp__(obj, other)
        except (AttributeError, TypeError):
            return NotImplemented
    def _handle_hash(self, oid):
        return hash(self._local_objects[oid])
    def _handle_call(self, oid, args, kwargs=()):
        return self._local_objects[oid](*args, **dict(kwargs))
    def _handle_dir(self, oid):
        return tuple(dir(self._local_objects[oid]))
    def _handle_inspect(self, oid):
        return tuple(netref.inspect_methods(self._local_objects[oid]))
    def _handle_getattr(self, oid, name):
        return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr)
    def _handle_delattr(self, oid, name):
        return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr)
    def _handle_setattr(self, oid, name, value):
        return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr)
    def _handle_callattr(self, oid, name, args, kwargs):
        return self._handle_getattr(oid, name)(*args, **dict(kwargs))
    def _handle_pickle(self, oid, proto):
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return pickle.dumps(self._local_objects[oid], proto)
    def _handle_buffiter(self, oid, count):
        # pull up to *count* items from the remote iterator in one round-trip
        items = []
        obj = self._local_objects[oid]
        i = 0
        try:
            while i < count:
                items.append(next(obj))
                i += 1
        except StopIteration:
            pass
        return tuple(items)
    def _handle_oldslicing(self, oid, attempt, fallback, start, stop, args):
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(oid, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(oid, fallback)
            return getslice(start, stop, *args)
    # collect handlers: map each HANDLE_* constant to its _handle_* method
    _HANDLERS = {}
    for name, obj in dict(locals()).items():
        if name.startswith("_handle_"):
            name2 = "HANDLE_" + name[8:].upper()
            if hasattr(consts, name2):
                _HANDLERS[getattr(consts, name2)] = obj
            else:
                # bug fix: the message must be %-formatted; the original passed
                # the format string and name as two NameError arguments
                raise NameError("no constant defined for %r" % (name,))
    del name, name2, obj
| |
#!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# SPDX-FileCopyrightText: 2016-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
from __future__ import division, print_function, unicode_literals
import argparse
import binascii
import errno
import hashlib
import os
import re
import struct
import sys
# Limits and magic values of the on-flash (binary) partition table format
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
MD5_PARTITION_BEGIN = b'\xEB\xEB' + b'\xFF' * 14 # The first 2 bytes are like magic numbers for MD5 sum
PARTITION_TABLE_SIZE = 0x1000 # Size of partition table
# OTA app slots occupy subtype values MIN .. MIN + NUM - 1 (ota_0 .. ota_15)
MIN_PARTITION_SUBTYPE_APP_OTA = 0x10
NUM_PARTITION_SUBTYPE_APP_OTA = 16
__version__ = '1.2'
# Numeric partition type values used in the binary format
APP_TYPE = 0x00
DATA_TYPE = 0x01
# Mapping of CSV 'type' keywords to their numeric values
TYPES = {
    'app': APP_TYPE,
    'data': DATA_TYPE,
}
def get_ptype_as_int(ptype):
    """ Convert a string which might be numeric or the name of a partition type to an integer """
    if ptype in TYPES:
        return TYPES[ptype]
    try:
        return int(ptype, 0)
    except TypeError:
        # int(x, base) only accepts strings; a numeric ptype passes through
        return ptype
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
# (the ota_0..ota_15 app subtypes are injected into this map by the loop in
# PartitionDefinition's class body, below)
SUBTYPES = {
    APP_TYPE: {
        'factory': 0x00,
        'test': 0x20,
    },
    DATA_TYPE: {
        'ota': 0x00,
        'phy': 0x01,
        'nvs': 0x02,
        'coredump': 0x03,
        'nvs_keys': 0x04,
        'efuse': 0x05,
        'undefined': 0x06,
        'esphttpd': 0x80,
        'fat': 0x81,
        'spiffs': 0x82,
    },
}
def get_subtype_as_int(ptype, subtype):
    """ Convert a string which might be numeric or the name of a partition subtype to an integer """
    try:
        return SUBTYPES[get_ptype_as_int(ptype)][subtype]
    except KeyError:
        # unknown type or subtype keyword - fall back to numeric parsing
        pass
    try:
        return int(subtype, 0)
    except TypeError:
        # already numeric - return unchanged
        return subtype
# Required flash-offset alignment (bytes) per partition type
ALIGNMENT = {
    APP_TYPE: 0x10000,
    DATA_TYPE: 0x4,
}
# Recommended 4 kB alignment for data partitions; only warned about in verify()
STRICT_DATA_ALIGNMENT = 0x1000
def get_alignment_for_type(ptype):
    """Return the required flash offset alignment for partition type *ptype*,
    defaulting to the data-partition alignment for unknown types."""
    if ptype in ALIGNMENT:
        return ALIGNMENT[ptype]
    return ALIGNMENT[DATA_TYPE]
# Module-wide options; overwritten from the parsed command line in main()
quiet = False
md5sum = True
secure = False
offset_part_table = 0
def status(msg):
    """ Print status message to stderr """
    if quiet:
        return
    critical(msg)
def critical(msg):
    """ Print critical message to stderr """
    print(msg, file=sys.stderr)
class PartitionTable(list):
    """An ordered list of PartitionDefinition entries, with CSV/binary
    parsing, verification and serialization helpers."""
    def __init__(self):
        # bug fix: the original passed ``self`` (empty at this point) as the
        # seed iterable to list.__init__; start as a plain empty list
        super(PartitionTable, self).__init__()
    @classmethod
    def from_file(cls, f):
        """Parse *f* (a binary-mode file object) as either a binary or a CSV
        partition table, auto-detected via the entry magic bytes.

        :returns: tuple (PartitionTable, input_was_binary)
        """
        data = f.read()
        data_is_binary = data[0:2] == PartitionDefinition.MAGIC_BYTES
        if data_is_binary:
            status('Parsing binary partition input...')
            return cls.from_binary(data), True
        data = data.decode()
        status('Parsing CSV input...')
        return cls.from_csv(data), False
    @classmethod
    def from_csv(cls, csv_contents):
        """Parse CSV text into a PartitionTable, filling in missing offsets
        and expanding negative ("up to") sizes."""
        res = PartitionTable()
        lines = csv_contents.splitlines()
        def expand_vars(f):
            # NOTE(review): re.match only detects a leftover $VAR at the very
            # start of the line; an unexpanded variable mid-line slips through
            # (re.search would catch it) - confirm whether this is intended
            f = os.path.expandvars(f)
            m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
            if m:
                raise InputError("unknown variable '%s'" % m.group(1))
            return f
        for line_no in range(len(lines)):
            line = expand_vars(lines[line_no]).strip()
            if line.startswith('#') or len(line) == 0:
                continue
            try:
                res.append(PartitionDefinition.from_csv(line, line_no + 1))
            except InputError as err:
                raise InputError('Error at line %d: %s' % (line_no + 1, err))
            except Exception:
                critical('Unexpected error parsing CSV line %d: %s' % (line_no + 1, line))
                raise
        # fix up missing offsets & negative sizes
        last_end = offset_part_table + PARTITION_TABLE_SIZE # first offset after partition table
        for e in res:
            if e.offset is not None and e.offset < last_end:
                if e == res[0]:
                    raise InputError('CSV Error: First partition offset 0x%x overlaps end of partition table 0x%x'
                                     % (e.offset, last_end))
                else:
                    raise InputError('CSV Error: Partitions overlap. Partition at line %d sets offset 0x%x. Previous partition ends 0x%x'
                                     % (e.line_no, e.offset, last_end))
            if e.offset is None:
                # auto-assign the next offset, aligned for this partition type
                pad_to = get_alignment_for_type(e.type)
                if last_end % pad_to != 0:
                    last_end += pad_to - (last_end % pad_to)
                e.offset = last_end
            if e.size < 0:
                # negative size means "extend up to abs(size)"
                e.size = -e.size - e.offset
            last_end = e.offset + e.size
        return res
    def __getitem__(self, item):
        """ Allow partition table access via name as well as by
        numeric index. """
        if isinstance(item, str):
            for x in self:
                if x.name == item:
                    return x
            raise ValueError("No partition entry named '%s'" % item)
        else:
            return super(PartitionTable, self).__getitem__(item)
    def find_by_type(self, ptype, subtype):
        """ Generator yielding every partition matching type & subtype
        (yields nothing if none match) """
        # convert ptype & subtypes names (if supplied this way) to integer values
        ptype = get_ptype_as_int(ptype)
        subtype = get_subtype_as_int(ptype, subtype)
        for p in self:
            if p.type == ptype and p.subtype == subtype:
                yield p
        return
    def find_by_name(self, name):
        """Return the first partition named *name*, or None if not found."""
        for p in self:
            if p.name == name:
                return p
        return None
    def verify(self):
        """Validate the whole table: per-partition checks, unique names,
        no overlaps, and at most one (correctly-sized) otadata partition."""
        # verify each partition individually
        for p in self:
            p.verify()
        # check on duplicate name
        names = [p.name for p in self]
        duplicates = set(n for n in names if names.count(n) > 1)
        # print sorted duplicate partitions by name
        if len(duplicates) != 0:
            critical('A list of partitions that have the same name:')
            for p in sorted(self, key=lambda x:x.name):
                if len(duplicates.intersection([p.name])) != 0:
                    critical('%s' % (p.to_csv()))
            raise InputError('Partition names must be unique')
        # check for overlaps
        last = None
        for p in sorted(self, key=lambda x:x.offset):
            if p.offset < offset_part_table + PARTITION_TABLE_SIZE:
                raise InputError('Partition offset 0x%x is below 0x%x' % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
            if last is not None and p.offset < last.offset + last.size:
                raise InputError('Partition at 0x%x overlaps 0x%x-0x%x' % (p.offset, last.offset, last.offset + last.size - 1))
            last = p
        # check that otadata should be unique
        otadata_duplicates = [p for p in self if p.type == TYPES['data'] and p.subtype == SUBTYPES[DATA_TYPE]['ota']]
        if len(otadata_duplicates) > 1:
            for p in otadata_duplicates:
                critical('%s' % (p.to_csv()))
            raise InputError('Found multiple otadata partitions. Only one partition can be defined with type="data"(1) and subtype="ota"(0).')
        if len(otadata_duplicates) == 1 and otadata_duplicates[0].size != 0x2000:
            p = otadata_duplicates[0]
            critical('%s' % (p.to_csv()))
            raise InputError('otadata partition must have size = 0x2000')
    def flash_size(self):
        """ Return the size that partitions will occupy in flash
        (ie the offset the last partition ends at)
        """
        try:
            last = sorted(self, reverse=True)[0]
        except IndexError:
            return 0 # empty table!
        return last.offset + last.size
    def verify_size_fits(self, flash_size_bytes: int) -> None:
        """ Check that partition table fits into the given flash size.
        Raises InputError otherwise.
        """
        table_size = self.flash_size()
        if flash_size_bytes < table_size:
            mb = 1024 * 1024
            raise InputError('Partitions tables occupies %.1fMB of flash (%d bytes) which does not fit in configured '
                             "flash size %dMB. Change the flash size in menuconfig under the 'Serial Flasher Config' menu." %
                             (table_size / mb, table_size, flash_size_bytes / mb))
    @classmethod
    def from_binary(cls, b):
        """Parse the binary format: 32-byte records, an optional MD5 checksum
        record, and a 32x0xFF end-of-table marker."""
        md5 = hashlib.md5()
        result = cls()
        for o in range(0,len(b),32):
            data = b[o:o + 32]
            if len(data) != 32:
                raise InputError('Partition table length must be a multiple of 32 bytes')
            if data == b'\xFF' * 32:
                return result # got end marker
            if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]: # check only the magic number part
                if data[16:] == md5.digest():
                    continue # the next iteration will check for the end marker
                else:
                    raise InputError("MD5 checksums don't match! (computed: 0x%s, parsed: 0x%s)" % (md5.hexdigest(), binascii.hexlify(data[16:])))
            else:
                md5.update(data)
            result.append(PartitionDefinition.from_binary(data))
        raise InputError('Partition table is missing an end-of-table marker')
    def to_binary(self):
        """Serialize to the binary format, appending the MD5 record when
        enabled and padding to MAX_PARTITION_LENGTH with 0xFF."""
        result = b''.join(e.to_binary() for e in self)
        if md5sum:
            result += MD5_PARTITION_BEGIN + hashlib.md5(result).digest()
        if len(result) >= MAX_PARTITION_LENGTH:
            raise InputError('Binary partition table length (%d) longer than max' % len(result))
        result += b'\xFF' * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
        return result
    def to_csv(self, simple_formatting=False):
        """Serialize to CSV text: a header comment plus one row per entry."""
        rows = ['# ESP-IDF Partition Table',
                '# Name, Type, SubType, Offset, Size, Flags']
        rows += [x.to_csv(simple_formatting) for x in self]
        return '\n'.join(rows) + '\n'
class PartitionDefinition(object):
    """A single partition table entry (one CSV row / one 32-byte binary record)."""
    MAGIC_BYTES = b'\xAA\x50'
    # dictionary maps flag name (as used in CSV flags list, property name)
    # to bit set in flags words in binary format
    FLAGS = {
        'encrypted': 0
    }
    # add subtypes for the 16 OTA slot values ("ota_XX, etc.")
    for ota_slot in range(NUM_PARTITION_SUBTYPE_APP_OTA):
        SUBTYPES[TYPES['app']]['ota_%d' % ota_slot] = MIN_PARTITION_SUBTYPE_APP_OTA + ota_slot
    def __init__(self):
        self.name = ''
        self.type = None
        self.subtype = None
        self.offset = None
        self.size = None
        self.encrypted = False
    @classmethod
    def from_csv(cls, line, line_no):
        """ Parse a line from the CSV """
        line_w_defaults = line + ',,,,' # lazy way to support default fields
        fields = [f.strip() for f in line_w_defaults.split(',')]
        res = PartitionDefinition()
        res.line_no = line_no
        res.name = fields[0]
        res.type = res.parse_type(fields[1])
        res.subtype = res.parse_subtype(fields[2])
        res.offset = res.parse_address(fields[3])
        res.size = res.parse_address(fields[4])
        if res.size is None:
            raise InputError("Size field can't be empty")
        flags = fields[5].split(':')
        for flag in flags:
            if flag in cls.FLAGS:
                setattr(res, flag, True)
            elif len(flag) > 0:
                raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
        return res
    def __eq__(self, other):
        # equality ignores line_no and flags - only identity-defining fields
        return self.name == other.name and self.type == other.type \
            and self.subtype == other.subtype and self.offset == other.offset \
            and self.size == other.size
    def __repr__(self):
        def maybe_hex(x):
            return '0x%x' % x if x is not None else 'None'
        return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
                                                                  maybe_hex(self.offset), maybe_hex(self.size))
    def __str__(self):
        return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
    def __cmp__(self, other):
        return self.offset - other.offset
    def __lt__(self, other):
        return self.offset < other.offset
    def __gt__(self, other):
        return self.offset > other.offset
    def __le__(self, other):
        return self.offset <= other.offset
    def __ge__(self, other):
        return self.offset >= other.offset
    def parse_type(self, strval):
        """Parse the CSV 'type' field (keyword or number)."""
        if strval == '':
            raise InputError("Field 'type' can't be left empty.")
        return parse_int(strval, TYPES)
    def parse_subtype(self, strval):
        """Parse the CSV 'subtype' field; empty defaults to data/undefined."""
        if strval == '':
            if self.type == TYPES['app']:
                raise InputError('App partition cannot have an empty subtype')
            return SUBTYPES[DATA_TYPE]['undefined']
        return parse_int(strval, SUBTYPES.get(self.type, {}))
    def parse_address(self, strval):
        """Parse an offset/size field; an empty field returns None."""
        if strval == '':
            return None # PartitionTable will fill in default
        return parse_int(strval)
    def verify(self):
        """Validate this partition's fields, raising ValidationError on error
        and printing warnings for suspicious-but-legal values."""
        if self.type is None:
            raise ValidationError(self, 'Type field is not set')
        if self.subtype is None:
            raise ValidationError(self, 'Subtype field is not set')
        if self.offset is None:
            raise ValidationError(self, 'Offset field is not set')
        # bug fix: check size before it is used in the modulo arithmetic
        # below; previously a missing size raised TypeError (None % int)
        # instead of the intended ValidationError
        if self.size is None:
            raise ValidationError(self, 'Size field is not set')
        align = get_alignment_for_type(self.type)
        if self.offset % align:
            raise ValidationError(self, 'Offset 0x%x is not aligned to 0x%x' % (self.offset, align))
        # The alignment requirement for non-app partition is 4 bytes, but it should be 4 kB.
        # Print a warning for now, make it an error in IDF 5.0 (IDF-3742).
        if self.type != APP_TYPE and self.offset % STRICT_DATA_ALIGNMENT:
            critical('WARNING: Partition %s not aligned to 0x%x.'
                     'This is deprecated and will be considered an error in the future release.' % (self.name, STRICT_DATA_ALIGNMENT))
        if self.size % align and secure and self.type == APP_TYPE:
            raise ValidationError(self, 'Size 0x%x is not aligned to 0x%x' % (self.size, align))
        if self.name in TYPES and TYPES.get(self.name, '') != self.type:
            critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's "
                     'type (0x%x). Mistake in partition table?' % (self.name, self.type))
        all_subtype_names = []
        for names in (t.keys() for t in SUBTYPES.values()):
            all_subtype_names += names
        if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, '') != self.subtype:
            critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has "
                     'non-matching type 0x%x and subtype 0x%x. Mistake in partition table?' % (self.name, self.type, self.subtype))
    STRUCT_FORMAT = b'<2sBBLL16sL'
    @classmethod
    def from_binary(cls, b):
        """Parse one 32-byte binary partition record into an instance."""
        if len(b) != 32:
            raise InputError('Partition definition length must be exactly 32 bytes. Got %d bytes.' % len(b))
        res = cls()
        (magic, res.type, res.subtype, res.offset,
         res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
        if b'\x00' in res.name: # strip null byte padding from name string
            res.name = res.name[:res.name.index(b'\x00')]
        res.name = res.name.decode()
        if magic != cls.MAGIC_BYTES:
            raise InputError('Invalid magic bytes (%r) for partition definition' % magic)
        for flag,bit in cls.FLAGS.items():
            if flags & (1 << bit):
                setattr(res, flag, True)
                flags &= ~(1 << bit)
        if flags != 0:
            critical('WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?' % flags)
        return res
    def get_flags_list(self):
        """Return the names of all flags set on this partition."""
        return [flag for flag in self.FLAGS.keys() if getattr(self, flag)]
    def to_binary(self):
        """Serialize this entry to the 32-byte binary record format."""
        flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
        return struct.pack(self.STRUCT_FORMAT,
                           self.MAGIC_BYTES,
                           self.type, self.subtype,
                           self.offset, self.size,
                           self.name.encode(),
                           flags)
    def to_csv(self, simple_formatting=False):
        """Serialize to one CSV row; uses keyword names and K/M size
        suffixes unless simple_formatting is set."""
        def addr_format(a, include_sizes):
            if not simple_formatting and include_sizes:
                for (val, suffix) in [(0x100000, 'M'), (0x400, 'K')]:
                    if a % val == 0:
                        return '%d%s' % (a // val, suffix)
            return '0x%x' % a
        def lookup_keyword(t, keywords):
            for k,v in keywords.items():
                if simple_formatting is False and t == v:
                    return k
            return '%d' % t
        def generate_text_flags():
            """ colon-delimited list of flags """
            return ':'.join(self.get_flags_list())
        return ','.join([self.name,
                         lookup_keyword(self.type, TYPES),
                         lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
                         addr_format(self.offset, False),
                         addr_format(self.size, True),
                         generate_text_flags()])
def parse_int(v, keywords={}):
    """Generic parser for integer fields - int(x,0) with provision for
    k/m/K/M suffixes and 'keyword' value lookup.
    """
    try:
        lowered = v.lower()
        for suffix, multiplier in (('k', 1024), ('m', 1024 * 1024)):
            if lowered.endswith(suffix):
                # strip the suffix and scale the recursively-parsed prefix
                return parse_int(v[:-1], keywords) * multiplier
        return int(v, 0)
    except ValueError:
        if not keywords:
            raise InputError('Invalid field value %s' % v)
        try:
            return keywords[lowered]
        except KeyError:
            raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ', '.join(keywords)))
def main():
    """Command-line entry point: parse arguments, convert between CSV and
    binary partition tables, and (by default) verify the result."""
    global quiet
    global md5sum
    global offset_part_table
    global secure
    parser = argparse.ArgumentParser(description='ESP32 partition table utility')
    parser.add_argument('--flash-size', help='Optional flash size limit, checks partition table fits in flash',
                        nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB', '32MB', '64MB', '128MB'])
    parser.add_argument('--disable-md5sum', help='Disable md5 checksum for the partition table', default=False, action='store_true')
    parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
    parser.add_argument('--verify', '-v', help='Verify partition table fields (deprecated, this behaviour is '
                        'enabled by default and this flag does nothing.', action='store_true')
    parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
    parser.add_argument('--offset', '-o', help='Set offset partition table', default='0x8000')
    parser.add_argument('--secure', help='Require app partitions to be suitable for secure boot', action='store_true')
    parser.add_argument('input', help='Path to CSV or binary file to parse.', type=argparse.FileType('rb'))
    parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
                        nargs='?', default='-')
    args = parser.parse_args()
    # publish parsed options as the module-level settings used throughout
    quiet = args.quiet
    md5sum = not args.disable_md5sum
    secure = args.secure
    offset_part_table = int(args.offset, 0)
    table, input_is_binary = PartitionTable.from_file(args.input)
    if not args.no_verify:
        status('Verifying table...')
        table.verify()
    if args.flash_size:
        size_mb = int(args.flash_size.replace('MB', ''))
        table.verify_size_fits(size_mb * 1024 * 1024)
    # Make sure that the output directory is created
    output_dir = os.path.abspath(os.path.dirname(args.output))
    if not os.path.exists(output_dir):
        try:
            os.makedirs(output_dir)
        except OSError as exc:
            # tolerate a concurrent creation of the same directory
            if exc.errno != errno.EEXIST:
                raise
    if input_is_binary:
        output = table.to_csv()
        # NOTE(review): when output is '-', this with-block closes sys.stdout
        # on exit; harmless at the very end of main, but worth confirming
        with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
            f.write(output)
    else:
        output = table.to_binary()
        try:
            stdout_binary = sys.stdout.buffer # Python 3
        except AttributeError:
            stdout_binary = sys.stdout
        with stdout_binary if args.output == '-' else open(args.output, 'wb') as f:
            f.write(output)
class InputError(RuntimeError):
    """Error raised for malformed partition table input (CSV or binary)."""
    def __init__(self, e):
        RuntimeError.__init__(self, e)
class ValidationError(InputError):
    """Error raised when a parsed partition fails verification."""
    def __init__(self, partition, message):
        detail = 'Partition %s invalid: %s' % (partition.name, message)
        InputError.__init__(self, detail)
if __name__ == '__main__':
    try:
        main()
    except InputError as e:
        # report input/validation problems concisely and exit with code 2
        print(e, file=sys.stderr)
        sys.exit(2)
| |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from ironic.common import exception
from ironic.common import raid
from ironic.drivers import base as drivers_base
from ironic.tests import base
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
from ironic.tests.unit import raid_constants
class ValidateRaidConfigurationTestCase(base.TestCase):
    """Tests for raid.validate_configuration() against the RAID JSON schema."""

    def setUp(self):
        # Call super() first so base-class fixtures/cleanups are in place
        # before any test resources are created.
        super(ValidateRaidConfigurationTestCase, self).setUp()
        with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
            self.schema = json.load(raid_schema_fobj)

    def _assert_invalid(self, raid_config):
        """Assert that raid_config is rejected with InvalidParameterValue."""
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)

    def test_validate_configuration_okay(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_OKAY)
        raid.validate_configuration(
            raid_config, raid_config_schema=self.schema)

    def test_validate_configuration_no_logical_disk(self):
        self._assert_invalid({})

    def test_validate_configuration_zero_logical_disks(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_NO_LOGICAL_DISKS))

    def test_validate_configuration_no_raid_level(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_NO_RAID_LEVEL))

    def test_validate_configuration_invalid_raid_level(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_RAID_LEVEL))

    def test_validate_configuration_no_size_gb(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_NO_SIZE_GB))

    def test_validate_configuration_max_size_gb(self):
        # 'MAX' is an accepted symbolic size and must validate cleanly.
        raid_config = json.loads(raid_constants.RAID_CONFIG_MAX_SIZE_GB)
        raid.validate_configuration(raid_config,
                                    raid_config_schema=self.schema)

    def test_validate_configuration_invalid_size_gb(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_SIZE_GB))

    def test_validate_configuration_invalid_is_root_volume(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_IS_ROOT_VOL))

    def test_validate_configuration_invalid_multiple_is_root_volume(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_MULTIPLE_IS_ROOT_VOL))

    def test_validate_configuration_invalid_share_physical_disks(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_SHARE_PHY_DISKS))

    def test_validate_configuration_invalid_disk_type(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_DISK_TYPE))

    def test_validate_configuration_invalid_int_type(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_INT_TYPE))

    def test_validate_configuration_invalid_number_of_phy_disks(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_NUM_PHY_DISKS))

    def test_validate_configuration_invalid_physical_disks(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_INVALID_PHY_DISKS))

    def test_validate_configuration_additional_property(self):
        self._assert_invalid(
            json.loads(raid_constants.RAID_CONFIG_ADDITIONAL_PROP))

    def test_validate_configuration_custom_schema(self):
        # A caller-supplied schema overrides the default driver schema.
        raid_config = json.loads(raid_constants.CUSTOM_SCHEMA_RAID_CONFIG)
        schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
        raid.validate_configuration(raid_config,
                                    raid_config_schema=schema)
class RaidPublicMethodsTestCase(db_base.DbTestCase):
    """Tests for the public helpers in ironic.common.raid."""

    def test_get_logical_disk_properties(self):
        # The default schema shipped with the drivers must expose every
        # documented logical-disk property.
        with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
            schema = json.load(raid_schema_fobj)
        logical_disk_properties = raid.get_logical_disk_properties(schema)
        self.assertIn('raid_level', logical_disk_properties)
        self.assertIn('size_gb', logical_disk_properties)
        self.assertIn('volume_name', logical_disk_properties)
        self.assertIn('is_root_volume', logical_disk_properties)
        self.assertIn('share_physical_disks', logical_disk_properties)
        self.assertIn('disk_type', logical_disk_properties)
        self.assertIn('interface_type', logical_disk_properties)
        self.assertIn('number_of_physical_disks', logical_disk_properties)
        self.assertIn('controller', logical_disk_properties)
        self.assertIn('physical_disks', logical_disk_properties)

    def test_get_logical_disk_properties_custom_schema(self):
        # A custom schema may define extra properties (here: 'foo').
        raid_schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
        logical_disk_properties = raid.get_logical_disk_properties(
            raid_config_schema=raid_schema)
        self.assertIn('raid_level', logical_disk_properties)
        self.assertIn('size_gb', logical_disk_properties)
        self.assertIn('foo', logical_disk_properties)

    def _test_update_raid_info(self, current_config,
                               capabilities=None):
        """Common assertions for raid.update_raid_info().

        Creates a test node, applies current_config and verifies the
        resulting raid_config, node properties and that the node's
        target_raid_config is left untouched.
        """
        node = obj_utils.create_test_node(self.context,
                                          driver='fake')
        if capabilities:
            properties = node.properties
            properties['capabilities'] = capabilities
            # Removed so the test can verify update_raid_info sets it.
            del properties['local_gb']
            node.properties = properties
        target_raid_config = json.loads(raid_constants.RAID_CONFIG_OKAY)
        node.target_raid_config = target_raid_config
        node.save()
        raid.update_raid_info(node, current_config)
        properties = node.properties
        current = node.raid_config
        target = node.target_raid_config
        self.assertIsNotNone(current['last_updated'])
        self.assertIsInstance(current['logical_disks'][0], dict)
        if current_config['logical_disks'][0].get('is_root_volume'):
            # Root volume present: root device hint, capacity and raid-level
            # capability must be recorded on the node.
            self.assertEqual({'wwn': '600508B100'},
                             properties['root_device'])
            self.assertEqual(100, properties['local_gb'])
            self.assertIn('raid_level:1', properties['capabilities'])
            if capabilities:
                self.assertIn(capabilities, properties['capabilities'])
        else:
            # No root volume: no device/size/raid-level info may be recorded.
            self.assertNotIn('local_gb', properties)
            self.assertNotIn('root_device', properties)
            if capabilities:
                self.assertNotIn('raid_level:1', properties['capabilities'])
        # Verify node.target_raid_config is preserved.
        self.assertEqual(target_raid_config, target)

    def test_update_raid_info_okay(self):
        current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
        self._test_update_raid_info(current_config,
                                    capabilities='boot_mode:bios')

    def test_update_raid_info_okay_no_root_volumes(self):
        current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
        del current_config['logical_disks'][0]['is_root_volume']
        del current_config['logical_disks'][0]['root_device_hint']
        self._test_update_raid_info(current_config,
                                    capabilities='boot_mode:bios')

    def test_update_raid_info_okay_current_capabilities_empty(self):
        current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
        self._test_update_raid_info(current_config,
                                    capabilities=None)

    def test_update_raid_info_multiple_root_volumes(self):
        # More than one root volume is invalid input.
        current_config = json.loads(raid_constants.RAID_CONFIG_MULTIPLE_ROOT)
        self.assertRaises(exception.InvalidParameterValue,
                          self._test_update_raid_info,
                          current_config)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from bts.bts_price_after_match import BTSPriceAfterMatch
from bts.api import BTS
from bts.market import BTSMarket
import time
import json
import logging
import logging.handlers
from math import fabs
import os
import smtplib
class DelegateTask(object):
    """Delegate maintenance daemon.

    Periodically publishes median price feeds for pegged assets and
    withdraws accumulated delegate pay, driven by the JSON configuration
    files under ~/.python-bts/.
    """

    def __init__(self):
        self.load_config()
        self.init_bts_price()
        self.setup_log()
        self.init_task_feed_price()

    def load_config(self):
        """Read task, notification and BTS client settings from JSON files."""
        config_file = os.getenv("HOME")+"/.python-bts/delegate_task.json"
        # 'with' guarantees the file is closed even if parsing fails
        # (the original leaked the handle on a json.load error).
        with open(config_file) as fd_config:
            config = json.load(fd_config)
        self.notify = config["notify"]
        self.config = config["delegate_task"]
        self.config_price_feed = self.config["price_feed"]
        self.config_withdraw_pay = self.config["withdraw_pay"]
        config_file = os.getenv("HOME")+"/.python-bts/bts_client.json"
        with open(config_file) as fd_config:
            self.config_bts = json.load(fd_config)[self.config["bts_client"]]

    def init_bts_price(self):
        """Create the BTS client, market wrapper and price calculator."""
        config_bts = self.config_bts
        self.client = BTS(config_bts["user"], config_bts["password"],
                          config_bts["host"], config_bts["port"])
        self.bts_market = BTSMarket(self.client)
        self.bts_price = BTSPriceAfterMatch(self.bts_market)
        # don't use cover order, because it's feed price related,
        # if there is a big order, feed price will down without stop
        self.bts_price.set_need_cover(False)

    def setup_log(self):
        """Configure the rotating file logger shared by all tasks."""
        self.logger = logging.getLogger('bts')
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s[%(levelname)s]: %(message)s')
        fh = logging.handlers.RotatingFileHandler("/tmp/bts_delegate_task.log")
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

    def init_task_feed_price(self):
        """Initialise per-asset price queues and publishing state."""
        peg_asset_list = ["KRW", "BTC", "SILVER", "GOLD", "TRY",
                          "SGD", "HKD", "RUB", "SEK", "NZD", "CNY",
                          "MXN", "CAD", "CHF", "AUD", "GBP", "JPY",
                          "EUR", "USD", "SHENZHEN", "NASDAQC", "NIKKEI",
                          "HANGSENG", "SHANGHAI"]
        self.price_queue = {}
        for asset in peg_asset_list:
            self.price_queue[asset] = []
        self.time_publish_feed = 0
        self.last_publish_price = {}
        # Actually I think it may be beneficial to discount all feeds by 0.995
        # to give the market makers some breathing room and provide a buffer
        # against down trends.
        self.discount = 0.995

    def fetch_bts_price(self):
        """Fetch and weight order books; return the real BTS price in BTC."""
        # update fiat exchange rates (CNY etc.)
        self.bts_price.get_rate_from_yahoo()
        # get all order book
        self.bts_price.get_order_book_all()
        # scale each order's volume by the configured per-market weight
        # (unlisted markets get weight 0.0, i.e. are ignored)
        for order_type in self.bts_price.order_types:
            for market in self.bts_price.order_book:
                if market not in self.config_price_feed["market_weight"]:
                    _weight = 0.0
                else:
                    _weight = self.config_price_feed["market_weight"][market]
                for _order in self.bts_price.order_book[market][order_type]:
                    _order[1] *= _weight
        # calculate real price
        volume, volume_sum, real_price = self.bts_price.get_real_price(
            spread=self.config_price_feed["price_limit"]["spread"])
        valid_depth = self.bts_price.get_valid_depth(
            price=real_price,
            spread=self.config_price_feed["price_limit"]["spread"])
        price_cny = real_price / self.bts_price.rate_btc["CNY"]
        self.logger.info("fetch price is %.5f CNY/BTS, volume is %.3f",
                         price_cny, volume)
        self.logger.info("efficent depth : %s" % valid_depth)
        return real_price

    # these MPA's precision is 100, it's too small,
    # have to change the price
    # but we should fixed these at BTS2.0
    def patch_nasdaqc(self, median_price):
        """Rescale index assets that are quoted relative to a fiat asset."""
        if "SHENZHEN" in median_price:
            median_price["SHENZHEN"] /= median_price["CNY"]
        if "SHANGHAI" in median_price:
            median_price["SHANGHAI"] /= median_price["CNY"]
        if "NASDAQC" in median_price:
            median_price["NASDAQC"] /= median_price["USD"]
        if "NIKKEI" in median_price:
            median_price["NIKKEI"] /= median_price["JPY"]
        if "HANGSENG" in median_price:
            median_price["HANGSENG"] /= median_price["HKD"]

    def get_median_price(self, bts_price_in_btc):
        """Update the rolling per-asset queues and return median prices."""
        median_price = {}
        for asset in self.price_queue:
            if asset not in self.bts_price.rate_btc or \
                    self.bts_price.rate_btc[asset] is None:
                continue
            self.price_queue[asset].append(bts_price_in_btc
                                           / self.bts_price.rate_btc[asset])
            # keep the queue bounded by the configured median window
            if len(self.price_queue[asset]) > \
                    self.config_price_feed["price_limit"]["median_length"]:
                self.price_queue[asset].pop(0)
            median_price[asset] = sorted(
                self.price_queue[asset])[int(len(self.price_queue[asset]) / 2)]
        self.patch_nasdaqc(median_price)
        return median_price

    def get_feed_price(self):
        """Return the currently published feed price for every asset."""
        current_feed_price = {}
        for asset in self.price_queue:
            current_feed_price[asset] = self.client.get_feed_price(asset)
        return current_feed_price

    def publish_rule_check2(self, median_price):
        """Simple rule: publish on max-age timeout or change > change_min."""
        if time.time() - self.time_publish_feed > \
                self.config_price_feed["max_update_hours"] * 60 * 60:
            return True
        change_min = self.config_price_feed["price_limit"]["change_min"]
        for asset in median_price:
            if asset not in self.last_publish_price:
                continue
            price_change = 100.0 * (
                median_price[asset] - self.last_publish_price[asset]) \
                / self.last_publish_price[asset]
            if fabs(price_change) > change_min:
                return True
        return False

    def publish_rule_check(self, median_price, current_feed_price):
        """Asymmetric rule: push the price down fast, let it creep up."""
        # When attempting to write a market maker the slow movement of the feed
        # can be difficult.
        # I would recommend the following:
        # if REAL_PRICE < MEDIAN and YOUR_PRICE > MEDIAN publish price
        # if you haven't published a price in the past 20 minutes
        # if REAL_PRICE > MEDIAN and YOUR_PRICE < MEDIAN and
        #    abs( YOUR_PRICE - REAL_PRICE ) / REAL_PRICE > 0.005 publish price
        # The goal is to force the price down rapidly and allow it to creep up
        # slowly.
        # By publishing prices more often it helps market makers maintain the
        # peg and minimizes opportunity for shorts to sell USD below the peg
        # that the market makers then have to absorb.
        # If we can get updates flowing smoothly then we can gradually reduce
        # the spread in the market maker bots.
        # note: all prices in USD per BTS
        if time.time() - self.time_publish_feed > \
                self.config_price_feed["max_update_hours"] * 60 * 60:
            return True
        for asset in median_price:
            # skip assets never published before (consistent with
            # publish_rule_check2); avoids a KeyError on newly-rated assets
            if asset not in self.last_publish_price:
                continue
            price_change = 100.0 * (
                median_price[asset] - self.last_publish_price[asset]) \
                / self.last_publish_price[asset]
            # ignore if change less than 0.2%
            change_ignore = \
                self.config_price_feed["price_limit"]["change_ignore"]
            if fabs(price_change) < change_ignore:
                continue
            if current_feed_price[asset] and \
                    median_price[asset] < current_feed_price[asset] / \
                    self.discount and self.last_publish_price[asset] > \
                    current_feed_price[asset] / self.discount:
                return True
            # if you haven't published a price in the past 20 minutes,
            # and the price change more than 0.5%
            change_min = self.config_price_feed["price_limit"]["change_min"]
            if fabs(price_change) > change_min and \
                    time.time() - self.time_publish_feed > 20 * 60:
                return True
        return False

    def check_median_price(self, median_price, current_feed_price):
        """Drop assets whose median moved more than change_max vs the feed."""
        # iterate over a snapshot of the keys: entries are removed in-loop
        # (dict.keys() during mutation breaks on Python 3)
        for asset in list(median_price.keys()):
            if current_feed_price[asset] is None:
                continue
            price_change = 100.0 * (
                median_price[asset] - current_feed_price[asset]) \
                / current_feed_price[asset]
            # don't publish price which change more than "change_max"
            if fabs(price_change) > \
                    self.config_price_feed["price_limit"]["change_max"]:
                median_price.pop(asset)

    def publish_feed_price(self, median_price):
        """Publish discounted median prices through all configured delegates."""
        self.time_publish_feed = time.time()
        self.last_publish_price = median_price
        publish_feeds = []
        for asset in median_price:
            publish_feeds.append(
                [asset, median_price[asset] * self.discount])
        self.logger.info("publish price %s", publish_feeds)
        active_delegates = self.client.list_active_delegates()
        for delegate in self.config["delegate_list"]:
            if "allow_stand_by" in self.config["price_feed"] and \
                    self.config["price_feed"]["allow_stand_by"] == 1:
                # stand-by delegates may publish too
                self.client.publish_feeds(delegate, publish_feeds)
            elif any(d['name'] == delegate for d in active_delegates):
                self.client.publish_feeds(delegate, publish_feeds)

    def display_price(self, median_price, current_feed_price):
        """Render a console table of rate, current feed and last publish."""
        os.system("clear")
        print("================ %s ===================" %
              time.strftime("%Y%m%dT%H%M%S", time.localtime(time.time())))
        print(" ASSET RATE(/BTC) CURRENT_FEED LAST_PUBLISH")
        print("-----------------------------------------------------")
        for asset in sorted(median_price):
            if asset not in self.last_publish_price:
                continue
            # parenthesized: '%' binds tighter than '/', so the original
            # '"%.3f" % 1/rate' evaluated ("%.3f" % 1) / rate and raised
            # TypeError (str / float)
            _rate_btc = "%.3f" % (1/self.bts_price.rate_btc[asset])
            if current_feed_price[asset] is None:
                _current_feed_price = None
            else:
                _current_feed_price = "%.4g" % current_feed_price[asset]
            _last_publish_price = "%.4g" % self.last_publish_price[asset]
            print(
                '{: >8}'.format("%s" % asset), '{: >10}'.
                format('%s' % _rate_btc), '{: >15}'.
                format("%s" % _current_feed_price), '{: >15}'.
                format('%s' % _last_publish_price))
        print("====================================================")

    def task_feed_price(self):
        """One feed cycle: fetch, median, rule-check, publish, display."""
        bts_price_in_btc = self.fetch_bts_price()
        median_price = self.get_median_price(bts_price_in_btc)
        current_feed_price = self.get_feed_price()
        # if self.publish_rule_check(median_price, current_feed_price):
        if self.publish_rule_check2(median_price):
            self.check_median_price(median_price, current_feed_price)
            self.publish_feed_price(median_price)
        self.display_price(median_price, current_feed_price)

    def pay_notify(self, delegate_account, pay_account,
                   pay_balance, percent):
        """Email the pay_account owners about a completed withdrawal."""
        if self.notify["enable"] == 0:
            return
        mail_list = self.config_withdraw_pay["mail_list"]
        if pay_account not in mail_list:
            return
        sender = self.notify["sender"]
        msg_from = "From: %s <%s>\n" % (self.notify["name"], sender)
        msg_to = ""
        for receiver in mail_list[pay_account]:
            msg_to = msg_to+"To: <%s>\n" % receiver
        msg_subject = "Subject: pay day from %s\n" % delegate_account
        msg_content = "you have got payment %d*%.3f BTS\n" % (
            pay_balance, percent)
        message = msg_from+msg_to+msg_subject+msg_content
        smtp_conn = smtplib.SMTP(self.notify["smtp_server"])
        try:
            smtp_conn.sendmail(sender, mail_list[pay_account], message)
        finally:
            # always close the SMTP connection (the original leaked it)
            smtp_conn.quit()

    def withdraw_pay(self):
        """Withdraw delegate pay and split it among the pay accounts."""
        for pay_list in self.config_withdraw_pay["pay_list"]:
            for delegate_account in pay_list["delegate_account"]:
                pay_balance = pay_list["pay_balance"]
                account_info = self.client.get_account_info(delegate_account)
                balance = float(account_info['delegate_info']['pay_balance'])/(
                    self.client.get_asset_precision("BTS"))
                # keep a small buffer so the balance never goes negative
                if balance > pay_balance + 10:
                    for pay_account, percent in pay_list["pay_account"]:
                        self.logger.info("withdraw pay %s %s %s"
                                         % (delegate_account, pay_account,
                                            pay_balance*percent))
                        self.client.delegate_withdraw_pay(
                            delegate_account, pay_account, pay_balance*percent)
                        self.pay_notify(delegate_account, pay_account,
                                        pay_balance, percent)

    def task_withdraw_pay(self):
        """Scheduled wrapper around withdraw_pay()."""
        self.withdraw_pay()

    def excute(self):
        """Main loop (name kept for backward compatibility with callers).

        Runs each task whenever its run_timer divides the tick counter,
        sleeping base_timer seconds between ticks. Task errors are logged
        and never abort the loop.
        """
        run_timer = 0
        while True:
            try:
                if self.config_price_feed["run_timer"] and \
                        run_timer % self.config_price_feed["run_timer"] == 0:
                    self.task_feed_price()
                if self.config_withdraw_pay["run_timer"] and \
                        run_timer % self.config_withdraw_pay["run_timer"] == 0:
                    self.task_withdraw_pay()
            except Exception as e:
                self.logger.exception(e)
            run_timer += 1
            time.sleep(int(self.config["base_timer"]))
if __name__ == '__main__':
    # Runs forever; per-iteration errors are caught and logged in excute().
    delegate_task = DelegateTask()
    delegate_task.excute()
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a library for data related functions.
This library contains two functions, make_train_dataset for generating a
training data iterator from multiple sources of different formats and
make_eval_function for creating an evaluation function that evaluates on
data from multiple sources of different formats.
"""
# pylint:skip-file
from functools import partial
import tensorflow as tf
from smurf import smurf_augmentation
from smurf.data import generic_flow_dataset as flow_dataset
from smurf.data import kitti
from smurf.data import sintel
from smurf.data import smurf_multiframe_dataset
from smurf.data import spoof_dataset
def make_train_dataset(
    train_on,
    height,
    width,
    shuffle_buffer_size,
    batch_size,
    seq_len,
    crop_instead_of_resize=False,
    apply_augmentation=True,
    include_ground_truth=False,
    resize_gt_flow=True,
    seed=41,
    mode='train',
    return_full_scale=True,
):
  """Build joint training dataset for all data in train_on.

  Args:
    train_on: string of the format 'format0:path0;format1:path1', e.g.
      'kitti:/tmp/...'.
    height: int, height to which the images will be resized or cropped.
    width: int, width to which the images will be resized or cropped.
    shuffle_buffer_size: int, size that will be used for the shuffle buffer.
    batch_size: int, batch size for the iterator.
    seq_len: int, number of frames per sequences (at the moment this should
      always be 2)
    crop_instead_of_resize: bool, indicates if cropping should be used instead
      of resizing
    apply_augmentation: bool, indicates if geometric and photometric data
      augmentation shall be activated (parameters are gin configurable)
    include_ground_truth: bool, indicates if ground truth flow should be
      included.
    resize_gt_flow: bool, indicates if ground truth flow should be resized (only
      important if resizing and supervised training is used)
    seed: A seed for a random number generator, controls shuffling of data.
    mode: str, the mode to pass to the data loader. defaults to 'train'
    return_full_scale: bool, whether or not to include the full size, uncropped
      images in the data dictionary.

  Returns:
    data: A tf.data.Iterator that produces batches of data dictionaries.
  """
  # BUG FIX: this suffix was previously appended inside the per-dataset loop,
  # so training on N>1 sources produced modes like 'train-supervised-supervised'.
  if include_ground_truth:
    mode += '-supervised'
  # Keyword arguments shared by every dataset constructor below.
  common_kwargs = dict(
      mode=mode,
      seq_len=seq_len,
      shuffle_buffer_size=shuffle_buffer_size,
      height=None if crop_instead_of_resize else height,
      width=None if crop_instead_of_resize else width,
      resize_gt_flow=resize_gt_flow,
      seed=seed,
  )
  train_datasets = []
  # Split strings according to pattern "format0:path0;format1:path1".
  for format_and_path in train_on.split(';'):
    data_format, path = format_and_path.split(':')
    # Add a dataset based on format and path.
    if 'spoof' in data_format:
      dataset = spoof_dataset.make_dataset(path, **common_kwargs)
    elif 'multiframe' in data_format:  # Multiframe data.
      dataset_manager = smurf_multiframe_dataset.SmurfMultiframe()
      dataset = dataset_manager.make_dataset(path, **common_kwargs)
    elif 'kitti' in data_format:
      dataset = kitti.make_dataset(path, **common_kwargs)
    elif 'chairs' in data_format:
      # FlyingChairs ground-truth flow has a fixed, known shape.
      dataset = flow_dataset.make_dataset(
          path, gt_flow_shape=[384, 512, 2], **common_kwargs)
    elif 'sintel' in data_format:
      dataset = sintel.make_dataset(path, **common_kwargs)
    else:
      print('Unknown data format "{}"'.format(data_format))
      continue
    train_datasets.append(dataset)

  augmentation_fn = partial(
      smurf_augmentation.apply_augmentation,
      crop_height=height,
      crop_width=width,
      return_full_scale=return_full_scale)

  # After loading and augmentation the data can have unknown shape.
  # The function below ensures that all data has the proper shape.
  def _ensure_shapes():
    """Return a map function that pins static shapes on known keys."""
    # shape of the data
    flow_height = height if resize_gt_flow else None
    flow_width = width if resize_gt_flow else None
    shapes = {
        'images': (batch_size, seq_len, height, width, 3),
        'flow': (batch_size, flow_height, flow_width, 2),
        'flow_valid': (batch_size, flow_height, flow_width, 1),
        'occlusions': (batch_size, height, width, 1),
    }

    def check_data(data):
      output = {}
      for key, val in data.items():
        if key in shapes:
          val = tf.ensure_shape(val, shapes[key])
        output[key] = val
      return output

    return check_data

  # Interleave the per-source datasets round-robin.
  choice_dataset = tf.data.Dataset.range(len(train_datasets)).repeat()
  train_ds = tf.data.experimental.choose_from_datasets(train_datasets,
                                                       choice_dataset)
  if apply_augmentation:
    train_ds = train_ds.map(augmentation_fn)
  train_ds = train_ds.batch(batch_size, drop_remainder=True)
  train_ds = train_ds.prefetch(1)
  train_ds = train_ds.map(_ensure_shapes())
  return train_ds
def make_eval_function(eval_on, height, width, progress_bar, plot_dir,
                       num_plots, weights=None):
  """Build an evaluation function for smurf.

  Args:
    eval_on: string of the format 'format0:path0;format1:path1', e.g.
      'kitti:/tmp/...'.
    height: int, the height to which the images should be resized for inference.
    width: int, the width to which the images should be resized for inference.
    progress_bar: boolean, flag to indicate whether the function should print a
      progress_bar during evaluation.
    plot_dir: string, optional path to a directory in which plots are saved (if
      num_plots > 0).
    num_plots: int, maximum number of qualitative results to plot for the
      evaluation.
    weights: dictionary of loss weights for computing loss on the evaluation
      data.

  Returns:
    data: A pair consisting of an evaluation function and a list of strings
      that holds the keys of the evaluation result.
  """
  eval_functions_and_datasets = []
  eval_keys = []
  # Split strings according to pattern "format0:path0;format1:path1".
  for format_and_path in eval_on.split(';'):
    data_format, path = format_and_path.split(':')
    # Add a dataset based on format and path.
    if 'spoof' in data_format:
      dataset = spoof_dataset.make_dataset(path, mode='eval')
      eval_fn = partial(spoof_dataset.evaluate, prefix=data_format)
      eval_keys += spoof_dataset.list_eval_keys(prefix=data_format)
    elif 'kitti' in data_format:
      if 'benchmark' in data_format:
        dataset = kitti.make_dataset(path, mode='test')
        eval_fn = kitti.benchmark
      else:
        dataset = kitti.make_dataset(path, mode='eval')
        eval_fn = partial(kitti.evaluate, prefix=data_format)
        eval_keys += kitti.list_eval_keys(prefix=data_format)
    elif 'chairs' in data_format:
      dataset = flow_dataset.make_dataset(path, mode='eval')
      eval_fn = partial(
          flow_dataset.evaluate,
          prefix=data_format,
          max_num_evals=500,  # We do this to avoid evaluating on 22k samples.
          has_occlusion=False,
          weights=weights)
      eval_keys += flow_dataset.list_eval_keys(prefix=data_format)
    elif 'sintel' in data_format:
      if 'benchmark' in data_format:
        # Sintel benchmark mode returns early: it must be the only source.
        # pylint:disable=g-long-lambda
        # pylint:disable=cell-var-from-loop
        eval_fn = lambda smurf: sintel.benchmark(
            inference_fn=smurf.infer,
            height=height,
            width=width,
            sintel_path=path,
            plot_dir=plot_dir,
            num_plots=num_plots)
        assert len(eval_on.split(
            ';')) == 1, 'Sintel benchmark should be done in isolation.'
        return eval_fn, []
      dataset = sintel.make_dataset(path, mode='eval-occlusion')
      eval_fn = partial(sintel.evaluate, prefix=data_format,
                        weights=weights)
      eval_keys += sintel.list_eval_keys(prefix=data_format)
    else:
      print('Unknown data format "{}"'.format(data_format))
      continue
    dataset = dataset.prefetch(1)
    eval_functions_and_datasets.append((eval_fn, dataset))

  # Make an eval function that aggregates all evaluations.
  def eval_function(smurf):
    # Merge the result dicts of every per-source evaluation; keys are
    # prefixed per data_format above, so they should not collide.
    result = dict()
    for eval_fn, ds in eval_functions_and_datasets:
      results = eval_fn(
          smurf.infer, ds, height,
          width, progress_bar, plot_dir, num_plots)
      for k, v in results.items():
        result[k] = v
    return result

  return eval_function, eval_keys
| |
# -*- coding: utf-8 -*-
"""
@author: Fernando J. Chaure
"""
from pyqtgraph.Qt import QtCore #interfaz en general
import pyqtgraph as pg #graphicos
import pyqtgraph.functions as fn
from PyQt4 import QtGui, uic
import numpy as np
from configuration import GENERAL_CONFIG as CONFIG
from threading import Thread
from copy import copy
from multiprocess_config import *
from collections import namedtuple
from configuration import BIO_CONFIG
from configuration import LIBGRAPH_CONFIG as LG_CONFIG
from configuration import FILE_CONFIG
from os import path, system
from spectral_view import SpectralHandler
from spike_sorting_view import SpikeSortingHandler
#import logging
#logging.basicConfig(format='%(levelname)s:%(message)s',filename='bci.log',level=logging.WARNING)
# One spike duration expressed in samples at the acquisition rate FS.
SPIKE_DURATION_SAMPLES = int(BIO_CONFIG['SPIKE_DURATION'] / 1000.0*CONFIG['FS'])
# Plot colors cycled across channels.
CH_COLORS = ['r', 'y', 'g', 'c', 'p', 'm'] * 3
# Status-bar messages for the recording state.
NOT_SAVING_MESSAGE = 'without saving'
SAVING_MESSAGE = 'writing in:'
# Pick the larger candidate FFT size.
# NOTE(review): the condition compares against FFT_N but the else-branch uses
# FFT_L_PAQ — looks inconsistent; confirm which config key is intended.
if int(CONFIG['FS'] / LG_CONFIG['FFT_RESOLUTION']) > int(LG_CONFIG['FFT_N'] * CONFIG['PAQ_USB'] / 2):
    FFT_SIZE = int(CONFIG['FS'] / LG_CONFIG['FFT_RESOLUTION'])
else:
    FFT_SIZE = int(LG_CONFIG['FFT_L_PAQ'] * CONFIG['PAQ_USB'] / 2)
#fft_frec = np.linspace(0, CONFIG['FS'] / 2, FFT_SIZE/2)
# Duration of one USB packet in seconds.
one_pack_time = CONFIG['PAQ_USB'] / CONFIG['FS']
# Number of packets per spike-count window and the resulting window length.
PACK_xSPIKE_COUNT = int(np.ceil(float(LG_CONFIG['TIME_SPIKE_COUNT']) / one_pack_time))
FREQFIX_xSPIKE_COUNT = (float(PACK_xSPIKE_COUNT)*one_pack_time)
# Shell command for an audible spike beep.
# NOTE(review): assumes LG_CONFIG['BEEP_FREQ'] is already a string (it is
# concatenated without str()) — confirm against the configuration module.
beep_command = "beep -f " + LG_CONFIG['BEEP_FREQ'] + " -l " \
    + str(BIO_CONFIG['SPIKE_DURATION']) + " -d "
# Qt Designer UI file located next to this module.
UIFILE = path.join(path.abspath(path.dirname(__file__)), 'bciui.ui')
SHOW_ERROR_TIME = 5000  #ms
if LG_CONFIG['TWO_WINDOWS']:
    second_win_file = path.join(path.abspath(
        path.dirname(__file__)), 'second_window.ui')
# Per-channel user options record (configuration, filter and threshold state).
UserChOptions_t = namedtuple('UserChOptions_t', 'conf_t filter_mode thr_values thr_manual_mode')
class MainWindow(QtGui.QMainWindow):
channel_changed = QtCore.pyqtSignal(int)
def __init__(self, processing_process, get_data_process):
QtGui.QMainWindow.__init__(self)
uic.loadUi(UIFILE, self)
#self.tabifyDockWidget(self.firing_rates_dock,self.clustering_dock);
self.showMaximized()
self.SpS_dock.setVisible(False)
self.actionSpS.setChecked(False)
self.fft_dock.setVisible(False)
self.actionFFT.setChecked(False)
self.lfp_dock.setVisible(False)
self.actionLFP.setChecked(False)
self.neu_firing_rates_dock.setVisible(False)
self.actionNeu_firing_rates.setChecked(False)
self.show_group.setVisible(False)
self.actionGroup_Viewer.setChecked(False)
self.processing_process = processing_process
self.get_data_process = get_data_process
self.data_handler = bci_data_handler()
self.signal_config = Channels_Configuration(queue = self.processing_process.ui_config_queue)#HARDCODE
self.signal_config.try_send()
self.channel_changed.connect(self.change_channel)
self.spectral_handler = SpectralHandler(self, self.data_handler)
self.spike_sorting_handler = SpikeSortingHandler(queue = self.processing_process.ui_config_queue, main_window = self)
self.group_info = plus_display(self.data_handler, self.plus_grid, self.grid_group,
self.plus_grid_fr, self.signal_config,
self.thr_p,self.channel_changed)
self.general_display = GeneralDisplay(self.data_handler,
self.espacio_pg,
self.channel_changed)
###Signal Slots Connections###
QtCore.QObject.connect(self.display_scale, QtCore.SIGNAL("valueChanged(int)"),
self.general_display.changeYrange)
QtCore.QObject.connect(self.filter_mode_button, QtCore.SIGNAL("clicked( bool)"),
self.change_filter_mode)
QtCore.QObject.connect(self.paq_view, QtCore.SIGNAL("valueChanged(int)"),
self.changeXrange)
QtCore.QObject.connect(self.active_channel_cb, QtCore.SIGNAL("clicked( bool)"),
self.activate_channel)
QtCore.QObject.connect(self.manual_thr_cb, QtCore.SIGNAL("clicked( bool)"),
self.group_info.change_th_mode)
QtCore.QObject.connect(self.thr_p, QtCore.SIGNAL("textEdited(const QString&)"),
self.group_info.thr_changed)
QtCore.QObject.connect(self.pausa, QtCore.SIGNAL("clicked (bool)"),
self.group_info.set_pause)
self.thr_p.setValidator(QtGui.QDoubleValidator())
self.contador_registro = -1
self.timer = QtCore.QTimer()
self.loss_data = 0
self.timer.timeout.connect(self.update)
self.timer.start(0) #si va demasiado lento deberia bajarse el tiempo
get_data_process.process.start()
processing_process.process.start()
self.file_label = QtGui.QLabel("")
self.statusBar.addPermanentWidget(self.file_label)
#self.dockWidget.setTitleBarWidget(QtGui.QWidget())
self.file_label.setText(NOT_SAVING_MESSAGE)
self.change_filter_mode(self.filter_mode_button.isChecked())
#self.elec_group = 0
self.channel_changed.emit(0)
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_A and not e.isAutoRepeat():
autoRange_state = self.group_info.VB.getState().get('autoRange')
if autoRange_state.count(True) > 0:
self.group_info.VB.disableAutoRange()
else:
self.group_info.VB.enableAutoRange()
elif e.key() == QtCore.Qt.Key_P and not e.isAutoRepeat():
self.pausa.click()
    def change_channel(self, channel):
        """Refresh the UI (checkboxes and window titles) for the newly selected channel.

        `channel` is the global channel index; group/electrode numbers shown to
        the user are 1-based.
        """
        # Reflect the per-channel config state in the two checkboxes.
        self.manual_thr_cb.setChecked(self.signal_config.th_manual_modes[channel])
        self.active_channel_cb.setChecked(self.signal_config.active_channels[channel])
        if LG_CONFIG['PROBE_CONF_L']:
            # Probe-style labeling: "<probe>:<group> | C:<channel-in-group>".
            aux = '{}:{} | C:{}'.format(LG_CONFIG['PROBE_CONF_L'],
                int(channel/CONFIG['ELEC_GROUP']) + 1, channel%CONFIG['ELEC_GROUP'] + 1)
            self.show_group.setWindowTitle('{} {}'.format(LG_CONFIG['GROUP_LABEL'],int(channel/CONFIG['ELEC_GROUP'])+ 1))
            self.fft_dock.setWindowTitle('Spectrum '+aux)
        else:
            # Flat labeling: a single 1-based electrode number.
            aux = 'Electrode : {}'.format(channel+1)
            self.show_group.setWindowTitle(aux)
            self.fft_dock.setWindowTitle('Spectrum '+aux)
        self.info_label.setText(aux)
        self.show_s_channel.setWindowTitle(aux)
        #self.elec_group = channel%CONFIG['ELEC_GROUP']
    def about(self):
        """Show the application's About dialog (quote text is user-facing, left verbatim)."""
        QtGui.QMessageBox.about(self, "About",
            """Essentially, all expressions of human nature ever produced, from a caveman's paintings to Mozart's symphonies and Einstein's view of the universe, emerge from the same source: the relentless dynamic toil of large populations of interconnected neurons.
                    Miguel Nicolelis""")
def change_filter_mode(self, mode):
""""Define si se pide la segnial pura o la filtrada"""
self.signal_config.change_filter_mode(mode)
self.group_info.show_line = mode
self.group_info.threshold_visible(mode)
    def update(self):
        """Timer-driven main loop: consume one packet (if any) and refresh all views.

        Returns 1 when no new packet is available (the return value is produced
        by the early exit; the QTimer slot ignores it).
        """
        try:
            # NOTE(review): Queue.get's first positional argument is `block`,
            # not `timeout` -- this passes TIMEOUT_GET as `block`, so if
            # TIMEOUT_GET is truthy this waits indefinitely. Confirm whether
            # get(timeout=TIMEOUT_GET) was intended (same at the .get calls below).
            new_struct = self.processing_process.new_data_queue.get(TIMEOUT_GET)
        except Queue_Empty:
            return 1
        if new_struct['type'] == 'signal':
            self.data_handler.update(new_struct)
            # Optional audible feedback: beep for spikes on the selected channel.
            if self.beepbox.isChecked():
                t = Thread(target = beep,
                           args = [self.data_handler.spikes_times[self.group_info.channel]])
                t.start()
        # Drain one pending warning from the acquisition process, if any.
        if (not self.get_data_process.warnings.empty()):
            new_mess = self.get_data_process.warnings.get(TIMEOUT_GET)
            if new_mess[0] != SLOW_PROCESS_SIGNAL:
                # Non-slowness warnings carry a lost-sample count in [1].
                self.loss_data += new_mess[1]
                self.statusBar.showMessage("Loss data: " + str(self.loss_data), SHOW_ERROR_TIME)
            else:
                self.statusBar.showMessage(Errors_Messages[new_mess[0]], SHOW_ERROR_TIME)
        # Same for the processing process' warnings.
        if (not self.processing_process.warnings.empty()):
            self.statusBar.showMessage(Errors_Messages[self.processing_process.warnings.get(TIMEOUT_GET)], SHOW_ERROR_TIME)
        # Refresh every display and push any pending configuration change.
        self.spectral_handler.update()
        self.general_display.update()
        self.group_info.update()
        self.signal_config.try_send()
def on_actionDetener(self):
"""detiene el guardado de datos"""
self.get_data_process.control.send(STOP_SIGNAL)
self.file_label.setText(NOT_SAVING_MESSAGE)
    def on_actionSalir(self):
        """Ask for confirmation, stop the worker processes and quit the app.

        Shutdown order matters: stop the GUI timer first so update() stops
        pulling from the queues, then ask both processes to exit, give them a
        bounded join, and finally terminate whatever is still alive.
        """
        if(QtGui.QMessageBox.question(
                QtGui.QWidget(), 'Exit',
                "Are you sure you want to exit the application?",
                QtGui.QMessageBox.Yes |QtGui.QMessageBox.No,
                QtGui.QMessageBox.No) == QtGui.QMessageBox.No):
            return
        self.timer.stop()
        self.get_data_process.control.send(EXIT_SIGNAL)
        self.processing_process.control.send(EXIT_SIGNAL)
        # Bounded joins; terminate() below is the fallback if they hang.
        self.get_data_process.process.join(2)
        self.processing_process.process.join(1)
        self.processing_process.process.terminate()
        self.get_data_process.process.terminate()
        self.general_display.close()
        #self.close()
        QtCore.QCoreApplication.instance().quit()
        # QtCore.QCoreApplication.instance().exit(N) returns N from APP.exec_()
        # NOTE(review): sys.exit() after quit() force-kills the interpreter
        # even if the Qt event loop ignores quit() -- presumably deliberate.
        import sys
        sys.exit()
def on_actionNuevo(self):
"""Nuevo archivo de registro"""
self.get_data_process.control.send(START_SIGNAL)
self.contador_registro += 1
self.file_label.setText(SAVING_MESSAGE + FILE_CONFIG['GENERIC_FILE'] +'-'+str(self.contador_registro))
#def set_autoRange(self):
#if self.autoRange.isChecked():
#self.general_display.setAutoRange(True)
#else:
#self.general_display.setAutoRange(False)
def closeEvent(self, event):
u"""Redirige las senales que disparen ese evento al metodo on_actionSalir()"""
event.ignore()
self.on_actionSalir()
def changeXrange(self, i):
"""Modifica la cantidad de paquetes que se dibujan en los displays"""
self.data_handler.change_paq_view(i)
self.general_display.changeXrange(i)
def activate_channel(self, i):
"""Agrega el canal seleccionado a la lista de canales activos"""
self.signal_config.active_channels[self.group_info.channel] = i
    def on_actionStop_SP(self):
        """Stop all spike-sorting processes.

        Not implemented yet; the commented lines sketch the planned approach.
        """
        pass
        #self.active_channel_cb.setCheckable(False)
        #self.processing_process.control.send(control_maker(self.active_channels))
        # implementation pending
#@QtCore.pyqtSlot()
# def view_firing_rate_dock(self,view):
# self.firing_rates_dock.setVisible(view)
class plus_display():
    """Lower (selected-channel) display plus its per-channel controls.

    Owns: the per-group grid of small plots, the big selected-channel plot,
    the firing-rate bar graph and the draggable threshold line, and keeps
    them in sync with `signal_config` (thresholds, manual/auto modes).
    """
    def __init__(self, data_handler, espacio_pg,grid_group, plus_grid_fr, signal_config,
                 thr_p_label, channel_changed):
        channel_changed.connect(self.change_channel)
        self.data_handler = data_handler
        self.channel = 0
        self.signal_config = signal_config
        self.tasas_bars = Bar_Graph()
        self.thr_p_label = thr_p_label
        #layout_graphicos.addItem(self.tasas_bars,row=None, col=0, rowspan=1, colspan=1)
        #graph=layout_graphicos.addPlot(row=None, col=1, rowspan=1, colspan=3)
        # Create and configure the per-group grid of small plots (one row per
        # electrode of the group); all rows share X/Y ranges via axis links.
        layout_tet= pg.GraphicsLayout()#border = (100, 0, 100))
        layout_tet.setSpacing(0)
        grid_group.setCentralItem(layout_tet)
        self.tet_curves = list()
        for i in range(CONFIG['ELEC_GROUP']):
            graph = layout_tet.addPlot(col=1,row = i+1,rowspan = 1, colspan = 1)
            axis = graph.getAxis('left')
            axis.setScale(scale = CONFIG['ADC_SCALE'])
            VB = graph.getViewBox()
            VB.setXRange(0, CONFIG['PAQ_USB']/float(CONFIG['FS']), padding=0, update=True)
            VB.setYRange(LG_CONFIG['DISPLAY_LIMY'], -LG_CONFIG['DISPLAY_LIMY'], padding=0, update=True)
            graph.setMenuEnabled(enableMenu = False, enableViewBoxMenu = None)
            graph.setDownsampling(auto = True)
            graph.showAxis('left', show = False)
            # Only the bottom-most plot keeps its time axis visible.
            if i != CONFIG['ELEC_GROUP']-1:
                graph.showAxis('bottom', show = False)
            self.tet_curves.append(graph.plot())
            if i==0:
                graph_0=graph
            else:
                # Link every plot's ranges to the first one.
                graph.setYLink(graph_0)
                graph.setXLink(graph_0)
        # Create and configure the selected-channel plot:
        self.graph = pg.PlotItem()
        axis = self.graph.getAxis('left')
        axis.setScale(scale = CONFIG['ADC_SCALE'])
        self.std = np.ndarray(CONFIG['#CHANNELS'])
        self.VB = self.graph.getViewBox()
        self.VB.setXRange(0, CONFIG['PAQ_USB']/float(CONFIG['FS']), padding=0, update=True)
        self.VB.setYRange(LG_CONFIG['DISPLAY_LIMY'], -LG_CONFIG['DISPLAY_LIMY'], padding=0, update=True)
        self.graph.setMenuEnabled(enableMenu = False, enableViewBoxMenu = None)
        self.graph.setDownsampling(auto = True)
        self.curve = self.graph.plot()
        #QtCore.QObject.connect(self.graph_thr, QtCore.SIGNAL("sigPositionChange()"),
        self.graph.enableAutoRange('y', False) #self.pepe)
        espacio_pg.setCentralItem(self.graph)
        plus_grid_fr.setCentralItem(self.tasas_bars)
        # Create and configure the draggable threshold line.
        self.graph_thr = pg.InfiniteLine(pen = pg.mkPen('w', width=2), angle = 0, movable = True)
        self.graph_thr.sigPositionChangeFinished.connect(self.thr_changed)
        self.fft_n = 0
        self.fft_l = 0
        self.fft_aux = np.zeros([LG_CONFIG['FFT_N'], FFT_SIZE / 2+1])
        self.data_fft_aux = np.zeros([CONFIG['PAQ_USB']*LG_CONFIG['FFT_L_PAQ']])
        self.threshold_visible(True)
        self.graph_thr.setValue(self.signal_config.thresholds[self.channel])
        self.show_line = False
        self.pause_mode = False
        # While the user drags the line, suppress automatic repositioning.
        self.graph_thr.sigDragged.connect(self.moving_line)
        self.graph_thr.sigPositionChangeFinished.connect(self.free_line)
        self.graph_thr_updatable = True
    def moving_line(self):
        """Freeze automatic threshold-line updates while the user drags it."""
        self.graph_thr_updatable = False
    def free_line(self):
        """Re-enable automatic threshold-line updates after the drag ends."""
        self.graph_thr_updatable = True
    def thr_changed(self, p = None):
        """Propagate a threshold change to the configuration.

        In manual mode the absolute line value is stored; in automatic mode
        the threshold is stored as a multiple of the channel's std. `p` is
        either the InfiniteLine (drag) or a text value from the line edit.
        """
        if self.signal_config.th_manual_modes[self.channel]:
            self.signal_config.change_th(self.channel, self.graph_thr.value())
            self.thr_p_label.setText("{0:.1f}".format(self.graph_thr.value()/self.std[self.channel]))
        else:
            if type(p)== pg.InfiniteLine:
                # Dragged: convert the absolute position to a std multiple.
                p = self.graph_thr.value() / self.std[self.channel]
                self.signal_config.change_th(self.channel, p)
                self.thr_p_label.setText("{0:.1f}".format(p))
            else:
                # Typed: the value is already a std multiple.
                self.signal_config.change_th(self.channel, float(p))
                self.graph_thr.setValue(float(p)*self.std[self.channel])
    def set_pause(self, pause_mode):
        """Toggle pause; on entering pause, snapshot the current data."""
        if pause_mode == True:
            self.data_old = copy(self.data_handler.graph_data)
        self.pause_mode = pause_mode
    def update(self):
        """Refresh all plots; executed when new packets arrive."""
        if self.pause_mode == True:
            data = self.data_old
        else:
            data = self.data_handler.graph_data
        n_view = self.data_handler.n_view
        xtime = self.data_handler.xtime
        self.max_xtime = xtime[n_view-1]
        # Feed the firing-rate bars with the spike times of the whole group.
        tet = int(self.channel / CONFIG['ELEC_GROUP'])
        self.tasas_bars.update(self.data_handler.spikes_times[tet*CONFIG['ELEC_GROUP']:tet*CONFIG['ELEC_GROUP']+CONFIG['ELEC_GROUP']])
        self.std = self.data_handler.std
        if self.signal_config.th_manual_modes[self.channel]:
            self.thr_p_label.setText("{0:.1f}".format(self.graph_thr.value()/self.std[self.channel]))
        # Auto mode: track std changes unless the user is dragging the line.
        if (not self.signal_config.th_manual_modes[self.channel]) and self.graph_thr_updatable:
            self.graph_thr.setValue(self.signal_config.thresholds[self.channel]*
                                    self.std[self.channel])
        self.curve.setPen(CH_COLORS[self.channel%CONFIG['ELEC_GROUP']])
        self.curve.setData(x = xtime[:n_view], y = data[self.channel, :n_view])
        fist_ch_group = int(self.channel/CONFIG['ELEC_GROUP'])*CONFIG['ELEC_GROUP']
        for i in range(CONFIG['ELEC_GROUP']):
            self.tet_curves[i].setPen(CH_COLORS[i])
            self.tet_curves[i].setData(x = xtime[:n_view], y = data[fist_ch_group+i, :n_view])
    def threshold_visible(self, visible):
        """Show or hide the threshold line on the selected-channel plot."""
        if visible:
            self.graph.addItem(self.graph_thr)
        else:
            self.graph.removeItem(self.graph_thr)
    def change_th_mode(self, manual):
        """Switch manual/automatic threshold mode, converting the stored value.

        Manual stores the absolute threshold; automatic stores it as a
        multiple of the channel's std.
        """
        self.signal_config.change_th_mode(self.channel, manual)
        if manual:
            self.signal_config.change_th(self.channel, self.signal_config.thresholds[self.channel]*self.std[self.channel])
        else:
            self.signal_config.change_th(self.channel, self.signal_config.thresholds[self.channel] / self.std[self.channel])
    def change_channel(self, ch):
        """Switch the displayed channel, refreshing the firing-rate bars
        if the new channel belongs to a different group."""
        #update plot
        if self.pause_mode == True:
            data = self.data_old
        else:
            data = self.data_handler.graph_data
        n_view = self.data_handler.n_view
        self.curve.setPen(CH_COLORS[ch%CONFIG['ELEC_GROUP']])
        self.curve.setData(x = self.data_handler.xtime[:n_view], y = data[ch, :n_view])
        fist_ch_group = int(ch/CONFIG['ELEC_GROUP'])*CONFIG['ELEC_GROUP']
        for i in range(CONFIG['ELEC_GROUP']):
            self.tet_curves[i].setPen(CH_COLORS[i])
            self.tet_curves[i].setData(x = self.data_handler.xtime[:n_view], y = data[fist_ch_group+i, :n_view])
        # Reset the rate accumulator when crossing a group boundary.
        if int(self.channel/CONFIG['ELEC_GROUP']) != int(ch/CONFIG['ELEC_GROUP']):
            self.tasas_bars.tet_changed()
        self.channel = ch
        if self.signal_config.th_manual_modes[self.channel]:
            self.graph_thr.setValue(self.signal_config.thresholds[self.channel])
            self.thr_p_label.setText("{0:.1f}".format(self.graph_thr.value()/self.std[self.channel]))
        else:
            self.graph_thr.setValue(self.signal_config.thresholds[self.channel]*
                                    self.std[self.channel])
            self.thr_p_label.setText("{0:.1f}".format(self.signal_config.thresholds[self.channel]))
        # Restart the spectral accumulation for the new channel.
        self.fft_l = 0
        self.fft_n = 0
class Bar_Graph(pg.PlotItem):
    """Bar graph of per-channel firing rates for the displayed group.

    Spike counts are accumulated per packet in a circular buffer of
    PACK_xSPIKE_COUNT rows; each bar shows the sum over that window
    normalized by FREQFIX_xSPIKE_COUNT.
    """
    def __init__(self):
        # npack: write index into the circular count buffer `tasas`.
        self.npack = 0
        self.tasa_bars = list()
        self.tasas = np.zeros([PACK_xSPIKE_COUNT, CONFIG['ELEC_GROUP']])
        pg.PlotItem.__init__(self)
        self.showAxis('bottom', False)
        self.setMenuEnabled(enableMenu = False, enableViewBoxMenu = None)
        self.setXRange(-0.4, (CONFIG['ELEC_GROUP']-1) + 0.4)
        self.enableAutoRange('x', False)
        self.setMouseEnabled(x=False, y=True)
        # One filled "bar" (a two-point filled plot) per channel of the group.
        for j in range(CONFIG['ELEC_GROUP']):
            self.tasa_bars.append(self.plot(pen = CH_COLORS[j],
                                  fillLevel=0, brush = pg.mkBrush(CH_COLORS[j])))
    def update(self, spike_times):
        """Fold one packet's spike times (one array per channel) into the bars."""
        for i, times in enumerate(spike_times):
            # Count spikes separated by more than SPIKE_DURATION_SAMPLES
            # (ignores threshold re-crossings within one spike), plus one
            # for the first spike if any.
            self.tasas[self.npack, i] = (np.greater(times[1:] - times[:-1],
                                         SPIKE_DURATION_SAMPLES)).sum() + (times.size > 0)
            tasas_aux = self.tasas[:, i].sum() / FREQFIX_xSPIKE_COUNT
            self.tasa_bars[i].setData(x = [i%CONFIG['ELEC_GROUP']-0.3, i%CONFIG['ELEC_GROUP']+0.3],
                                      y = [tasas_aux, tasas_aux], _callSync='off')
        self.npack += 1
        # BUG FIX: the original compared with `is`, which tests object
        # identity and is only reliable for small interned ints; use ==.
        if self.npack == PACK_xSPIKE_COUNT:
            self.npack = 0
    def tet_changed(self):
        """Reset the accumulated counts when the displayed group changes."""
        self.npack = 0
        self.tasas = np.zeros([PACK_xSPIKE_COUNT, CONFIG['ELEC_GROUP']])
class GeneralDisplay():
    """Overview display: one small plot per channel, grouped by electrode group.

    Optionally splits the channels across a second window (LG_CONFIG['TWO_WINDOWS']).
    Clicking a plot selects that channel via `ch_changed_signal`.
    """
    def __init__(self, data_handler, espacio_pg, ch_changed_signal):
        self.data_handler = data_handler
        layout_graphicos = pg.GraphicsLayout(border = (100, 0, 100))
        # GraphicsLayout arranges the plot items the way layouts do for widgets.
        espacio_pg.setCentralItem(layout_graphicos)
        layout_graphicos.setSpacing(0)
        self.set_canales = list() # channels selected to be shown
        self.curv_canal = list() # curves, kept to update their data later
        self.graphicos = list() # plots, kept to tweak their ranges later
        # Main plots: decide how many channels stay in the main window.
        if LG_CONFIG['TWO_WINDOWS'] is False:
            main_win_ch = CONFIG['#CHANNELS']
        else:
            # Roughly 3/7 of the groups go to the main window, rounded so the
            # main window holds a whole number of display columns of groups.
            main_win_ch = int(CONFIG['#CHANNELS']*3/CONFIG['ELEC_GROUP']/7)*CONFIG['ELEC_GROUP']
            main_win_ch = main_win_ch - ((main_win_ch / CONFIG['ELEC_GROUP'])%LG_CONFIG['COL_DISPLAY'])*CONFIG['ELEC_GROUP']
            self.second_win = Second_Display_Window()
            layout_graphicos_2 = self.second_win.layout_graphicos
            layout_graphicos_2.setSpacing(0)
            self.second_win.show()
        # One sub-layout per electrode group, placed on a COL_DISPLAY-wide grid.
        for i in xrange(0,CONFIG['#CHANNELS'],CONFIG['ELEC_GROUP']):
            if (i < main_win_ch):
                laxu = layout_graphicos.addLayout(row = int(i/CONFIG['ELEC_GROUP']/LG_CONFIG['COL_DISPLAY']),
                                                  col = int(i/CONFIG['ELEC_GROUP'])%LG_CONFIG['COL_DISPLAY'],
                                                  rowspan = 1, colspan=1)#, border=(50,0,0)
                laxu.setSpacing(1)
            else:
                laxu = layout_graphicos_2.addLayout(row = int((i-main_win_ch)/CONFIG['ELEC_GROUP'] / LG_CONFIG['COL_DISPLAY']),
                                                    col = int((i - main_win_ch)/CONFIG['ELEC_GROUP'])%LG_CONFIG['COL_DISPLAY'],
                                                    rowspan = 1, colspan=1)
                laxu.setSpacing(1)
            # Vertical group label spanning the whole group's rows.
            label_aux=laxu.addLabel("<font size=\"3\">{} {}</font>".format(LG_CONFIG['GROUP_LABEL'], str(i / CONFIG['ELEC_GROUP'] + 1)), angle=-90, rowspan=CONFIG['ELEC_GROUP'])
            label_aux.setMaximumWidth(7)
            # One bare plot per channel of the group; clicks handled by the
            # custom view box (channel selection).
            for j in xrange(i,min(i+CONFIG['ELEC_GROUP'],CONFIG['#CHANNELS'])):
                vb = ViewBox_General_Display(j, ch_changed_signal)
                graph = laxu.addPlot(viewBox = vb, col=1,
                                     row = j%CONFIG['ELEC_GROUP'],
                                     rowspan = 1, colspan = 1)
                graph.hideButtons()
                graph.setDownsampling(auto = True)
                VB = graph.getViewBox()
                VB.setXRange(0, CONFIG['PAQ_USB'], padding = 0, update = True) #HARDCODE
                VB.setYRange(LG_CONFIG['DISPLAY_LIMY'], -LG_CONFIG['DISPLAY_LIMY'],
                             padding = 0, update = True)
                graph.showAxis('bottom', show = False)
                graph.showAxis('top', show = False)
                graph.showAxis('right', show = False)
                graph.showAxis('left', show = False)
                graph.showGrid(y = True)
                graph.setMenuEnabled(enableMenu = False, enableViewBoxMenu = False)
                graph.setMouseEnabled(x = False, y = True)
                self.curv_canal.append(graph.plot())
                self.curv_canal[-1].setPen(width = 1, color = CH_COLORS[j%CONFIG['ELEC_GROUP']])
                self.graphicos.append(graph)
    def changeYrange(self, p):
        """Scale every channel's Y range; `p` is a slider value in tenths."""
        p = float(p) / 10
        for i in xrange(CONFIG['#CHANNELS']):
            self.graphicos[i].setYRange(LG_CONFIG['DISPLAY_LIMY'] * p, -1*LG_CONFIG['DISPLAY_LIMY']*p, padding=0, update=False)
    def changeXrange(self, i):
        """Set every channel's X range to `i` packets worth of samples."""
        max_x = i*CONFIG['PAQ_USB']
        for i in xrange(CONFIG['#CHANNELS']):
            self.graphicos[i].setXRange(0, max_x, padding = 0, update = False)
    def update(self):
        """Push the latest buffered samples to every channel's curve."""
        for i in xrange(CONFIG['#CHANNELS']):
            self.curv_canal[i].setData(y = self.data_handler.graph_data[i, :self.data_handler.n_view])
    def close(self):
        """Close the auxiliary window (if the two-window mode is enabled)."""
        if LG_CONFIG['TWO_WINDOWS'] is True:
            self.second_win.Close()
class ViewBox_General_Display(pg.ViewBox):
    """View box for one channel of the general display.

    Left click selects the channel (emits `ch_changed_signal` with the
    channel index); right-button drag rescales the Y axis only.
    """
    def __init__(self, i, ch_changed_signal):
        pg.ViewBox.__init__(self)
        self.i = i  # channel index represented by this view box
        self.ch_changed_signal = ch_changed_signal
    def mouseClickEvent(self, ev):
        """Emit the channel-selection signal on left click."""
        if ev.button() == QtCore.Qt.LeftButton:
            self.ch_changed_signal.emit(self.i)
    def mouseDragEvent(self, ev, axis = None):
        """Handle drags: right button scales Y around the origin.

        If `axis` is specified, the event only affects that axis.
        """
        ev.accept() ## we accept all buttons
        ## Ignore axes if mouse is disabled
        # FIX: `np.float` was a deprecated alias of the builtin `float` and
        # was removed in NumPy 1.24; using `float` is behavior-identical.
        mouseEnabled = np.array(self.state['mouseEnabled'], dtype=float)
        mask = mouseEnabled.copy()
        if axis is not None:
            mask[1-axis] = 0.0
        ## Scale or translate based on mouse button
        if ev.button() & QtCore.Qt.RightButton:
            dif = ev.screenPos() - ev.lastScreenPos()
            tr = self.childGroup.transform()
            tr = fn.invertQTransform(tr)
            # Y-only exponential zoom centered on the origin.
            self.scaleBy(x = None, y = ((mask[1] * 0.02) + 1) ** dif.y(), center=(0, 0)) #here mu change
            self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
class Second_Display_Window(QtGui.QDialog):
    """Auxiliary window holding the overflow channel plots.

    The window refuses to close (closeEvent is ignored) unless Close()
    is called, so the user cannot accidentally dismiss it.
    """
    def __init__(self):
        QtGui.QDialog.__init__(self)
        uic.loadUi(second_win_file, self)
        self.setWindowFlags(QtCore.Qt.Window)
        self.layout_graphicos = pg.GraphicsLayout(border=(100, 0, 100))
        # GraphicsLayout arranges the plot items the way layouts do for widgets.
        self.graphicsView.setCentralItem(self.layout_graphicos)
        self.closeable = False
    def Close(self):
        """Programmatic close: lift the close guard and close the dialog."""
        self.closeable = True
        self.close()
    def closeEvent(self, evnt):
        # Swallow user-initiated close attempts while the guard is set.
        if self.closeable is False:
            evnt.ignore()
class bci_data_handler():
    """Aligns incoming data packets and the input configuration, adding an
    abstraction layer between the processing output and the display code."""
    def __init__(self):
        # Last received packet (int16, one row per channel).
        self.data_new = np.int16(np.zeros([CONFIG['#CHANNELS'], CONFIG['PAQ_USB']]))
        self.spikes_times = 0
        # Circular display buffer holding up to MAX_PAQ_DISPLAY packets.
        self.graph_data = np.int32(np.zeros([CONFIG['#CHANNELS'],
                                             LG_CONFIG['MAX_PAQ_DISPLAY'] * CONFIG['PAQ_USB']]))
        self.paqdisplay = 0          # write index (in packets) into graph_data
        self.paq_view = 1            # packets currently shown
        self.new_paq_view = 1        # pending view size (applied on next update)
        self.n_view = self.paq_view*CONFIG['PAQ_USB']
        self.xtime = np.zeros([LG_CONFIG['MAX_PAQ_DISPLAY']*CONFIG['PAQ_USB']])
        self.xtime[:self.n_view] = np.linspace(0, self.n_view / float(CONFIG['FS']), self.n_view)
        self.std = np.ndarray(CONFIG['#CHANNELS'])
        self.filter_mode = False
    def update(self, data_struct):
        """Ingest one packet dict: samples, spike times, stds and filter mode."""
        self.filter_mode = data_struct["filter_mode"]
        if data_struct["filter_mode"] is False:
            #mean = data_struct["new_data"].mean(axis=1)
            self.data_new = data_struct["new_data"] #- mean[:, np.newaxis]
        else:
            self.data_new = data_struct["new_data"]
        self.spikes_times = data_struct["spikes_times"]
        self.std = data_struct["std"]
        # Apply a pending view-size change only at packet boundaries so the
        # time axis and buffer stay consistent.
        if(self.new_paq_view != self.paq_view):
            self.paq_view = self.new_paq_view
            self.n_view = self.paq_view*CONFIG['PAQ_USB']
            self.xtime[:self.n_view] = np.linspace(0, self.n_view / float(CONFIG['FS']),
                                                   self.n_view)
        # Wrap the circular write index before storing the new packet.
        if self.paqdisplay >= self.paq_view:
            self.paqdisplay = 0
        self.graph_data[:, self.paqdisplay*CONFIG['PAQ_USB']:(self.paqdisplay+1)*CONFIG['PAQ_USB']] = self.data_new
        self.paqdisplay += 1
    def change_paq_view(self, i):
        """Request a new view size (in packets); applied on the next update()."""
        self.new_paq_view = i
def beep(sk_time):
    """Emit audible beeps for the spikes in *sk_time* (array of sample indices).

    Counts spikes whose separation exceeds SPIKE_DURATION_SAMPLES (plus one
    for the first spike) and issues a single system beep command repeated
    that many times, spacing the repeats over one packet's duration.
    Does nothing when no spikes arrived.
    """
    if not np.size(sk_time):
        return
    sp = (np.greater(sk_time[1:] - sk_time[:-1], SPIKE_DURATION_SAMPLES)).sum() + 1
    # Interval between repeats so that `sp` beeps fit into one packet time.
    interval_ms = int((one_pack_time * 1000.0 - BIO_CONFIG['SPIKE_DURATION'] * sp) / sp)
    system(beep_command + str(interval_ms) + str(' -r ') + str(sp))
class Channels_Configuration():
    """Per-channel acquisition configuration shared with the processing process.

    Tracks thresholds, manual/auto threshold modes, active channels and the
    filter mode, and pushes the whole configuration through `queue` whenever
    something changed since the last successful send.
    """
    def __init__(self, queue, filter_mode = None):
        # Threshold mode per channel: True = manual (absolute value),
        # False = automatic (stored as a multiple of the channel's std).
        self.th_manual_modes = np.zeros(CONFIG['#CHANNELS'],dtype=bool)
        self.thresholds = -4 * np.ones(CONFIG['#CHANNELS'])  # default: -4*std()
        self.active_channels = [False] *CONFIG['#CHANNELS']
        self.filter_mode = filter_mode
        self.queue = queue
        # Dirty flag starts True to force an initial send of the config.
        self.changed = True
    def change_th(self, ch, value):
        """Set channel *ch*'s threshold, marking the config dirty only on change."""
        if self.thresholds[ch] != value:
            self.thresholds[ch] = value
            self.changed = True
    def change_th_mode(self, ch, value):
        """Switch channel *ch* between manual and automatic threshold mode."""
        self.th_manual_modes[ch] = value
        self.changed = True
    def change_filter_mode(self, state):
        """Enable/disable the filtered-signal mode."""
        self.filter_mode = state
        self.changed = True
    def try_send(self):
        """Push the configuration to the processing process if it changed.

        If the queue raises Queue_Full the dirty flag is kept, so the send is
        retried on the next call. NOTE(review): queue.put() blocks by default
        and only raises Full for a bounded queue with block=False/timeout --
        confirm the queue is created accordingly.
        """
        if self.changed:
            try:
                self.queue.put(UserChOptions_t(conf_t = 'channels',filter_mode=self.filter_mode,
                                               thr_values =self.thresholds,
                                               thr_manual_mode = self.th_manual_modes))
                self.changed = False
            except Queue_Full:
                pass
| |
"""The tests for the Owntracks device tracker."""
import json
from asynctest import patch
import pytest
from homeassistant.components import owntracks
from homeassistant.const import STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component,
mock_coro)
# Default test user/device and the MQTT topics OwnTracks publishes on.
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINTS_TOPIC = 'owntracks/{}/{}/waypoints'.format(USER, DEVICE)
WAYPOINT_TOPIC = 'owntracks/{}/{}/waypoint'.format(USER, DEVICE)
# A user that is NOT on the waypoint whitelist, for import-blocking tests.
USER_BLACKLIST = 'ram'
WAYPOINTS_TOPIC_BLOCKED = 'owntracks/{}/{}/waypoints'.format(
    USER_BLACKLIST, DEVICE)
LWT_TOPIC = 'owntracks/{}/{}/lwt'.format(USER, DEVICE)
BAD_TOPIC = 'owntracks/{}/{}/unsupported'.format(USER, DEVICE)
# Entity ids the integration is expected to create.
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)
IBEACON_DEVICE = 'keys'
MOBILE_BEACON_FMT = 'device_tracker.beacon_{}'
# Configuration option keys (mirrors of the component's constants).
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
CONF_MQTT_TOPIC = owntracks.CONF_MQTT_TOPIC
CONF_EVENTS_ONLY = owntracks.CONF_EVENTS_ONLY
CONF_REGION_MAPPING = owntracks.CONF_REGION_MAPPING
# Reference coordinates and a degrees-per-meter factor for zone geometry.
TEST_ZONE_LAT = 45.0
TEST_ZONE_LON = 90.0
TEST_ZONE_DEG_PER_M = 0.0000127
FIVE_M = TEST_ZONE_DEG_PER_M * 5.0
# Home Assistant Zones: a small "inner" zone offset inside a huge "outer" one.
INNER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT + 0.1,
    'longitude': TEST_ZONE_LON + 0.1,
    'radius': 50
}
OUTER_ZONE = {
    'name': 'zone',
    'latitude': TEST_ZONE_LAT,
    'longitude': TEST_ZONE_LON,
    'radius': 100000
}
def build_message(test_params, default_params):
    """Return a copy of *default_params* with *test_params* overrides applied.

    Neither input dict is mutated.
    """
    merged = dict(default_params)
    merged.update(test_params)
    return merged
# Default message parameters: a plain GPS location fix inside the outer zone.
DEFAULT_LOCATION_MESSAGE = {
    '_type': 'location',
    'lon': OUTER_ZONE['longitude'],
    'lat': OUTER_ZONE['latitude'],
    'acc': 60,
    'tid': 'user',
    't': 'u',
    'batt': 92,
    'cog': 248,
    'alt': 27,
    'p': 101.3977584838867,
    'vac': 4,
    'tst': 1,
    'vel': 0
}
# Owntracks will publish a transition when crossing
# a circular region boundary.
# ZONE_EDGE is the inner zone's radius expressed in degrees.
ZONE_EDGE = TEST_ZONE_DEG_PER_M * INNER_ZONE['radius']
DEFAULT_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'c',
    'lon': INNER_ZONE['longitude'],
    'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
    'acc': 60,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}
# iBeacons that are named the same as an HA zone
# are used to trigger enter and leave updates
# for that zone. In this case the "inner" zone.
#
# iBeacons that do not share an HA zone name
# are treated as mobile tracking devices for
# objects which can't track themselves e.g. keys.
#
# iBeacons are typically configured with the
# default lat/lon 0.0/0.0 and have acc 0.0 but
# regardless the reported location is not trusted.
#
# Owntracks will send both a location message
# for the device and an 'event' message for
# the beacon transition.
DEFAULT_BEACON_TRANSITION_MESSAGE = {
    '_type': 'transition',
    't': 'b',
    'lon': 0.0,
    'lat': 0.0,
    'acc': 0.0,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    'tst': 2
}
# Location messages
LOCATION_MESSAGE = DEFAULT_LOCATION_MESSAGE
LOCATION_MESSAGE_INACCURATE = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 2000},
    LOCATION_MESSAGE)
LOCATION_MESSAGE_ZERO_ACCURACY = build_message(
    {'lat': INNER_ZONE['latitude'] - ZONE_EDGE,
     'lon': INNER_ZONE['longitude'] - ZONE_EDGE,
     'acc': 0},
    LOCATION_MESSAGE)
LOCATION_MESSAGE_NOT_HOME = build_message(
    {'lat': OUTER_ZONE['latitude'] - 2.0,
     'lon': INNER_ZONE['longitude'] - 2.0,
     'acc': 100},
    LOCATION_MESSAGE)
# Region GPS messages: enter/leave transitions with varying accuracy.
REGION_GPS_ENTER_MESSAGE = DEFAULT_TRANSITION_MESSAGE
REGION_GPS_LEAVE_MESSAGE = build_message(
    {'lon': INNER_ZONE['longitude'] - ZONE_EDGE * 10,
     'lat': INNER_ZONE['latitude'] - ZONE_EDGE * 10,
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)
REGION_GPS_ENTER_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_INACCURATE = build_message(
    {'acc': 2000},
    REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_ENTER_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_ZERO = build_message(
    {'acc': 0},
    REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_OUTER = build_message(
    {'lon': OUTER_ZONE['longitude'] - 2.0,
     'lat': OUTER_ZONE['latitude'] - 2.0,
     'desc': 'outer',
     'event': 'leave'},
    DEFAULT_TRANSITION_MESSAGE)
REGION_GPS_ENTER_MESSAGE_OUTER = build_message(
    {'lon': OUTER_ZONE['longitude'],
     'lat': OUTER_ZONE['latitude'],
     'desc': 'outer',
     'event': 'enter'},
    DEFAULT_TRANSITION_MESSAGE)
# Region Beacon messages
REGION_BEACON_ENTER_MESSAGE = DEFAULT_BEACON_TRANSITION_MESSAGE
REGION_BEACON_LEAVE_MESSAGE = build_message(
    {'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
# Mobile Beacon messages (beacon name does not match any HA zone).
MOBILE_BEACON_ENTER_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
MOBILE_BEACON_LEAVE_EVENT_MESSAGE = build_message(
    {'desc': IBEACON_DEVICE,
     'event': 'leave'},
    DEFAULT_BEACON_TRANSITION_MESSAGE)
# Waypoint messages
WAYPOINTS_EXPORTED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 3,
            "lat": 47,
            "lon": 9,
            "rad": 10,
            "desc": "exp_wayp1"
        },
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 3,
            "lon": 9,
            "rad": 500,
            "desc": "exp_wayp2"
        }
    ]
}
WAYPOINTS_UPDATED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 9,
            "lon": 47,
            "rad": 50,
            "desc": "exp_wayp1"
        },
    ]
}
WAYPOINT_MESSAGE = {
    "_type": "waypoint",
    "tst": 4,
    "lat": 9,
    "lon": 47,
    "rad": 50,
    "desc": "exp_wayp1"
}
# Zone entity ids expected after waypoint import (per user).
WAYPOINT_ENTITY_NAMES = [
    'zone.greg_phone_exp_wayp1',
    'zone.greg_phone_exp_wayp2',
    'zone.ram_phone_exp_wayp1',
    'zone.ram_phone_exp_wayp2',
]
# Last-will-and-testament and unsupported-type messages.
LWT_MESSAGE = {
    "_type": "lwt",
    "tst": 1
}
BAD_MESSAGE = {
    "_type": "unsupported",
    "tst": 1
}
# Garbage wrapped around valid JSON for corruption tests.
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
# pylint: disable=invalid-name, len-as-condition, redefined-outer-name
@pytest.fixture
def setup_comp(hass, mock_device_tracker_conf):
    """Initialize components.

    Sets up persistent_notification (required first), device_tracker and a
    mocked MQTT component, then creates the inner/outer test zones.
    """
    assert hass.loop.run_until_complete(async_setup_component(
        hass, 'persistent_notification', {}))
    hass.loop.run_until_complete(async_setup_component(
        hass, 'device_tracker', {}))
    hass.loop.run_until_complete(async_mock_mqtt_component(hass))
    # Two zones share the "inner" geometry so name-with-space tests work.
    hass.states.async_set(
        'zone.inner', 'zoning', INNER_ZONE)
    hass.states.async_set(
        'zone.inner_2', 'zoning', INNER_ZONE)
    hass.states.async_set(
        'zone.outer', 'zoning', OUTER_ZONE)
    yield
async def setup_owntracks(hass, config,
                          ctx_cls=owntracks.OwnTracksContext):
    """Set up OwnTracks.

    Registers a config entry with a fixed webhook id/secret and patches the
    component's context class with *ctx_cls* so tests can intercept it.
    """
    MockConfigEntry(domain='owntracks', data={
        'webhook_id': 'owntracks_test',
        'secret': 'abcd',
    }).add_to_hass(hass)

    with patch.object(owntracks, 'OwnTracksContext', ctx_cls):
        assert await async_setup_component(
            hass, 'owntracks', {'owntracks': config})
        await hass.async_block_till_done()
@pytest.fixture
def context(hass, setup_comp):
    """Set up the mocked context.

    Wraps the real OwnTracksContext constructor so the instance created by
    the component is captured; yields a getter returning that instance.
    """
    orig_context = owntracks.OwnTracksContext
    context = None

    # pylint: disable=no-value-for-parameter
    def store_context(*args):
        """Store the context."""
        # Capture the context the component builds, via the enclosing scope.
        nonlocal context
        context = orig_context(*args)
        return context

    hass.loop.run_until_complete(setup_owntracks(hass, {
        CONF_MAX_GPS_ACCURACY: 200,
        CONF_WAYPOINT_IMPORT: True,
        CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
    }, store_context))

    def get_context():
        """Get the current context."""
        return context

    yield get_context
async def send_message(hass, topic, message, corrupt=False):
    """Publish *message* as JSON on *topic* and wait for processing.

    With corrupt=True the JSON payload is wrapped in garbage so parsing fails.
    """
    payload = json.dumps(message)
    if corrupt:
        payload = BAD_JSON_PREFIX + payload + BAD_JSON_SUFFIX
    async_fire_mqtt_message(hass, topic, payload)
    # Two passes: one for the MQTT dispatch, one for the resulting updates.
    await hass.async_block_till_done()
    await hass.async_block_till_done()
def assert_location_state(hass, location):
    """Assert the tracked device's state equals *location*."""
    assert hass.states.get(DEVICE_TRACKER_STATE).state == location
def assert_location_latitude(hass, latitude):
    """Assert the tracked device's latitude attribute equals *latitude*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('latitude') == latitude
def assert_location_longitude(hass, longitude):
    """Assert the tracked device's longitude attribute equals *longitude*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('longitude') == longitude
def assert_location_accuracy(hass, accuracy):
    """Assert the tracked device's gps_accuracy attribute equals *accuracy*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('gps_accuracy') == accuracy
def assert_location_source_type(hass, source_type):
    """Assert the tracked device's source_type attribute equals *source_type*."""
    attrs = hass.states.get(DEVICE_TRACKER_STATE).attributes
    assert attrs.get('source_type') == source_type
def assert_mobile_tracker_state(hass, location, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's state equals *location*."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    assert hass.states.get(entity_id).state == location
def assert_mobile_tracker_latitude(hass, latitude, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's latitude equals *latitude*."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    assert hass.states.get(entity_id).attributes.get('latitude') == latitude
def assert_mobile_tracker_accuracy(hass, accuracy, beacon=IBEACON_DEVICE):
    """Assert the mobile beacon tracker's gps_accuracy equals *accuracy*."""
    entity_id = MOBILE_BEACON_FMT.format(beacon)
    assert hass.states.get(entity_id).attributes.get('gps_accuracy') == accuracy
async def test_location_invalid_devid(hass, context):
    """Test the update of a location.

    The device id contains a character ('-') that must be slugified
    into '_' when building the entity id.
    """
    await send_message(hass, 'owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
    state = hass.states.get('device_tracker.paulus_nexus_5x')
    assert state.state == 'outer'
async def test_location_update(hass, context):
    """Test the update of a location."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # GPS coordinates, accuracy and zone name are taken from the message.
    assert_location_source_type(hass, 'gps')
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_location_update_no_t_key(hass, context):
    """Test the update of a location when message does not contain 't'."""
    message = LOCATION_MESSAGE.copy()
    message.pop('t')
    await send_message(hass, LOCATION_TOPIC, message)

    # The missing trigger key must not prevent a normal GPS update.
    assert_location_source_type(hass, 'gps')
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_location_inaccurate_gps(hass, context):
    """Test the location for inaccurate GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)

    # Ignored inaccurate GPS (acc > max_gps_accuracy). Location remains
    # at the previous fix.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_longitude(hass, LOCATION_MESSAGE['lon'])
async def test_location_zero_accuracy_gps(hass, context):
    """Ignore the location for zero accuracy GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)

    # Ignored zero-accuracy GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_longitude(hass, LOCATION_MESSAGE['lon'])
# ------------------------------------------------------------------------
# GPS based event entry / exit testing
async def test_event_gps_entry_exit(hass, context):
    """Test GPS-based region entry and exit events."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)

    # Exit switches back to GPS
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')

    # Left clean zone state
    assert not context().regions_entered[USER]

    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # Now sending a location update moves me again.
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
async def test_event_gps_with_spaces(hass, context):
    """Test entry and exit events for a region whose name has spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner 2')

    message = build_message({'desc': "inner 2"},
                            REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_gps_entry_inaccurate(hass, context):
    """Test the event for inaccurate entry."""
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_INACCURATE)

    # I enter the zone even though the message GPS was inaccurate.
    # (Enter events trust the region, not the reported GPS fix.)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
async def test_event_gps_entry_exit_inaccurate(hass, context):
    """Test the event for inaccurate exit."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_INACCURATE)

    # Exit doesn't use inaccurate gps
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_entry_exit_zero_accuracy(hass, context):
    """Test entry/exit events with accuracy zero."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_ZERO)

    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_ZERO)

    # Exit doesn't use zero gps
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_exit_outside_zone_sets_away(hass, context):
    """Test that a region exit far from any zone sets the state to away."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')

    # Exit message far away GPS location
    message = build_message(
        {'lon': 90.0,
         'lat': 90.0},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # Exit forces zone change to away
    assert_location_state(hass, STATE_NOT_HOME)
async def test_event_gps_entry_exit_right_order(hass, context):
    """Test nested GPS region enter/exit events in the correct order."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')

    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')

    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_event_gps_entry_exit_wrong_order(hass, context):
    """Test nested GPS region enter/exit events arriving out of order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')

    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_state(hass, 'inner_2')

    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE['acc'])
    assert_location_state(hass, 'outer')
async def test_event_gps_entry_unknown_zone(hass, context):
    """Test the entry event for a region with no matching HA zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_ENTER_MESSAGE['lat'])
    # The GPS coords of the enter message fall inside 'inner'.
    assert_location_state(hass, 'inner')
async def test_event_gps_exit_unknown_zone(hass, context):
    """Test the exit event for a region with no matching HA zone."""
    # Just treat as location update
    message = build_message(
        {'desc': "unknown"},
        REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    # The GPS coords of the leave message fall inside 'outer'.
    assert_location_state(hass, 'outer')
async def test_event_entry_zone_loading_dash(hass, context):
    """Test the entry event for a zone name with a leading dash."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
async def test_events_only_on(hass, context):
    """Test events_only config suppresses location updates."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)

    context().events_only = True

    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, 'outer')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)

    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # Ignored location update. Location remains at previous.
    assert_location_state(hass, STATE_NOT_HOME)
async def test_events_only_off(hass, context):
    """Test when events_only is False."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)

    context().events_only = False

    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, 'outer')
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)

    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # Location update processed
    assert_location_state(hass, 'outer')
async def test_event_source_type_entry_exit(hass, context):
    """Test the entry and exit events of source type."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)

    # source_type should be gps when entering using gps.
    assert_location_source_type(hass, 'gps')

    # owntracks shouldn't send beacon events with acc = 0
    await send_message(hass, EVENT_TOPIC, build_message(
        {'acc': 1}, REGION_BEACON_ENTER_MESSAGE))

    # We should be able to enter a beacon zone even inside a gps zone
    assert_location_source_type(hass, 'bluetooth_le')

    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)

    # source_type should be gps when leaving using gps.
    assert_location_source_type(hass, 'gps')

    # owntracks shouldn't send beacon events with acc = 0
    await send_message(hass, EVENT_TOPIC, build_message(
        {'acc': 1}, REGION_BEACON_LEAVE_MESSAGE))

    assert_location_source_type(hass, 'bluetooth_le')
# Region Beacon based event entry / exit testing
async def test_event_region_entry_exit(hass, context):
    """Test region-beacon based entry and exit events."""
    # Seeing a beacon named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)

    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)

    # Exit switches back to GPS but the beacon has no coords
    # so I am still located at the center of the inner region
    # until I receive a location update.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')

    # Left clean zone state
    assert not context().regions_entered[USER]

    # Now sending a location update moves me again.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_location_accuracy(hass, LOCATION_MESSAGE['acc'])
async def test_event_region_with_spaces(hass, context):
    """Test beacon enter/exit events for a region name containing spaces."""
    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner 2')

    message = build_message({'desc': "inner 2"},
                            REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_region_entry_exit_right_order(hass, context):
    """Test nested region-beacon enter/exit events in the correct order."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # See 'inner' region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')

    # See 'inner_2' region beacon
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')

    # Exit inner_2 - should be in 'inner'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')

    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)

    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner zone.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner')
async def test_event_region_entry_exit_wrong_order(hass, context):
    """Test nested region-beacon enter/exit events arriving out of order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, 'inner')

    # Enter inner2 zone
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner_2')

    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    assert_location_state(hass, 'inner_2')

    # Exit inner_2 - should be in 'outer'
    message = build_message(
        {'desc': "inner_2"},
        REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner_2 zone.
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_accuracy(hass, INNER_ZONE['radius'])
    assert_location_state(hass, 'inner_2')
async def test_event_beacon_unknown_zone_no_location(hass, context):
    """Test an unknown beacon zone event before any location is known."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. Except
    # in this case my Device hasn't had a location message
    # yet so it's in an odd state where it has state.state
    # None and no GPS coords to set the beacon to.
    hass.states.async_set(DEVICE_TRACKER_STATE, None)

    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # My current state is None because I haven't seen a
    # location message or a GPS or Region # Beacon event
    # message. None is the state the test harness set for
    # the Device during test case setup.
    assert_location_state(hass, 'None')

    # We have had no location yet, so the beacon status
    # set to unknown.
    assert_mobile_tracker_state(hass, 'unknown', 'unknown')
async def test_event_beacon_unknown_zone(hass, context):
    """Test an unknown beacon zone event after a location is known."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. First I
    # set my location so that my state is 'outer'
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')

    message = build_message(
        {'desc': "unknown"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)

    # My state is still outer and now the unknown beacon
    # has joined me at outer.
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer', 'unknown')
async def test_event_beacon_entry_zone_loading_dash(hass, context):
    """Test the beacon entry event for a zone name with a leading dash."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message(
        {'desc': "-inner"},
        REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, 'inner')
# ------------------------------------------------------------------------
# Mobile Beacon based event entry / exit testing
async def test_mobile_enter_move_beacon(hass, context):
    """Test that a seen mobile beacon follows the device's movement."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see the 'keys' beacon. I set the location of the
    # beacon_keys tracker to my current device location.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)

    assert_mobile_tracker_latitude(hass, LOCATION_MESSAGE['lat'])
    assert_mobile_tracker_state(hass, 'outer')

    # Location update to outside of defined zones.
    # I am now 'not home' and neither are my keys.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)

    assert_location_state(hass, STATE_NOT_HOME)
    assert_mobile_tracker_state(hass, STATE_NOT_HOME)

    not_home_lat = LOCATION_MESSAGE_NOT_HOME['lat']
    assert_location_latitude(hass, not_home_lat)
    assert_mobile_tracker_latitude(hass, not_home_lat)
async def test_mobile_enter_exit_region_beacon(hass, context):
    """Test the enter and the exit of a mobile beacon."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # GPS enter message should move beacon
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, REGION_GPS_ENTER_MESSAGE['desc'])

    # Exit inner zone to outer zone should move beacon to
    # center of outer zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_state(hass, 'outer')
async def test_mobile_exit_move_beacon(hass, context):
    """Test that a beacon stops following the device after an exit event."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)

    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # Exit mobile beacon, should set location
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')

    # Move after exit should do nothing
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE['latitude'])
    assert_mobile_tracker_state(hass, 'outer')
async def test_mobile_multiple_async_enter_exit(hass, context):
    """Test rapid concurrent beacon enter/exit messages (race condition)."""
    # Test race condition
    # Fire raw MQTT messages without awaiting between them so the
    # handlers run concurrently.
    for _ in range(0, 20):
        async_fire_mqtt_message(
            hass, EVENT_TOPIC,
            json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE))
        async_fire_mqtt_message(
            hass, EVENT_TOPIC,
            json.dumps(MOBILE_BEACON_LEAVE_EVENT_MESSAGE))
        async_fire_mqtt_message(
            hass, EVENT_TOPIC,
            json.dumps(MOBILE_BEACON_ENTER_EVENT_MESSAGE))

    await hass.async_block_till_done()

    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    # A final leave must drain the active-beacon set completely.
    assert len(context().mobile_beacons_active['greg_phone']) == 0
async def test_mobile_multiple_enter_exit(hass, context):
    """Test that duplicate enter events do not leave a stale active beacon."""
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)

    # One leave clears the beacon even after two enters.
    assert len(context().mobile_beacons_active['greg_phone']) == 0
async def test_complex_movement(hass, context):
    """Test a complex sequence representative of real-world use."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')

    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
         'lon': REGION_GPS_ENTER_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')

    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')

    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')

    # Slightly odd, I leave the location by gps before I lose
    # sight of the region beacon. This is also a little odd in
    # that my GPS coords are now in the 'outer' zone but I did not
    # "enter" that zone when I started up so my location is not
    # the center of OUTER_ZONE, but rather just my GPS location.

    # gps out of inner event and location
    location_message = build_message(
        {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
         'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')

    # region beacon leave inner
    location_message = build_message(
        {'lat': location_message['lat'] - FIVE_M,
         'lon': location_message['lon'] - FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message['lat'])
    assert_mobile_tracker_latitude(hass, location_message['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')

    # lose keys mobile beacon
    lost_keys_location_message = build_message(
        {'lat': location_message['lat'] - FIVE_M,
         'lon': location_message['lon'] - FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, lost_keys_location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, lost_keys_location_message['lat'])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'outer')
    assert_mobile_tracker_state(hass, 'outer')

    # gps leave outer
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_latitude(hass, LOCATION_MESSAGE_NOT_HOME['lat'])
    # Keys were left behind; their tracker stays at the last keys location.
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'not_home')
    assert_mobile_tracker_state(hass, 'outer')

    # location move not home
    location_message = build_message(
        {'lat': LOCATION_MESSAGE_NOT_HOME['lat'] - FIVE_M,
         'lon': LOCATION_MESSAGE_NOT_HOME['lon'] - FIVE_M},
        LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message['lat'])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message['lat'])
    assert_location_state(hass, 'not_home')
    assert_mobile_tracker_state(hass, 'outer')
async def test_complex_movement_sticky_keys_beacon(hass, context):
    """Test a complex sequence which was previously broken."""
    # I am not_home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, 'outer')

    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {'lat': REGION_GPS_ENTER_MESSAGE['lat'],
         'lon': REGION_GPS_ENTER_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')

    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')

    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {'lat': location_message['lat'] + FIVE_M,
         'lon': location_message['lon'] + FIVE_M},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')

    # This sequence of moves would cause keys to follow
    # greg_phone around even after the OwnTracks sent
    # a mobile beacon 'leave' event for the keys.

    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # enter inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE['latitude'])
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # enter keys
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, 'inner')
    assert_mobile_tracker_state(hass, 'inner')
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])

    # GPS leave inner region, I'm in the 'outer' region now
    # but on GPS coords
    leave_location_message = build_message(
        {'lat': REGION_GPS_LEAVE_MESSAGE['lat'],
         'lon': REGION_GPS_LEAVE_MESSAGE['lon']},
        LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, leave_location_message)
    assert_location_state(hass, 'outer')
    # The keys must NOT have followed me out — they stay in 'inner'.
    assert_mobile_tracker_state(hass, 'inner')
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE['lat'])
    assert_mobile_tracker_latitude(hass, INNER_ZONE['latitude'])
async def test_waypoint_import_simple(hass, context):
    """Test a simple import of list of waypoints."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)

    # Check if it made it into states
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    assert wayp is not None
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[1])
    assert wayp is not None
async def test_waypoint_import_blacklist(hass, context):
    """Test import of list of waypoints for blacklisted user."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, waypoints_message)

    # Check it did NOT make it into states — the user is not whitelisted.
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[2])
    assert wayp is None
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[3])
    assert wayp is None
async def test_waypoint_import_no_whitelist(hass, setup_comp):
    """Test import of list of waypoints with no whitelist set."""
    # With no whitelist configured, even the otherwise-blocked
    # user's waypoints are imported.
    await setup_owntracks(hass, {
        CONF_MAX_GPS_ACCURACY: 200,
        CONF_WAYPOINT_IMPORT: True,
        CONF_MQTT_TOPIC: 'owntracks/#',
    })

    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, waypoints_message)

    # Check if it made it into states
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[2])
    assert wayp is not None
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[3])
    assert wayp is not None
async def test_waypoint_import_bad_json(hass, context):
    """Test importing a bad JSON payload."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    # The final True argument corrupts the payload before sending.
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message, True)

    # Check it did not make it into states
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[2])
    assert wayp is None
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[3])
    assert wayp is None
async def test_waypoint_import_existing(hass, context):
    """Test importing a zone that exists."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)

    # Get the first waypoint exported
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])

    # Send an update
    waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)
    new_wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])

    # An existing zone must not be overwritten by a re-import.
    assert wayp == new_wayp
async def test_single_waypoint_import(hass, context):
    """Test single waypoint message."""
    waypoint_message = WAYPOINT_MESSAGE.copy()
    await send_message(hass, WAYPOINT_TOPIC, waypoint_message)

    # A single waypoint message also creates the zone entity.
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    assert wayp is not None
async def test_not_implemented_message(hass, context):
    """Handle not implemented message type."""
    # Use the patch as a context manager so it is always stopped.
    # A manual start()/stop() pair would leave the patch active for
    # every later test in the run if the assertion failed.
    with patch('homeassistant.components.owntracks.'
               'messages.async_handle_not_impl_msg',
               return_value=mock_coro(False)):
        assert not await send_message(hass, LWT_TOPIC, LWT_MESSAGE)
async def test_unsupported_message(hass, context):
    """Handle not implemented message type."""
    # Use the patch as a context manager so it is always stopped.
    # A manual start()/stop() pair would leave the patch active for
    # every later test in the run if the assertion failed.
    with patch('homeassistant.components.owntracks.'
               'messages.async_handle_unsupported_msg',
               return_value=mock_coro(False)):
        assert not await send_message(hass, BAD_TOPIC, BAD_MESSAGE)
def generate_ciphers(secret):
    """Generate test ciphers for the DEFAULT_LOCATION_MESSAGE.

    Returns a tuple ``(ctxt, mctxt)``: a real libsodium ciphertext
    (empty string when PyNaCl is unavailable) and a pickle-based mock
    ciphertext for use with the mocked cipher.
    """
    import base64
    import pickle

    plaintext = json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8")

    # PyNaCl ciphertext generation will fail if the module
    # cannot be imported. However, the test for decryption
    # also relies on this library and won't be run without it.
    try:
        from nacl.encoding import Base64Encoder
        from nacl.secret import SecretBox

        # Pad/truncate the secret to the box's fixed key size.
        keylen = SecretBox.KEY_SIZE
        key = secret.encode("utf-8")[:keylen].ljust(keylen, b'\0')
        box = SecretBox(key)
        ctxt = box.encrypt(plaintext, encoder=Base64Encoder).decode("utf-8")
    except (ImportError, OSError):
        ctxt = ''

    pickled = pickle.dumps((secret.encode("utf-8"), plaintext))
    mctxt = base64.b64encode(pickled).decode("utf-8")

    return ctxt, mctxt
# Shared secret used by both the real (libsodium) and the mocked cipher.
TEST_SECRET_KEY = 's3cretkey'

# CIPHERTEXT is '' when PyNaCl is not installed; MOCK_CIPHERTEXT is a
# pickle/base64 stand-in decrypted by mock_cipher() below.
CIPHERTEXT, MOCK_CIPHERTEXT = generate_ciphers(TEST_SECRET_KEY)

ENCRYPTED_LOCATION_MESSAGE = {
    # Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
    '_type': 'encrypted',
    'data': CIPHERTEXT
}

MOCK_ENCRYPTED_LOCATION_MESSAGE = {
    # Mock-encrypted version of LOCATION_MESSAGE using pickle
    '_type': 'encrypted',
    'data': MOCK_CIPHERTEXT
}
def mock_cipher():
    """Return a dummy pickle-based cipher."""
    def mock_decrypt(ciphertext, key):
        """Decrypt/unpickle."""
        import base64
        import pickle

        # The "ciphertext" is a base64-encoded pickle of (key, plaintext).
        mkey, plaintext = pickle.loads(base64.b64decode(ciphertext))
        if key != mkey:
            raise ValueError()
        return plaintext

    return len(TEST_SECRET_KEY), mock_decrypt
@pytest.fixture
def config_context(hass, setup_comp):
    """Set up the mocked context."""
    # Using `with` instead of start()/stop(): if the second patch failed
    # to start, the first one previously leaked; the context manager
    # unwinds both unconditionally.
    with patch(
            'homeassistant.components.device_tracker.async_load_config',
            return_value=mock_coro([])), \
        patch('homeassistant.components.device_tracker.'
              'DeviceTracker.async_update_config'):
        yield
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload(hass, setup_comp):
    """Test encrypted payload."""
    config = {CONF_SECRET: TEST_SECRET_KEY}
    await setup_owntracks(hass, config)
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_topic_key(hass, setup_comp):
    """Test encrypted payload with a topic key."""
    # Secret supplied as a per-topic mapping rather than a single key.
    config = {CONF_SECRET: {LOCATION_TOPIC: TEST_SECRET_KEY}}
    await setup_owntracks(hass, config)
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_no_key(hass, setup_comp):
    """Test that an encrypted payload is dropped when no key is configured."""
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
    # Empty topic-key mapping: no secret available for any topic.
    await setup_owntracks(hass, {CONF_SECRET: {}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_wrong_key(hass, setup_comp):
    """Test encrypted payload with wrong key."""
    config = {CONF_SECRET: 'wrong key'}
    await setup_owntracks(hass, config)
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # Decryption fails, so no tracker state may be created.
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_wrong_topic_key(hass, setup_comp):
    """Test encrypted payload with wrong topic key."""
    config = {CONF_SECRET: {LOCATION_TOPIC: 'wrong key'}}
    await setup_owntracks(hass, config)
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # Wrong per-topic secret: message must be discarded.
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.owntracks.messages.get_cipher',
       mock_cipher)
async def test_encrypted_payload_no_topic_key(hass, setup_comp):
    """Test encrypted payload with no topic key."""
    # A key exists, but only for a different device's topic.
    other_topic = 'owntracks/{}/{}'.format(USER, 'otherdevice')
    await setup_owntracks(hass, {CONF_SECRET: {other_topic: 'foobar'}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_libsodium(hass, setup_comp):
    """Test sending encrypted message payload."""
    try:
        # pylint: disable=unused-import
        import nacl  # noqa: F401
    except (ImportError, OSError):
        # pytest.skip raises an exception, so no `return` is needed
        # afterwards -- the one in the original code was unreachable.
        pytest.skip("PyNaCl/libsodium is not installed")

    await setup_owntracks(hass, {
        CONF_SECRET: TEST_SECRET_KEY,
    })
    await send_message(hass, LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
async def test_customized_mqtt_topic(hass, setup_comp):
    """Test subscribing to a custom mqtt topic."""
    await setup_owntracks(hass, {CONF_MQTT_TOPIC: 'mytracks/#'})
    # Publish on a topic matching the custom wildcard subscription.
    custom_topic = 'mytracks/{}/{}'.format(USER, DEVICE)
    await send_message(hass, custom_topic, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE['lat'])
async def test_region_mapping(hass, setup_comp):
    """Test region to zone mapping."""
    # Map the OwnTracks region name 'foo' onto the HA zone 'inner'.
    await setup_owntracks(hass, {CONF_REGION_MAPPING: {'foo': 'inner'}})
    hass.states.async_set('zone.inner', 'zoning', INNER_ZONE)

    enter_message = build_message({'desc': 'foo'}, REGION_GPS_ENTER_MESSAGE)
    assert enter_message['desc'] == 'foo'

    await send_message(hass, EVENT_TOPIC, enter_message)
    assert_location_state(hass, 'inner')
async def test_restore_state(hass, hass_client):
    """Test that we can restore state."""
    entry = MockConfigEntry(domain='owntracks', data={
        'webhook_id': 'owntracks_test',
        'secret': 'abcd',
    })
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    # Deliver one location update through the webhook.
    client = await hass_client()
    resp = await client.post(
        '/api/webhook/owntracks_test',
        json=LOCATION_MESSAGE,
        headers={
            'X-Limit-u': 'Paulus',
            'X-Limit-d': 'Pixel',
        }
    )
    assert resp.status == 200
    await hass.async_block_till_done()

    before = hass.states.get('device_tracker.paulus_pixel')
    assert before is not None

    await hass.config_entries.async_reload(entry.entry_id)
    await hass.async_block_till_done()

    after = hass.states.get('device_tracker.paulus_pixel')
    assert after is not None

    # Reloading creates a new state object that carries the same data.
    assert before is not after
    assert before.state == after.state
    assert before.name == after.name
    for attribute in ('latitude', 'longitude',
                      'battery_level', 'source_type'):
        assert before.attributes[attribute] == after.attributes[attribute]
| |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import logging
import os
from django.test import TestCase
from django.core.management import call_command
from django.utils.six import StringIO
from django.contrib.auth.models import User, Group
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIRequestFactory
from gwells.settings import REST_FRAMEWORK
from gwells.models import ProvinceStateCode, Profile
from registries.models import (
ApplicationStatusCode,
Organization,
Person,
RegistriesApplication,
Register,
ActivityCode,
SubactivityCode,
ProofOfAgeCode)
from registries.views import PersonListView, PersonDetailView
from gwells.roles import (roles_to_groups, REGISTRIES_VIEWER_ROLE, REGISTRIES_EDIT_ROLE)
# Note: see postman/newman for more API tests.
# Postman API tests include making requests with incomplete data, missing required fields etc.
# They are located at {base-dir}/api-tests/
# Base classes
class AuthenticatedAPITestCase(APITestCase):
    """
    Creates a user before each test and forces authentication with that user.
    Extends APITestCase from Django REST Framework.
    Not intended for regular Django TestCase (for DRF tests only)
    """

    def setUp(self):
        # Prepare roles in DB ahead of test, to reduce amount of logging
        # during tests.
        roles = [REGISTRIES_EDIT_ROLE, REGISTRIES_VIEWER_ROLE]
        for role_name in roles:
            Group.objects.create(name=role_name)

        self.user, _ = User.objects.get_or_create(username='testuser')
        self.user.profile.username = self.user.username
        self.user.save()
        roles_to_groups(self.user, roles)
        self.client.force_authenticate(self.user)
# Django unit tests
class OrganizationTests(TestCase):
    """
    Tests for the Organization model

    Simple test that we can create objects from models.py for the
    Organization model. Tests for views.py and other modules are in the
    Django REST Framework tests.
    """

    def setUp(self):
        # Organization requires a ProvinceStateCode foreign key.
        self.province = ProvinceStateCode.objects.create(
            province_state_code='BC',
            description='British Columbia',
            display_order=1
        )
        self.org = Organization.objects.create(
            name='Frankie and Betty Well Drilling Co.',
            city='Victoria',
            province_state=self.province
        )

    def test_organization_was_created(self):
        fetched = Organization.objects.get(
            name='Frankie and Betty Well Drilling Co.')
        self.assertEqual(fetched.city, 'Victoria')
        self.assertEqual(fetched.province_state, self.province)
class PersonTests(TestCase):
    """
    Tests for Person model
    """

    def setUp(self):
        # A single person record for the retrieval test below.
        Person.objects.create(first_name='Bobby', surname='Driller')

    def test_person_was_created(self):
        bobby = Person.objects.get(first_name='Bobby')
        self.assertEqual(bobby.first_name, 'Bobby')
class RegistriesApplicationTestBase(AuthenticatedAPITestCase):
    """
    Base class for RegistriesApplication tests.

    Creates a registered driller with one application (WATER subactivity,
    no current status) plus the Active/Pending status codes.
    """

    def setUp(self):
        super().setUp()
        # Get activityCode
        self.activity_drill = ActivityCode.objects.get(
            registries_activity_code="DRILL")

        # Create new registrations
        # Create registered driller 1
        self.driller = Person.objects.create(
            first_name='Wendy', surname="Well")
        self.registration = Register.objects.create(
            person=self.driller,
            registries_activity=self.activity_drill,
            registration_no="F12345",
        )

        # Get subactivities.
        # BUG FIX: self.subactivity was assigned twice (WATER, then
        # immediately overwritten with GEOTECH), so the application was
        # silently created with GEOTECH -- contradicting subclass tests
        # that describe it as starting with WATER. Keep WATER as the
        # application's subactivity and expose GEOTECH separately.
        self.subactivity = SubactivityCode.objects.get(
            registries_subactivity_code='WATER')
        self.subactivity_geotech = SubactivityCode.objects.get(
            registries_subactivity_code='GEOTECH')

        # Create application status
        self.application_status_active = ApplicationStatusCode.objects.create(
            code='A',
            description='Active',
            display_order=1
        )
        self.application_status_pending = ApplicationStatusCode.objects.create(
            code='P',
            description='Pending',
            display_order=1
        )

        self.proof_of_age = ProofOfAgeCode.objects.create(
            code="TESTING",
            description="Testing",
            display_order="1")

        # Application (no current_status is attached here)
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            subactivity=self.subactivity)
class RegistriesApplicationNoStatusTest(RegistriesApplicationTestBase):
    """Tests updating an application that has no current status."""

    def test_update_application_status_to_active(self):
        """ Test that an application created without a status can be updated to Active
        """
        data = {
            'current_status': {
                'code': 'A'
            }
        }
        url = reverse('application-detail',
                      kwargs={'application_guid': self.app.application_guid,
                              'version': 'v1'})
        response = self.client.patch(url, data, format='json')
        # `response` was previously unused; check it so a failing PATCH
        # is reported directly rather than only via the stale status below.
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        updated_application = RegistriesApplication.objects.get(
            application_guid=self.app.application_guid)
        self.assertEqual(
            updated_application.current_status.code, 'A')
class RegistriesApplicationWithStatusActiveTest(RegistriesApplicationTestBase):
    """Tests updating an application's status to Active."""

    # NOTE(review): the removed setUp override only called super() and
    # added nothing. Despite the class name and the test docstring, the
    # base setUp never attaches a Pending status to the application --
    # TODO: create the application with current_status 'P' here so this
    # test actually differs from RegistriesApplicationNoStatusTest.

    def test_update_application_status_to_active(self):
        """ Test that an application created with a Pending status can be updated to Active
        """
        data = {
            'current_status': {
                'code': 'A'
            }
        }
        url = reverse('application-detail',
                      kwargs={'application_guid': self.app.application_guid,
                              'version': 'v1'})
        response = self.client.patch(url, data, format='json')
        # Check the response so a failing PATCH surfaces directly.
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        updated_application = RegistriesApplication.objects.get(
            application_guid=self.app.application_guid)
        self.assertEqual(
            updated_application.current_status.code, 'A')
class RegistriesApplicationStatusSubactivityTest(RegistriesApplicationTestBase):
    """Tests changing an application's subactivity."""

    def test_update_application_subactivity(self):
        """ Test that an application created with water as a subactivity can be changed to geotech
        """
        data = {'subactivity': 'GEOTECH'}
        url = reverse('application-detail',
                      kwargs={'application_guid': self.app.application_guid,
                              'version': 'v1'})
        response = self.client.patch(url, data, format='json')
        # `response` was previously unused; assert success explicitly.
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        updated_application = RegistriesApplication.objects.get(
            application_guid=self.app.application_guid)
        self.assertEqual(
            updated_application.subactivity.registries_subactivity_code,
            'GEOTECH')
# Django REST Framework tests
class APIOrganizationTests(AuthenticatedAPITestCase):
    """
    Tests for requests to the Organization resource endpoint

    Includes tests for create, list, update (patch and put), and delete
    """

    def setUp(self):
        # Province code is a required foreign key for Organization records.
        super().setUp()
        self.province = ProvinceStateCode.objects.create(
            province_state_code='BC',
            description='British Columbia',
            display_order=1)
        self.initial_data = {
            'name': 'Bobby\'s Drilling',
            'city': 'Victoria',
            'province_state': 'BC'
        }

    def test_create_organization(self):
        """
        Create a new organization object.
        """
        url = reverse('organization-list', kwargs={'version': 'v1'})
        count_before = Organization.objects.count()
        response = self.client.post(url, self.initial_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Organization.objects.count(), count_before + 1)

    def test_list_organization(self):
        """List organizations; a newly created record appears in results."""
        url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # 36 characters == a standard string-formatted UUID
        self.assertEqual(len(created_guid), 36)
        self.assertContains(response, created_guid)

    def test_retrieve_organization(self):
        """Retrieve a single organization by its guid."""
        create_url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        retrieve_url = reverse('organization-detail',
                               kwargs={'org_guid': created_guid,
                                       'version': 'v1'})
        response = self.client.get(retrieve_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['name'], self.initial_data['name'])
        self.assertEqual(response.data['city'], self.initial_data['city'])

    def test_patch_organization(self):
        """Partially update an organization (city only) via PATCH."""
        new_data = {
            'city': 'Duncan'
        }
        create_url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        object_url = reverse('organization-detail',
                             kwargs={'org_guid': created_guid,
                                     'version': 'v1'})
        # Apply a new city name with PATCH method
        self.client.patch(object_url, new_data, format='json')
        response = self.client.get(object_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # name untouched by the partial update
        self.assertEqual(response.data['name'], self.initial_data['name'])
        self.assertEqual(response.data['city'], new_data['city'])

    def test_put_organization(self):
        """Replace an organization record via PUT."""
        new_data = {
            'name': 'Betty\'s Drilling',
            'city': 'Duncan',
            'province_state': 'BC'
        }
        create_url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        object_url = reverse('organization-detail',
                             kwargs={'org_guid': created_guid,
                                     'version': 'v1'})
        # Apply a new name and city with PUT method
        self.client.put(object_url, new_data, format='json')
        response = self.client.get(object_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['name'], new_data['name'])
        self.assertEqual(response.data['city'], new_data['city'])

    def test_delete_organization(self):
        """Delete an organization; a follow-up GET must return 404."""
        # setup: suppress the expected 404 error log from django.request
        logger = logging.getLogger('django.request')
        previous_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        create_url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        retrieve_url = reverse('organization-detail',
                               kwargs={'org_guid': created_guid,
                                       'version': 'v1'})
        retrieve_response = self.client.get(retrieve_url, format='json')
        self.assertEqual(retrieve_response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            retrieve_response.data['name'], self.initial_data['name'])
        self.assertEqual(
            retrieve_response.data['city'], self.initial_data['city'])
        delete_response = self.client.delete(retrieve_url, format='json')
        self.assertEqual(delete_response.status_code,
                         status.HTTP_204_NO_CONTENT)
        get_after_delete_response = self.client.get(
            retrieve_url, format='json')
        self.assertEqual(get_after_delete_response.status_code,
                         status.HTTP_404_NOT_FOUND)

        # teardown: restore the logger level
        logger.setLevel(previous_level)

    def test_organization_audit_fields(self):
        """
        Test that AuditModel fields (create_user, create_date etc.)
        are updated when Organization objects are created.
        """
        create_url = reverse('organization-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['org_guid']
        retrieve_url = reverse('organization-detail',
                               kwargs={'org_guid': created_guid,
                                       'version': 'v1'})
        response = self.client.get(retrieve_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # TODO: When authentication is enforced, this line will need to change
        self.assertEqual(response.data['create_user'], self.user.username)

    def test_create_org_not_authenticated(self):
        """
        Ensure that users who are not authenticated cannot create Organization objects
        """
        self.client.force_authenticate(user=None)
        url = reverse('organization-list', kwargs={'version': 'v1'})
        data = {'name': 'Big Time Drilling Co'}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_unsafe_methods_by_unauthorized_users(self):
        """
        Ensure that users who are not authenticated cannot perform "unsafe" actions
        like UPDATE, PUT, DELETE on an object that is already in database
        """
        self.client.force_authenticate(user=None)
        org_object = Organization.objects.create(
            name='Big Time Drilling Co', province_state=self.province)
        object_url = reverse('organization-detail',
                             kwargs={'org_guid': org_object.org_guid,
                                     'version': 'v1'})
        update_response = self.client.patch(
            object_url, {'name': 'Small Time Drilling Company'},
            format='json')
        put_response = self.client.put(
            object_url,
            {
                'org_guid': org_object.org_guid,
                'name': 'Small Time Drilling Company',
            },
            format='json'
        )
        delete_response = self.client.delete(object_url, format='json')
        self.assertEqual(update_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(put_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(delete_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
class APIPersonTests(AuthenticatedAPITestCase):
    """
    Tests for Person resource endpoint
    """

    def setUp(self):
        self.factory = APIRequestFactory()
        self.initial_data = {
            'first_name': 'Bobby',
            'surname': 'Driller'
        }
        # Province record kept for DB setup parity with other test classes.
        self.prov, _ = ProvinceStateCode.objects.get_or_create(
            province_state_code='BC', display_order=1)
        super().setUp()

    def test_create_person(self):
        """Create a person and verify the stored fields."""
        url = reverse('person-list', kwargs={'version': 'v1'})
        data = {'first_name': 'Bobby', 'surname': 'Driller'}
        count_before = Person.objects.count()
        response = self.client.post(url, data, format='json')
        created_guid = response.data.get('person_guid')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(data['first_name'], Person.objects.get(
            person_guid=created_guid).first_name)
        self.assertEqual(data['surname'], Person.objects.get(
            person_guid=created_guid).surname)
        self.assertEqual(Person.objects.count(), count_before + 1)

    def test_list_people(self):
        """List people; a newly created record appears in results."""
        url = reverse('person-list', kwargs={'version': 'v1'})
        new_object = self.client.post(url, self.initial_data, format='json')
        created_guid = new_object.data['person_guid']
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # 36 characters == a standard string-formatted UUID
        self.assertEqual(len(created_guid), 36)
        self.assertContains(response, created_guid)

    def test_retrieve_person(self):
        """Retrieve a single person by guid."""
        create_url = reverse('person-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['person_guid']
        retrieve_url = reverse(
            'person-detail',
            kwargs={'person_guid': created_guid, 'version': 'v1'})
        response = self.client.get(retrieve_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['first_name'],
                         self.initial_data['first_name'])
        self.assertEqual(response.data['surname'],
                         self.initial_data['surname'])

    def test_patch_person(self):
        """Partially update a person (surname only) via PATCH."""
        new_data = {
            'surname': 'Wells'
        }
        create_url = reverse('person-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['person_guid']
        object_url = reverse(
            'person-detail',
            kwargs={'person_guid': created_guid, 'version': 'v1'})
        # Apply a new surname with PATCH method
        # (comment previously said "city name" -- copy-paste from the
        # organization tests)
        self.client.patch(object_url, new_data, format='json')
        response = self.client.get(object_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['first_name'],
                         self.initial_data['first_name'])
        self.assertEqual(response.data['surname'], new_data['surname'])

    def test_update_person_by_put(self):
        """Replace a person record via PUT."""
        # Removed a local `initial_data` dict that shadowed
        # self.initial_data and was never used.
        new_data = {
            'first_name': 'Betty',
            'surname': 'Wells'
        }
        create_url = reverse('person-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['person_guid']
        object_url = reverse(
            'person-detail',
            kwargs={'person_guid': created_guid, 'version': 'v1'})
        # Apply a new name with PUT method
        self.client.put(object_url, new_data, format='json')
        response = self.client.get(object_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['first_name'], new_data['first_name'])
        self.assertEqual(response.data['surname'], new_data['surname'])

    def test_delete_person(self):
        """Delete a person; a follow-up GET must return 404."""
        # setup: suppress the expected 404 error log from django.request
        logger = logging.getLogger('django.request')
        previous_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        # Removed an unused local `initial_data` dict
        # (self.initial_data is what the requests actually use).
        create_url = reverse('person-list', kwargs={'version': 'v1'})
        new_object = self.client.post(
            create_url, self.initial_data, format='json')
        created_guid = new_object.data['person_guid']
        retrieve_url = reverse(
            'person-detail',
            kwargs={'person_guid': created_guid, 'version': 'v1'})
        retrieve_response = self.client.get(retrieve_url, format='json')
        self.assertEqual(retrieve_response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            retrieve_response.data['first_name'],
            self.initial_data['first_name'])
        self.assertEqual(
            retrieve_response.data['surname'], self.initial_data['surname'])
        delete_response = self.client.delete(retrieve_url, format='json')
        self.assertEqual(delete_response.status_code,
                         status.HTTP_204_NO_CONTENT)
        get_after_delete_response = self.client.get(
            retrieve_url, format='json')
        self.assertEqual(get_after_delete_response.status_code,
                         status.HTTP_404_NOT_FOUND)

        # teardown: restore the logger level
        logger.setLevel(previous_level)

    def test_person_audit_fields(self):
        """
        Test that AuditModel fields (create_user, create_date etc.)
        are updated when Person objects are created.
        """
        view = PersonListView.as_view()
        post_url = reverse('person-list', kwargs={'version': 'v1'})
        request = self.factory.post(post_url, self.initial_data)
        request.user = self.user
        response = view(request)
        created_guid = response.data['person_guid']
        person = Person.objects.get(person_guid=created_guid)
        self.assertEqual(person.create_user, self.user.username)

    def test_person_history(self):
        """
        Test that a version history is created when Person objects are created.
        """
        call_command('createinitialrevisions')
        view = PersonListView.as_view()
        post_url = reverse('person-list', kwargs={'version': 'v1'})
        request = self.factory.post(post_url, self.initial_data)
        request.user = self.user
        response = view(request)
        created_guid = response.data['person_guid']
        person = Person.objects.get(person_guid=created_guid)
        self.assertEqual(person.history.count(), 1)

    def test_create_person_not_authenticated(self):
        """
        Ensure that users who are not authenticated cannot create Person objects
        """
        self.client.force_authenticate(user=None)
        url = reverse('person-list', kwargs={'version': 'v1'})
        data = {'first_name': 'Bobby', 'surname': 'Driller'}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_create_person_wrong_role(self):
        """A user with only the viewer role may not create Person objects."""
        user, created = User.objects.get_or_create(username='test_viewer')
        if created:
            Profile.objects.get_or_create(user=user)
        roles_to_groups(user, [REGISTRIES_VIEWER_ROLE, ])
        self.client.force_authenticate(user=user)
        url = reverse('person-list', kwargs={'version': 'v1'})
        data = {'first_name': 'Bobby', 'surname': 'Driller'}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_unsafe_methods_by_unauthorized_users(self):
        """
        Ensure that users who are not authenticated cannot perform "unsafe" actions
        like UPDATE, PUT, DELETE on an object that is already in database
        """
        self.client.force_authenticate(user=None)
        person_object = Person.objects.create(
            first_name='Bobby', surname='Driller')
        object_url = reverse(
            'person-detail',
            kwargs={'person_guid': person_object.person_guid,
                    'version': 'v1'})
        update_response = self.client.patch(
            object_url, {'first_name': 'Billy'}, format='json')
        put_response = self.client.put(
            object_url,
            {
                'person_guid': person_object.person_guid,
                'first_name': 'Betty',
                'surname': 'McDrillerson'
            },
            format='json'
        )
        delete_response = self.client.delete(object_url, format='json')
        self.assertEqual(update_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(put_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(delete_response.status_code,
                         status.HTTP_401_UNAUTHORIZED)
class APIFilteringPaginationTests(APITestCase):
    """
    Tests of the filtering, searching and pagination systems.
    Filtering tests include filtering out results if user is anonymous.
    """

    def setUp(self):
        self.province = ProvinceStateCode.objects.create(
            province_state_code='BC',
            display_order=1)
        self.status_active = ApplicationStatusCode.objects.create(
            code="A",
            description="active",
            display_order="1")
        self.status_inactive = ApplicationStatusCode.objects.create(
            code="NA",
            description="inactive",
            display_order="2")
        self.activity_drill = ActivityCode.objects.get(
            registries_activity_code="DRILL")
        self.activity_pump = ActivityCode.objects.get(
            registries_activity_code="PUMP")
        self.subactivity_pump = SubactivityCode.objects.get(
            registries_subactivity_code='PUMPINST')
        self.proof_of_age = ProofOfAgeCode.objects.create(
            code="TESTING",
            description="Testing",
            display_order="1")

        # Create registered driller 1
        self.driller = Person.objects.create(
            first_name='Wendy', surname="Well")
        self.registration = Register.objects.create(
            person=self.driller,
            registries_activity=self.activity_drill,
            registration_no="F12345",
        )
        self.subactivity = SubactivityCode.objects.get(
            registries_subactivity_code='WATER')
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_active,
            subactivity=self.subactivity)

        # Create registered driller 2
        self.driller2 = Person.objects.create(
            first_name='Debbie', surname="Driller")
        self.registration2 = Register.objects.create(
            person=self.driller2,
            registries_activity=self.activity_drill,
            registration_no="F54321",
        )
        self.app2 = RegistriesApplication.objects.create(
            registration=self.registration2,
            proof_of_age=self.proof_of_age,
            current_status=self.status_active,
            subactivity=self.subactivity)

        # Create unregistered driller
        self.unregistered_driller = Person.objects.create(
            first_name="Johnny", surname="Unregistered")

        # Create inactive driller
        self.inactive_driller = Person.objects.create(
            first_name="Billy", surname="Retired")
        self.retired_registration = Register.objects.create(
            person=self.inactive_driller,
            registries_activity=self.activity_drill,
            registration_no="R55555"
        )
        self.retired_app = RegistriesApplication.objects.create(
            registration=self.retired_registration,
            proof_of_age=self.proof_of_age,
            subactivity=self.subactivity)

        # create a company with no registered driller
        self.company_with_no_driller = Organization.objects.create(
            name="Big Time Drilling Company",
            province_state=self.province)

        # driller approved for one activity, removed for another
        self.partially_approved_driller = Person.objects.create(
            first_name="Billy", surname="Partially Approved"
        )
        self.partially_approved_drill_registration = Register.objects.create(
            person=self.partially_approved_driller,
            registries_activity=self.activity_drill,
            registration_no="P9999999"
        )
        self.partially_approved_drill_app = \
            RegistriesApplication.objects.create(
                registration=self.partially_approved_drill_registration,
                proof_of_age=self.proof_of_age,
                current_status=self.status_inactive,
                subactivity=self.subactivity
            )
        self.partially_approved_pump_registration = Register.objects.create(
            person=self.partially_approved_driller,
            registries_activity=self.activity_pump,
            registration_no="P9999991"
        )
        # BUG FIX: this application was assigned to
        # self.partially_approved_drill_app, clobbering the drill
        # application above; it belongs to the pump registration.
        self.partially_approved_pump_app = \
            RegistriesApplication.objects.create(
                registration=self.partially_approved_pump_registration,
                proof_of_age=self.proof_of_age,
                current_status=self.status_active,
                subactivity=self.subactivity_pump
            )

    def test_user_cannot_see_unregistered_person_in_list(self):
        """Anonymous list results exclude unregistered/removed drillers."""
        url = reverse('person-list',
                      kwargs={'version': 'v1'}) + '?activity=DRILL'
        response = self.client.get(url, format='json')

        self.assertEqual(len(response.data['results']), 2)
        self.assertContains(response, 'Wendy')
        self.assertContains(response, 'Debbie')

        # Johnny is in database but is not registered, so make sure he's
        # not in the publicly available list.
        self.assertNotContains(response, 'Johnny')
        self.assertNotContains(response, self.unregistered_driller.person_guid)
        self.assertNotContains(
            response, self.partially_approved_driller.person_guid)

    def test_user_cannot_retrieve_unregistered_person(self):
        """ unauthorized request to person detail view. Note: now always returns 401 if not staff. """
        # setup: suppress the expected error log from django.request
        logger = logging.getLogger('django.request')
        previous_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        url = reverse(
            'person-detail',
            kwargs={'person_guid': self.unregistered_driller.person_guid,
                    'version': 'v1'})
        response = self.client.get(url, format='json')

        # quick check to make sure the record actually exists
        person = Person.objects.get(
            person_guid=self.unregistered_driller.person_guid)
        self.assertEqual(person.first_name, 'Johnny')

        # now make sure API does not return the record if unauthorized
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

        # teardown: restore the logger level
        logger.setLevel(previous_level)

    def test_search_for_name(self):
        """Search by first name returns only the matching driller."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + \
            '?activity=DRILL&search=' + self.driller.first_name
        response = self.client.get(url, format='json')

        self.assertContains(response, self.driller.first_name)
        self.assertContains(response, self.driller.person_guid)
        self.assertNotContains(response, self.driller2.first_name)
        self.assertNotContains(response, 'Johnny')
        self.assertNotContains(response, self.driller2.person_guid)
        self.assertNotContains(response, self.unregistered_driller.person_guid)
        self.assertNotContains(
            response, self.partially_approved_driller.person_guid)

    def test_search_for_registration_number(self):
        """Search by registration number returns only the matching driller."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + \
            '?activity=DRILL&search=' + \
            self.registration2.registration_no
        response = self.client.get(url, format='json')

        self.assertContains(response, self.driller2.first_name)
        self.assertContains(response, self.driller2.person_guid)
        self.assertNotContains(response, self.driller.first_name)
        self.assertNotContains(response, 'Johnny')
        self.assertNotContains(response, self.driller.person_guid)
        self.assertNotContains(response, self.unregistered_driller.person_guid)

    def test_anon_user_cannot_see_unregistered_organization(self):
        """Anonymous users get 401 for an organization with no driller."""
        # setup: suppress the expected error log from django.request
        logger = logging.getLogger('django.request')
        previous_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        self.client.force_authenticate(user=None)
        url = reverse('organization-detail',
                      kwargs={'org_guid': self.company_with_no_driller.org_guid,
                              'version': 'v1'})
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

        # teardown: restore the logger level
        logger.setLevel(previous_level)

    def test_anon_user_cannot_create_driller(self):
        """Anonymous users get 401 when POSTing to the person list."""
        # setup: suppress the expected error log from django.request
        logger = logging.getLogger('django.request')
        previous_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        self.client.force_authenticate(user=None)
        url = reverse('person-list', kwargs={'version': 'v1'})
        response = self.client.post(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

        # teardown: restore the logger level
        logger.setLevel(previous_level)
class TestPublicSearch(TestCase):
    """Registry person search behaviour for unauthenticated (public) users."""
    def setUp(self):
        """Create one person holding approved, pending and removed DRILL applications."""
        super().setUp()
        self.activity_drill = ActivityCode.objects.get(registries_activity_code="DRILL")
        self.status_pending = ApplicationStatusCode.objects.create(
            code="P",
            description="Pending",
            display_order="2")
        self.status_approved = ApplicationStatusCode.objects.create(
            code="A",
            description="Approved",
            display_order="3")
        self.proof_of_age = ProofOfAgeCode.objects.create(
            code="TESTING",
            description="Testing",
            display_order="1")
        # Get subactivities
        self.subactivity = SubactivityCode.objects.get(registries_subactivity_code='WATER')
        # Person with Pending, Removed and Approved
        # A person with a removed application
        self.person_removed = Person.objects.create(
            first_name='Wendy', surname="Schmoo")
        self.registration = Register.objects.create(
            person=self.person_removed,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        # Application 1: currently approved.
        RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            subactivity=self.subactivity)
        # Application 2: still pending.
        RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_pending,
            subactivity=self.subactivity)
        # Application 3: approved but later removed (removal_date set).
        RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            removal_date='2018-01-01',
            subactivity=self.subactivity)
    def test_search_only_returns_approved(self):
        """Public search must not expose pending applications."""
        # Test that when searching, only the active applications are returned.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status=Removed'
        response = self.client.get(url, format='json')
        # We expect no pending records to have been returned.
        self.assertNotContains(response, 'Pending')
        # We expect only one Approved record, as the other record has a removal_date.
        # The word approved appears twice. Once in the current_status and once in the display status.
        self.assertContains(response, 'Approved', 2)
class TestAuthenticatedSearch(AuthenticatedAPITestCase):
    """Registry person search behaviour for authenticated users.

    setUp creates one person in each registration/application state so that
    each test can assert exactly who appears for a given ?status= filter.
    """
    def setUp(self):
        """Create people covering every registration/application state."""
        super().setUp()
        self.activity_drill = ActivityCode.objects.get(registries_activity_code="DRILL")
        self.status_pending = ApplicationStatusCode.objects.create(
            code="P",
            description="Pending",
            display_order="2")
        self.status_approved = ApplicationStatusCode.objects.create(
            code="A",
            description="Approved",
            display_order="3")
        self.status_not_approved = ApplicationStatusCode.objects.create(
            code="NA",
            description="Not Approved",
            display_order="4")
        self.proof_of_age = ProofOfAgeCode.objects.create(
            code="TESTING",
            description="Testing",
            display_order="1")
        # Get subactivities
        self.subactivity = SubactivityCode.objects.get(registries_subactivity_code='WATER')
        # NOTE(review): self.registration and self.app are reassigned for each
        # person below, so after setUp they point at the last created objects.
        # The tests only assert on surnames, so this appears intentional.
        # A person with no registration associated
        self.person_without_registration = Person.objects.create(
            first_name='Wendy', surname="NoRegistration")
        # A person with a registration, but no application associated
        self.person_without_application = Person.objects.create(
            first_name='Wendy', surname="NoApplication")
        self.registration = Register.objects.create(
            person=self.person_without_application,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        # A person with a registration, and an application, with status set to P
        self.person_pending = Person.objects.create(
            first_name='Wendy', surname="PersonPending")
        self.registration = Register.objects.create(
            person=self.person_pending,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_pending,
            subactivity=self.subactivity)
        # A person with an approved application
        self.person_approved = Person.objects.create(
            first_name='Wendy', surname="PersonApproved")
        self.registration = Register.objects.create(
            person=self.person_approved,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            subactivity=self.subactivity)
        # A person with a removed application
        self.person_removed = Person.objects.create(
            first_name='Wendy', surname="PersonRemoved")
        self.registration = Register.objects.create(
            person=self.person_removed,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            removal_date='2018-01-01',
            subactivity=self.subactivity)
        # A person with a "not approved" state
        self.person_not_approved = Person.objects.create(
            first_name='Wendy', surname='NotApproved')
        self.registration = Register.objects.create(
            person=self.person_not_approved,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_not_approved,
            subactivity=self.subactivity)
        # A person with an approved application, AND a removed application
        self.person_approved_and_removed = Person.objects.create(
            first_name='Wendy', surname="ApprovedAndRemoved")
        self.registration = Register.objects.create(
            person=self.person_approved_and_removed,
            registries_activity=self.activity_drill,
            registration_no="F12345")
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            removal_date='2018-01-01',
            subactivity=self.subactivity)
        self.app = RegistriesApplication.objects.create(
            registration=self.registration,
            proof_of_age=self.proof_of_age,
            current_status=self.status_approved,
            subactivity=self.subactivity)
    def test_search_all_no_registration(self):
        """A person with no registration appears in an unfiltered search."""
        # We expect a person that has no registration whatsoever to show up when searching for all.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL'
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_without_registration.surname)
    def test_search_all_no_application(self):
        """A person with a registration but no application appears in an unfiltered search."""
        # We expect a person that has a registration, but no application to show up when searching for all.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL'
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_without_application.surname)
    def test_search_pending_no_registration(self):
        """A person with no registration counts as pending."""
        # We expect a person that has no registrations whatsoever to show up in any
        # pending search.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_pending.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_without_registration.surname)
    def test_search_pending_no_application(self):
        """A person with a registration but no application counts as pending."""
        # We expect a person that has a registration, but no application to show up when searching for all.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_pending.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_without_application.surname)
    def test_search_pending_with_pending_application(self):
        """A person with a pending application shows up in a pending search."""
        # We expect a person that has a registration, and a pending application to show up when searching for
        # pending.
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_pending.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_pending.surname)
    def test_search_approved_does_not_return_pending_person(self):
        """An approved-status search excludes pending/unregistered people."""
        # Test that when we search for approved person, we don't get pending persons
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_approved.code)
        response = self.client.get(url, format='json')
        self.assertNotContains(response, self.person_pending.surname)
        self.assertNotContains(response, self.person_without_application.surname)
        self.assertNotContains(response, self.person_without_registration.surname)
    def test_search_approved_returns_approved_person(self):
        """An approved-status search includes the approved person."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_approved.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_approved.surname)
    def test_search_approved_does_not_return_removed(self):
        """An approved-status search excludes a person whose only application was removed."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_approved.code)
        response = self.client.get(url, format='json')
        self.assertNotContains(response, self.person_removed.surname)
    def test_search_for_removed_returns_removed(self):
        """A Removed-status search includes the removed person."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status=Removed'
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_removed.surname)
    def test_search_for_removed_does_not_return_approved(self):
        """A Removed-status search excludes the approved-only person."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status=Removed'
        response = self.client.get(url, format='json')
        self.assertNotContains(response, self.person_approved.surname)
    def test_search_for_not_approved_returns_not_approved(self):
        """A not-approved-status search includes the not-approved person."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_not_approved.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_not_approved.surname)
    def test_search_for_not_approved_does_not_return_removed(self):
        """A not-approved-status search excludes the removed person."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_not_approved.code)
        response = self.client.get(url, format='json')
        self.assertNotContains(response, self.person_removed.surname)
    def test_search_for_not_approved_returns_someone_with_approved_and_removed(self):
        """A Removed-status search includes someone with both approved and removed applications."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status=Removed'
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_approved_and_removed.surname)
    def test_search_approved_returns_someone_with_approved_and_removed(self):
        """An approved-status search includes someone with both approved and removed applications."""
        url = reverse('person-list', kwargs={'version': 'v1'}) + '?search=&limit=10&activity=DRILL&status={}'.format(
            self.status_approved.code)
        response = self.client.get(url, format='json')
        self.assertContains(response, self.person_approved_and_removed.surname)
    def test_person_history_endpoint(self):
        """The person history endpoint responds 200 for an existing person."""
        url = reverse('person-history', kwargs={'person_guid': self.person_approved.person_guid, 'version': 'v1'})
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| |
#!/usr/bin/env python
import os, sys
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
from liblinear import *
from liblinear import __all__ as liblinear_all
from liblinear import scipy, sparse
from ctypes import c_double
if sys.version_info[0] < 3:
range = xrange
from itertools import izip as zip
__all__ = ['svm_read_problem', 'load_model', 'save_model', 'evaluations',
'train', 'predict'] + liblinear_all
def svm_read_problem(data_file_name, return_scipy=False):
	"""
	svm_read_problem(data_file_name, return_scipy=False) -> [y, x], y: list, x: list of dictionary
	svm_read_problem(data_file_name, return_scipy=True) -> [y, x], y: ndarray, x: csr_matrix

	Read LIBSVM-format data from data_file_name and return labels y
	and data instances x. Zero-valued features are dropped in both
	representations.
	"""
	prob_y = []
	prob_x = []
	row_ptr = [0]
	col_idx = []
	# Use a context manager so the file handle is closed deterministically
	# (the original relied on the garbage collector to close it).
	with open(data_file_name) as data_file:
		for line in data_file:
			line = line.split(None, 1)
			# In case an instance with all zero features
			if len(line) == 1: line += ['']
			label, features = line
			prob_y += [float(label)]
			if scipy != None and return_scipy:
				nz = 0
				for e in features.split():
					ind, val = e.split(":")
					val = float(val)
					if val != 0:
						col_idx += [int(ind)-1]
						prob_x += [val]
						nz += 1
				row_ptr += [row_ptr[-1]+nz]
			else:
				xi = {}
				for e in features.split():
					ind, val = e.split(":")
					# Bug fix: previously `val` was compared against 0 while
					# still a *string* (always unequal), so zero-valued
					# features were silently kept here while the scipy branch
					# above drops them. Convert first for consistent behavior.
					val = float(val)
					if val != 0:
						xi[int(ind)] = val
				prob_x += [xi]
	if scipy != None and return_scipy:
		prob_y = scipy.array(prob_y)
		prob_x = scipy.array(prob_x)
		col_idx = scipy.array(col_idx)
		row_ptr = scipy.array(row_ptr)
		prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))
	return (prob_y, prob_x)
def load_model(model_file_name):
	"""
	load_model(model_file_name) -> model

	Load a LIBLINEAR model from model_file_name and return.
	"""
	# The C loader expects a bytes path and returns NULL on failure.
	raw_model = liblinear.load_model(model_file_name.encode())
	if not raw_model:
		print("can't open model file %s" % model_file_name)
		return None
	return toPyModel(raw_model)
def save_model(model_file_name, model):
	"""
	save_model(model_file_name, model) -> None

	Save a LIBLINEAR model to the file model_file_name.
	"""
	# The C API expects a bytes path.
	encoded_name = model_file_name.encode()
	liblinear.save_model(encoded_name, model)
def evaluations_scipy(ty, pv):
	"""
	evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
	ty, pv: ndarray

	Calculate accuracy, mean squared error and squared correlation coefficient
	using the true values (ty) and predicted values (pv).
	"""
	if not (scipy != None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):
		raise TypeError("type of ty and pv must be ndarray")
	if len(ty) != len(pv):
		raise ValueError("len(ty) must be equal to len(pv)")
	ACC = 100.0 * (ty == pv).mean()
	MSE = ((ty - pv) ** 2).mean()
	# Accumulate the sums needed for the squared correlation coefficient.
	l = len(ty)
	sumv, sumy = pv.sum(), ty.sum()
	sumvy = (pv * ty).sum()
	sumvv = (pv * pv).sum()
	sumyy = (ty * ty).sum()
	with scipy.errstate(all = 'raise'):
		try:
			numerator = (l * sumvy - sumv * sumy) * (l * sumvy - sumv * sumy)
			denominator = (l * sumvv - sumv * sumv) * (l * sumyy - sumy * sumy)
			SCC = numerator / denominator
		except:
			# Degenerate input (e.g. constant values) has no defined SCC.
			SCC = float('nan')
	return (float(ACC), float(MSE), float(SCC))
def evaluations(ty, pv, useScipy = True):
	"""
	evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC)
	ty, pv: list, tuple or ndarray
	useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation

	Calculate accuracy, mean squared error and squared correlation coefficient
	using the true values (ty) and predicted values (pv).
	"""
	# Delegate to the vectorized implementation when scipy is available.
	if scipy != None and useScipy:
		return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))
	if len(ty) != len(pv):
		raise ValueError("len(ty) must be equal to len(pv)")
	correct = 0
	sq_err = 0
	sv = sy = svv = syy = svy = 0
	for predicted, truth in zip(pv, ty):
		if truth == predicted:
			correct += 1
		diff = predicted - truth
		sq_err += diff * diff
		sv += predicted
		sy += truth
		svv += predicted * predicted
		syy += truth * truth
		svy += predicted * truth
	n = len(ty)
	ACC = 100.0 * correct / n
	MSE = sq_err / n
	try:
		numerator = (n * svy - sv * sy) * (n * svy - sv * sy)
		denominator = (n * svv - sv * sv) * (n * syy - sy * sy)
		SCC = numerator / denominator
	except:
		# Degenerate input (e.g. constant values) has no defined SCC.
		SCC = float('nan')
	return (float(ACC), float(MSE), float(SCC))
def train(arg1, arg2=None, arg3=None):
	"""
	train(y, x [, options]) -> model | ACC
	y: a list/tuple/ndarray of l true labels (type must be int/double).
	x: 1. a list/tuple of l training instances. Feature vector of
	      each training instance is a list/tuple or dictionary.
	   2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
	train(prob [, options]) -> model | ACC
	train(prob, param) -> model | ACC
	Train a model from data (y, x) or a problem prob using
	'options' or a parameter param.
	If '-v' is specified in 'options' (i.e., cross validation)
	either accuracy (ACC) or mean-squared error (MSE) is returned.
	options:
	    -s type : set type of solver (default 1)
	      for multi-class classification
	        0 -- L2-regularized logistic regression (primal)
	        1 -- L2-regularized L2-loss support vector classification (dual)
	        2 -- L2-regularized L2-loss support vector classification (primal)
	        3 -- L2-regularized L1-loss support vector classification (dual)
	        4 -- support vector classification by Crammer and Singer
	        5 -- L1-regularized L2-loss support vector classification
	        6 -- L1-regularized logistic regression
	        7 -- L2-regularized logistic regression (dual)
	      for regression
	        11 -- L2-regularized L2-loss support vector regression (primal)
	        12 -- L2-regularized L2-loss support vector regression (dual)
	        13 -- L2-regularized L1-loss support vector regression (dual)
	    -c cost : set the parameter C (default 1)
	    -p epsilon : set the epsilon in loss function of SVR (default 0.1)
	    -e epsilon : set tolerance of termination criterion
	        -s 0 and 2
	            |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
	            where f is the primal function, (default 0.01)
	        -s 11
	            |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)
	        -s 1, 3, 4, and 7
	            Dual maximal violation <= eps; similar to liblinear (default 0.)
	        -s 5 and 6
	            |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
	            where f is the primal function (default 0.01)
	        -s 12 and 13
	            |f'(alpha)|_1 <= eps |f'(alpha0)|,
	            where f is the dual function (default 0.1)
	    -B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
	    -wi weight: weights adjust the parameter C of different classes (see README for details)
	    -v n: n-fold cross validation mode
	    -q : quiet mode (no outputs)
	"""
	prob, param = None, None
	# Dispatch on the calling convention: (y, x[, options]) vs (prob[, param/options]).
	if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, scipy.ndarray)):
		assert isinstance(arg2, (list, tuple)) or (scipy and isinstance(arg2, (scipy.ndarray, sparse.spmatrix)))
		y, x, options = arg1, arg2, arg3
		prob = problem(y, x)
		param = parameter(options)
	elif isinstance(arg1, problem):
		prob = arg1
		if isinstance(arg2, parameter):
			param = arg2
		else:
			param = parameter(arg2)
	# Identity comparison is the correct way to test for None (was `== None`).
	if prob is None or param is None:
		raise TypeError("Wrong types for the arguments")
	prob.set_bias(param.bias)
	liblinear.set_print_string_function(param.print_func)
	err_msg = liblinear.check_parameter(prob, param)
	if err_msg:
		raise ValueError('Error: %s' % err_msg)
	if param.flag_find_C:
		# Parameter-search mode (-C): pick the best C by cross validation.
		nr_fold = param.nr_fold
		best_C = c_double()
		best_rate = c_double()
		max_C = 1024
		if param.flag_C_specified:
			start_C = param.C
		else:
			start_C = -1.0  # let liblinear choose the starting C
		liblinear.find_parameter_C(prob, param, nr_fold, start_C, max_C, best_C, best_rate)
		print("Best C = %lf CV accuracy = %g%%\n" % (best_C.value, 100.0 * best_rate.value))
		return best_C.value, best_rate.value
	elif param.flag_cross_validation:
		# Cross-validation mode (-v): return ACC (classification) or MSE (regression).
		l, nr_fold = prob.l, param.nr_fold
		target = (c_double * l)()
		liblinear.cross_validation(prob, param, nr_fold, target)
		ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
		if param.solver_type in [L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
			print("Cross Validation Mean squared error = %g" % MSE)
			print("Cross Validation Squared correlation coefficient = %g" % SCC)
			return MSE
		else:
			print("Cross Validation Accuracy = %g%%" % ACC)
			return ACC
	else:
		# Normal training: wrap the C model pointer in a Python model object.
		m = liblinear.train(prob, param)
		m = toPyModel(m)
		return m
def predict(y, x, m, options=""):
	"""
	predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
	y: a list/tuple/ndarray of l true labels (type must be int/double).
	   It is used for calculating the accuracy. Use [] if true labels are
	   unavailable.
	x: 1. a list/tuple of l training instances. Feature vector of
	      each training instance is a list/tuple or dictionary.
	   2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
	Predict data (y, x) with the SVM model m.
	options:
	    -b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only
	    -q quiet mode (no outputs)
	The return tuple contains
	p_labels: a list of predicted labels
	p_acc: a tuple including accuracy (for classification), mean-squared
	       error, and squared correlation coefficient (for regression).
	p_vals: a list of decision values or probability estimates (if '-b 1'
	        is specified). If k is the number of classes, for decision values,
	        each element includes results of predicting k binary-class
	        SVMs. if k = 2 and solver is not MCSVM_CS, only one decision value
	        is returned. For probabilities, each element contains k values
	        indicating the probability that the testing instance is in each class.
	        Note that the order of classes here is the same as 'model.label'
	        field in the model structure.
	"""
	# Default message sink; replaced by print_null when '-q' is given.
	def info(s):
		print(s)
	# Normalize x into one of the supported container types.
	if scipy and isinstance(x, scipy.ndarray):
		x = scipy.ascontiguousarray(x) # enforce row-major
	elif sparse and isinstance(x, sparse.spmatrix):
		x = x.tocsr()
	elif not isinstance(x, (list, tuple)):
		raise TypeError("type of x: {0} is not supported!".format(type(x)))
	if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, scipy.ndarray))):
		raise TypeError("type of y: {0} is not supported!".format(type(y)))
	# Parse the options string.
	predict_probability = 0
	argv = options.split()
	i = 0
	while i < len(argv):
		if argv[i] == '-b':
			i += 1
			predict_probability = int(argv[i])
		elif argv[i] == '-q':
			# print_null comes from the liblinear module; silences all output.
			info = print_null
		else:
			raise ValueError("Wrong options")
		i+=1
	solver_type = m.param.solver_type
	nr_class = m.get_nr_class()
	nr_feature = m.get_nr_feature()
	is_prob_model = m.is_probability_model()
	bias = m.bias
	# Pre-build the bias feature node appended to every instance; index -1
	# marks it unused when the model has no bias term.
	if bias >= 0:
		biasterm = feature_node(nr_feature+1, bias)
	else:
		biasterm = feature_node(-1, bias)
	pred_labels = []
	pred_values = []
	if scipy and isinstance(x, sparse.spmatrix):
		nr_instance = x.shape[0]
	else:
		nr_instance = len(x)
	if predict_probability:
		if not is_prob_model:
			raise TypeError('probability output is only supported for logistic regression')
		# Reusable ctypes buffer filled by the C predictor for each instance.
		prob_estimates = (c_double * nr_class)()
		for i in range(nr_instance):
			if scipy and isinstance(x, sparse.spmatrix):
				# Slice the CSR row for instance i without densifying.
				indslice = slice(x.indptr[i], x.indptr[i+1])
				xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), feature_max=nr_feature)
			else:
				xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)
			# Slot just before the terminating (-1) node holds the bias term.
			xi[-2] = biasterm
			label = liblinear.predict_probability(m, xi, prob_estimates)
			values = prob_estimates[:nr_class]
			pred_labels += [label]
			pred_values += [values]
	else:
		# Binary classification yields a single decision value.
		if nr_class <= 2:
			nr_classifier = 1
		else:
			nr_classifier = nr_class
		dec_values = (c_double * nr_classifier)()
		for i in range(nr_instance):
			if scipy and isinstance(x, sparse.spmatrix):
				# Slice the CSR row for instance i without densifying.
				indslice = slice(x.indptr[i], x.indptr[i+1])
				xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), feature_max=nr_feature)
			else:
				xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)
			# Slot just before the terminating (-1) node holds the bias term.
			xi[-2] = biasterm
			label = liblinear.predict_values(m, xi, dec_values)
			values = dec_values[:nr_classifier]
			pred_labels += [label]
			pred_values += [values]
	# Without true labels, report accuracy against dummy zeros.
	if len(y) == 0:
		y = [0] * nr_instance
	ACC, MSE, SCC = evaluations(y, pred_labels)
	if m.is_regression_model():
		info("Mean squared error = %g (regression)" % MSE)
		info("Squared correlation coefficient = %g (regression)" % SCC)
	else:
		info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(round(nr_instance*ACC/100)), nr_instance))
	return pred_labels, (ACC, MSE, SCC), pred_values
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
from oslo_utils import timeutils
from six.moves import range
from testtools import matchers
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.models import revoke_model
from keystone.revoke.backends import sql
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import test_backend_sql
from keystone.token.providers import common
CONF = keystone.conf.CONF
def _future_time():
    """Return a timestamp 1000 seconds in the future."""
    return timeutils.utcnow() + datetime.timedelta(seconds=1000)
def _past_time():
    """Return a timestamp 1000 days in the past."""
    return timeutils.utcnow() + datetime.timedelta(days=-1000)
def _sample_blank_token():
    """Return blank token data issued two minutes in the past."""
    issued_at = timeutils.utcnow() - datetime.timedelta(minutes=2)
    return revoke_model.blank_token_data(issued_at)
def _sample_data():
    """Return 3 user/project/role ids and three cross-paired project tokens.

    For testing purposes, create 3 project tokens with a different user_id,
    role_id, and project_id which will be used to verify that revoking by
    grant on certain user_id, project_id, and role_id pairs leaves these
    project_tokens unrevoked if only one of the revoked columns are matched
    but not all of them as the expected behavior dictates.
    """
    user_ids = [uuid.uuid4().hex for _ in range(3)]
    project_ids = [uuid.uuid4().hex for _ in range(3)]
    role_ids = [uuid.uuid4().hex for _ in range(3)]

    # (user index, project index, role index) for each sample token: each
    # token differs from (user 0, project 0, role 0) in exactly one column.
    pairings = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
    project_tokens = []
    for user_idx, project_idx, role_idx in pairings:
        token = _sample_blank_token()
        token['user_id'] = user_ids[user_idx]
        token['project_id'] = project_ids[project_idx]
        token['roles'] = [role_ids[role_idx]]
        project_tokens.append(token)
    return user_ids, project_ids, role_ids, project_tokens
def _matches(event, token_values):
    """See if the token matches the revocation event.

    Used as a secondary, brute-force check on the tree-based matching
    logic: compare each attribute of the event with the corresponding
    value from the token. If the event does not have a value for the
    attribute, a match is still possible. If the event has a value for
    the attribute, and it does not match the token, no match is
    possible, so skip the remaining checks.

    :param event: one revocation event to match
    :param token_values: dictionary with set of values taken from the
                         token
    :returns: True if the token matches the revocation event, indicating the
              token has been revoked
    """
    # A token can carry the revoked user id in any of three fields.
    if event.user_id is not None:
        user_fields = ('user_id', 'trustor_id', 'trustee_id')
        if not any(event.user_id == token_values[f] for f in user_fields):
            return False

    # A token can carry the revoked domain id in either of two fields.
    if event.domain_id is not None:
        domain_fields = ('identity_domain_id', 'assignment_domain_id')
        if not any(event.domain_id == token_values[f] for f in domain_fields):
            return False

    if event.domain_scope_id is not None:
        if event.domain_scope_id != token_values['assignment_domain_id']:
            return False

    # The remaining scalar attributes must match exactly whenever the
    # event has them set; each early False means the token is still valid.
    for name in ('project_id', 'expires_at', 'trust_id', 'consumer_id',
                 'access_token_id', 'audit_id', 'audit_chain_id'):
        event_value = getattr(event, name)
        if event_value is not None and event_value != token_values[name]:
            return False

    # The revoked role must appear among the token's roles.
    if event.role_id is not None:
        if event.role_id not in token_values['roles']:
            return False

    # Only tokens issued before the event was recorded are revoked.
    if token_values['issued_at'] > event.issued_before:
        return False
    return True
class RevokeTests(object):
def _assertTokenRevoked(self, events, token_data):
backend = sql.Revoke()
if events:
self.assertTrue(revoke_model.is_revoked(events, token_data),
'Token should be revoked')
return self.assertTrue(
revoke_model.is_revoked(backend.list_events(token=token_data),
token_data), 'Token should be revoked')
def _assertTokenNotRevoked(self, events, token_data):
backend = sql.Revoke()
if events:
self.assertTrue(revoke_model.is_revoked(events, token_data),
'Token should be revoked')
return self.assertFalse(
revoke_model.is_revoked(backend.list_events(token=token_data),
token_data), 'Token should not be revoked')
def test_list(self):
self.revoke_api.revoke_by_user(user_id=1)
self.assertEqual(1, len(self.revoke_api.list_events()))
self.revoke_api.revoke_by_user(user_id=2)
self.assertEqual(2, len(self.revoke_api.list_events()))
def test_list_since(self):
self.revoke_api.revoke_by_user(user_id=1)
self.revoke_api.revoke_by_user(user_id=2)
past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
self.assertEqual(2, len(self.revoke_api.list_events(last_fetch=past)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
self.assertEqual(0,
len(self.revoke_api.list_events(last_fetch=future)))
    def test_list_revoked_user(self):
        """list_events(token=...) returns only events matching the token's user."""
        revocation_backend = sql.Revoke()
        events = []
        # This simulates creating a token for a specific user. When we revoke
        # the token we should have a single revocation event in the list. We
        # are going to assert that the token values match the only revocation
        # event in the backend.
        first_token = _sample_blank_token()
        first_token['user_id'] = uuid.uuid4().hex
        add_event(
            events, revoke_model.RevokeEvent(user_id=first_token['user_id'])
        )
        self.revoke_api.revoke_by_user(user_id=first_token['user_id'])
        self._assertTokenRevoked(events, first_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=first_token))
        )
        # This simulates creating a separate token for a separate user. We are
        # going to revoke the token just like we did for the previous token.
        # We should have two revocation events stored in the backend but only
        # one should match the values of the second token.
        second_token = _sample_blank_token()
        second_token['user_id'] = uuid.uuid4().hex
        add_event(
            events, revoke_model.RevokeEvent(user_id=second_token['user_id'])
        )
        self.revoke_api.revoke_by_user(user_id=second_token['user_id'])
        self._assertTokenRevoked(events, second_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=second_token))
        )
        # This simulates creating another separate token for a separate user,
        # but we're not going to issue a revocation event. Even though we have
        # two revocation events persisted in the backend, neither of them
        # should match the values of the third token. If they did - our
        # revocation event matching would be too heavy handed, which would
        # result in over-generalized revocation patterns.
        third_token = _sample_blank_token()
        third_token['user_id'] = uuid.uuid4().hex
        self._assertTokenNotRevoked(events, third_token)
        self.assertEqual(
            0, len(revocation_backend.list_events(token=third_token))
        )
        # This gets a token but overrides the user_id of the token to be None.
        # Technically this should never happen because tokens must belong to
        # a user. What we're testing here is that the two revocation events
        # we've created won't match None values for the user_id.
        fourth_token = _sample_blank_token()
        fourth_token['user_id'] = None
        self._assertTokenNotRevoked(events, fourth_token)
        self.assertEqual(
            0, len(revocation_backend.list_events(token=fourth_token))
        )
def test_list_revoked_project(self):
revocation_backend = sql.Revoke()
events = []
token = _sample_blank_token()
# Create a token for a project, revoke token, check the token we
# created has been revoked, and check the list returned a match for
# the token when passed in.
first_token = _sample_blank_token()
first_token['project_id'] = uuid.uuid4().hex
add_event(events, revoke_model.RevokeEvent(
project_id=first_token['project_id']))
revocation_backend.revoke(revoke_model.RevokeEvent(
project_id=first_token['project_id']))
self._assertTokenRevoked(events, first_token)
self.assertEqual(1, len(revocation_backend.list_events(
token=first_token)))
# Create a second token, revoke it, check the token has been revoked,
# and check the list to make sure that even though we now have 2
# revoked events in the revocation list, it will only return 1 because
# only one match for our second_token should exist
second_token = _sample_blank_token()
second_token['project_id'] = uuid.uuid4().hex
add_event(events, revoke_model.RevokeEvent(
project_id=second_token['project_id']))
revocation_backend.revoke(revoke_model.RevokeEvent(
project_id=second_token['project_id']))
self._assertTokenRevoked(events, second_token)
self.assertEqual(
1, len(revocation_backend.list_events(token=second_token)))
# This gets a token but overrides project_id of the token to be None.
# We expect that since there are two events which both have populated
# project_ids, this should not match this third_token with any other
# event in the list so we should receive 0.
third_token = _sample_blank_token()
third_token['project_id'] = None
self._assertTokenNotRevoked(events, token)
self.assertEqual(0, len(revocation_backend.list_events(token=token)))
    def test_list_revoked_audit(self):
        """Matching on audit_id: each revocation matches only the token
        carrying that audit_id, and an audit_id of None matches nothing."""
        revocation_backend = sql.Revoke()
        events = []
        # Create a token with audit_id set, revoke it, check it is revoked,
        # check to make sure that list_events matches the token to the event we
        # just revoked.
        first_token = _sample_blank_token()
        first_token['audit_id'] = common.random_urlsafe_str()
        add_event(events, revoke_model.RevokeEvent(
            audit_id=first_token['audit_id']))
        self.revoke_api.revoke_by_audit_id(
            audit_id=first_token['audit_id'])
        self._assertTokenRevoked(events, first_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=first_token)))
        # Create a second token, revoke it, check it is revoked, check to make
        # sure that list_events only finds 1 match even though two events are
        # now stored, because each event carries a distinct audit_id
        second_token = _sample_blank_token()
        second_token['audit_id'] = common.random_urlsafe_str()
        add_event(events, revoke_model.RevokeEvent(
            audit_id=second_token['audit_id']))
        self.revoke_api.revoke_by_audit_id(
            audit_id=second_token['audit_id'])
        self._assertTokenRevoked(events, second_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=second_token)))
        # Create a third token with audit_id set to None to make sure that
        # since there are no events currently revoked with audit_id None this
        # finds no matches
        third_token = _sample_blank_token()
        third_token['audit_id'] = None
        self._assertTokenNotRevoked(events, third_token)
        self.assertEqual(
            0, len(revocation_backend.list_events(token=third_token)))
    def test_list_revoked_since(self):
        """Events only match tokens issued before the event was recorded."""
        revocation_backend = sql.Revoke()
        token = _sample_blank_token()
        # Two user-wide revocations (user_id=None) both match the blank token.
        self.revoke_api.revoke_by_user(user_id=None)
        self.revoke_api.revoke_by_user(user_id=None)
        self.assertEqual(2, len(revocation_backend.list_events(token=token)))
        # A token issued well after both events matches neither of them.
        future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
        token['issued_at'] = future
        self.assertEqual(0, len(revocation_backend.list_events(token=token)))
    def test_list_revoked_multiple_filters(self):
        """Events with several populated fields match only tokens whose
        corresponding fields are all populated and equal."""
        revocation_backend = sql.Revoke()
        events = []
        # create token that sets key/value filters in list_revoked
        first_token = _sample_blank_token()
        first_token['user_id'] = uuid.uuid4().hex
        first_token['project_id'] = uuid.uuid4().hex
        first_token['audit_id'] = common.random_urlsafe_str()
        # revoke event and then verify that that there is only one revocation
        # and verify the only revoked event is the token
        add_event(events, revoke_model.RevokeEvent(
            user_id=first_token['user_id'],
            project_id=first_token['project_id'],
            audit_id=first_token['audit_id']))
        self.revoke_api.revoke(revoke_model.RevokeEvent(
            user_id=first_token['user_id'],
            project_id=first_token['project_id'],
            audit_id=first_token['audit_id']))
        self._assertTokenRevoked(events, first_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=first_token)))
        # If a token has None values which the event contains it shouldn't
        # match and not be revoked
        second_token = _sample_blank_token()
        self._assertTokenNotRevoked(events, second_token)
        self.assertEqual(
            0, len(revocation_backend.list_events(token=second_token)))
        # If an event column and corresponding dict value don't match, then
        # it should not add the event in the list. Demonstrate for project
        third_token = _sample_blank_token()
        third_token['project_id'] = uuid.uuid4().hex
        self._assertTokenNotRevoked(events, third_token)
        self.assertEqual(
            0, len(revocation_backend.list_events(token=third_token)))
        # A revoked event with user_id as null and token user_id non null
        # should still be return an event and be revoked if other non null
        # event fields match non null token fields
        fourth_token = _sample_blank_token()
        fourth_token['user_id'] = uuid.uuid4().hex
        fourth_token['project_id'] = uuid.uuid4().hex
        fourth_token['audit_id'] = common.random_urlsafe_str()
        add_event(events, revoke_model.RevokeEvent(
            project_id=fourth_token['project_id'],
            audit_id=fourth_token['audit_id']))
        self.revoke_api.revoke(revoke_model.RevokeEvent(
            project_id=fourth_token['project_id'],
            audit_id=fourth_token['audit_id']))
        self._assertTokenRevoked(events, fourth_token)
        self.assertEqual(
            1, len(revocation_backend.list_events(token=fourth_token)))
def _user_field_test(self, field_name):
token = _sample_blank_token()
token[field_name] = uuid.uuid4().hex
self.revoke_api.revoke_by_user(user_id=token[field_name])
self._assertTokenRevoked(None, token)
token2 = _sample_blank_token()
token2[field_name] = uuid.uuid4().hex
self._assertTokenNotRevoked(None, token2)
    def test_revoke_by_user(self):
        # revoke_by_user must match a token's own user_id.
        self._user_field_test('user_id')
    def test_revoke_by_user_matches_trustee(self):
        # revoke_by_user must also match a trust token's trustee_id.
        self._user_field_test('trustee_id')
    def test_revoke_by_user_matches_trustor(self):
        # revoke_by_user must also match a trust token's trustor_id.
        self._user_field_test('trustor_id')
def test_revoke_by_audit_id(self):
token = _sample_blank_token()
# Audit ID and Audit Chain ID are populated with the same value
# if the token is an original token
token['audit_id'] = uuid.uuid4().hex
token['audit_chain_id'] = token['audit_id']
self.revoke_api.revoke_by_audit_id(audit_id=token['audit_id'])
self._assertTokenRevoked(None, token)
token2 = _sample_blank_token()
token2['audit_id'] = uuid.uuid4().hex
token2['audit_chain_id'] = token2['audit_id']
self._assertTokenNotRevoked(None, token2)
    def test_by_project_grant(self):
        """Revoking a (role, user, project) grant invalidates exactly the
        tokens scoped with that combination, leaving others untouched."""
        user_ids, project_ids, role_ids, project_tokens = _sample_data()
        token1 = _sample_blank_token()
        token1['roles'] = role_ids[0]
        token1['user_id'] = user_ids[0]
        token1['project_id'] = project_ids[0]
        token2 = _sample_blank_token()
        token2['roles'] = role_ids[1]
        token2['user_id'] = user_ids[1]
        token2['project_id'] = project_ids[1]
        token3 = _sample_blank_token()
        token3['roles'] = [role_ids[0],
                           role_ids[1],
                           role_ids[2]]
        token3['user_id'] = user_ids[2]
        token3['project_id'] = project_ids[2]
        # Check that no tokens are revoked at the start
        self._assertTokenNotRevoked(None, token1)
        self._assertTokenNotRevoked(None, token2)
        self._assertTokenNotRevoked(None, token3)
        for token in project_tokens:
            self._assertTokenNotRevoked(None, token)
        self.revoke_api.revoke_by_grant(role_id=role_ids[0],
                                        user_id=user_ids[0],
                                        project_id=project_ids[0])
        # Only the first token should be revoked
        self._assertTokenRevoked(None, token1)
        self._assertTokenNotRevoked(None, token2)
        self._assertTokenNotRevoked(None, token3)
        for token in project_tokens:
            self._assertTokenNotRevoked(None, token)
        self.revoke_api.revoke_by_grant(role_id=role_ids[1],
                                        user_id=user_ids[1],
                                        project_id=project_ids[1])
        # Tokens 1 and 2 should be revoked now
        self._assertTokenRevoked(None, token1)
        self._assertTokenRevoked(None, token2)
        self._assertTokenNotRevoked(None, token3)
        for token in project_tokens:
            self._assertTokenNotRevoked(None, token)
        # test that multiple roles with a single user and project get revoked
        # and invalidate token3
        self.revoke_api.revoke_by_grant(role_id=role_ids[0],
                                        user_id=user_ids[2],
                                        project_id=project_ids[2])
        self.revoke_api.revoke_by_grant(role_id=role_ids[1],
                                        user_id=user_ids[2],
                                        project_id=project_ids[2])
        self.revoke_api.revoke_by_grant(role_id=role_ids[2],
                                        user_id=user_ids[2],
                                        project_id=project_ids[2])
        # Tokens 1, 2, and 3 should now be revoked leaving project_tokens
        # unrevoked.
        self._assertTokenRevoked(None, token1)
        self._assertTokenRevoked(None, token2)
        self._assertTokenRevoked(None, token3)
        for token in project_tokens:
            self._assertTokenNotRevoked(None, token)
    @mock.patch.object(timeutils, 'utcnow')
    def test_expired_events_are_removed(self, mock_utcnow):
        """Old expired events are purged, but a re-issued revocation still
        invalidates the token after the purge."""
        def _sample_token_values():
            # Token that expires at some point in the future.
            token = _sample_blank_token()
            token['expires_at'] = utils.isotime(_future_time(),
                                                subsecond=True)
            return token
        now = datetime.datetime.utcnow()
        now_plus_2h = now + datetime.timedelta(hours=2)
        mock_utcnow.return_value = now
        # Build a token and validate it. This will seed the cache for the
        # future 'synchronize' call.
        token_values = _sample_token_values()
        audit_chain_id = uuid.uuid4().hex
        self.revoke_api.revoke_by_audit_chain_id(audit_chain_id)
        token_values['audit_chain_id'] = audit_chain_id
        self.assertRaises(exception.TokenNotFound,
                          self.revoke_api.check_token,
                          token_values)
        # Move our clock forward by 2h, build a new token and validate it.
        # 'synchronize' should now be exercised and remove old expired events
        mock_utcnow.return_value = now_plus_2h
        self.revoke_api.revoke_by_audit_chain_id(audit_chain_id)
        # two hours later, it should still be not found
        self.assertRaises(exception.TokenNotFound,
                          self.revoke_api.check_token,
                          token_values)
    def test_delete_group_without_role_does_not_revoke_users(self):
        """Deleting a group only emits revocation events when the group has
        both role assignments and members."""
        revocation_backend = sql.Revoke()
        domain = unit.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        # Create two groups. Group1 will be used to test deleting a group,
        # without role assignments and users in the group, doesn't create
        # revoked events. Group2 will show that deleting a group with role
        # assignment and users in the group does create revoked events
        group1 = unit.new_group_ref(domain_id=domain['id'])
        group1 = self.identity_api.create_group(group1)
        group2 = unit.new_group_ref(domain_id=domain['id'])
        group2 = self.identity_api.create_group(group2)
        role = unit.new_role_ref()
        self.role_api.create_role(role['id'], role)
        user1 = unit.new_user_ref(domain_id=domain['id'])
        user1 = self.identity_api.create_user(user1)
        user2 = unit.new_user_ref(domain_id=domain['id'])
        user2 = self.identity_api.create_user(user2)
        # Add two users to the group, verify they are added, delete group, and
        # check that the revocation events have not been created
        self.identity_api.add_user_to_group(user_id=user1['id'],
                                            group_id=group1['id'])
        self.identity_api.add_user_to_group(user_id=user2['id'],
                                            group_id=group1['id'])
        self.assertEqual(
            2, len(self.identity_api.list_users_in_group(group1['id'])))
        self.identity_api.delete_group(group1['id'])
        self.assertEqual(0, len(revocation_backend.list_events()))
        # Assign a role to the group, add two users to the group, verify that
        # the role has been assigned to the group, verify the users have been
        # added to the group, delete the group, check that the revocation
        # events have been created
        self.assignment_api.create_grant(group_id=group2['id'],
                                         domain_id=domain['id'],
                                         role_id=role['id'])
        grants = self.assignment_api.list_role_assignments(role_id=role['id'])
        self.assertThat(grants, matchers.HasLength(1))
        self.identity_api.add_user_to_group(user_id=user1['id'],
                                            group_id=group2['id'])
        self.identity_api.add_user_to_group(user_id=user2['id'],
                                            group_id=group2['id'])
        self.assertEqual(
            2, len(self.identity_api.list_users_in_group(group2['id'])))
        self.identity_api.delete_group(group2['id'])
        # One revocation event per member of the deleted group.
        self.assertEqual(2, len(revocation_backend.list_events()))
class UUIDSqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
    """Run the revocation tests against the SQL backend with UUID tokens."""
    def config_overrides(self):
        super(UUIDSqlRevokeTests, self).config_overrides()
        # Force revocation-event checking (not revoke-by-id token lists).
        self.config_fixture.config(
            group='token',
            provider='uuid',
            revoke_by_id=False)
class FernetSqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
    """Run the revocation tests against the SQL backend with Fernet tokens."""
    def config_overrides(self):
        super(FernetSqlRevokeTests, self).config_overrides()
        # Force revocation-event checking (not revoke-by-id token lists).
        self.config_fixture.config(
            group='token',
            provider='fernet',
            revoke_by_id=False)
        # Fernet tokens require a key repository to sign/encrypt payloads.
        self.useFixture(
            ksfixtures.KeyRepository(
                self.config_fixture,
                'fernet_tokens',
                CONF.fernet_tokens.max_active_keys
            )
        )
def add_event(events, event):
    """Record *event* in the given list and hand it back for chaining."""
    events.insert(len(events), event)
    return event
def remove_event(events, event):
    """Remove every entry equal to *event* from *events*, in place.

    The previous implementation removed items while iterating the same
    list, which skips the element following each removal and could leave
    matching events behind. Rebuilding the list in place avoids that while
    keeping the caller's list object (and aliases to it) up to date.
    """
    events[:] = [target for target in events if not (target == event)]
class RevokeListTests(unit.TestCase):
    """Exercise revoke_model.is_revoked() against an in-memory event list.

    ``self.revoke_events`` is the list passed to the revocation model,
    while ``self.events`` mirrors it for the _matches()-based cross-checks
    performed by the assertion helpers below.
    """
    def setUp(self):
        super(RevokeListTests, self).setUp()
        self.events = []
        self.revoke_events = list()
    def _assertTokenRevoked(self, token_data):
        # The token must both match a mirrored event and be reported
        # revoked by the model helper.
        self.assertTrue(any([_matches(e, token_data) for e in self.events]))
        return self.assertTrue(
            revoke_model.is_revoked(self.revoke_events, token_data),
            'Token should be revoked')
    def _assertTokenNotRevoked(self, token_data):
        self.assertFalse(any([_matches(e, token_data) for e in self.events]))
        return self.assertFalse(
            revoke_model.is_revoked(self.revoke_events, token_data),
            'Token should not be revoked')
    def _revoke_by_user(self, user_id):
        # NOTE: unlike the helpers below, this one does not append to
        # self.events; callers append the returned event themselves.
        return add_event(
            self.revoke_events,
            revoke_model.RevokeEvent(user_id=user_id))
    def _revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
                                  domain_id=None):
        event = add_event(
            self.revoke_events,
            revoke_model.RevokeEvent(audit_chain_id=audit_chain_id,
                                     project_id=project_id,
                                     domain_id=domain_id)
        )
        self.events.append(event)
        return event
    def _revoke_by_expiration(self, user_id, expires_at, project_id=None,
                              domain_id=None):
        event = add_event(
            self.revoke_events,
            revoke_model.RevokeEvent(user_id=user_id,
                                     expires_at=expires_at,
                                     project_id=project_id,
                                     domain_id=domain_id))
        self.events.append(event)
        return event
    def _revoke_by_user_and_project(self, user_id, project_id):
        event = add_event(self.revoke_events,
                          revoke_model.RevokeEvent(project_id=project_id,
                                                   user_id=user_id))
        self.events.append(event)
        return event
    def _revoke_by_project_role_assignment(self, project_id, role_id):
        event = add_event(self.revoke_events,
                          revoke_model.RevokeEvent(project_id=project_id,
                                                   role_id=role_id))
        self.events.append(event)
        return event
    def _revoke_by_domain_role_assignment(self, domain_id, role_id):
        event = add_event(self.revoke_events,
                          revoke_model.RevokeEvent(domain_id=domain_id,
                                                   role_id=role_id))
        self.events.append(event)
        return event
    def _revoke_by_domain(self, domain_id):
        # NOTE(review): unlike its siblings this helper does not return the
        # event; callers currently ignore the return value, so this is only
        # an inconsistency, not a bug.
        event = add_event(self.revoke_events,
                          revoke_model.RevokeEvent(domain_id=domain_id))
        self.events.append(event)
    def test_revoke_by_audit_chain_id(self):
        audit_id = common.build_audit_info(parent_audit_id=None)[0]
        token_data_1 = _sample_blank_token()
        # Audit ID and Audit Chain ID are populated with the same value
        # if the token is an original token
        token_data_1['audit_id'] = audit_id
        token_data_1['audit_chain_id'] = audit_id
        event = self._revoke_by_audit_chain_id(audit_id)
        self._assertTokenRevoked(token_data_1)
        # A rescoped token keeps the chain id but gets a new audit_id; it
        # must still be revoked by the chain-id event.
        audit_id_2 = common.build_audit_info(parent_audit_id=audit_id)[0]
        token_data_2 = _sample_blank_token()
        token_data_2['audit_id'] = audit_id_2
        token_data_2['audit_chain_id'] = audit_id
        self._assertTokenRevoked(token_data_2)
        # Removing the event restores both tokens to validity.
        self.remove_event(event)
        self._assertTokenNotRevoked(token_data_1)
        self._assertTokenNotRevoked(token_data_2)
    def remove_event(self, event):
        # Drop the event from both the mirror and the model's list.
        self.events.remove(event)
        remove_event(self.revoke_events, event)
    def test_by_project_and_user_and_role(self):
        user_id1 = uuid.uuid4().hex
        user_id2 = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        self.events.append(self._revoke_by_user(user_id1))
        self.events.append(
            self._revoke_by_user_and_project(user_id2, project_id))
        token_data = _sample_blank_token()
        token_data['user_id'] = user_id2
        token_data['project_id'] = project_id
        self._assertTokenRevoked(token_data)
    def test_by_domain_user(self):
        # If revoke a domain, then a token for a user in the domain is revoked
        user_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        token_data = _sample_blank_token()
        token_data['user_id'] = user_id
        token_data['identity_domain_id'] = domain_id
        self._revoke_by_domain(domain_id)
        self._assertTokenRevoked(token_data)
    def test_by_domain_project(self):
        # If revoke a domain, then a token scoped to a project in the domain
        # is revoked.
        user_id = uuid.uuid4().hex
        user_domain_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        project_domain_id = uuid.uuid4().hex
        token_data = _sample_blank_token()
        token_data['user_id'] = user_id
        token_data['identity_domain_id'] = user_domain_id
        token_data['project_id'] = project_id
        token_data['assignment_domain_id'] = project_domain_id
        self._revoke_by_domain(project_domain_id)
        self._assertTokenRevoked(token_data)
    def test_by_domain_domain(self):
        # If revoke a domain, then a token scoped to the domain is revoked.
        user_id = uuid.uuid4().hex
        user_domain_id = uuid.uuid4().hex
        domain_id = uuid.uuid4().hex
        token_data = _sample_blank_token()
        token_data['user_id'] = user_id
        token_data['identity_domain_id'] = user_domain_id
        token_data['assignment_domain_id'] = domain_id
        self._revoke_by_domain(domain_id)
        self._assertTokenRevoked(token_data)
    def _assertEmpty(self, collection):
        return self.assertEqual(0, len(collection), "collection not empty")
    def test_cleanup(self):
        """Removing every recorded event leaves the model list empty."""
        events = self.events
        self._assertEmpty(self.revoke_events)
        for i in range(0, 10):
            events.append(
                self._revoke_by_project_role_assignment(uuid.uuid4().hex,
                                                        uuid.uuid4().hex))
            events.append(
                self._revoke_by_domain_role_assignment(uuid.uuid4().hex,
                                                       uuid.uuid4().hex))
            events.append(
                self._revoke_by_domain_role_assignment(uuid.uuid4().hex,
                                                       uuid.uuid4().hex))
            events.append(
                self._revoke_by_user_and_project(uuid.uuid4().hex,
                                                 uuid.uuid4().hex))
        for event in self.events:
            remove_event(self.revoke_events, event)
        self._assertEmpty(self.revoke_events)
| |
from collections import namedtuple
import logging
import json
from numbers import Number
# For compatibility under py2 to consider unicode as str
from typing import Optional
from six import string_types
import ray
from ray.tune import TuneError
logger = logging.getLogger(__name__)
class Resources(
        namedtuple("Resources", [
            "cpu", "gpu", "memory", "object_store_memory", "extra_cpu",
            "extra_gpu", "extra_memory", "extra_object_store_memory",
            "custom_resources", "extra_custom_resources", "has_placement_group"
        ])):
    """Ray resources required to schedule a trial.

    Parameters:
        cpu (float): Number of CPUs to allocate to the trial.
        gpu (float): Number of GPUs to allocate to the trial.
        memory (float): Memory to reserve for the trial.
        object_store_memory (float): Object store memory to reserve.
        extra_cpu (float): Extra CPUs to reserve in case the trial needs to
            launch additional Ray actors that use CPUs.
        extra_gpu (float): Extra GPUs to reserve in case the trial needs to
            launch additional Ray actors that use GPUs.
        extra_memory (float): Memory to reserve for the trial launching
            additional Ray actors that use memory.
        extra_object_store_memory (float): Object store memory to reserve for
            the trial launching additional Ray actors that use object store
            memory.
        custom_resources (dict): Mapping of resource to quantity to allocate
            to the trial.
        extra_custom_resources (dict): Extra custom resources to reserve in
            case the trial needs to launch additional Ray actors that use
            any of these custom resources.
        has_placement_group (bool): Bool indicating if the trial also
            has an associated placement group.
    """
    __slots__ = ()
    def __new__(cls,
                cpu,
                gpu,
                memory=0,
                object_store_memory=0,
                extra_cpu=0,
                extra_gpu=0,
                extra_memory=0,
                extra_object_store_memory=0,
                custom_resources=None,
                extra_custom_resources=None,
                has_placement_group=False):
        custom_resources = custom_resources or {}
        extra_custom_resources = extra_custom_resources or {}
        # Symmetric difference: keys present in only one of the two dicts.
        # Fill them with 0 so both dicts end up with identical key sets.
        leftovers = set(custom_resources) ^ set(extra_custom_resources)
        for value in leftovers:
            custom_resources.setdefault(value, 0)
            extra_custom_resources.setdefault(value, 0)
        # All quantities are normalized to 2 decimal places.
        cpu = round(cpu, 2)
        gpu = round(gpu, 2)
        memory = round(memory, 2)
        object_store_memory = round(object_store_memory, 2)
        extra_cpu = round(extra_cpu, 2)
        extra_gpu = round(extra_gpu, 2)
        extra_memory = round(extra_memory, 2)
        extra_object_store_memory = round(extra_object_store_memory, 2)
        custom_resources = {
            resource: round(value, 2)
            for resource, value in custom_resources.items()
        }
        extra_custom_resources = {
            resource: round(value, 2)
            for resource, value in extra_custom_resources.items()
        }
        all_values = [
            cpu, gpu, memory, object_store_memory, extra_cpu, extra_gpu,
            extra_memory, extra_object_store_memory
        ]
        all_values += list(custom_resources.values())
        all_values += list(extra_custom_resources.values())
        assert len(custom_resources) == len(extra_custom_resources)
        # Reject non-numeric quantities early.
        for entry in all_values:
            assert isinstance(entry, Number), ("Improper resource value.",
                                               entry)
        return super(Resources, cls).__new__(
            cls, cpu, gpu, memory, object_store_memory, extra_cpu, extra_gpu,
            extra_memory, extra_object_store_memory, custom_resources,
            extra_custom_resources, has_placement_group)
    def summary_string(self):
        """Return a human-readable one-line summary of total resources."""
        summary = "{} CPUs, {} GPUs".format(self.cpu + self.extra_cpu,
                                            self.gpu + self.extra_gpu)
        if self.memory or self.extra_memory:
            summary += ", {} GiB heap".format(
                round((self.memory + self.extra_memory) / (1024**3), 2))
        if self.object_store_memory or self.extra_object_store_memory:
            summary += ", {} GiB objects".format(
                round(
                    (self.object_store_memory + self.extra_object_store_memory)
                    / (1024**3), 2))
        # Skip internal per-node resources (node-id prefixed) in the summary.
        custom_summary = ", ".join([
            "{} {}".format(self.get_res_total(res), res)
            for res in self.custom_resources
            if not res.startswith(ray.resource_spec.NODE_ID_PREFIX)
        ])
        if custom_summary:
            summary += " ({})".format(custom_summary)
        return summary
    def cpu_total(self):
        """Total CPUs: base plus extra reserved for child actors."""
        return self.cpu + self.extra_cpu
    def gpu_total(self):
        """Total GPUs: base plus extra reserved for child actors."""
        return self.gpu + self.extra_gpu
    def memory_total(self):
        """Total heap memory: base plus extra reserved for child actors."""
        return self.memory + self.extra_memory
    def object_store_memory_total(self):
        """Total object store memory: base plus extra."""
        return self.object_store_memory + self.extra_object_store_memory
    def get_res_total(self, key):
        """Total quantity (base + extra) for custom resource *key*."""
        return self.custom_resources.get(
            key, 0) + self.extra_custom_resources.get(key, 0)
    def get(self, key):
        """Base quantity for custom resource *key* (0 when absent)."""
        return self.custom_resources.get(key, 0)
    def is_nonnegative(self):
        """True if no tracked quantity is negative.

        NOTE(review): memory and object_store_memory fields are not checked
        here — confirm whether that omission is intentional.
        """
        all_values = [self.cpu, self.gpu, self.extra_cpu, self.extra_gpu]
        all_values += list(self.custom_resources.values())
        all_values += list(self.extra_custom_resources.values())
        return all(v >= 0 for v in all_values)
    @classmethod
    def subtract(cls, original, to_remove):
        """Return a new Resources equal to *original* minus *to_remove*.

        NOTE(review): the result's has_placement_group always falls back to
        the default (False) regardless of the operands — confirm intended.
        """
        cpu = original.cpu - to_remove.cpu
        gpu = original.gpu - to_remove.gpu
        memory = original.memory - to_remove.memory
        object_store_memory = (
            original.object_store_memory - to_remove.object_store_memory)
        extra_cpu = original.extra_cpu - to_remove.extra_cpu
        extra_gpu = original.extra_gpu - to_remove.extra_gpu
        extra_memory = original.extra_memory - to_remove.extra_memory
        extra_object_store_memory = (original.extra_object_store_memory -
                                     to_remove.extra_object_store_memory)
        # Subtract over the union of custom keys; missing keys count as 0.
        all_resources = set(original.custom_resources).union(
            set(to_remove.custom_resources))
        new_custom_res = {
            k: original.custom_resources.get(k, 0) -
            to_remove.custom_resources.get(k, 0)
            for k in all_resources
        }
        extra_custom_res = {
            k: original.extra_custom_resources.get(k, 0) -
            to_remove.extra_custom_resources.get(k, 0)
            for k in all_resources
        }
        return Resources(cpu, gpu, memory, object_store_memory, extra_cpu,
                         extra_gpu, extra_memory, extra_object_store_memory,
                         new_custom_res, extra_custom_res)
    def to_json(self):
        """Serialize this Resources to a plain dict via resources_to_json."""
        return resources_to_json(self)
def json_to_resources(data: Optional[str]):
    """Deserialize a resource spec (JSON string or dict) into Resources.

    Returns None for None/"null" input. Raises TuneError for removed legacy
    fields and ValueError for any key that is not a Resources field.
    """
    if data is None or data == "null":
        return None
    if isinstance(data, string_types):
        data = json.loads(data)
    removed_fields = ("driver_cpu_limit", "driver_gpu_limit")
    for field in data:
        if field in removed_fields:
            raise TuneError(
                "The field `{}` is no longer supported. Use `extra_cpu` "
                "or `extra_gpu` instead.".format(field))
        if field not in Resources._fields:
            raise ValueError(
                "Unknown resource field {}, must be one of {}".format(
                    field, Resources._fields))
    lookup = data.get
    return Resources(
        lookup("cpu", 1), lookup("gpu", 0), lookup("memory", 0),
        lookup("object_store_memory", 0), lookup("extra_cpu", 0),
        lookup("extra_gpu", 0), lookup("extra_memory", 0),
        lookup("extra_object_store_memory", 0), lookup("custom_resources"),
        lookup("extra_custom_resources"))
def resources_to_json(resources: Optional[Resources]):
    """Serialize a Resources tuple to a plain dict (None passes through)."""
    if resources is None:
        return None
    scalar_fields = ("cpu", "gpu", "memory", "object_store_memory",
                     "extra_cpu", "extra_gpu", "extra_memory",
                     "extra_object_store_memory")
    # Insertion order matches the original literal: scalars first, then
    # shallow copies of both custom-resource mappings.
    payload = {field: getattr(resources, field) for field in scalar_fields}
    payload["custom_resources"] = resources.custom_resources.copy()
    payload["extra_custom_resources"] = resources.extra_custom_resources.copy()
    return payload
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local daemon's JSON-RPC port, with credentials in the URL
# only when a password was configured above.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:60001")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:60001")
# Dispatch on the first CLI argument: each branch prompts for the RPC
# method's arguments, calls it, and prints the result. Nested try blocks
# retry without the optional arguments when the full call fails.
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a Musiccoin address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a Musiccoin address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands: listing, payment, wallet-management RPCs, and a
# fallback message for unknown commands.
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"
elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
	try:
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		amt = raw_input("Amount:")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendtoaddress(to,amt,comment,commentto)
		except:
			print access.sendtoaddress(to,amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"
elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"
elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# Unlock the wallet for 60 seconds.
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
	print
else:
	print "Command not found or not supported"
| |
from pyVulkan import *
import sdl2
import ctypes
import sys
from pyVulkan._vulkan import _lib
app_name = 'Initialization demo'
# Window dimensions in pixels.
width = 300
height = 300
# Command buffer lazily created by set_image_layout for setup barriers.
setup_cmd = VK_NULL_HANDLE
old_swapchain = VK_NULL_HANDLE
depth_stencil = 1.0
def memory_type_from_properties(typeBits, requirements_mask):
    """Return the index of the first memory type usable for the allocation.

    ``typeBits`` is the memoryTypeBits mask from a memory-requirements
    query (bit i set means memory type i is allowed); a type also has to
    carry every flag in ``requirements_mask``. Relies on the module-level
    ``memory_properties`` being populated before the first call.

    Raises LookupError when no memory type satisfies both conditions — the
    original used ``assert False``, which is silently stripped under
    ``python -O`` and would let the function return None.
    """
    for i, mem_type in enumerate(memory_properties.memoryTypes):
        if typeBits & (1 << i):
            flags = mem_type.propertyFlags
            if (flags & requirements_mask) == requirements_mask:
                return i
    raise LookupError("no memory type matches the requested properties")
def set_image_layout(image, aspect_mask, old_image_layout, new_image_layout, src_access_mask):
    """Record an image layout-transition barrier into the shared setup
    command buffer, allocating and beginning that buffer on first use.

    Relies on module globals ``setup_cmd``, ``cmd_pool`` and ``device``.
    The barrier covers one mip level / one array layer of *image*.
    """
    global setup_cmd
    if setup_cmd == VK_NULL_HANDLE:
        # Lazily allocate a primary command buffer and start recording.
        cmd = VkCommandBufferAllocateInfo(commandPool=cmd_pool,
                                          level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
                                          commandBufferCount=1)
        setup_cmd = vkAllocateCommandBuffers(device, cmd)[0]
        cmd_buf_hinfo = VkCommandBufferInheritanceInfo()
        cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo)
        vkBeginCommandBuffer(setup_cmd, cmd_buf_info)
    image_memory_barrier = VkImageMemoryBarrier(srcAccessMask=src_access_mask,
                                                oldLayout=old_image_layout,
                                                newLayout=new_image_layout,
                                                image=image,
                                                subresourceRange=[aspect_mask, 0, 1, 0, 1])
    # Pick the destination access mask from the layout the image moves to;
    # layouts outside this table keep dstAccessMask at its default.
    dst_stage_masks = {VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: VK_ACCESS_TRANSFER_READ_BIT,
                       VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                       VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                       VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT}
    if new_image_layout in dst_stage_masks:
        image_memory_barrier.dstAccessMask = dst_stage_masks[new_image_layout]
    src_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
    dest_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
    vkCmdPipelineBarrier(setup_cmd, src_stages, dest_stages, 0, 0, None, 0, None, 1, [image_memory_barrier])
#initialize
# Application metadata passed to vkCreateInstance; targets Vulkan 1.0.
app_info = VkApplicationInfo(pApplicationName=app_name,
                             applicationVersion=0,
                             pEngineName=app_name,
                             engineVersion=0,
                             apiVersion=VK_MAKE_VERSION(1, 0, 0))
def string(char_ptr):
    """Convert a cffi ``char*`` into a native ``str``.

    On Python 2 the raw byte string is returned unchanged; on Python 3 the
    bytes are ASCII-decoded.  Relies on the module-level ``ffi`` object
    provided by pyVulkan.
    """
    raw = ffi.string(char_ptr)
    return raw if sys.version_info < (3, 0) else raw.decode('ascii')
def _getInstanceLayers():
    """Pick the best available set of Vulkan validation layers.

    Tries the consolidated LunarG standard-validation meta-layer first,
    then the explicit list of individual layers, and returns the first
    alternative whose members are all present on this system.  Returns
    an empty list when no alternative is fully available.
    """
    instance_validation_layers_alts = [
        ["VK_LAYER_LUNARG_standard_validation"],
        ["VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_parameter_validation",
         "VK_LAYER_LUNARG_device_limits", "VK_LAYER_LUNARG_object_tracker",
         "VK_LAYER_LUNARG_image", "VK_LAYER_LUNARG_core_validation",
         "VK_LAYER_LUNARG_swapchain", "VK_LAYER_GOOGLE_unique_objects"],
    ]
    # Previously this used enumerate() and discarded the index; iterate the
    # properties directly and collect names into a set for O(1) membership.
    available = {string(prop.layerName)
                 for prop in vkEnumerateInstanceLayerProperties()}
    return next((alt for alt in instance_validation_layers_alts
                 if set(alt).issubset(available)), [])
# instance_layers = []
instance_layers = _getInstanceLayers()  # validation layers actually present on this system
extensions = [string(i.extensionName) for i in vkEnumerateInstanceExtensionProperties(None)]
@vkDebugReportCallbackEXT
def dbgFunc(*args):
    # Validation-layer callback; args[6] is the message text (char*).
    print (string(args[6]))
    return True
debug_info = VkDebugReportCallbackCreateInfoEXT(pfnCallback=dbgFunc,
                                                flags=VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT)
# Chaining debug_info through pNext enables debug reports during instance
# creation itself, before the callback object exists.
instance_info = VkInstanceCreateInfo(pApplicationInfo=app_info,
                                     enabledLayerCount=len(instance_layers),
                                     ppEnabledLayerNames=instance_layers,
                                     enabledExtensionCount=len(extensions),
                                     ppEnabledExtensionNames=extensions,
                                     pNext=debug_info)
# --- Optional host allocation callbacks (exercise the allocator API; unused below) ---
ptrs = set()  # keeps cffi-allocated buffers alive until the driver frees them
@vkAllocationFunction
def allocFunc(*args):
    # args[1] is the requested allocation size in bytes.
    temp = ffi.new("char[]", args[1])
    ptrs.add(temp)
    return temp
@vkFreeFunction
def freeFunc(*args):
    # args[1] is the pointer being freed.
    # NOTE(review): assumes the cdata passed back compares equal to the
    # object stored in `ptrs` -- confirm cffi pointer equality here.
    if args[1] != ffi.NULL:
        ptrs.remove(args[1])
@vkReallocationFunction
def reallocFunc(*args):
    raise NotImplementedError()  # placeholder: not expected to be called by this demo
@vkInternalAllocationNotification
def internalAllocNotify(*args):
    raise NotImplementedError()
@vkInternalFreeNotification
def internalFreeNotify(*args):
    raise NotImplementedError()
allocation_callbacks = VkAllocationCallbacks(pUserData=None,
                                             pfnAllocation=allocFunc,
                                             pfnReallocation=reallocFunc,
                                             pfnFree=freeFunc,
                                             pfnInternalAllocation=internalAllocNotify,
                                             pfnInternalFree=internalFreeNotify)
# inst = vkCreateInstance(instance_info, allocation_callbacks)
inst = vkCreateInstance(instance_info)  # custom allocator left disabled; driver default used
# Instance-level extension entry points must be resolved at runtime.
vkDestroySurfaceKHR = vkGetInstanceProcAddr(inst, 'vkDestroySurfaceKHR')
vkGetPhysicalDeviceSurfaceSupportKHR = vkGetInstanceProcAddr(inst, 'vkGetPhysicalDeviceSurfaceSupportKHR')
vkGetPhysicalDeviceSurfaceFormatsKHR = vkGetInstanceProcAddr(inst, 'vkGetPhysicalDeviceSurfaceFormatsKHR')
vkGetPhysicalDeviceSurfaceCapabilitiesKHR = vkGetInstanceProcAddr(inst, 'vkGetPhysicalDeviceSurfaceCapabilitiesKHR')
vkGetPhysicalDeviceSurfacePresentModesKHR = vkGetInstanceProcAddr(inst, 'vkGetPhysicalDeviceSurfacePresentModesKHR')
vkCreateDebugReportCallbackEXT = vkGetInstanceProcAddr(inst, 'vkCreateDebugReportCallbackEXT')
vkDestroyDebugReportCallbackEXT = vkGetInstanceProcAddr(inst, 'vkDestroyDebugReportCallbackEXT')
debug_callback = vkCreateDebugReportCallbackEXT(inst, debug_info)
gpu = vkEnumeratePhysicalDevices(inst)[0]  # simply use the first physical device
gpu_props = vkGetPhysicalDeviceProperties(gpu)
queue_props = vkGetPhysicalDeviceQueueFamilyProperties(gpu)
features = vkGetPhysicalDeviceFeatures(gpu)
##init sdl
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
    print(sdl2.SDL_GetError())
window = sdl2.SDL_CreateWindow(app_name.encode('ascii'), sdl2.SDL_WINDOWPOS_UNDEFINED, sdl2.SDL_WINDOWPOS_UNDEFINED, width, height, 0)
if not window:
    print(sdl2.SDL_GetError())
wm_info = sdl2.SDL_SysWMinfo()
sdl2.SDL_VERSION(wm_info.version)
sdl2.SDL_GetWindowWMInfo(window, ctypes.byref(wm_info))
# Build the Vulkan surface from the native window handle SDL exposes.
if wm_info.subsystem == sdl2.SDL_SYSWM_X11:
    vkCreateXlibSurfaceKHR = vkGetInstanceProcAddr(inst, 'vkCreateXlibSurfaceKHR')
    surface = vkCreateXlibSurfaceKHR(inst, VkXlibSurfaceCreateInfoKHR(dpy=wm_info.info.x11.display, window=wm_info.info.x11.window))
elif wm_info.subsystem == sdl2.SDL_SYSWM_WINDOWS:
    vkCreateWin32SurfaceKHR = vkGetInstanceProcAddr(inst, 'vkCreateWin32SurfaceKHR')
    import win32misc
    hinstance = win32misc.getInstance(wm_info.info.win.window)
    surface = vkCreateWin32SurfaceKHR(inst, VkWin32SurfaceCreateInfoKHR(hinstance=hinstance, hwnd=wm_info.info.win.window))
else:
    assert False  # unsupported windowing subsystem
# --- Select queue families for graphics and presentation ---
# For each queue family, ask whether it can present to our surface.
support_presents = [vkGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface)
                    for i in range(len(queue_props))]
graphics_queue_node_index = None
present_queue_node_index = None
for i, v in enumerate(queue_props):
    if v.queueFlags & VK_QUEUE_GRAPHICS_BIT:
        # Remember the first graphics-capable family.  Compare against None:
        # family index 0 is a valid index but falsy, so the previous
        # `if not graphics_queue_node_index` test could silently overwrite it.
        if graphics_queue_node_index is None:
            graphics_queue_node_index = i
        # Prefer a family that can both draw and present.
        if support_presents[i] == VK_TRUE:
            graphics_queue_node_index = i
            present_queue_node_index = i
            break
if present_queue_node_index is None:
    # No single family does both; fall back to the first present-capable one
    # (the original scan lacked a break and kept the *last* match).
    for i, v in enumerate(support_presents):
        if v == VK_TRUE:
            present_queue_node_index = i
            break
# This demo only supports one family that both draws and presents.
assert (graphics_queue_node_index is not None) and (present_queue_node_index is not None)
assert graphics_queue_node_index == present_queue_node_index
# --- Logical device, queue, surface format, and command pool ---
queue_info = VkDeviceQueueCreateInfo(queueFamilyIndex=graphics_queue_node_index,
                                     queueCount=1,
                                     pQueuePriorities=[0.0])
extensions = [string(i.extensionName) for i in vkEnumerateDeviceExtensionProperties(gpu)]
device_info = VkDeviceCreateInfo(queueCreateInfoCount=1,
                                 pQueueCreateInfos=queue_info,
                                 pEnabledFeatures=VkPhysicalDeviceFeatures(),
                                 ppEnabledLayerNames=[],
                                 ppEnabledExtensionNames=extensions)
device = vkCreateDevice(gpu, device_info)
# Device-level WSI entry points.
vkCreateSwapchainKHR = vkGetDeviceProcAddr(device, 'vkCreateSwapchainKHR')
vkGetSwapchainImagesKHR = vkGetDeviceProcAddr(device, 'vkGetSwapchainImagesKHR')
vkAcquireNextImageKHR = vkGetDeviceProcAddr(device, 'vkAcquireNextImageKHR')
vkQueuePresentKHR = vkGetDeviceProcAddr(device, 'vkQueuePresentKHR')
vkDestroySwapchainKHR = vkGetDeviceProcAddr(device, 'vkDestroySwapchainKHR')
queue = vkGetDeviceQueue(device, graphics_queue_node_index, 0)
surface_formats = vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface)
# A single VK_FORMAT_UNDEFINED entry means the surface has no preferred
# format, so any format may be chosen; otherwise take the first one offered.
if len(surface_formats) == 1 and surface_formats[0].format == VK_FORMAT_UNDEFINED:
    format_ = VK_FORMAT_B8G8R8A8_UNORM
else:
    format_ = surface_formats[0].format
color_space = surface_formats[0].colorSpace
memory_properties = vkGetPhysicalDeviceMemoryProperties(gpu)
cmd_pool_info = VkCommandPoolCreateInfo(queueFamilyIndex=graphics_queue_node_index, flags=VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)
cmd_pool = vkCreateCommandPool(device, cmd_pool_info)
cmd_buffer_info = VkCommandBufferAllocateInfo(commandPool=cmd_pool,
                                              level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
                                              commandBufferCount=1)
draw_cmd = vkAllocateCommandBuffers(device, cmd_buffer_info)[0]
# --- Swapchain creation ---
surface_capabilities = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface)
present_modes = vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface)
# A currentExtent of 0xFFFFFFFF means the surface size is defined by the
# swapchain, so fall back to the window dimensions.
# NOTE(review): this compares a Python int against an ffi cdata value --
# confirm the comparison behaves as intended for the "undefined extent" case.
if surface_capabilities.currentExtent.width == ffi.cast('uint32_t', -1):
    swapchain_extent = VkExtent2D(width=width, height=height)
else:
    swapchain_extent = surface_capabilities.currentExtent
    width = surface_capabilities.currentExtent.width
    height = surface_capabilities.currentExtent.height
# NOTE(review): MAILBOX is requested unconditionally; present_modes is
# queried but never checked, although only FIFO is guaranteed -- confirm.
swapchain_present_mode = VK_PRESENT_MODE_MAILBOX_KHR
# Ask for one more image than the minimum so we are not stalled by the driver.
desiredNumberOfSwapchainImages = surface_capabilities.minImageCount + 1
if (surface_capabilities.maxImageCount > 0) and (desiredNumberOfSwapchainImages > surface_capabilities.maxImageCount):
    desiredNumberOfSwapchainImages = surface_capabilities.maxImageCount
if surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
    pre_transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
else:
    pre_transform = surface_capabilities.currentTransform
swapchain_info = VkSwapchainCreateInfoKHR(surface=surface,
                                          minImageCount=desiredNumberOfSwapchainImages,
                                          imageFormat=format_,
                                          imageColorSpace=color_space,
                                          imageExtent=swapchain_extent,
                                          imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                          preTransform=pre_transform,
                                          compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
                                          imageArrayLayers=1,
                                          imageSharingMode=VK_SHARING_MODE_EXCLUSIVE,
                                          presentMode=swapchain_present_mode,
                                          oldSwapchain=old_swapchain,
                                          clipped=True)
swapchain = vkCreateSwapchainKHR(device, swapchain_info)
swapchain_images = vkGetSwapchainImagesKHR(device, swapchain)
def _getView(image):
    """Transition a swapchain image to PRESENT_SRC and create a 2D color
    view for it (identity component swizzle, single mip/layer)."""
    set_image_layout(image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, 0)
    return vkCreateImageView(device, VkImageViewCreateInfo(format=format_,
                                                           components={'r': VK_COMPONENT_SWIZZLE_R,
                                                                       'g': VK_COMPONENT_SWIZZLE_G,
                                                                       'b': VK_COMPONENT_SWIZZLE_B,
                                                                       'a': VK_COMPONENT_SWIZZLE_A},
                                                           subresourceRange={'aspectMask': VK_IMAGE_ASPECT_COLOR_BIT,
                                                                             'baseMipLevel': 0,
                                                                             'levelCount': 1,
                                                                             'baseArrayLayer': 0,
                                                                             'layerCount': 1},
                                                           viewType=VK_IMAGE_VIEW_TYPE_2D,
                                                           flags=0,
                                                           image=image))
views = [_getView(i) for i in swapchain_images]
current_buffer = 0  # index of the swapchain image currently acquired for rendering
# --- Depth buffer: image, backing memory, and view ---
depth_format = VK_FORMAT_D16_UNORM
image = VkImageCreateInfo(imageType=VK_IMAGE_TYPE_2D,
                          format=depth_format,
                          extent=[width, height, 1],
                          mipLevels=1,
                          arrayLayers=1,
                          samples=VK_SAMPLE_COUNT_1_BIT,
                          tiling=VK_IMAGE_TILING_OPTIMAL,
                          usage=VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
mem_alloc = VkMemoryAllocateInfo()  # size/type filled in after querying requirements
view = VkImageViewCreateInfo(format=depth_format,
                             subresourceRange=VkImageSubresourceRange(aspectMask=VK_IMAGE_ASPECT_DEPTH_BIT,
                                                                      baseMipLevel=0,
                                                                      levelCount=1,
                                                                      baseArrayLayer=0,
                                                                      layerCount=1),
                             viewType=VK_IMAGE_VIEW_TYPE_2D)
depth_image = vkCreateImage(device, image)
mem_reqs = vkGetImageMemoryRequirements(device, depth_image)
mem_alloc.allocationSize = mem_reqs.size
# Requirements mask 0: any memory type allowed by the image will do.
mem_alloc.memoryTypeIndex = memory_type_from_properties(mem_reqs.memoryTypeBits, 0)
depth_mem = vkAllocateMemory(device, mem_alloc)
vkBindImageMemory(device, depth_image, depth_mem, 0)
set_image_layout(depth_image, VK_IMAGE_ASPECT_DEPTH_BIT,
                 VK_IMAGE_LAYOUT_UNDEFINED,
                 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                 0)
view.image = depth_image
depth_view = vkCreateImageView(device, view)
# --- Render pass (one color + one depth attachment) and framebuffers ---
attachments = [VkAttachmentDescription(format=format_,
                                       samples=VK_SAMPLE_COUNT_1_BIT,
                                       loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR,
                                       storeOp=VK_ATTACHMENT_STORE_OP_STORE,
                                       stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                       stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                       initialLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                       finalLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL),
               VkAttachmentDescription(format=depth_format,
                                       samples=VK_SAMPLE_COUNT_1_BIT,
                                       loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR,
                                       storeOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                       stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                       stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                       initialLayout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                       finalLayout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)]
color_reference = VkAttachmentReference(attachment=0, layout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
depth_reference = VkAttachmentReference(attachment=1, layout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
subpass = VkSubpassDescription(pipelineBindPoint=VK_PIPELINE_BIND_POINT_GRAPHICS,
                               colorAttachmentCount=1,
                               pColorAttachments=[color_reference],
                               pDepthStencilAttachment=depth_reference)
rp_info = VkRenderPassCreateInfo(attachmentCount=len(attachments),
                                 pAttachments=attachments,
                                 subpassCount=1,
                                 pSubpasses=[subpass])
render_pass = vkCreateRenderPass(device, rp_info)
# One framebuffer per swapchain image view; all share the single depth view.
framebuffers = [vkCreateFramebuffer(device, VkFramebufferCreateInfo(renderPass=render_pass,
                                                                    attachmentCount=2,
                                                                    pAttachments=[v, depth_view],
                                                                    width=width,
                                                                    height=height,
                                                                    layers=1)) for i, v in enumerate(views)]
def flush_init_cmd():
    """Submit the pending setup command buffer (if any), wait for the
    queue to drain, and free it so later layout transitions start fresh."""
    global setup_cmd
    if setup_cmd:
        vkEndCommandBuffer(setup_cmd)
        submit_info = VkSubmitInfo(commandBufferCount=1, pCommandBuffers=[setup_cmd])
        vkQueueSubmit(queue, 1, submit_info, VK_NULL_HANDLE)
        vkQueueWaitIdle(queue)
        vkFreeCommandBuffers(device, cmd_pool, 1, [setup_cmd])
        setup_cmd = VK_NULL_HANDLE
# Record draw_cmd once (empty) so the handle is in a valid recorded state.
cmd_buf_hinfo = VkCommandBufferInheritanceInfo(occlusionQueryEnable=VK_FALSE)
cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo)
vkBeginCommandBuffer(draw_cmd, cmd_buf_info)
vkEndCommandBuffer(draw_cmd)
def draw():
    """Acquire the next swapchain image, record and submit one (empty)
    frame that just clears the attachments, then present the result."""
    vkDeviceWaitIdle(device)
    present_complete_semaphore = vkCreateSemaphore(device, VkSemaphoreCreateInfo())
    # UINT64_MAX timeout: block until an image is available.
    current_buffer = vkAcquireNextImageKHR(device, swapchain, ffi.cast('uint64_t', -1),
                                           present_complete_semaphore,
                                           0)
    # Transition the acquired image so it can be rendered to.
    set_image_layout(swapchain_images[current_buffer], VK_IMAGE_ASPECT_COLOR_BIT,
                     VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                     VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 0)
    flush_init_cmd()
    cmd_buf_hinfo = VkCommandBufferInheritanceInfo(occlusionQueryEnable=VK_FALSE)
    cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo)
    clear_values = [VkClearValue(color={'float32': [0.2, 0.2, 0.2, 0.2]}), VkClearValue(depthStencil={'depth': 1.0, 'stencil': 0})]
    rp_begin = VkRenderPassBeginInfo(renderPass=render_pass,
                                     framebuffer=framebuffers[current_buffer],
                                     renderArea={'offset': {'x': 0, 'y': 0},
                                                 'extent': {'width': width, 'height': height}},
                                     clearValueCount=len(clear_values),
                                     pClearValues=clear_values)
    vkBeginCommandBuffer(draw_cmd, cmd_buf_info)
    vkCmdBeginRenderPass(draw_cmd, rp_begin, VK_SUBPASS_CONTENTS_INLINE)
    vkCmdEndRenderPass(draw_cmd)
    # Transition the image back to the presentable layout.
    pre_present_barrier = VkImageMemoryBarrier(srcAccessMask=VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                               dstAccessMask=VK_ACCESS_MEMORY_READ_BIT,
                                               oldLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                               newLayout=VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                               srcQueueFamilyIndex=ffi.cast('uint32_t', VK_QUEUE_FAMILY_IGNORED),
                                               dstQueueFamilyIndex=ffi.cast('uint32_t', VK_QUEUE_FAMILY_IGNORED),
                                               subresourceRange=[VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1],
                                               image=swapchain_images[current_buffer])
    vkCmdPipelineBarrier(draw_cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, None, 0,
                         None, 1, pre_present_barrier)
    vkEndCommandBuffer(draw_cmd)
    # Execution waits on the acquire semaphore; then wait idle and present.
    submit_info = VkSubmitInfo(waitSemaphoreCount=1,
                               pWaitSemaphores=[present_complete_semaphore],
                               pWaitDstStageMask=[VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT],
                               commandBufferCount=1,
                               pCommandBuffers=[draw_cmd])
    vkQueueSubmit(queue, 1, [submit_info], VK_NULL_HANDLE)
    vkQueueWaitIdle(queue)
    present = VkPresentInfoKHR(swapchainCount=1,
                               pSwapchains=[swapchain],
                               pImageIndices=[current_buffer])
    vkQueuePresentKHR(queue, present)
    vkDestroySemaphore(device, present_complete_semaphore)
#main loop
running = True
event = sdl2.SDL_Event()
last_ticks = 0
while running:
    while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
        if event.type == sdl2.SDL_QUIT:
            running = False
    new_ticks = sdl2.SDL_GetTicks()
    # Redraw at roughly 30 frames per second.
    if new_ticks - last_ticks > 1000 / 30:
        draw()
        last_ticks = new_ticks
#cleanup
# Destroy Vulkan objects roughly in reverse order of creation.
vkFreeMemory(device, depth_mem)
for i in framebuffers:
    vkDestroyFramebuffer(device, i)
if setup_cmd:
    vkFreeCommandBuffers(device, cmd_pool, 1, [setup_cmd])
vkFreeCommandBuffers(device, cmd_pool, 1, [draw_cmd])
vkDestroyCommandPool(device, cmd_pool)
vkDestroyRenderPass(device, render_pass)
for i in views:
    vkDestroyImageView(device, i)
vkDestroyImageView(device, depth_view)
vkDestroyImage(device, depth_image)
vkDestroySwapchainKHR(device, swapchain)
vkDestroyDevice(device)
vkDestroyDebugReportCallbackEXT(inst, debug_callback)
vkDestroySurfaceKHR(inst, surface)
vkDestroyInstance(inst)
sdl2.SDL_DestroyWindow(window)
sdl2.SDL_Quit()
| |
# -*- coding: utf-8 -*-
"""
celery.worker.job
~~~~~~~~~~~~~~~~~
This module defines the :class:`Request` class,
which specifies how tasks are executed.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import time
import socket
import sys
from datetime import datetime
from .. import exceptions
from ..datastructures import ExceptionInfo
from ..registry import tasks
from ..app import app_or_default
from ..execute.trace import build_tracer, trace_task, report_internal_error
from ..platforms import set_mp_process_title as setps
from ..utils import noop, kwdict, fun_takes_kwargs, truncate_text
from ..utils.encoding import safe_repr, safe_str
from ..utils.timeutils import maybe_iso8601, timezone
from . import state
# Localize -- bind frequently used timezone helpers to short module names.
tz_to_local = timezone.to_local
tz_or_local = timezone.tz_or_local
tz_utc = timezone.utc
# Python 2.6 and earlier cannot accept unicode keys in **kwargs, so
# incoming keyword dicts must be converted with kwdict() first.
NEEDS_KWDICT = sys.version_info <= (2, 6)
def execute_and_trace(name, uuid, args, kwargs, request=None, **opts):
    """This is a pickleable method used as a target when applying to pools.
    It's the same as::
        >>> trace_task(name, *args, **kwargs)[0]
    """
    task = tasks[name]
    try:
        hostname = opts.get("hostname")
        # Show the running task's name in the worker process title.
        setps("celeryd", name, hostname, rate_limit=True)
        try:
            # Build the tracer on first use and cache it on the task object.
            if task.__tracer__ is None:
                task.__tracer__ = build_tracer(name, task, **opts)
            return task.__tracer__(uuid, args, kwargs, request)[0]
        finally:
            setps("celeryd", "-idle-", hostname, rate_limit=True)
    except Exception, exc:
        # The tracing machinery itself failed; report instead of letting the
        # exception kill the pool worker process.
        return report_internal_error(task, exc)
class Request(object):
    """A request for task execution.
    Wraps one decoded task message body and carries all per-delivery state
    (timing, acknowledgement, revocation) through the worker pool callbacks.
    """
    __slots__ = ("app", "name", "id", "args", "kwargs",
                 "on_ack", "delivery_info", "hostname",
                 "logger", "eventer", "connection_errors",
                 "task", "eta", "expires",
                 "_does_debug", "_does_info", "request_dict",
                 "acknowledged", "success_msg", "error_msg",
                 "retry_msg", "time_start", "worker_pid",
                 "_already_revoked", "_terminate_on_ack", "_tzlocal")
    #: Format string used to log task success.
    success_msg = """\
        Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s
    """
    #: Format string used to log task failure.
    error_msg = """\
        Task %(name)s[%(id)s] raised exception: %(exc)s
    """
    #: Format string used to log internal error.
    internal_error_msg = """\
        Task %(name)s[%(id)s] INTERNAL ERROR: %(exc)s
    """
    #: Format string used to log task retry.
    retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s"""
    def __init__(self, body, on_ack=noop,
            hostname=None, logger=None, eventer=None, app=None,
            connection_errors=None, request_dict=None,
            delivery_info=None, task=None, **opts):
        self.app = app or app_or_default(app)
        name = self.name = body["task"]
        self.id = body["id"]
        self.args = body.get("args", [])
        self.kwargs = body.get("kwargs", {})
        try:
            # EAFP check that kwargs is a mapping (must expose .items).
            self.kwargs.items
        except AttributeError:
            raise exceptions.InvalidTaskError(
                    "Task keyword arguments is not a mapping")
        if NEEDS_KWDICT:
            # Python <= 2.6: keyword dicts must have str (not unicode) keys.
            self.kwargs = kwdict(self.kwargs)
        eta = body.get("eta")
        expires = body.get("expires")
        utc = body.get("utc", False)
        self.on_ack = on_ack
        self.hostname = hostname or socket.gethostname()
        self.logger = logger or self.app.log.get_default_logger()
        self.eventer = eventer
        self.connection_errors = connection_errors or ()
        self.task = task or tasks[name]
        self.acknowledged = self._already_revoked = False
        self.time_start = self.worker_pid = self._terminate_on_ack = None
        self._tzlocal = None
        # timezone means the message is timezone-aware, and the only timezone
        # supported at this point is UTC.
        if eta is not None:
            tz = tz_utc if utc else self.tzlocal
            self.eta = tz_to_local(maybe_iso8601(eta), self.tzlocal, tz)
        else:
            self.eta = None
        if expires is not None:
            tz = tz_utc if utc else self.tzlocal
            self.expires = tz_to_local(maybe_iso8601(expires),
                                       self.tzlocal, tz)
        else:
            self.expires = None
        delivery_info = {} if delivery_info is None else delivery_info
        self.delivery_info = {
            "exchange": delivery_info.get("exchange"),
            "routing_key": delivery_info.get("routing_key"),
        }
        ## shortcuts -- cache the logger level checks consulted on every task.
        self._does_debug = self.logger.isEnabledFor(logging.DEBUG)
        self._does_info = self.logger.isEnabledFor(logging.INFO)
        self.request_dict = body
    @classmethod
    def from_message(cls, message, body, **kwargs):
        # should be deprecated
        return Request(body,
                    delivery_info=getattr(message, "delivery_info", None),
                    **kwargs)
    def extend_with_default_kwargs(self, loglevel, logfile):
        """Extend the tasks keyword arguments with standard task arguments.
        Currently these are `logfile`, `loglevel`, `task_id`,
        `task_name`, `task_retries`, and `delivery_info`.
        See :meth:`celery.task.base.Task.run` for more information.
        Magic keyword arguments are deprecated and will be removed
        in version 3.0.
        """
        kwargs = dict(self.kwargs)
        default_kwargs = {"logfile": logfile,
                          "loglevel": loglevel,
                          "task_id": self.id,
                          "task_name": self.name,
                          "task_retries": self.request_dict.get("retries", 0),
                          "task_is_eager": False,
                          "delivery_info": self.delivery_info}
        # Only pass the magic kwargs the task's run() actually accepts.
        fun = self.task.run
        supported_keys = fun_takes_kwargs(fun, default_kwargs)
        extend_with = dict((key, val) for key, val in default_kwargs.items()
                                if key in supported_keys)
        kwargs.update(extend_with)
        return kwargs
    def execute_using_pool(self, pool, loglevel=None, logfile=None):
        """Like :meth:`execute`, but using a worker pool.
        :param pool: A :class:`multiprocessing.Pool` instance.
        :keyword loglevel: The loglevel used by the task.
        :keyword logfile: The logfile used by the task.
        """
        if self.revoked():
            return
        task = self.task
        hostname = self.hostname
        kwargs = self.kwargs
        if self.task.accept_magic_kwargs:
            kwargs = self.extend_with_default_kwargs(loglevel, logfile)
        request = self.request_dict
        request.update({"loglevel": loglevel, "logfile": logfile,
                        "hostname": hostname, "is_eager": False,
                        "delivery_info": self.delivery_info})
        # execute_and_trace is a module-level function so it is pickleable.
        result = pool.apply_async(execute_and_trace,
                                  args=(self.name, self.id, self.args, kwargs),
                                  kwargs={"hostname": hostname,
                                          "request": request},
                                  accept_callback=self.on_accepted,
                                  timeout_callback=self.on_timeout,
                                  callback=self.on_success,
                                  error_callback=self.on_failure,
                                  soft_timeout=task.soft_time_limit,
                                  timeout=task.time_limit)
        return result
    def execute(self, loglevel=None, logfile=None):
        """Execute the task in a :func:`~celery.execute.trace.trace_task`.
        :keyword loglevel: The loglevel used by the task.
        :keyword logfile: The logfile used by the task.
        """
        if self.revoked():
            return
        # acknowledge task as being processed.
        if not self.task.acks_late:
            self.acknowledge()
        kwargs = self.kwargs
        if self.task.accept_magic_kwargs:
            kwargs = self.extend_with_default_kwargs(loglevel, logfile)
        request = self.request_dict
        request.update({"loglevel": loglevel, "logfile": logfile,
                        "hostname": self.hostname, "is_eager": False,
                        "delivery_info": self.delivery_info})
        retval, _ = trace_task(self.task, self.id, self.args, kwargs,
                               **{"hostname": self.hostname,
                                  "loader": self.app.loader,
                                  "request": request})
        # acks_late path: acknowledge only after the task has run.
        self.acknowledge()
        return retval
    def maybe_expire(self):
        """If expired, mark the task as revoked."""
        if self.expires and datetime.now(self.tzlocal) > self.expires:
            state.revoked.add(self.id)
            if self.store_errors:
                self.task.backend.mark_as_revoked(self.id)
    def terminate(self, pool, signal=None):
        # If the task has already started, kill its pool process now;
        # otherwise remember the request and terminate it once accepted.
        if self.time_start:
            return pool.terminate_job(self.worker_pid, signal)
        else:
            self._terminate_on_ack = (True, pool, signal)
    def revoked(self):
        """If revoked, skip task and mark state."""
        if self._already_revoked:
            return True
        if self.expires:
            self.maybe_expire()
        if self.id in state.revoked:
            self.logger.warn("Skipping revoked task: %s[%s]",
                             self.name, self.id)
            self.send_event("task-revoked", uuid=self.id)
            self.acknowledge()
            self._already_revoked = True
            return True
        return False
    def send_event(self, type, **fields):
        # Events are only emitted when an eventer is attached and enabled.
        if self.eventer and self.eventer.enabled:
            self.eventer.send(type, **fields)
    def on_accepted(self, pid, time_accepted):
        """Handler called when task is accepted by worker pool."""
        self.worker_pid = pid
        self.time_start = time_accepted
        state.task_accepted(self)
        if not self.task.acks_late:
            self.acknowledge()
        self.send_event("task-started", uuid=self.id, pid=pid)
        if self._does_debug:
            self.logger.debug("Task accepted: %s[%s] pid:%r",
                              self.name, self.id, pid)
        # A terminate() issued before the task started is honoured here.
        if self._terminate_on_ack is not None:
            _, pool, signal = self._terminate_on_ack
            self.terminate(pool, signal)
    def on_timeout(self, soft, timeout):
        """Handler called if the task times out."""
        state.task_ready(self)
        if soft:
            self.logger.warning("Soft time limit (%ss) exceeded for %s[%s]",
                                timeout, self.name, self.id)
            exc = exceptions.SoftTimeLimitExceeded(timeout)
        else:
            self.logger.error("Hard time limit (%ss) exceeded for %s[%s]",
                              timeout, self.name, self.id)
            exc = exceptions.TimeLimitExceeded(timeout)
        if self.store_errors:
            self.task.backend.mark_as_failure(self.id, exc)
    def on_success(self, ret_value, now=None):
        """Handler called if the task was successfully processed."""
        if isinstance(ret_value, ExceptionInfo):
            # The pool delivers failures through the success callback too.
            if isinstance(ret_value.exception, (
                    SystemExit, KeyboardInterrupt)):
                raise ret_value.exception
            return self.on_failure(ret_value)
        state.task_ready(self)
        if self.task.acks_late:
            self.acknowledge()
        if self.eventer and self.eventer.enabled:
            now = time.time()
            runtime = self.time_start and (time.time() - self.time_start) or 0
            self.send_event("task-succeeded", uuid=self.id,
                            result=safe_repr(ret_value), runtime=runtime)
        if self._does_info:
            now = now or time.time()
            runtime = self.time_start and (time.time() - self.time_start) or 0
            self.logger.info(self.success_msg.strip(),
                            {"id": self.id,
                             "name": self.name,
                             "return_value": self.repr_result(ret_value),
                             "runtime": runtime})
    def on_retry(self, exc_info):
        """Handler called if the task should be retried."""
        self.send_event("task-retried", uuid=self.id,
                         exception=safe_repr(exc_info.exception.exc),
                         traceback=safe_str(exc_info.traceback))
        if self._does_info:
            self.logger.info(self.retry_msg.strip(),
                            {"id": self.id,
                             "name": self.name,
                             "exc": safe_repr(exc_info.exception.exc)},
                            exc_info=exc_info)
    def on_failure(self, exc_info):
        """Handler called if the task raised an exception."""
        state.task_ready(self)
        if not exc_info.internal:
            if isinstance(exc_info.exception, exceptions.RetryTaskError):
                return self.on_retry(exc_info)
            # This is a special case as the process would not have had
            # time to write the result.
            if isinstance(exc_info.exception, exceptions.WorkerLostError) and \
                    self.store_errors:
                self.task.backend.mark_as_failure(self.id, exc_info.exception)
            # (acks_late) acknowledge after result stored.
            if self.task.acks_late:
                self.acknowledge()
        self._log_error(exc_info)
    def _log_error(self, exc_info):
        # Internal (worker-side) errors are escalated to CRITICAL with a
        # different message template; task errors log at ERROR.
        format = self.error_msg
        description = "raised exception"
        severity = logging.ERROR
        self.send_event("task-failed", uuid=self.id,
                         exception=safe_repr(exc_info.exception),
                         traceback=safe_str(exc_info.traceback))
        if exc_info.internal:
            format = self.internal_error_msg
            description = "INTERNAL ERROR"
            severity = logging.CRITICAL
        context = {"hostname": self.hostname,
                   "id": self.id,
                   "name": self.name,
                   "exc": safe_repr(exc_info.exception),
                   "traceback": safe_str(exc_info.traceback),
                   "args": safe_repr(self.args),
                   "kwargs": safe_repr(self.kwargs),
                   "description": description}
        self.logger.log(severity, format.strip(), context,
                        exc_info=exc_info.exc_info,
                        extra={"data": {"id": self.id,
                                        "name": self.name,
                                        "hostname": self.hostname}})
        task_obj = tasks.get(self.name, object)
        task_obj.send_error_email(context, exc_info.exception)
    def acknowledge(self):
        """Acknowledge task."""
        if not self.acknowledged:
            self.on_ack(self.logger, self.connection_errors)
            self.acknowledged = True
    def repr_result(self, result, maxlen=46):
        # 46 is the length needed to fit
        # "the quick brown fox jumps over the lazy dog" :)
        return truncate_text(safe_repr(result), maxlen)
    def info(self, safe=False):
        return {"id": self.id,
                "name": self.name,
                "args": self.args if safe else safe_repr(self.args),
                "kwargs": self.kwargs if safe else safe_repr(self.kwargs),
                "hostname": self.hostname,
                "time_start": self.time_start,
                "acknowledged": self.acknowledged,
                "delivery_info": self.delivery_info,
                "worker_pid": self.worker_pid}
    def shortinfo(self):
        return "%s[%s]%s%s" % (
                    self.name, self.id,
                    " eta:[%s]" % (self.eta, ) if self.eta else "",
                    " expires:[%s]" % (self.expires, ) if self.expires else "")
    __str__ = shortinfo
    def __repr__(self):
        return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (
                self.__class__.__name__,
                self.name, self.id, self.args, self.kwargs)
    @property
    def tzlocal(self):
        # Lazily resolved from the app configuration and cached.
        if self._tzlocal is None:
            self._tzlocal = tz_or_local(self.app.conf.CELERY_TIMEZONE)
        return self._tzlocal
    @property
    def store_errors(self):
        return (not self.task.ignore_result
                or self.task.store_errors_even_if_ignored)
    def _compat_get_task_id(self):
        return self.id
    def _compat_set_task_id(self, value):
        self.id = value
    def _compat_get_task_name(self):
        return self.name
    def _compat_set_task_name(self, value):
        self.name = value
    # Old attribute names kept for backwards compatibility.
    task_id = property(_compat_get_task_id, _compat_set_task_id)
    task_name = property(_compat_get_task_name, _compat_set_task_name)
class TaskRequest(Request):
    """Compatibility subclass of :class:`Request`.

    Accepts the old positional signature ``(name, id, args, kwargs, ...)``
    and converts it into the message-body dict the new Request expects.
    """
    def __init__(self, name, id, args=(), kwargs=None,
            eta=None, expires=None, **options):
        """Compatibility class."""
        # ``kwargs`` previously defaulted to a shared mutable {}; use the
        # None sentinel instead (behaviour for callers is unchanged).
        super(TaskRequest, self).__init__({
                "task": name, "id": id, "args": args,
                "kwargs": {} if kwargs is None else kwargs,
                "eta": eta,
                "expires": expires}, **options)
| |
from . import certs
from . import fragments
import glob
import os
import datetime
import sys
import io
import subprocess
from typing import List, Dict, Set
# --- Module-level registry state, filled in as certificates and sites are declared ---
_certificates = [] # type: List[certs.Certificate]
_certificate_keys = {}  # certificate path -> key file path
_certificate_by_hostname = {} # type: Dict[str, certs.Certificate]
_sites = []  # every Site instance, in declaration order
_misc_dir = None  # directory for generated support files (e.g. ACME challenges)
_collisions = set() # type: Set
_bad_hostnames = set() # type: Set[str]
separate_logs = True  # when True, each site gets its own access/error log files
def load_certificate(path, key=None):
    """Load the certificate at *path* and register it with its private key.

    When *key* is not given, a sibling ``<path-without-extension>.key``
    file is assumed.  If that key file does not exist, a warning is
    printed and nothing is registered.
    """
    certificate = certs.load_cert(path)
    key_path = key if key else path.rsplit('.', 1)[0] + '.key'
    if not os.path.exists(key_path):
        print('Warning: key file for %r not found (tried %r)' % (path, key_path))
        return
    _certificates.append(certificate)
    _certificate_keys[path] = key_path
def find_certificates(pattern):
    """Glob for certificate files matching *pattern* and load each one.

    Emits a warning on stderr when the pattern matches nothing.
    """
    found = glob.glob(pattern)
    if not found:
        print('Warning: no certificates found (%r)' % pattern, file=sys.stderr)
    for crt_path in found:
        load_certificate(crt_path)
def find_acmesh_certificates():
    """Discover key/fullchain pairs produced by acme.sh under ~/.acme.sh.

    For every ``*.key`` file, the sibling ``fullchain.cer`` (if present)
    is loaded with that key.
    """
    for key_path in glob.glob(os.path.expanduser('~/.acme.sh/*/*.key')):
        chain = os.path.dirname(key_path) + '/fullchain.cer'
        if os.path.exists(chain):
            load_certificate(chain, key=key_path)
def _check_collision(addr):
    """Ensure each (port, hostname) address is claimed at most once.

    Raises ValueError when *addr* was seen before; otherwise records it
    in the module-level ``_collisions`` set.
    """
    already_used = addr in _collisions
    if already_used:
        raise ValueError('address %s used twice' % (addr, ))
    _collisions.add(addr)
class Location:
    """Accumulates nginx directives for a single ``location`` block."""

    def __init__(self, prefix):
        self.location = prefix
        self.lines = [] # type: List[str]
        self.raw = [] # type: List[str]

    def proxy(self, *, location='/', to, headers=None, proxy_redirect=None):
        """Emit a proxy_pass setup targeting *to*, with optional headers
        and ``proxy_redirect`` handling."""
        self.lines.extend(fragments.proxy.splitlines())
        for header_name, header_value in (headers or {}).items():
            self.lines.append('proxy_set_header %s %s;' % (q(header_name), qval(header_value)))
        self.lines.append('proxy_pass %s;' % q(to))
        if not proxy_redirect:
            return
        if proxy_redirect is True:
            self.lines.append('proxy_redirect default;')
        else:
            self.lines.append('proxy_redirect %s %s;' % (q(proxy_redirect[0]), q(proxy_redirect[1])))

    def set_header(self, k, v):
        """Emit an ``add_header`` directive."""
        self.lines.append('add_header %s %s;' % (q(k), qval(v)))

    def return_code(self, code):
        """Emit a bare ``return <code>;`` directive."""
        self.lines.append('return %d;' % code)

    def files(self, path, allow_index=False):
        """Serve static files from *path*, optionally with autoindex."""
        self.lines.append('alias %s;' % q(path))
        if allow_index:
            self.lines.append('autoindex on;')

    def rewrite(self, *, regex, to, permanent=False, redirect=False):
        """Emit a ``rewrite`` directive, optionally permanent/redirect."""
        directive = 'rewrite %s %s' % (q(regex), q(to))
        if permanent:
            directive += ' permanent'
        if redirect:
            directive += ' redirect'
        self.lines.append(directive + ';')

    def _generate(self):
        """Render the accumulated directives as an indented location block."""
        body = [ ' ' + line for line in self.lines + self.raw ]
        return [ 'location %s {' % q(self.location) ] + body + ['}']
def q(s):
    """Validate *s* for use as an unquoted nginx token.

    Returns *s* unchanged; raises ValueError when it contains a space or
    newline, which would break out of the directive.
    """
    if any(ws in s for ws in (' ', '\n')):
        raise ValueError('whitespace in %r!' % s)
    return s
def qval(s):
    """Quote a directive value for nginx.

    Values without spaces pass through unchanged; values with spaces are
    wrapped in double quotes.  A value containing both a space and a
    double quote cannot be represented and raises ValueError.
    """
    if ' ' not in s:
        return s
    if '"' in s:
        raise ValueError('" in %r!' % s)
    return '"%s"' % s
class Site:
    """Configuration for one nginx virtual host.

    Instantiating a Site registers it in the module-level ``_sites`` list
    and may create additional redirect Sites for aliases and for the
    plain-HTTP -> HTTPS redirect (auto_tls).
    """
    def __init__(self, name, aliases=[], no_tls=False, auto_tls=True, tls_only=False, hsts=False, auto_cert=True, log_name=None):
        # NOTE: the [] default for aliases is only iterated, never mutated.
        self.rewrites = []
        self.locations = []
        self.locations_by_key = {}
        self.raw = []
        if type(name) is list:
            # A list means: serve all of these names; the first is canonical.
            self.all_names = list(name)
            name = name[0]
        else:
            self.all_names = [name]
        self.name = name
        self.log_name = log_name or name
        self.no_tls = no_tls
        # auto_tls: a separate plain-HTTP site is generated that redirects
        # to the canonical HTTPS URL.
        self.auto_tls = auto_tls and not no_tls and not tls_only
        self.tls_only = tls_only
        self.hsts = hsts
        self.default_site = False
        self.auto_cert = auto_cert
        assert not (self.tls_only and self.no_tls)
        # Each alias becomes a standalone permanent-redirect site.
        for alias in aliases:
            redirect(alias, self.base_url, log_name=name, no_tls=no_tls, auto_tls=auto_tls, permanent=True, auto_cert=auto_cert)
        if self.auto_tls:
            redirect(name, self.base_url, log_name=name, no_tls=True, permanent=True, auto_cert=auto_cert)
        _sites.append(self)
    def rewrite(self, *, location='/', **kwargs):
        # Delegate to (and lazily create) the Location for *location*.
        self.location(location).rewrite(**kwargs)
    def proxy(self, *, location='/', **kwargs):
        self.location(location).proxy(**kwargs)
    def return_code(self, *, location='/', code):
        self.location(location).return_code(code)
    def redirect(self, *, to, strip_path=False, permanent=False):
        # NOTE(review): strip_path is accepted but never used -- confirm.
        self.rewrite(regex='^', to=to.rstrip('/') + '$request_uri?', permanent=permanent)
    def location(self, prefix):
        """Return the Location for *prefix*, creating it on first use."""
        key = (prefix, )
        if key not in self.locations_by_key:
            l = Location(prefix=prefix)
            self.locations.append(l)
            self.locations_by_key[key] = l
        return self.locations_by_key[key]
    def _generate(self):
        # One server block per hostname; all share the same directives.
        return '\n'.join(self._generate_one(name) for name in self.all_names )
    def _generate_one(self, name):
        """Render the full nginx ``server`` block for one hostname."""
        lines = ['server_name %s;' % q(name)]
        lines += self.raw
        if (self.no_tls or not self.auto_tls) and not self.tls_only:
            _check_collision((80, name))
            lines.append('listen 80;')
            # Serve ACME HTTP-01 challenges from the shared misc directory.
            lines += [
                'location /.well-known/acme-challenge/ {',
                '    root %s/challenges/;' % q(_misc_dir),
                '}'
            ]
        if separate_logs:
            lines += [
                'access_log /var/log/nginx/%s_access.log main;' % q(self.log_name),
                'error_log /var/log/nginx/%s_error.log;' % q(self.log_name),
            ]
        if not self.no_tls:
            _check_collision((443, name))
            lines.append('listen 443 ssl;')
            # Raises KeyError if no certificate was matched for this name.
            crt, key = _certificate_by_hostname[name]
            lines.append('ssl_certificate %s;' % q(crt))
            lines.append('ssl_certificate_key %s;' % q(key))
            if self.hsts:
                lines.append('add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";')
        lines += self.rewrites
        for location in self.locations:
            lines += location._generate()
        return _make_server(lines)
    @property
    def base_url(self):
        # Canonical URL for this site, used as the redirect target.
        if self.no_tls:
            return 'http://%s/' % self.name
        else:
            return 'https://%s/' % self.name
def _make_server(lines):
return ' server {\n %s\n }' % '\n '.join(lines)
class DefaultSite(Site):
    """Catch-all server block for requests that match no other hostname.

    It reuses a known hostname's certificate (if one was assigned) so
    TLS handshakes for unknown names still complete.
    """

    def __init__(self, certificate_for):
        # Reuse Site's registration machinery; no automatic redirects.
        Site.__init__(self, [certificate_for], auto_tls=False)

    def _generate(self):
        out = ['listen 80 default_server;', 'listen 443 default_server;']
        pair = _certificate_by_hostname.get(self.name)
        if pair is not None:
            crt, key = pair
            out.append('ssl_certificate %s;' % q(crt))
            out.append('ssl_certificate_key %s;' % q(key))
        out += self.rewrites
        for loc in self.locations:
            out += loc._generate()
        return _make_server(out)
def redirect(hostname, target_url, *, log_name=None, no_tls=False, permanent=False,
             auto_tls=True, tls_only=False, auto_cert=True):
    """Register a Site whose only job is to redirect to *target_url*."""
    helper = Site(hostname, no_tls=no_tls, auto_tls=auto_tls, tls_only=tls_only,
                  auto_cert=auto_cert, log_name=log_name)
    helper.redirect(to=target_url, permanent=permanent)
def _find_cert(hostname):
    """Pick the longest-lived known certificate matching *hostname*.

    Returns ``(ok, cert)`` where cert is None when nothing matches and
    ok says whether the chosen cert is still valid long enough.
    """
    candidates = sorted(
        (cert for cert in _certificates if certs.matches(cert, hostname)),
        key=lambda c: c.expiration)
    if not candidates:
        return False, None
    best = candidates[-1]
    return _is_expiration_ok(best), best
def _is_expiration_ok(cert):
return (cert.expiration - datetime.datetime.now()) > datetime.timedelta(days=30)
def _abs_symlink_force(src, dst):
if os.path.islink(dst):
os.unlink(dst)
os.symlink(os.path.abspath(src), dst)
def _assign_certs(hostnames):
    """Symlink a cert/key pair into misc/certs for every TLS hostname.

    Populates the module-global _certificate_by_hostname mapping and
    records hostnames lacking a good (matching, long-enough-valid)
    certificate in the module-global _bad_hostnames set.  Hostnames
    without any matching cert fall back to an arbitrary known cert so
    nginx can still start.
    """
    certs_dir = _misc_dir + '/certs'
    if not os.path.exists(certs_dir):
        os.mkdir(certs_dir)
    global _bad_hostnames
    _bad_hostnames = set()
    for hostname in hostnames:
        ok, cert = _find_cert(hostname)
        if not ok:
            _bad_hostnames.add(hostname)
        if not cert:
            if not _certificates:
                raise ValueError('no certificates found at all, but TLS requested')
            cert = _certificates[0]
        # A slash would escape the certs directory via the symlink name.
        if '/' in hostname:
            raise ValueError('bad hostname %r' % hostname)
        crt_link = os.path.abspath(certs_dir + '/' + hostname + '.crt')
        key_link = os.path.abspath(certs_dir + '/' + hostname + '.key')
        _abs_symlink_force(cert.path, crt_link)
        _abs_symlink_force(_certificate_keys[cert.path], key_link)
        _certificate_by_hostname[hostname] = (crt_link, key_link)
def _gen_config():
    """Serialize the preamble plus every registered Site into one nginx
    config string (the preamble opens an http { block; we close it)."""
    out = io.StringIO()
    out.write('%s\n' % (fragments.preamble,))
    for site in _sites:
        out.write('%s\n' % (site._generate(),))
        out.write('\n')
    out.write('}\n')
    return out.getvalue()
def _get_hostnames(need_auto_cert=False):
    """Collect every TLS hostname from the registered sites.

    With need_auto_cert=True, sites that opted out of automatic
    certificate management are skipped.
    """
    return [
        name
        for site in _sites
        if not site.no_tls and (site.auto_cert or not need_auto_cert)
        for name in site.all_names
    ]
def hostname_sort_order(k):
    """Sort key: fewer labels first, then reversed label order (TLD first)."""
    labels = k.split('.')
    return (len(labels) - 1, labels[::-1])
def target_simple_file(filename, misc_dir):
    """Generate the nginx config and write it to *filename*.

    Side effects: creates *misc_dir* (mode 0700), discovers certificates,
    assigns cert symlinks, writes hostnames.txt for the renewal script,
    and warns on stderr about hostnames lacking usable certificates.
    """
    global _misc_dir
    _misc_dir = misc_dir
    if not os.path.exists(_misc_dir):
        os.mkdir(_misc_dir, 0o700)
    find_acmesh_certificates()
    find_certificates(_misc_dir + '/autocerts/*.crt')
    all_hostnames = _get_hostnames()
    _assign_certs(all_hostnames)
    config_text = _gen_config()
    with open(filename, 'w') as out:
        out.write(config_text)
    auto_cert_hostnames = _get_hostnames(need_auto_cert=True)
    missing = [hostname for hostname in _bad_hostnames
               if hostname in auto_cert_hostnames]
    with open(_misc_dir + '/hostnames.txt', 'w') as out:
        out.write('\n'.join(auto_cert_hostnames) + '\n')
    if missing:
        print('You have several domains without TLS certificates configured for them:', file=sys.stderr)
        print('\t', ' '.join(sorted(missing, key=hostname_sort_order)), file=sys.stderr)
        print('Run ./renew.sh to request Let\'s Encrypt certificates for them.', file=sys.stderr)
def target_debian():
    """Install the generated config system-wide (Debian layout) and
    hot-reload the running nginx."""
    target_simple_file('/etc/nginx/nginx.conf', misc_dir='/etc/nginx/misc')
    # Argument-list form: no shell is spawned for this fixed command,
    # which is both faster and immune to shell-quoting surprises.
    subprocess.check_call(['nginx', '-s', 'reload'])
| |
import hashlib
from shiftmedia import exceptions as x
from shiftmedia import utils
class PathBuilder:
    """Builds and validates signed resize filenames.

    Resize parameters are encoded into a filename together with an
    md5-based signature derived from the storage id and a secret key, so
    the resizing service can reject URLs it did not issue (prevents
    brute-forcing arbitrary resizes).
    """

    def __init__(self, secret_key):
        """
        Path builder constructor
        Initializes path builder service.
        :param secret_key: string - secret key from config
        """
        self.secret_key = secret_key

    @staticmethod
    def _validate_size(size, err):
        """Ensure *size* looks like '<width>x<height>' with positive ints.

        :param size: string - candidate size
        :param err: string - exception message used on failure
        :raises x.InvalidArgumentException: when the size is malformed
        """
        dimensions = size.lower().split('x')
        bad = len(dimensions) != 2
        for dimension in dimensions:
            if not dimension.isdigit() or int(dimension) <= 0:
                bad = True
        if bad:
            raise x.InvalidArgumentException(err)

    @staticmethod
    def _format_from_id(id):
        """Extract the original file extension from a storage id.

        Storage ids are dash-separated; the sixth part carries the
        original filename, whose extension we reuse as output format.
        """
        parts = id.split('-')
        return parts[5][parts[5].index('.') + 1:]

    def generate_signature(self, id, filename):
        """
        Generate signature
        Accepts storage id and a filename to generate hash signature.
        Signatures are used to prevent brute force attack against
        resizing service.
        :param id: string - storage id
        :param filename: - string, resize filename
        :return: string - signature
        """
        # NOTE(review): md5 is weak as a MAC; fine for casual abuse
        # prevention, but switching to hmac/sha256 would invalidate all
        # previously issued URLs - confirm before changing.
        sign_me = bytes(id + filename + self.secret_key, 'utf-8')
        digest = hashlib.md5()
        digest.update(sign_me)
        return digest.hexdigest()

    def validate_signature(self, id, filename):
        """
        Validate signature
        Accepts storage id and a filename and validates hash signature.
        :param id: string - storage id
        :param filename: - string, resize filename
        :return: bool - True when the embedded signature matches
        """
        parts = filename.split('-')
        if len(parts) != 5:
            return False
        tail = parts[4]
        # A malformed tail without an extension used to raise ValueError
        # from index(); treat it as an invalid signature instead.
        if '.' not in tail:
            return False
        extension = tail[tail.index('.'):]
        non_signed_filename = '-'.join(parts[:4]) + extension
        signature = tail.replace(extension, '')
        return signature == self.generate_signature(id, non_signed_filename)

    def get_auto_crop_filename(
        self,
        id,
        size,
        factor,
        output_format=None,
        upscale=True,
        quality=65
    ):
        """
        Get auto crop filename
        Encodes parameters for automatic cropping/resizing into a filename.
        Resulting filename will contain hash signature.
        :param id: string - storage id (used to generate signature)
        :param size: string - width x height
        :param factor: string - crop factor, fit/fill
        :param output_format: string - output format (guessed from id if None)
        :param upscale: bool - enlarge smaller original
        :param quality: string - differs per format. i.e. 0-100 for jpg
        :return: string - signed filename
        """
        self._validate_size(size, 'Invalid size provided must be in 100x200 format')

        # validate factor
        if factor not in ['fit', 'fill']:
            err = 'Auto crop factor must be either fit or fill'
            raise x.InvalidArgumentException(err)

        # validate quality
        if not str(quality).isdigit():
            err = 'Quality must be numeric'
            raise x.InvalidArgumentException(err)

        # guess format from original if not specified
        if not output_format:
            output_format = self._format_from_id(id)

        # encode upscale flag as a filename token
        upscale = 'upscale' if bool(upscale) else 'noupscale'

        # build unsigned filename, then embed its signature
        schema = '{size}-{factor}-{quality}-{upscale}.{format}'
        signed_schema = '{size}-{factor}-{quality}-{upscale}-{sig}.{format}'
        params = dict(
            size=size,
            factor=factor,
            quality=quality,
            upscale=upscale,
            format=output_format
        )
        unsigned_filename = schema.format(**params)
        params['sig'] = self.generate_signature(id, unsigned_filename)
        return signed_schema.format(**params)

    def get_manual_crop_filename(
        self,
        id,
        sample_size,
        target_size,
        output_format=None,
        upscale=True,
        quality=65
    ):
        """
        Get manual crop filename
        Encodes parameters for manual cropping/resizing into a filename.
        Resulting filename will contain hash signature.
        :param id: string - storage id (used to generate signature)
        :param target_size: string - width x height
        :param sample_size: string - width x height, must be proportional
        :param output_format: string - output format (guessed from id if None)
        :param upscale: bool - enlarge smaller original
        :param quality: string - differs per format. i.e. 0-100 for jpg
        :return: string - signed filename
        """
        self._validate_size(
            sample_size, 'Invalid sample size provided must be in 100x200 format')
        self._validate_size(
            target_size, 'Invalid target size provided must be in 100x200 format')

        # validate sample and target sizes being proportional; compare by
        # cross-multiplication to avoid float-division rounding surprises
        sw, sh = (int(v) for v in sample_size.lower().split('x'))
        tw, th = (int(v) for v in target_size.lower().split('x'))
        if sw * th != tw * sh:
            err = 'Sample size and target size must be proportional'
            raise x.InvalidArgumentException(err)

        # validate quality
        if not str(quality).isdigit():
            err = 'Quality must be numeric'
            raise x.InvalidArgumentException(err)

        # guess format from original if not specified
        if not output_format:
            output_format = self._format_from_id(id)

        # encode upscale flag as a filename token
        upscale = 'upscale' if bool(upscale) else 'noupscale'

        # build unsigned filename, then embed its signature
        schema = '{target}-{sample}-{quality}-{upscale}.{format}'
        signed_schema = '{target}-{sample}-{quality}-{upscale}-{sig}.{format}'
        params = dict(
            sample=sample_size,
            target=target_size,
            quality=quality,
            upscale=upscale,
            format=output_format
        )
        unsigned_filename = schema.format(**params)
        params['sig'] = self.generate_signature(id, unsigned_filename)
        return signed_schema.format(**params)

    def filename_to_resize_params(self, id, filename):
        """
        Filename to parameters
        Parses resize filename to a set of usable parameters. Will perform
        filename signature checking and throw an exception if requested
        resize filename is malformed.
        :param id: string - unique storage id
        :param filename: string - resize filename
        :return: dict of parameters
        """
        # validate signature
        if not self.validate_signature(id, filename):
            err = 'Unable to parse filename: bad signature'
            raise x.InvalidArgumentException(err)

        # split into the five dash-separated tokens
        parts = filename.split('-')
        target_size, sample_size, quality, upscale, rest = parts
        target_format = rest[rest.index('.') + 1:]

        # detect manual/auto: the second token is either a crop factor
        # keyword (auto) or a sample size (manual)
        if sample_size in ['fill', 'fit']:
            resize = 'auto'
        else:
            err = False
            sample_size = sample_size.split('x')
            for dimension in sample_size:
                if not dimension.isdigit() or int(dimension) <= 0:
                    err = True
                    break
            if err or len(sample_size) != 2:
                err = 'Unable to parse filename: bad sample size or crop factor'
                raise x.InvalidArgumentException(err)
            else:
                resize = 'manual'

        # validate target size
        err = False
        target_size = target_size.split('x')
        for dimension in target_size:
            if not dimension.isdigit() or int(dimension) <= 0:
                err = True
                break
        if err or len(target_size) != 2:
            err = 'Unable to parse filename: bad target size'
            raise x.InvalidArgumentException(err)

        # validate quality
        if not str(quality).isdigit():
            err = 'Quality must be numeric'
            raise x.InvalidArgumentException(err)

        # decode upscale flag
        upscale = (upscale == 'upscale')

        result = dict(
            id=id,
            resize_mode=resize,
            target_size='x'.join(target_size),
            output_format=target_format,
            quality=int(quality),
            filename=filename,
            upscale=upscale
        )
        if resize == 'auto':
            result['factor'] = sample_size
        if resize == 'manual':
            result['sample_size'] = 'x'.join(sample_size)
        return result
| |
import json
import six
from django.utils.html import escape
from django.core.files.uploadedfile import InMemoryUploadedFile
from django import forms
from hashlib import sha1
from os import path
from .form import FormsBasePlugin, BaseEditorForm
class DataForm(BaseEditorForm):
    """Editor form for the extra HTML attributes of a rendered <img> tag.

    The ``data__``-prefixed field names presumably map onto keys of the
    plugin's stored JSON data via BaseEditorForm -- confirm against
    BaseEditorForm's implementation.
    """

    # Optional id="" attribute for the rendered tag.
    data__id = forms.CharField(
        label="ID",
        max_length=255,
        required=False,
        widget=forms.TextInput(attrs={"class": "form-control"}),
    )
    # Optional alt="" text for accessibility.
    data__alt = forms.CharField(
        label="Alt text",
        max_length=255,
        required=False,
        widget=forms.TextInput(attrs={"class": "form-control"}),
    )
    # Optional class="" attribute for styling.
    data__class = forms.CharField(
        label="Class",
        max_length=255,
        required=False,
        widget=forms.TextInput(attrs={"class": "form-control"}),
    )
class ImagePluginBase(FormsBasePlugin):
    """Abstract image plugin: stores image metadata as JSON and renders
    an <img> tag.

    Subclasses supply the storage backend by implementing ``_open``,
    ``_save`` and ``_url``.
    """

    ext = 'img'

    @property
    def forms(self):
        # Single editor tab exposing the extra HTML attributes.
        return {'HTML': DataForm}

    def _open(self, filename):
        """Open *filename* from storage; must return a file object."""
        raise NotImplementedError

    def _save(self, filename, bytes):
        """Persist *bytes* under *filename*; must return the final name."""
        raise NotImplementedError

    def _url(self, filename):
        """Return a public URL for *filename*."""
        raise NotImplementedError

    def _create_filename(self, filename, **kwargs):
        """Derive a content-addressed filename from *filename* + params.

        The basename plus the sorted kwargs (w/h/crop...) is sha1-hashed
        so each processed variant gets a unique, stable name; the hash's
        first two hex chars become a subdirectory to spread files out.
        Assumes *filename* contains at least one path separator.
        """
        name, ext = path.splitext(filename)
        dir, name = name.rsplit(path.sep, 1)
        name += ''.join(sorted(key + str(value) for key, value in six.iteritems(kwargs)))
        if six.PY3:
            # sha1 requires bytes on Python 3.
            name = name.encode('utf-8')
        name = sha1(name).hexdigest()
        subdir = name[:2]
        return path.sep.join((dir, subdir, name + ext))

    def load(self, content):
        """Deserialize stored JSON and attach the image's public URL."""
        if content:
            data = json.loads(content)
            # Add image url to loaded data
            filename = data.get('filename', None)
            if filename:
                data['url'] = self._url(filename)
            return data
        else:
            return {'filename': None, 'url': None}

    def save(self, data):
        """Crop/resize an uploaded or previously stored image and persist it.

        Returns the plugin content as a JSON string with the final
        filename and dimensions merged in.
        """
        from PIL import Image

        width = int(data.get('width') or 0)
        height = int(data.get('height') or 0)

        file = None
        upload = data.get('file')
        filename = data.get('filename')
        if upload:
            # Fresh upload: derive a content-addressed name for it.
            image = Image.open(upload)
            filename = path.sep.join(('djedi', 'img', upload.name))
            filename = self._create_filename(filename, w=width, h=height)
        elif filename:
            # Re-process an already stored image.
            file = self._open(filename)
            image = Image.open(file)
        else:
            image = None

        if image:
            # Remember original image format before processing
            image_format = image.format

            # Crop
            crop = data.get('crop')
            if crop:
                try:
                    box = tuple(int(x) for x in crop.split(','))
                    image = image.crop(box)
                except Exception:
                    pass  # TODO: Handle image crop error
                else:
                    filename = self._create_filename(filename, crop=crop)

            # Resize
            i_width, i_height = image.size
            if (width and width != i_width) or (height and height != i_height):
                # If only one target dimension was given, derive the other
                # from the aspect ratio.  (The original passed 0 straight
                # to Image.resize(), which fails and was silently
                # swallowed by the except clause below.)
                if not width:
                    width = max(1, round(i_width * height / i_height))
                if not height:
                    height = max(1, round(i_height * width / i_width))
                try:
                    # LANCZOS is the filter ANTIALIAS aliased; the
                    # ANTIALIAS alias was removed in Pillow 10.
                    image = image.resize((width, height), Image.LANCZOS)
                except Exception:
                    pass  # TODO: Handle image resize error
                else:
                    filename = self._create_filename(filename, w=width, h=height)
            else:
                width = i_width
                height = i_height

            # Write file only when processing produced a new variant.
            if filename != data.get('filename'):
                new_file = six.BytesIO()
                image.save(new_file, image_format)
                filename = self._save(filename, new_file)

        if file:
            file.close()

        content = super(ImagePluginBase, self).save(data, dumps=False)
        content.update({
            'filename': filename,
            'width': width,
            'height': height,
        })
        return json.dumps(content)

    def delete(self, data):
        # Storage-specific deletion is left to subclasses.
        raise NotImplementedError

    def render(self, data):
        """Render an <img> tag; falls back to an inline SVG placeholder."""
        attrs = {
            # Use a data URI so that the image works without hassle even if the
            # Djedi backend and frontend run on different domains. The base64
            # part was made by running:
            # $ svgo djedi/static/djedi/placeholder.svg -o - | openssl base64 | tr -d '\n'
            # 'src': '/static/djedi/placeholder.svg',
            'src': 'data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMTYwIDkwIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciPjxwYXRoIG9wYWNpdHk9Ii4yNSIgZmlsbD0iIzIwYjJhYSIgZD0iTTAgMGgxNjB2OTBIMHoiLz48L3N2Zz4K', # noqa: E501
            'width': 160,
            'height': 90
        }
        if data:
            url = data.get('url')
            if url:
                # Only override the placeholder's dimensions when a real
                # image URL is present.
                attrs['src'] = url
                width = data.get('width') or 0
                height = data.get('height') or 0
                if width and height:
                    attrs['width'] = width
                    attrs['height'] = height
            attrs['alt'] = data.get('alt') or ''
            attr_id = data.get('id')
            if attr_id:
                attrs['id'] = attr_id
            attr_class = data.get('class')
            if attr_class:
                attrs['class'] = attr_class
        # Escape every value; sorted keys give deterministic output.
        html_attrs = (u'{0}="{1}"'.format(attr, escape(attrs[attr])) for attr in sorted(attrs.keys()))
        return u'<img {0} />'.format(u' '.join(html_attrs))
class ImagePlugin(ImagePluginBase):
    """
    Image plugin extending abstract content-io image plugin to use django file storage.
    """

    @property
    def _file_storage(self):
        """Storage backend: the one configured under FILE_STORAGE in the
        plugin settings, or Django's default storage as a fallback."""
        storage = self.settings.get('FILE_STORAGE')
        if storage:
            return storage
        from django.core.files.storage import default_storage
        return default_storage

    def _open(self, filename):
        # Delegate reads to the storage backend.
        return self._file_storage.open(filename)

    def _save(self, filename, bytes):
        # Wrap the raw buffer so storage backends accept it as an upload.
        wrapped = InMemoryUploadedFile(bytes, None, filename, None, None, None)
        return self._file_storage.save(filename, wrapped)

    def _url(self, filename):
        return self._file_storage.url(filename)
| |
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import run_seq2seq_attack
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.seq2seq_mia import Seq2SeqAttackInputData
class Seq2SeqAttackInputDataTest(absltest.TestCase):
  """Validation tests for Seq2SeqAttackInputData."""

  def test_validator(self):
    """validate() must reject incomplete or malformed inputs."""
    valid_logits_train = iter([np.array([]), np.array([])])
    valid_logits_test = iter([np.array([]), np.array([])])
    valid_labels_train = iter([np.array([]), np.array([])])
    valid_labels_test = iter([np.array([]), np.array([])])

    invalid_logits_train = []
    invalid_logits_test = []
    invalid_labels_train = []
    invalid_labels_test = []

    # Any single field on its own is incomplete.
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(logits_train=valid_logits_train).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(labels_train=valid_labels_train).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(logits_test=valid_logits_test).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(labels_test=valid_labels_test).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(vocab_size=0).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(train_size=0).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(test_size=0).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData().validate()

    # Tests that both logits and labels must be set.
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(
          logits_train=valid_logits_train,
          logits_test=valid_logits_test,
          vocab_size=0,
          train_size=0,
          test_size=0).validate()
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(
          labels_train=valid_labels_train,
          labels_test=valid_labels_test,
          vocab_size=0,
          train_size=0,
          test_size=0).validate()

    # Tests that vocab, train, test sizes must all be set.
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(
          logits_train=valid_logits_train,
          logits_test=valid_logits_test,
          labels_train=valid_labels_train,
          labels_test=valid_labels_test).validate()

    # Non-iterator inputs are rejected even when sizes are present.
    with self.assertRaises(ValueError):
      Seq2SeqAttackInputData(
          logits_train=invalid_logits_train,
          logits_test=invalid_logits_test,
          labels_train=invalid_labels_train,
          labels_test=invalid_labels_test,
          vocab_size=0,
          train_size=0,
          test_size=0).validate()
def _get_batch_logits_and_labels(num_sequences, max_tokens_in_sequence,
                                 vocab_size):
  """Create one batch of ragged per-sequence logits and label arrays.

  Draws one random length (1..max_tokens_in_sequence) per sequence with a
  single np.random.choice call, keeping the global random stream in the
  same order seeded callers expect.
  """
  lengths = np.random.choice(max_tokens_in_sequence, num_sequences) + 1
  pairs = [
      _get_sequence_logits_and_labels(length, vocab_size)
      for length in lengths
  ]
  batch_logits = np.array([logits for logits, _ in pairs], dtype=object)
  batch_labels = np.array([labels for _, labels in pairs], dtype=object)
  return batch_logits, batch_labels
def _get_sequence_logits_and_labels(num_tokens, vocab_size):
sequence_logits = []
for _ in range(num_tokens):
token_logits = np.random.random(vocab_size)
token_logits /= token_logits.sum()
sequence_logits.append(token_logits)
sequence_labels = np.random.choice(vocab_size, num_tokens)
return np.array(
sequence_logits, dtype=np.float32), np.array(
sequence_labels, dtype=np.float32)
def get_seq2seq_test_input(n_train,
                           n_test,
                           max_seq_in_batch,
                           max_tokens_in_sequence,
                           vocab_size,
                           seed=None):
  """Returns example inputs for attacks on seq2seq models."""
  if seed is not None:
    np.random.seed(seed=seed)

  def _make_split(num_batches):
    # One batch per iteration with a random 1..max_seq_in_batch size;
    # random draws happen in the same order as the original loops so a
    # fixed seed yields identical data.
    logits, labels = [], []
    for _ in range(num_batches):
      batch_size = np.random.choice(max_seq_in_batch, 1)[0] + 1
      batch_logits, batch_labels = _get_batch_logits_and_labels(
          batch_size, max_tokens_in_sequence, vocab_size)
      logits.append(batch_logits)
      labels.append(batch_labels)
    return logits, labels

  logits_train, labels_train = _make_split(n_train)
  logits_test, labels_test = _make_split(n_test)

  return Seq2SeqAttackInputData(
      logits_train=iter(logits_train),
      logits_test=iter(logits_test),
      labels_train=iter(labels_train),
      labels_test=iter(labels_test),
      vocab_size=vocab_size,
      train_size=n_train,
      test_size=n_test)
class RunSeq2SeqAttackTest(absltest.TestCase):
  """End-to-end tests for run_seq2seq_attack."""

  def test_run_seq2seq_attack_size(self):
    """Exactly one attack result is produced per run."""
    result = run_seq2seq_attack(
        get_seq2seq_test_input(
            n_train=10,
            n_test=5,
            max_seq_in_batch=3,
            max_tokens_in_sequence=5,
            vocab_size=2))
    self.assertLen(result.single_attack_results, 1)

  def test_run_seq2seq_attack_trained_sets_attack_type(self):
    """The produced result is reported as a THRESHOLD_ATTACK."""
    result = run_seq2seq_attack(
        get_seq2seq_test_input(
            n_train=10,
            n_test=5,
            max_seq_in_batch=3,
            max_tokens_in_sequence=5,
            vocab_size=2))
    seq2seq_result = list(result.single_attack_results)[0]
    self.assertEqual(seq2seq_result.attack_type, AttackType.THRESHOLD_ATTACK)

  def test_run_seq2seq_attack_calculates_correct_auc(self):
    """With a fixed seed and unbalanced attacker training, the ROC AUC
    is reproducible to two decimals."""
    result = run_seq2seq_attack(
        get_seq2seq_test_input(
            n_train=20,
            n_test=10,
            max_seq_in_batch=3,
            max_tokens_in_sequence=5,
            vocab_size=3,
            seed=12345),
        balance_attacker_training=False)
    seq2seq_result = list(result.single_attack_results)[0]
    np.testing.assert_almost_equal(
        seq2seq_result.roc_curve.get_auc(), 0.59, decimal=2)

  def test_run_seq2seq_attack_calculates_correct_metadata(self):
    """Loss/accuracy metadata computed from hand-crafted batches.

    Four train and four test batches of ragged (tokens x vocab) logit
    arrays with matching per-token label arrays; expected values below
    were derived by hand from these inputs.
    """
    attack_input = Seq2SeqAttackInputData(
        logits_train=iter([
            np.array([
                np.array([[0.1, 0.1, 0.8], [0.7, 0.3, 0]], dtype=np.float32),
                np.array([[0.4, 0.5, 0.1]], dtype=np.float32)
            ],
                     dtype=object),
            np.array(
                [np.array([[0.25, 0.6, 0.15], [1, 0, 0]], dtype=np.float32)],
                dtype=object),
            np.array([
                np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
                np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
            ],
                     dtype=object),
            np.array([
                np.array([[0.9, 0, 0.1], [0.25, 0.5, 0.25]], dtype=np.float32),
                np.array([[0, 1, 0], [0.2, 0.1, 0.7]], dtype=np.float32)
            ],
                     dtype=object)
        ]),
        logits_test=iter([
            np.array([
                np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
            ],
                     dtype=object),
            np.array([
                np.array([[0.3, 0.3, 0.4], [0.4, 0.4, 0.2]], dtype=np.float32),
                np.array([[0.3, 0.35, 0.35]], dtype=np.float32)
            ],
                     dtype=object),
            np.array([
                np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
            ],
                     dtype=object),
            np.array([
                np.array([[0.25, 0.4, 0.35], [0.2, 0.4, 0.4]], dtype=np.float32)
            ],
                     dtype=object)
        ]),
        labels_train=iter([
            np.array([
                np.array([2, 0], dtype=np.float32),
                np.array([1], dtype=np.float32)
            ],
                     dtype=object),
            np.array([np.array([1, 0], dtype=np.float32)], dtype=object),
            np.array([
                np.array([0, 1], dtype=np.float32),
                np.array([1, 2], dtype=np.float32)
            ],
                     dtype=object),
            np.array([
                np.array([0, 0], dtype=np.float32),
                np.array([0, 1], dtype=np.float32)
            ],
                     dtype=object)
        ]),
        labels_test=iter([
            np.array([np.array([2, 1], dtype=np.float32)]),
            np.array([
                np.array([2, 0], dtype=np.float32),
                np.array([1], dtype=np.float32)
            ],
                     dtype=object),
            np.array([np.array([2, 1], dtype=np.float32)]),
            np.array([np.array([2, 1], dtype=np.float32)]),
        ]),
        vocab_size=3,
        train_size=4,
        test_size=4)
    result = run_seq2seq_attack(attack_input, balance_attacker_training=False)
    metadata = result.privacy_report_metadata
    np.testing.assert_almost_equal(metadata.loss_train, 0.91, decimal=2)
    np.testing.assert_almost_equal(metadata.loss_test, 1.58, decimal=2)
    np.testing.assert_almost_equal(metadata.accuracy_train, 0.77, decimal=2)
    np.testing.assert_almost_equal(metadata.accuracy_test, 0.67, decimal=2)
if __name__ == '__main__':
  # Run all test cases via absl's test runner when executed directly.
  absltest.main()
| |
#coding: utf-8
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ofx.QifConverter - translate QIF files into OFX files.
#
import datetime
import dateutil.parser
import ofx
import ofxtools
import re
import sys
import xml.sax.saxutils as sax
from decimal import *
from time import localtime, strftime
from ofx.builder import *
class QifConverter:
    """Translates QIF files into data usable for OFX generation.

    Parsing happens eagerly in the constructor: the raw QIF text is
    repaired (missing/broken type headers, stray account blocks and
    leading junk), parsed with ofxtools.QifParser, and the resulting
    transactions are cleaned in two passes (date-format sniffing, then
    cleanup/indexing by date).
    """

    def __init__(self, qif, fid="UNKNOWN", org="UNKNOWN", bankid="UNKNOWN",
                 accttype="UNKNOWN", acctid="UNKNOWN", balance="UNKNOWN",
                 curdef=None, lang="ENG", dayfirst=False, debug=False):
        """Parse *qif* (the QIF document as a string) immediately.

        :param qif: QIF file contents
        :param fid: OFX financial institution id
        :param org: OFX organization name
        :param bankid: OFX bank id
        :param accttype: account type; guessed from the statement when "UNKNOWN"
        :param acctid: account id
        :param balance: account balance, if known
        :param curdef: currency code; sniffed from transactions when None
        :param lang: OFX language code
        :param dayfirst: treat dates as day-first (DD/MM); may also be
            switched on automatically by the first cleanup pass
        :param debug: write progress notes to stderr
        """
        self.qif = qif
        self.fid = fid
        self.org = org
        self.bankid = bankid
        self.accttype = accttype
        self.acctid = acctid
        self.balance = balance
        self.curdef = curdef
        self.lang = lang
        self.debug = debug
        self.dayfirst = dayfirst
        self.parsed_qif = None

        # FIXME: Move this to one of the OFX generation classes (Document or Response).
        self.txns_by_date = {}

        # This is a list of possible transaction types embedded in the
        # QIF Payee or Memo field (depending on bank and, it seems,
        # other factors). The keys are used to match possible fields
        # that we can identify. The values are used as substitutions,
        # since banks will use their own vernacular (like "DBT"
        # instead of "DEBIT") for some transaction types. All of the
        # types in the values column (except "ACH", which is given
        # special treatment) are OFX-2.0 standard transaction types;
        # the keys are not all standard. To add a new translation,
        # find the QIF name for the transaction type, and add it to
        # the keys column, then add the appropriate value from the
        # OFX-2.0 spec (see page 180 of doc/ofx/ofx-2.0/ofx20.pdf).
        # The substitution will be made if either the payee or memo
        # field begins with one of the keys followed by a "/", OR if
        # the payee or memo field exactly matches a key.
        self.txn_types = { "ACH"         : "ACH",
                           "CHECK CARD"  : "POS",
                           "CREDIT"      : "CREDIT",
                           "DBT"         : "DEBIT",
                           "DEBIT"       : "DEBIT",
                           "INT"         : "INT",
                           "DIV"         : "DIV",
                           "FEE"         : "FEE",
                           "SRVCHG"      : "SRVCHG",
                           "DEP"         : "DEP",
                           "DEPOSIT"     : "DEP",
                           "ATM"         : "ATM",
                           "POS"         : "POS",
                           "XFER"        : "XFER",
                           "CHECK"       : "CHECK",
                           "Checks"      : "CHECK",
                           "PAYMENT"     : "PAYMENT",
                           "CASH"        : "CASH",
                           "DIRECTDEP"   : "DIRECTDEP",
                           "DIRECTDEBIT" : "DIRECTDEBIT",
                           "REPEATPMT"   : "REPEATPMT",
                           "OTHER"       : "OTHER" }

        # Some joker British bank starts QIF with a single bang and nothing
        # else.
        if re.match("!\n", self.qif) is not None:
            if self.debug: sys.stderr.write("Fixing typeless bang header.\n")
            self.qif = self.qif.replace("!", "!Type:Bank", 1)

        # Chase does not provide a Type header, so force one in the
        # case where it is omitted.  (== None replaced by the idiomatic
        # "is None"; identical behavior for match objects/None.)
        if re.search("!Type:", self.qif, re.IGNORECASE) is None:
            if self.debug: sys.stderr.write("Forcing bank type header.\n")
            self.qif = "!Type:Bank\n" + self.qif

        # Discard any !Account block; account details come from the
        # constructor arguments instead.
        acctblock = re.search(r"(!Account.*?\^\s*)", self.qif,
                              re.DOTALL | re.IGNORECASE | re.MULTILINE)
        if acctblock is not None:
            block = acctblock.group(1)
            if self.debug:
                sys.stderr.write("Discarding account block from QIF file:\n%s" % block)
            self.qif = self.qif.replace(block, '', 1)

        # Some other personal finance program puts out a spurious transaction
        # showing current balance, but not as a balance -- instead as a
        # transaction before the type header. And, there are a bunch of other
        # cases where crap before the type header is messing us up right now.
        # So, this is an awfully big hammer but one that at least lets people
        # import from other finance programs and broken banks.
        straycrap = re.search(r"^(.*?)!Type", self.qif,
                              re.DOTALL | re.IGNORECASE | re.MULTILINE)
        if straycrap is not None:
            crap = straycrap.group(1)
            if len(crap) > 0:
                if self.debug:
                    sys.stderr.write("Discarding stray crap from beginning of QIF file:\n%s" % crap)
                self.qif = self.qif.replace(crap, '', 1)

        if self.debug: sys.stderr.write("Parsing document.\n")
        parser = ofxtools.QifParser(debug=debug)
        self.parsed_qif = parser.parse(self.qif)

        if self.debug: sys.stderr.write("Cleaning transactions.\n")

        # We do a two-pass conversion in order to check the dates of all
        # transactions in the statement, and convert all the dates using
        # the same date format. The first pass does nothing but look
        # at dates; the second actually applies the date conversion and
        # all other conversions, and extracts information needed for
        # the final output (like date range).
        txn_list = self._extract_txn_list(self.parsed_qif)
        self._guess_formats(txn_list)
        self._clean_txn_list(txn_list)
def _extract_txn_list(self, qif):
stmt_obj = qif.asDict()["QifStatement"]
if self.accttype == "UNKNOWN":
if "BankTransactions" in stmt_obj:
self.accttype = "CHECKING"
elif "CreditCardTransactions" in stmt_obj:
self.accttype = "CREDITCARD"
txn_list = []
for stmt in stmt_obj:
for txn in stmt:
txn_list.append(txn)
return txn_list
#
# Date methods
#
def _guess_formats(self, txn_list):
    """First pass over the transactions: sniff date format and currency.

    If any transaction date can only be day-first (its first number is
    in the 13..31 range), the whole statement is switched to day-first
    parsing.  This is a guess: it only works for UK-style statements
    that actually contain such a day.  (We could also test whether
    dates appear out of order, or whether gaps between transactions are
    especially long, if this method proves unreliable.)
    """
    for txn_obj in txn_list:
        fields = txn_obj.asDict()
        # Look for date format.
        parsed = self._parse_date(fields.get("Date", "UNKNOWN"))
        self._check_date_format(parsed)
        # Look for currency format: adopt EUR when the statement says
        # so and no explicit curdef was configured.
        if self.curdef is None and fields.get("Currency", "UNKNOWN") == '^EUR':
            self.curdef = 'EUR'
def _parse_date(self, txn_date, dayfirst=False):
    """Parse a QIF date string into a datetime object.

    Assumes QIF dates never carry a timestamp, just the date.  The
    string "UNKNOWN" is passed through unchanged.

    :raises ValueError: when the date cannot be parsed in any known way
    """
    if txn_date == "UNKNOWN":
        return "UNKNOWN"
    try:
        return dateutil.parser.parse(txn_date, dayfirst=dayfirst)
    except ValueError:
        pass
    # dateutil.parser doesn't recognize dates of the format "MMDDYYYY",
    # though it does recognize "MM/DD/YYYY".  So, if parsing failed
    # above, try shoving in some slashes and see if that parses.
    if len(txn_date) == 8:
        # The bare "except:" that used to guard this fallback swallowed
        # every exception; only the expected parse failures are caught
        # now (int() raises ValueError on non-digits, dateutil raises
        # ValueError/OverflowError on unparsable input).
        try:
            int(txn_date)  # succeeds only if all 8 characters are digits
            slashified = "%s/%s/%s" % (txn_date[0:2],
                                       txn_date[2:4],
                                       txn_date[4:])
            return dateutil.parser.parse(slashified, dayfirst=dayfirst)
        except (ValueError, OverflowError):
            pass
    # If we've made it this far, our guesses have failed.
    raise ValueError("Unrecognized date format: '%s'." % txn_date)
def _check_date_format(self, parsed_date):
# If we *ever* find a date that parses as dayfirst, treat
# *all* transactions in this statement as dayfirst.
if parsed_date is not None and parsed_date != "UNKNOWN" and parsed_date.microsecond == 3:
self.dayfirst = True
#
# Cleanup methods
#
def _clean_txn_list(self, txn_list):
for txn_obj in txn_list:
try:
txn = self._clean_txn(txn_obj)
txn_date = txn["Date"]
txn_date_list = self.txns_by_date.get(txn_date, [])
txn_date_list.append(txn)
self.txns_by_date[txn_date] = txn_date_list
except ValueError:
# The _clean_txn method will sometimes find transactions
# that are inherently unclean and are unable to be purified.
# In these cases it will reject the transaction by throwing
# a ValueError, which signals us not to store the transaction.
if self.debug: sys.stderr.write("Skipping transaction '%s'." %
str(txn_obj.asDict()))
if len(txn_list) > 0:
# Sort the dates (in YYYYMMDD format) and choose the lowest
# date as our start date, and the highest date as our end
# date.
date_list = self.txns_by_date.keys()
date_list.sort()
self.start_date = date_list[0]
self.end_date = date_list[-1]
else:
# If we didn't get any transactions (which actually happens
# quite a lot -- QIF statements are often just the type header,
# presumably since there was no activity in the downloaded
# statement), just assume that the start and end date were
# both today.
self.start_date = strftime("%Y%m%d", localtime())
self.end_date = self.start_date
def _clean_txn(self, txn_obj):
# This is sort of the brute-force method of the converter. It
# looks at the data we get from the bank and tries as hard as
# possible to make best-effort guesses about what the OFX 2.0
# standard values for the transaction should be. There's a
# reasonable amount of guesswork in here -- some of it wise,
# maybe some of it not. If the cleanup method determines that
# the txn_obj shouldn't be in the data, it will throw a ValueError.
# Otherwise, it will return a transaction cleaned to the best
# of our abilities.
txn = txn_obj.asDict()
self._clean_txn_date(txn)
self._clean_txn_amount(txn)
self._clean_txn_number(txn)
self._clean_txn_type(txn)
self._clean_txn_payee(txn)
return txn
def _clean_txn_date(self, txn):
txn_date = txn.get("Date", "UNKNOWN").strip()
if txn_date != "UNKNOWN":
parsed_date = self._parse_date(txn_date, dayfirst=self.dayfirst)
txn["Date"] = parsed_date.strftime("%Y%m%d")
else:
txn["Date"] = "UNKNOWN"
def _clean_txn_amount(self, txn):
txn_amount = txn.get("Amount", "00.00")
txn_amount2 = txn.get("Amount2", "00.00")
# Home Depot Credit Card seems to send two transaction records for each
# transaction. They're out of order (that is, the second record is not
# directly after the first, nor even necessarily after it at all), and
# the second one *sometimes* appears to be a memo field on the first one
# (e.g., a credit card payment will show up with an amount and date, and
# then the next transaction will have the same date and a payee that
# reads, "Thank you for your payment!"), and *sometimes* is the real
# payee (e.g., the first will say "Home Depot" and the second will say
# "Seasonal/Garden"). One of the two transaction records will have a
# transaction amount of "-", and the other will have the real
# transaction amount. Ideally, we would pull out the memo and attach it
# to the right transaction, but unless the two transactions are the only
# transactions on that date, there doesn't seem to be a good clue (order
# in statement, amount, etc.) as to how to associate them. So, instead,
# we're throwing a ValueError, which means this transaction should be removed
# from the statement and not displayed to the user. The result is that
# for Home Depot cards, sometimes we lose the memo (which isn't that big
# a deal), and sometimes we make the memo into the payee (which sucks).
if txn_amount == "-" or txn_amount == " ":
raise ValueError("Transaction amount is undefined.")
# Some QIF sources put the amount in Amount2 instead, for unknown
# reasons. Here we ignore Amount2 unless Amount is unknown.
if txn_amount == "00.00":
txn_amount = txn_amount2
# Okay, now strip out whitespace padding.
txn_amount = txn_amount.strip()
# Some QIF files have dollar signs in the amount. Hey, why not?
txn_amount = txn_amount.replace('$', '', 1)
# Some QIF files (usually from non-US banks) put the minus sign at
# the end of the amount, rather than at the beginning. Let's fix that.
if txn_amount[-1] == "-":
txn_amount = "-" + txn_amount[:-1]
# Some QIF sources put three digits after the decimal, and the Ruby
# code thinks that means we're in Europe. So.....let's deal with
# that now.
try:
txn_amount = str(Decimal(txn_amount).quantize(Decimal('.01')))
except:
# Just keep truckin'.
pass
txn["Amount"] = txn_amount
    def _clean_txn_number(self, txn):
        """Normalize or remove txn["Number"] based on per-bank quirks.

        Deletes clearly bogus check numbers, promotes real check numbers
        to a CHECK transaction type, and recovers check numbers that some
        banks stash in the payee field. Branch order below matters: each
        elif assumes the earlier, more specific quirks were ruled out.
        """
        txn_number = txn.get("Number", "UNKNOWN").strip()
        txn_payee = txn.get("Payee", "UNKNOWN").strip()
        # Clean up bad check number behavior
        all_digits = re.compile("\d+")
        if txn_number == "N/A":
            # Get rid of brain-dead Chase check number "N/A"s
            del txn["Number"]
        elif txn_number.startswith("XXXX-XXXX-XXXX"):
            # Home Depot credit cards throw THE CREDIT CARD NUMBER
            # into the check number field. Oy! At least they mask
            # the first twelve digits, so we know they're insane.
            del txn["Number"]
        elif txn_number != "UNKNOWN" and self.accttype == "CREDITCARD":
            # Several other credit card companies (MBNA, CapitalOne)
            # seem to use the number field as a transaction ID. Get
            # rid of this.
            del txn["Number"]
        elif txn_number == "0000000000" and self.accttype != "CREDITCARD":
            # There's some bank that puts "N0000000000" in every non-check
            # transaction. (They do use normal check numbers for checks.)
            del txn["Number"]
        elif txn_number != "UNKNOWN" and all_digits.search(txn_number):
            # Washington Mutual doesn't indicate a CHECK transaction
            # when a check number is present.
            txn["Type"] = "CHECK"
        elif txn_payee.startswith("CHECK # "):
            # USAA QIF export sends blank number fields but has the check
            # number in the payee field instead padded with leading zeros
            number = re.search("^CHECK # (\d+)", txn_payee)
            if number is not None:
                txn["Number"] = number.group(1).lstrip('0')
def _clean_txn_type(self, txn):
txn_type = "UNKNOWN"
txn_amount = txn.get("Amount", "UNKNOWN")
txn_payee = txn.get("Payee", "UNKNOWN")
txn_memo = txn.get("Memo", "UNKNOWN")
txn_number = txn.get("Number", "UNKNOWN")
txn_sign = self._txn_sign(txn_amount)
# Try to figure out the transaction type from the Payee or
# Memo field.
for typestr in self.txn_types.keys():
if txn_number == typestr:
# US Bank sends "DEBIT" or "CREDIT" as a check number
# on credit card transactions.
txn["Type"] = self.txn_types[typestr]
del txn["Number"]
break
elif txn_payee.startswith(typestr + "/") or \
txn_memo.startswith(typestr + "/") or \
txn_memo == typestr or txn_payee == typestr:
if typestr == "ACH" and txn_sign == "credit":
txn["Type"] = "DIRECTDEP"
elif typestr == "ACH" and txn_sign == "debit":
txn["Type"] = "DIRECTDEBIT"
else:
txn["Type"] = self.txn_types[typestr]
break
def _clean_txn_payee(self, txn):
txn_payee = txn.get("Payee", "UNKNOWN")
txn_memo = txn.get("Memo", "UNKNOWN")
txn_number = txn.get("Number", "UNKNOWN")
txn_type = txn.get("Type", "UNKNOWN")
txn_amount = txn.get("Amount", "UNKNOWN")
txn_sign = self._txn_sign(txn_amount)
# Try to fill in the payee field with some meaningful value.
if txn_payee == "UNKNOWN":
if txn_number != "UNKNOWN" and (self.accttype == "CHECKING" or
self.accttype == "SAVINGS"):
txn["Payee"] = "Check #%s" % txn_number
txn["Type"] = "CHECK"
elif txn_type == "INT" and txn_sign == "debit":
txn["Payee"] = "Interest paid"
elif txn_type == "INT" and txn_sign == "credit":
txn["Payee"] = "Interest earned"
elif txn_type == "ATM" and txn_sign == "debit":
txn["Payee"] = "ATM Withdrawal"
elif txn_type == "ATM" and txn_sign == "credit":
txn["Payee"] = "ATM Deposit"
elif txn_type == "POS" and txn_sign == "debit":
txn["Payee"] = "Point of Sale Payment"
elif txn_type == "POS" and txn_sign == "credit":
txn["Payee"] = "Point of Sale Credit"
elif txn_memo != "UNKNOWN":
txn["Payee"] = txn_memo
# Down here, we have no payee, no memo, no check number,
# and no type. Who knows what this stuff is.
elif txn_type == "UNKNOWN" and txn_sign == "debit":
txn["Payee"] = "Other Debit"
txn["Type"] = "DEBIT"
elif txn_type == "UNKNOWN" and txn_sign == "credit":
txn["Payee"] = "Other Credit"
txn["Type"] = "CREDIT"
# Make sure the transaction type has some valid value.
if not txn.has_key("Type") and txn_sign == "debit":
txn["Type"] = "DEBIT"
elif not txn.has_key("Type") and txn_sign == "credit":
txn["Type"] = "CREDIT"
def _txn_sign(self, txn_amount):
# Is this a credit or a debit?
if txn_amount.startswith("-"):
return "debit"
else:
return "credit"
#
# Conversion methods
#
def to_ofx102(self):
if self.debug: sys.stderr.write("Making OFX/1.02.\n")
return DOCUMENT(self._ofx_header(),
OFX(self._ofx_signon(),
self._ofx_stmt()))
def to_xml(self):
ofx102 = self.to_ofx102()
if self.debug:
sys.stderr.write(ofx102 + "\n")
sys.stderr.write("Parsing OFX/1.02.\n")
response = ofx.Response(ofx102) #, debug=self.debug)
if self.debug: sys.stderr.write("Making OFX/2.0.\n")
if self.dayfirst:
date_format = "DD/MM/YY"
else:
date_format = "MM/DD/YY"
xml = response.as_xml(original_format="QIF", date_format=date_format)
return xml
# FIXME: Move the remaining methods to ofx.Document or ofx.Response.
def _ofx_header(self):
return HEADER(
OFXHEADER("100"),
DATA("OFXSGML"),
VERSION("102"),
SECURITY("NONE"),
ENCODING("USASCII"),
CHARSET("1252"),
COMPRESSION("NONE"),
OLDFILEUID("NONE"),
NEWFILEUID("NONE"))
def _ofx_signon(self):
return SIGNONMSGSRSV1(
SONRS(
STATUS(
CODE("0"),
SEVERITY("INFO"),
MESSAGE("SUCCESS")),
DTSERVER(self.end_date),
LANGUAGE(self.lang),
FI(
ORG(self.org),
FID(self.fid))))
    def _ofx_stmt(self):
        """Build the statement-response aggregate for this account.

        Credit card accounts are wrapped in CREDITCARDMSGSRSV1/CCSTMTTRNRS;
        every other account type is reported as a bank statement.
        """
        # Set default currency here, instead of on init, so that the caller
        # can override the currency format found in the QIF file if desired.
        # See also _guess_formats(), above.
        if self.curdef is None:
            curdef = "USD"
        else:
            curdef = self.curdef
        if self.accttype == "CREDITCARD":
            return CREDITCARDMSGSRSV1(
                CCSTMTTRNRS(
                    TRNUID("0"),
                    self._ofx_status(),
                    CCSTMTRS(
                        CURDEF(curdef),
                        CCACCTFROM(
                            ACCTID(self.acctid)),
                        self._ofx_txns(),
                        self._ofx_ledgerbal(),
                        self._ofx_availbal())))
        else:
            return BANKMSGSRSV1(
                STMTTRNRS(
                    TRNUID("0"),
                    self._ofx_status(),
                    STMTRS(
                        CURDEF(curdef),
                        BANKACCTFROM(
                            BANKID(self.bankid),
                            ACCTID(self.acctid),
                            ACCTTYPE(self.accttype)),
                        self._ofx_txns(),
                        self._ofx_ledgerbal(),
                        self._ofx_availbal())))
def _ofx_status(self):
return STATUS(
CODE("0"),
SEVERITY("INFO"),
MESSAGE("SUCCESS"))
def _ofx_ledgerbal(self):
return LEDGERBAL(
BALAMT(self.balance),
DTASOF(self.end_date))
def _ofx_availbal(self):
return AVAILBAL(
BALAMT(self.balance),
DTASOF(self.end_date))
def _ofx_txns(self):
txns = ""
# OFX transactions appear most recent first, and oldest last,
# so we do a reverse sort of the dates in this statement.
date_list = self.txns_by_date.keys()
date_list.sort()
date_list.reverse()
for date in date_list:
txn_list = self.txns_by_date[date]
txn_index = len(txn_list)
for txn in txn_list:
txn_date = txn.get("Date", "UNKNOWN")
txn_amt = txn.get("Amount", "00.00")
# Make a synthetic transaction ID using as many
# uniqueness guarantors as possible.
txn["ID"] = "%s-%s-%s-%s-%s" % (self.org, self.accttype,
txn_date, txn_index,
txn_amt)
txns += self._ofx_txn(txn)
txn_index -= 1
# FIXME: This should respect the type of statement being generated.
return BANKTRANLIST(
DTSTART(self.start_date),
DTEND(self.end_date),
txns)
def _ofx_txn(self, txn):
fields = []
if self._check_field("Type", txn):
fields.append(TRNTYPE(txn["Type"].strip()))
if self._check_field("Date", txn):
fields.append(DTPOSTED(txn["Date"].strip()))
if self._check_field("Amount", txn):
fields.append(TRNAMT(txn["Amount"].strip()))
if self._check_field("Number", txn):
fields.append(CHECKNUM(txn["Number"].strip()))
if self._check_field("ID", txn):
fields.append(FITID(txn["ID"].strip()))
if self._check_field("Payee", txn):
fields.append(NAME(sax.escape(sax.unescape(txn["Payee"].strip()))))
if self._check_field("Memo", txn):
fields.append(MEMO(sax.escape(sax.unescape(txn["Memo"].strip()))))
return STMTTRN(*fields)
def _check_field(self, key, txn):
return txn.has_key(key) and txn[key].strip() != ""
| |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Compiler Test."""
import unittest
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit import execute
from qiskit.compiler import transpile, assemble
from qiskit.test import QiskitTestCase, Path
from qiskit.test.mock import FakeRueschlikon, FakeTenerife
from qiskit.qobj import QasmQobj
class TestCompiler(QiskitTestCase):
    """Qiskit Compiler Tests."""
    def setUp(self):
        """Fix the simulator seed and BasicAer backend shared by the tests."""
        self.seed_simulator = 42
        self.backend = BasicAer.get_backend("qasm_simulator")
    def test_example_multiple_compile(self):
        """Test a toy example compiling multiple circuits.
        Pass if the results are correct.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        coupling_map = [[0, 1], [0, 2],
                        [1, 2],
                        [3, 2], [3, 4],
                        [4, 2]]
        qr = QuantumRegister(5)
        cr = ClassicalRegister(5)
        bell = QuantumCircuit(qr, cr)
        ghz = QuantumCircuit(qr, cr)
        # Create a GHZ state
        ghz.h(qr[0])
        for i in range(4):
            ghz.cx(qr[i], qr[i + 1])
        # Insert a barrier before measurement
        ghz.barrier()
        # Measure all of the qubits in the standard basis
        for i in range(5):
            ghz.measure(qr[i], cr[i])
        # Create a Bell state
        bell.h(qr[0])
        bell.cx(qr[0], qr[1])
        bell.barrier()
        bell.measure(qr[0], cr[0])
        bell.measure(qr[1], cr[1])
        shots = 2048
        bell_backend = transpile(bell, backend=backend)
        ghz_backend = transpile(ghz, backend=backend,
                                coupling_map=coupling_map)
        bell_qobj = assemble(bell_backend, shots=shots,
                             seed_simulator=10)
        ghz_qobj = assemble(ghz_backend, shots=shots,
                            seed_simulator=10)
        bell_result = backend.run(bell_qobj).result()
        ghz_result = backend.run(ghz_qobj).result()
        # Allow up to 5% of shots of deviation from the ideal 50/50 split.
        threshold = 0.05 * shots
        counts_bell = bell_result.get_counts()
        target_bell = {'00000': shots / 2, '00011': shots / 2}
        self.assertDictAlmostEqual(counts_bell, target_bell, threshold)
        counts_ghz = ghz_result.get_counts()
        target_ghz = {'00000': shots / 2, '11111': shots / 2}
        self.assertDictAlmostEqual(counts_ghz, target_ghz, threshold)
    def test_compile_coupling_map(self):
        """Test compile_coupling_map.
        If all correct should return data with the same stats. The circuit may
        be different.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        qr = QuantumRegister(3, 'qr')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr, cr, name='qccccccc')
        qc.h(qr[0])
        qc.cx(qr[0], qr[1])
        qc.cx(qr[0], qr[2])
        qc.measure(qr[0], cr[0])
        qc.measure(qr[1], cr[1])
        qc.measure(qr[2], cr[2])
        shots = 2048
        coupling_map = [[0, 1], [1, 2]]
        initial_layout = [0, 1, 2]
        qc_b = transpile(qc, backend=backend,
                         coupling_map=coupling_map,
                         initial_layout=initial_layout)
        qobj = assemble(qc_b, shots=shots, seed_simulator=88)
        job = backend.run(qobj)
        result = job.result()
        # NOTE(review): this asserts on the QASM text of the *original*
        # circuit qc, not the transpiled qc_b -- confirm the magic length
        # 173 is intended as a regression pin on the input circuit.
        qasm_to_check = qc.qasm()
        self.assertEqual(len(qasm_to_check), 173)
        counts = result.get_counts(qc)
        target = {'000': shots / 2, '111': shots / 2}
        threshold = 0.05 * shots
        self.assertDictAlmostEqual(counts, target, threshold)
    def test_example_swap_bits(self):
        """Test a toy example swapping a set bit around.
        Uses the mapper. Pass if results are correct.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        coupling_map = [[0, 1], [0, 8], [1, 2], [1, 9], [2, 3], [2, 10],
                        [3, 4], [3, 11], [4, 5], [4, 12], [5, 6], [5, 13],
                        [6, 7], [6, 14], [7, 15], [8, 9], [9, 10], [10, 11],
                        [11, 12], [12, 13], [13, 14], [14, 15]]
        n = 3  # make this at least 3
        qr0 = QuantumRegister(n)
        qr1 = QuantumRegister(n)
        ans = ClassicalRegister(2 * n)
        qc = QuantumCircuit(qr0, qr1, ans)
        # Set the first bit of qr0
        qc.x(qr0[0])
        # Swap the set bit
        qc.swap(qr0[0], qr0[n - 1])
        qc.swap(qr0[n - 1], qr1[n - 1])
        qc.swap(qr1[n - 1], qr0[1])
        qc.swap(qr0[1], qr1[1])
        # Insert a barrier before measurement
        qc.barrier()
        # Measure all of the qubits in the standard basis
        for j in range(n):
            qc.measure(qr0[j], ans[j])
            qc.measure(qr1[j], ans[j + n])
        # First version: no mapping
        result = execute(qc, backend=backend,
                         coupling_map=None, shots=1024,
                         seed_simulator=14).result()
        self.assertEqual(result.get_counts(qc), {'010000': 1024})
        # Second version: map to coupling graph
        result = execute(qc, backend=backend,
                         coupling_map=coupling_map, shots=1024,
                         seed_simulator=14).result()
        self.assertEqual(result.get_counts(qc), {'010000': 1024})
    def test_parallel_compile(self):
        """Trigger parallel routines in compile.
        """
        backend = FakeRueschlikon()
        qr = QuantumRegister(16)
        cr = ClassicalRegister(2)
        qc = QuantumCircuit(qr, cr)
        qc.h(qr[0])
        for k in range(1, 15):
            qc.cx(qr[0], qr[k])
        qc.measure(qr[5], cr[0])
        # Ten copies of the same circuit are transpiled in one call.
        qlist = [qc for k in range(10)]
        qobj = assemble(transpile(qlist, backend=backend))
        self.assertEqual(len(qobj.experiments), 10)
    def test_compile_single_qubit(self):
        """ Compile a single-qubit circuit in a non-trivial layout
        """
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr[0])
        # Place the lone qubit on physical qubit 12.
        layout = {qr[0]: 12}
        cmap = [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], [5, 9], [6, 8], [7, 8],
                [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, 2], [13, 1], [13, 12]]
        circuit2 = transpile(circuit, backend=None, coupling_map=cmap, basis_gates=['u2'],
                             initial_layout=layout)
        qobj = assemble(circuit2)
        compiled_instruction = qobj.experiments[0].instructions[0]
        self.assertEqual(compiled_instruction.name, 'u2')
        self.assertEqual(compiled_instruction.qubits, [12])
        self.assertEqual(str(compiled_instruction.params), str([0, 3.14159265358979]))
    def test_compile_pass_manager(self):
        """Test compile with and without an empty pass manager."""
        qr = QuantumRegister(2)
        cr = ClassicalRegister(2)
        qc = QuantumCircuit(qr, cr)
        qc.u1(3.14, qr[0])
        qc.u2(3.14, 1.57, qr[0])
        qc.barrier(qr)
        qc.measure(qr, cr)
        backend = BasicAer.get_backend('qasm_simulator')
        qrtrue = assemble(transpile(qc, backend, seed_transpiler=8),
                          seed_simulator=42)
        rtrue = backend.run(qrtrue).result()
        # Same circuit with an explicit empty PassManager; counts must agree.
        qrfalse = assemble(transpile(qc, backend, seed_transpiler=8,
                                     pass_manager=PassManager()),
                           seed_simulator=42)
        rfalse = backend.run(qrfalse).result()
        self.assertEqual(rtrue.get_counts(), rfalse.get_counts())
    def test_mapper_overoptimization(self):
        """Check mapper overoptimization.
        The mapper should not change the semantics of the input.
        An overoptimization introduced issue #81:
        https://github.com/Qiskit/qiskit-terra/issues/81
        """
        # -X-.-----
        # -Y-+-S-.-
        # -Z-.-T-+-
        # ---+-H---
        qr = QuantumRegister(4)
        cr = ClassicalRegister(4)
        circ = QuantumCircuit(qr, cr)
        circ.x(qr[0])
        circ.y(qr[1])
        circ.z(qr[2])
        circ.cx(qr[0], qr[1])
        circ.cx(qr[2], qr[3])
        circ.s(qr[1])
        circ.t(qr[2])
        circ.h(qr[3])
        circ.cx(qr[1], qr[2])
        circ.measure(qr[0], cr[0])
        circ.measure(qr[1], cr[1])
        circ.measure(qr[2], cr[2])
        circ.measure(qr[3], cr[3])
        coupling_map = [[0, 2], [1, 2], [2, 3]]
        shots = 1000
        # Run the same circuit with and without a coupling map; the counts
        # must agree within 2% of shots.
        result1 = execute(circ, backend=self.backend,
                          coupling_map=coupling_map,
                          seed_simulator=self.seed_simulator,
                          seed_transpiler=8,
                          shots=shots)
        count1 = result1.result().get_counts()
        result2 = execute(circ, backend=self.backend,
                          coupling_map=None,
                          seed_simulator=self.seed_simulator,
                          seed_transpiler=8, shots=shots)
        count2 = result2.result().get_counts()
        self.assertDictAlmostEqual(count1, count2, shots * 0.02)
    def test_grovers_circuit(self):
        """Testing a circuit originated in the Grover algorithm"""
        shots = 1000
        coupling_map = None
        # 6-qubit grovers
        qr = QuantumRegister(6)
        cr = ClassicalRegister(2)
        circuit = QuantumCircuit(qr, cr, name='grovers')
        circuit.h(qr[0])
        circuit.h(qr[1])
        circuit.x(qr[2])
        circuit.x(qr[3])
        circuit.x(qr[0])
        circuit.cx(qr[0], qr[2])
        circuit.x(qr[0])
        circuit.cx(qr[1], qr[3])
        circuit.ccx(qr[2], qr[3], qr[4])
        circuit.cx(qr[1], qr[3])
        circuit.x(qr[0])
        circuit.cx(qr[0], qr[2])
        circuit.x(qr[0])
        circuit.x(qr[1])
        circuit.x(qr[4])
        circuit.h(qr[4])
        circuit.ccx(qr[0], qr[1], qr[4])
        circuit.h(qr[4])
        circuit.x(qr[0])
        circuit.x(qr[1])
        circuit.x(qr[4])
        circuit.h(qr[0])
        circuit.h(qr[1])
        circuit.h(qr[4])
        circuit.barrier(qr)
        circuit.measure(qr[0], cr[0])
        circuit.measure(qr[1], cr[1])
        result = execute(circuit, backend=self.backend,
                         coupling_map=coupling_map,
                         seed_simulator=self.seed_simulator, shots=shots)
        counts = result.result().get_counts()
        expected_probs = {'00': 0.64,
                          '01': 0.117,
                          '10': 0.113,
                          '11': 0.13}
        target = {key: shots * val for key, val in expected_probs.items()}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)
    def test_math_domain_error(self):
        """Check for floating point errors.
        The math library operates over floats and introduces floating point
        errors that should be avoided.
        See: https://github.com/Qiskit/qiskit-terra/issues/111
        """
        qr = QuantumRegister(4)
        cr = ClassicalRegister(4)
        circ = QuantumCircuit(qr, cr)
        circ.y(qr[0])
        circ.z(qr[2])
        circ.h(qr[2])
        circ.cx(qr[1], qr[0])
        circ.y(qr[2])
        circ.t(qr[2])
        circ.z(qr[2])
        circ.cx(qr[1], qr[2])
        circ.measure(qr[0], cr[0])
        circ.measure(qr[1], cr[1])
        circ.measure(qr[2], cr[2])
        circ.measure(qr[3], cr[3])
        coupling_map = [[0, 2], [1, 2], [2, 3]]
        shots = 2000
        job = execute(circ, backend=self.backend,
                      coupling_map=coupling_map,
                      seed_simulator=self.seed_simulator, shots=shots)
        counts = job.result().get_counts()
        target = {'0001': shots / 2, '0101': shots / 2}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)
    def test_random_parameter_circuit(self):
        """Run a circuit with randomly generated parameters."""
        circ = QuantumCircuit.from_qasm_file(
            self._get_resource_path('random_n5_d5.qasm', Path.QASMS))
        coupling_map = [[0, 1], [1, 2], [2, 3], [3, 4]]
        shots = 1024
        qobj = execute(circ, backend=self.backend,
                       coupling_map=coupling_map, shots=shots,
                       seed_simulator=self.seed_simulator)
        counts = qobj.result().get_counts()
        # Reference distribution pinned for the fixed simulator seed.
        expected_probs = {
            '00000': 0.079239867254200971,
            '00001': 0.032859032998526903,
            '00010': 0.10752610993531816,
            '00011': 0.018818532050952699,
            '00100': 0.054830807251011054,
            '00101': 0.0034141983951965164,
            '00110': 0.041649309748902276,
            '00111': 0.039967731207338125,
            '01000': 0.10516937819949743,
            '01001': 0.026635620063700002,
            '01010': 0.0053475143548793866,
            '01011': 0.01940513314416064,
            '01100': 0.0044028405481225047,
            '01101': 0.057524760052126644,
            '01110': 0.010795354134597078,
            '01111': 0.026491296821535528,
            '10000': 0.094827455395274859,
            '10001': 0.0008373965072688836,
            '10010': 0.029082297894094441,
            '10011': 0.012386622870598416,
            '10100': 0.018739140061148799,
            '10101': 0.01367656456536896,
            '10110': 0.039184170706009248,
            '10111': 0.062339335178438288,
            '11000': 0.00293674365989009,
            '11001': 0.012848433960739968,
            '11010': 0.018472497159499782,
            '11011': 0.0088903691234912003,
            '11100': 0.031305389080034329,
            '11101': 0.0004788556283690458,
            '11110': 0.002232419390471667,
            '11111': 0.017684822659235985
        }
        target = {key: shots * val for key, val in expected_probs.items()}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)
    def test_yzy_zyz_cases(self):
        """yzy_to_zyz works in previously failed cases.
        See: https://github.com/Qiskit/qiskit-terra/issues/607
        """
        backend = FakeTenerife()
        qr = QuantumRegister(2)
        circ1 = QuantumCircuit(qr)
        circ1.cx(qr[0], qr[1])
        circ1.rz(0.7, qr[1])
        circ1.rx(1.570796, qr[1])
        qobj1 = assemble(transpile(circ1, backend))
        self.assertIsInstance(qobj1, QasmQobj)
        circ2 = QuantumCircuit(qr)
        circ2.y(qr[0])
        circ2.h(qr[0])
        circ2.s(qr[0])
        circ2.h(qr[0])
        qobj2 = assemble(transpile(circ2, backend))
        self.assertIsInstance(qobj2, QasmQobj)
# Allow running this test module directly, outside the full test suite.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
import os
import unittest
from django.test import TestCase
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.diffviewer.templatetags.difftags import highlightregion
import reviewboard.diffviewer.diffutils as diffutils
import reviewboard.diffviewer.parser as diffparser
from reviewboard.scmtools.models import Repository
class MyersDifferTest(TestCase):
    """Unit tests for diffutils.MyersDiffer's opcode generation."""

    def testDiff(self):
        """Testing myers differ"""
        # Identical inputs: a single "equal" opcode spanning everything.
        self.__test_diff(["1", "2", "3"],
                         ["1", "2", "3"],
                         [("equal", 0, 3, 0, 3), ])
        # Deleting every element.
        self.__test_diff(["1", "2", "3"],
                         [],
                         [("delete", 0, 3, 0, 0), ])
        # String inputs are diffed by character (offsets count characters).
        self.__test_diff("1\n2\n3\n",
                         "0\n1\n2\n3\n",
                         [("insert", 0, 0, 0, 2),
                          ("equal", 0, 6, 2, 8)])
        self.__test_diff("1\n2\n3\n7\n",
                         "1\n2\n4\n5\n6\n7\n",
                         [("equal", 0, 4, 0, 4),
                          ("replace", 4, 5, 4, 5),
                          ("insert", 5, 5, 5, 9),
                          ("equal", 5, 8, 9, 12)])

    def __test_diff(self, a, b, expected):
        """Assert that diffing a against b yields the expected opcodes."""
        opcodes = list(diffutils.MyersDiffer(a, b).get_opcodes())
        # assertEqual rather than the deprecated assertEquals alias, for
        # consistency with the other test classes in this file.
        self.assertEqual(opcodes, expected)
class InterestingLinesTest(TestCase):
    """Tests for the per-language "interesting lines" scanners.

    Each test diffs testdata/orig_src/<file> against testdata/new_src/<file>
    and checks the (line number, text) pairs flagged as interesting 'header'
    lines on the original (index 0) and new (index 1) sides.
    """

    PREFIX = os.path.join(os.path.dirname(__file__), 'testdata')

    def testCSharp(self):
        """Testing interesting lines scanner with a C# file"""
        lines = self.__get_lines("helloworld.cs")
        self.assertEqual(len(lines[0]), 2)
        self.assertEqual(lines[0][0], (0, 'public class HelloWorld {\n'))
        self.assertEqual(lines[0][1], (1, '\tpublic static void Main() {\n'))
        self.assertEqual(lines[1][0], (3, 'public class HelloWorld\n'))
        self.assertEqual(lines[1][1], (8, '\tpublic static void Main()\n'))

    def testJava(self):
        """Testing interesting lines scanner with a Java file"""
        lines = self.__get_lines("helloworld.java")
        self.assertEqual(len(lines[0]), 2)
        self.assertEqual(lines[0][0], (0, 'class HelloWorld {\n'))
        self.assertEqual(lines[0][1],
                         (1, '\tpublic static void main(String[] args) {\n'))
        self.assertEqual(len(lines[1]), 2)
        self.assertEqual(lines[1][0], (3, 'class HelloWorld\n'))
        self.assertEqual(lines[1][1],
                         (8, '\tpublic static void main(String[] args)\n'))

    def testJavaScript(self):
        """Testing interesting lines scanner with a JavaScript file"""
        lines = self.__get_lines("helloworld.js")
        self.assertEqual(len(lines[0]), 3)
        self.assertEqual(lines[0][0], (0, 'function helloWorld() {\n'))
        self.assertEqual(lines[0][1], (5, '\thelloWorld2: function() {\n'))
        self.assertEqual(lines[0][2], (10, 'var helloWorld3 = function() {\n'))
        self.assertEqual(len(lines[1]), 3)
        self.assertEqual(lines[1][0], (3, 'function helloWorld()\n'))
        self.assertEqual(lines[1][1], (12, '\thelloWorld2: function()\n'))
        self.assertEqual(lines[1][2], (18, 'var helloWorld3 = function()\n'))

    def testObjectiveC(self):
        """Testing interesting lines scanner with an Objective C file"""
        lines = self.__get_lines("helloworld.m")
        self.assertEqual(len(lines[0]), 3)
        self.assertEqual(lines[0][0], (0, '@interface MyClass : Object\n'))
        self.assertEqual(lines[0][1], (4, '@implementation MyClass\n'))
        self.assertEqual(lines[0][2], (5, '- (void) sayHello {\n'))
        self.assertEqual(len(lines[1]), 3)
        self.assertEqual(lines[1][0], (0, '@interface MyClass : Object\n'))
        self.assertEqual(lines[1][1], (4, '@implementation MyClass\n'))
        self.assertEqual(lines[1][2], (8, '- (void) sayHello\n'))

    def testPerl(self):
        """Testing interesting lines scanner with a Perl file"""
        lines = self.__get_lines("helloworld.pl")
        self.assertEqual(len(lines[0]), 1)
        self.assertEqual(lines[0][0], (0, 'sub helloWorld {\n'))
        self.assertEqual(len(lines[1]), 1)
        self.assertEqual(lines[1][0], (1, 'sub helloWorld\n'))

    def testPHP(self):
        """Testing interesting lines scanner with a PHP file"""
        lines = self.__get_lines("helloworld.php")
        self.assertEqual(len(lines[0]), 2)
        self.assertEqual(lines[0][0], (1, 'class HelloWorld {\n'))
        self.assertEqual(lines[0][1], (2, '\tfunction helloWorld() {\n'))
        self.assertEqual(len(lines[1]), 2)
        self.assertEqual(lines[1][0], (4, 'class HelloWorld\n'))
        self.assertEqual(lines[1][1], (9, '\tfunction helloWorld()\n'))

    def testPython(self):
        """Testing interesting lines scanner with a Python file"""
        lines = self.__get_lines("helloworld.py")
        self.assertEqual(len(lines[0]), 2)
        self.assertEqual(lines[0][0], (0, 'class HelloWorld:\n'))
        self.assertEqual(lines[0][1], (1, '    def main(self):\n'))
        self.assertEqual(len(lines[1]), 2)
        self.assertEqual(lines[1][0], (0, 'class HelloWorld:\n'))
        self.assertEqual(lines[1][1], (3, '    def main(self):\n'))

    def testRuby(self):
        """Testing interesting lines scanner with a Ruby file"""
        lines = self.__get_lines("helloworld.rb")
        self.assertEqual(len(lines[0]), 2)
        self.assertEqual(lines[0][0], (0, 'class HelloWorld\n'))
        self.assertEqual(lines[0][1], (1, '\tdef helloWorld\n'))
        self.assertEqual(len(lines[1]), 2)
        self.assertEqual(lines[1][0], (1, 'class HelloWorld\n'))
        self.assertEqual(lines[1][1], (3, '\tdef helloWorld()\n'))

    def __get_lines(self, filename):
        """Diff orig_src/<filename> against new_src/<filename>, returning
        the interesting 'header' lines for the original and new sides."""
        f = open(os.path.join(self.PREFIX, "orig_src", filename), "r")
        a = f.readlines()
        f.close()
        f = open(os.path.join(self.PREFIX, "new_src", filename), "r")
        b = f.readlines()
        f.close()
        differ = diffutils.MyersDiffer(a, b)
        diffutils.register_interesting_lines_for_filename(differ, filename)
        # Begin the scan.
        list(differ.get_opcodes())
        # Removed a leftover debugging "print result" that polluted test
        # output (and was a syntax error on Python 3).
        return (differ.get_interesting_lines('header', False),
                differ.get_interesting_lines('header', True))
class DiffParserTest(unittest.TestCase):
PREFIX = os.path.join(os.path.dirname(__file__), 'testdata')
def diff(self, options=''):
f = os.popen('diff -rN -x .svn %s %s/orig_src %s/new_src' %
(options, self.PREFIX, self.PREFIX))
data = f.read()
f.close()
return data
def compareDiffs(self, files, testdir):
self.assertEqual(len(files), 14)
for file in files:
f = open("%s/diffs/%s/%s.diff" %
(self.PREFIX, testdir, os.path.basename(file.newFile)))
data = f.read()
f.close()
self.failUnless(file.origFile.startswith("%s/orig_src/" %
self.PREFIX))
self.failUnless(file.newFile.startswith("%s/new_src/" %
self.PREFIX))
self.assertNotEquals(file.origInfo, "")
self.assertNotEquals(file.newInfo, "")
self.assertNotEquals(file.data, "")
self.assertNotEquals(data, "")
# Can't really compare the strings because of timestamps...
def testUnifiedDiff(self):
"""Testing parse on a unified diff"""
data = self.diff('-u')
files = diffparser.DiffParser(data).parse()
self.compareDiffs(files, "unified")
def testContextDiff(self):
"""Testing parse on a context diff"""
data = self.diff('-c')
files = diffparser.DiffParser(data).parse()
self.compareDiffs(files, "context")
def testPatch(self):
"""Testing patching"""
file = 'foo.c'
old = self._get_file('orig_src', file)
new = self._get_file('new_src', file)
diff = self._get_file('diffs', 'unified', 'foo.c.diff')
patched = diffutils.patch(diff, old, file)
self.assertEqual(patched, new)
diff = self._get_file('diffs', 'unified', 'README.diff')
self.assertRaises(Exception, lambda: diffutils.patch(diff, old, file))
def testEmptyPatch(self):
"""Testing patching with an empty diff"""
old = 'This is a test'
diff = ''
patched = diffutils.patch(diff, old, 'test.c')
self.assertEqual(patched, old)
    def testPatchCRLFFileCRLFDiff(self):
        """Testing patching a CRLF file with a CRLF diff"""
        old = self._get_file('orig_src', 'README.crlf')
        new = self._get_file('new_src', 'README')
        diff = self._get_file('diffs', 'unified', 'README.crlf.diff')
        # NOTE(review): the third argument here is the new file's *contents*,
        # while testPatch passes a filename in that position -- confirm what
        # diffutils.patch() expects as its third argument.
        patched = diffutils.patch(diff, old, new)
        self.assertEqual(patched, new)
    def testPatchCRFileCRLFDiff(self):
        """Testing patching a CR file with a CRLF diff"""
        old = self._get_file('orig_src', 'README')
        new = self._get_file('new_src', 'README')
        diff = self._get_file('diffs', 'unified', 'README.crlf.diff')
        # NOTE(review): the third argument receives file contents here,
        # but a filename in testPatch -- verify diffutils.patch()'s contract.
        patched = diffutils.patch(diff, old, new)
        self.assertEqual(patched, new)
    def testPatchCRLFFileCRDiff(self):
        """Testing patching a CRLF file with a CR diff"""
        old = self._get_file('orig_src', 'README.crlf')
        new = self._get_file('new_src', 'README')
        diff = self._get_file('diffs', 'unified', 'README.diff')
        # NOTE(review): the third argument receives file contents here,
        # but a filename in testPatch -- verify diffutils.patch()'s contract.
        patched = diffutils.patch(diff, old, new)
        self.assertEqual(patched, new)
def testPatchFileWithFakeNoNewline(self):
"""Testing patching a file indicating no newline with a trailing \\r"""
old = self._get_file('orig_src', 'README.nonewline')
new = self._get_file('new_src', 'README.nonewline')
diff = self._get_file('diffs', 'unified', 'README.nonewline.diff')
files = diffparser.DiffParser(diff).parse()
patched = diffutils.patch(files[0].data, old, new)
self.assertEqual(diff, files[0].data)
self.assertEqual(patched, new)
def testInterline(self):
"""Testing inter-line diffs"""
def deepEqual(A, B):
typea, typeb = type(A), type(B)
self.assertEqual(typea, typeb)
if typea is tuple or typea is list:
for a, b in map(None, A, B):
deepEqual(a, b)
else:
self.assertEqual(A, B)
deepEqual(diffutils.get_line_changed_regions(None, None),
(None, None))
old = 'submitter = models.ForeignKey(Person, verbose_name="Submitter")'
new = 'submitter = models.ForeignKey(User, verbose_name="Submitter")'
regions = diffutils.get_line_changed_regions(old, new)
deepEqual(regions, ([(30, 36)], [(30, 34)]))
old = '-from reviews.models import ReviewRequest, Person, Group'
new = '+from .reviews.models import ReviewRequest, Group'
regions = diffutils.get_line_changed_regions(old, new)
deepEqual(regions, ([(0, 1), (6, 6), (43, 51)],
[(0, 1), (6, 7), (44, 44)]))
old = 'abcdefghijklm'
new = 'nopqrstuvwxyz'
regions = diffutils.get_line_changed_regions(old, new)
deepEqual(regions, (None, None))
def testMoveDetection(self):
"""Testing move detection"""
# movetest1 has two blocks of code that would appear to be moves:
# a function, and an empty comment block. Only the function should
# be seen as a move, whereas the empty comment block is less useful
# (since it's content-less) and shouldn't be seen as once.
old = self._get_file('orig_src', 'movetest1.c')
new = self._get_file('new_src', 'movetest1.c')
differ = diffutils.Differ(old.splitlines(), new.splitlines())
r_moves = []
i_moves = []
for opcodes in diffutils.opcodes_with_metadata(differ):
tag = opcodes[0]
meta = opcodes[-1]
if tag == 'delete':
if 'moved' in meta:
r_moves.append(meta['moved'])
elif tag == 'insert':
if 'moved' in meta:
i_moves.append(meta['moved'])
self.assertEqual(len(r_moves), 1)
self.assertEqual(len(i_moves), 1)
moves = [
(15, 28),
(16, 29),
(17, 30),
(18, 31),
(19, 32)
]
for i, j in moves:
self.assertTrue(j in i_moves[0])
self.assertTrue(i in r_moves[0])
self.assertEqual(i_moves[0][j], i)
self.assertEqual(r_moves[0][i], j)
def _get_file(self, *relative):
f = open(os.path.join(*tuple([self.PREFIX] + list(relative))))
data = f.read()
f.close()
return data
class HighlightRegionTest(TestCase):
    """Tests for the highlightregion template helper."""

    def setUp(self):
        # Force syntax highlighting on for every test in this suite.
        # NOTE(review): the setting is not save()d — presumably the
        # in-memory change is enough for these tests; confirm.
        config = SiteConfiguration.objects.get_current()
        config.set('diffviewer_syntax_highlighting', True)

    def testHighlightRegion(self):
        """Testing highlightregion"""
        # Each case is (input HTML, regions, expected output).
        cases = [
            ("", None, ""),
            ("abc", None, "abc"),
            ("abc", [(0, 3)], '<span class="hl">abc</span>'),
            ("abc", [(0, 1)], '<span class="hl">a</span>bc'),
            ('<span class="xy">a</span>bc', [(0, 1)],
             '<span class="xy"><span class="hl">a</span></span>bc'),
            ('<span class="xy">abc</span>123', [(1, 4)],
             '<span class="xy">a<span class="hl">bc</span></span>'
             '<span class="hl">1</span>23'),
            ('<span class="xy">abc</span><span class="z">12</span>3',
             [(1, 4)],
             '<span class="xy">a<span class="hl">bc</span></span>'
             '<span class="z"><span class="hl">1</span>2</span>3'),
            ('foo<span class="xy">abc</span><span class="z">12</span>3',
             [(0, 6), (7, 9)],
             '<span class="hl">foo</span><span class="xy">'
             '<span class="hl">abc</span></span><span class="z">1'
             '<span class="hl">2</span></span><span class="hl">3</span>'),
            ('foo"bar', [(0, 7)],
             '<span class="hl">foo"bar</span>'),
            ('"foo"', [(0, 1)],
             '<span class="hl">"</span>foo"'),
            ('"foo"', [(2, 5)],
             '"f<span class="hl">oo"</span>'),
            ('foo=<span class="ab">"foo"</span>)', [(4, 9)],
             'foo=<span class="ab"><span class="hl">"foo"'
             '</span></span>)'),
        ]
        for text, regions, expected in cases:
            self.assertEquals(highlightregion(text, regions), expected)
class DbTests(TestCase):
    """Unit tests for database operations."""
    fixtures = ['test_scmtools.json']
    PREFIX = os.path.join(os.path.dirname(__file__), 'testdata')

    def testLongFilenames(self):
        """Testing using long filenames (1024 characters) in FileDiff."""
        long_filename = 'x' * 1024

        repository = Repository.objects.get(pk=1)
        diffset = DiffSet.objects.create(name='test',
                                         revision=1,
                                         repository=repository)
        filediff = FileDiff(source_file=long_filename,
                            dest_file='foo',
                            diffset=diffset)
        filediff.save()

        # Reload from the database to verify the value survived the
        # round-trip without truncation.
        filediff = FileDiff.objects.get(pk=filediff.id)
        self.assertEqual(filediff.source_file, long_filename)

    def testDiffHashes(self):
        """
        Testing that uploading two of the same diff will result in only
        one database entry.
        """
        repository = Repository.objects.get(pk=1)
        diffset = DiffSet.objects.create(name='test',
                                         revision=1,
                                         repository=repository)

        # Read the sample diff with a context manager so the handle is
        # closed even if the read fails (the original leaked it then).
        with open(os.path.join(self.PREFIX, "diffs", "context",
                               "foo.c.diff"), "r") as f:
            data = f.read()

        filediff1 = FileDiff(diff=data,
                             diffset=diffset)
        filediff1.save()
        filediff2 = FileDiff(diff=data,
                             diffset=diffset)
        filediff2.save()

        # Identical diff content must share one deduplicated hash row.
        self.assertEqual(filediff1.diff_hash, filediff2.diff_hash)
| |
import logging
import time
import threading
import netuitive
from .elements import Elements
from .statsd import parse_message
logger = logging.getLogger(__name__)
class Poster(threading.Thread):
    """
    Thread for posting the collected data to Netuitive's API

    Samples and events are buffered via submit() (called from the packet
    handler) and periodically flushed to the API by run()/flush().
    """

    def __init__(self, config, element, version='develop'):
        """Set up the API client, counters and buffers.

        :param config: dict-like settings; reads 'url', 'api_key',
            'prefix', 'interval', 'hostname', 'no_internal_metrics'.
        :param element: seed element handed to the Elements container.
        :param version: agent version string reported to the API.
        """
        logger.debug('Poster')
        threading.Thread.__init__(self)
        self.setName('PosterThread')
        # Guards shared state (elements/events buffers) between the
        # flush loop and submit() callers.
        self.lock = threading.Lock()
        self.config = config
        # Doubles as the stop signal and the interval timer (wait()).
        self.runner = threading.Event()
        self.sample_count = float(0)
        self.packet_count = float(0)
        self.event_count = float(0)
        self.metric_prefix = self.config['prefix']
        self.stats_prefix = 'statsd.netuitive-statsd'
        self.no_internal_metrics = self.config['no_internal_metrics']
        # Timestamp of the last "posted successfully" INFO log (rate
        # limited to one message per 15 minutes).
        self.flush_time = 0
        logger.debug('Messages will be sent to ' + self.config['url'])
        self.api = netuitive.Client(self.config['url'],
                                    self.config['api_key'],
                                    agent='Netuitive-Statsd/' + str(version))
        self.interval = int(self.config['interval'])
        self.hostname = config['hostname']
        self.events = []
        self.elements = Elements(self.hostname, element)
        # Seconds of consecutive flush failures tolerated before
        # buffered data is dropped (at least 15 minutes).
        self.flush_error_count = 0
        self.flush_error_max = max(self.interval * 15, 900)

    def stop(self):
        """Signal run() to exit after its current wait/flush cycle."""
        logger.debug("Poster Shutting down")
        self.runner.set()

    def run(self):
        """
        start the loop

        Sleeps for the configured interval (interruptible by stop()),
        then flushes; repeats until stopped.
        """
        while not self.runner.is_set():
            logger.debug('Waiting {0} seconds'.format(self.interval))
            self.runner.wait(self.interval)
            logger.debug('Flushing')
            if self.flush():
                logger.debug('Flush sucessful')
            else:
                logger.error('Error during flush')

    def flush(self):
        """
        send the data to the Netuitive API and remove the local data

        Returns True on success, False on failure or when data had to
        be dropped after too many consecutive failures.
        """
        try:
            with self.lock:
                # Drop everything if posting has been failing for too
                # long, to prevent unbounded memory growth.
                if self.flush_error_count >= self.flush_error_max:
                    logger.error(
                        'failed to post for at least {0} seconds. '.format(
                            self.flush_error_max) +
                        'dropping data to prevent memory starvation.'
                    )
                    self.elements.delete_all()
                    return(False)
                timestamp = int(time.time())

                if self.no_internal_metrics is False:
                    # add some of our internal metric samples
                    self.elements.add(self.stats_prefix +
                                      '.packets_received.count',
                                      timestamp,
                                      self.packet_count,
                                      'c',
                                      elementId=self.hostname)
                    self.elements.add(self.stats_prefix +
                                      '.samples_received.count',
                                      timestamp,
                                      self.sample_count,
                                      'c',
                                      elementId=self.hostname)
                    self.elements.add(self.stats_prefix +
                                      '.event_received.count',
                                      timestamp,
                                      self.event_count,
                                      'c',
                                      elementId=self.hostname)

                logger.debug('Sample count: {0}'.format(self.sample_count))
                logger.debug('Packet count: {0}'.format(self.packet_count))
                logger.debug('Event count: {0}'.format(self.event_count))
                logger.debug(
                    'Flushing {0} samples and '
                    '{1} events total'.format(self.sample_count,
                                              self.event_count))

                # ec/sc track totals across all elements for logging.
                ec = 0
                sc = 0
                for ename in self.elements.elements:
                    e = self.elements.elements[ename]
                    e.prepare()
                    element = e.element
                    sample_count = len(element.samples)
                    ec += 1
                    sc += sample_count
                    logger.debug('{0} has {1} samples'.format(ename,
                                                              sample_count))
                    for s in element.samples:
                        logger.debug('elementId: {0} metricId: '
                                     '{1} value: {2} timestamp: {3}'.format(
                                         ename,
                                         s.metricId,
                                         s.val,
                                         str(s.timestamp)))
                    # NOTE(review): this checks the cumulative count, not
                    # this element's sample_count, so later sample-less
                    # elements are still posted once any samples were
                    # seen — confirm this is intended.
                    if sc > 0:
                        logger.debug(
                            'sending {0} samples for for {1}'.format(sc,
                                                                     ename))
                        # do the post
                        if self.api.post(element):
                            self.elements.clear_samples(ename)
                            logger.debug(
                                "Successfully sent {0} elements with "
                                "{1} samples total".format(ec, sc))
                            elapsed = int(time.time()) - self.flush_time
                            # Rate limit the success INFO log to one per
                            # 15 minutes.
                            if elapsed > 900 or self.flush_time == 0:
                                self.flush_time = int(time.time())
                                # NOTE(review): uses the root logging
                                # module rather than `logger` — possibly
                                # unintended; confirm.
                                logging.info('Data posted Successfully. '
                                             'Next log message in 15 minutes.')
                        else:
                            logger.error(
                                "Failed to send {0} elements with "
                                "{1} samples total".format(ec, sc))

                logger.debug(
                    'Flushing {0} events'.format(len(self.events)))
                for event in self.events:
                    if self.api.post_event(event):
                        logger.debug(
                            "Successfully sent event "
                            "titled {0}".format(event.title))
                    else:
                        # NOTE(review): both placeholders are {0}, so the
                        # title is printed twice here.
                        logger.warning(
                            "Failed to send {0} event "
                            "titled {0}".format(event.title))

                # reset
                self.sample_count = 0
                self.packet_count = 0
                self.event_count = 0
                self.events = []
                self.elements.delete_all()
                return(True)

        except Exception as e:
            logger.error(e, exc_info=True)
            # Accumulate failure time; compared against flush_error_max
            # above to decide when to drop data.
            self.flush_error_count += self.interval
            return(False)

    def submit(self, message, ts):
        """
        process incoming messages

        Parses a raw statsd packet and buffers the resulting samples
        and events (under the lock) for the next flush.

        :param message: raw statsd packet text.
        :param ts: receive time (seconds since epoch).
        """
        timestamp = int(ts)
        try:
            self.packet_count += 1
            messages = parse_message(message)
            if messages is not None:
                self.sample_count += float(messages['counts']['messages'])
                self.event_count += float(messages['counts']['events'])
                # process an event message
                if len(messages['events']) > 0:
                    for e in messages['events']:
                        title = e['title']
                        text = e['text']
                        tgs = e['tags']
                        tags = []
                        # Flatten the list of single-entry tag dicts into
                        # (key, value) pairs.
                        for t in tgs:
                            for k, v in t.items():
                                tags.append((k, v))
                        if e['hostname'] is None:
                            eid = self.hostname
                        else:
                            eid = e['hostname']
                        # Map the statsd priority onto an event level.
                        lvl = 'INFO'
                        if e['priority'] is not None:
                            tags.append(('priority', e['priority']))
                            if e['priority'].upper() == "CRITICAL":
                                lvl = "CRITICAL"
                            if e['priority'].upper() == "WARNING":
                                lvl = "WARNING"
                        if e['date_happened'] is not None:
                            tags.append(
                                ('date_happened', e['date_happened']))
                        if e['aggregation_key'] is not None:
                            tags.append(
                                ('aggregation_key', e['aggregation_key']))
                        if e['source_type_name'] is not None:
                            tags.append(
                                ('source_type_name', e['source_type_name']))
                        if e['alert_type'] is not None:
                            tags.append(('alert_type', e['alert_type']))
                        with self.lock:
                            # NOTE(review): 'INFO' is presumably the event
                            # type and `lvl` the severity level — confirm
                            # against the netuitive.Event signature.
                            self.events.append(
                                netuitive.Event(eid,
                                                'INFO',
                                                title,
                                                text,
                                                lvl,
                                                tags,
                                                timestamp,
                                                'netuitive-statsd'))
                # process a metric/sample message
                if len(messages['metrics']) > 0:
                    for m in messages['metrics']:
                        with self.lock:
                            self.elements.add(
                                self.metric_prefix + '.' + m['name']
                                if self.metric_prefix != ""
                                else m['name'],
                                timestamp,
                                m['value'],
                                m['type'],
                                m['sign'],
                                m['rate'],
                                m['tags'],
                                m['hostname']
                            )

        except Exception as e:
            logger.error(
                'Invalid Packet Format: "' + str(message).rstrip() + '"')
            logger.error(e, exc_info=True)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""k-fold cross-validation procedure (analysis subcommand)
Functions used in k-fold cross-validation procedure.
"""
import os
import sys
import json
import re
from copy import copy
import bigml
from bigml.fields import Fields
from bigml.io import UnicodeWriter
import bigmler.processing.args as a
import bigmler.utils as u
from bigmler.dispatcher import main_dispatcher
from bigmler.options.analyze import ACCURACY, MINIMIZE_OPTIONS
from bigmler.resourcesapi.common import ALL_FIELDS_QS
# Keys used to look up metric values in evaluation JSON results.
AVG_PREFIX = "average_%s"
R_SQUARED = "r_squared"
PER_CLASS = "per_class_statistics"
# File name templates for the generated k-fold dataset specs.
EXTENDED_DATASET = "kfold_dataset.json"
TEST_DATASET = "kfold_dataset-%s.json"
# JSON template for the new field that assigns each row to a fold.
NEW_FIELD = ('{"row_offset": %s, "row_step": %s,'
             ' "new_fields": [{"name": "%s", "field": "%s"}],'
             ' "objective_field": {"id": "%s"}}')
# bigmler subcommand templates dispatched through main_dispatcher.
COMMANDS = {"selection":
                ("main --dataset %s --new-field %s --no-model --output-dir %s"
                 " --store"),
            "create_cv":
                ("main --datasets %s --output-dir %s --dataset-off --evaluate"
                 " --name %s --dataset-file %s"),
            "node_threshold":
                ("main --datasets %s --node-threshold %s --output-dir %s"
                 " --dataset-off --evaluate"),
            "random_candidates":
                ("main --datasets %s --random-candidates %s --output-dir %s"
                 " --randomize --dataset-off --evaluate"),
            "prediction":
                ("main --test-datasets %s/dataset_gen"
                 " --%s %s/%s"
                 " --dataset-off --remote --output-dir %s_pred"
                 " --prediction-info full --prediction-header --to-dataset")}
DEFAULT_KFOLD_FIELD = "__kfold__"
KFOLD_SUBDIR = "k_fold"
# Node-threshold search defaults.
DEFAULT_MIN_NODES = 3
DEFAULT_NODES_STEP = 100
# Random-candidates search defaults.
DEFAULT_MIN_CANDIDATES = 1
DEFAULT_CANDIDATES_STEP = 1
# Metrics reported as percentages in log messages.
PERCENT_EVAL_METRICS = [ACCURACY, "precision", "recall"]
# difference needed to become new best node
EPSILON = 0.001
# per feature score penalty
DEFAULT_PENALTY = 0.001
# per node score penalty
DEFAULT_NODES_PENALTY = 0
# per candidate score penalty
DEFAULT_CANDIDATES_PENALTY = 0
# staleness
DEFAULT_STALENESS = 5
# k-fold
DEFAULT_KFOLDS = 5
#subcommands
SUBCOMMAND_LOG = ".bigmler_subcmd"
SESSIONS_LOG = "bigmler_sessions"
FEATURES_LOG = "features_sets.csv"
NODES_LOG = "nodes_sets.csv"
CANDIDATES_LOG = "random_candidate_sets.csv"
#name max length
NAME_MAX_LENGTH = 127
#default number of models for --random-fields random forests
DEFAULT_NUMBER_OF_MODELS = 10
# CSV summary files headers
FEATURES_HEADER = ["step", "state", "score", "metric_value", "best_score"]
NODES_HEADER = ["step", "node_threshold", "score", "metric_value",
                "best_score"]
CANDIDATES_HEADER = ["step", "random_candidates", "score", "metric_value",
                     "best_score"]
# Module-level state shared by the resume machinery: the replayed
# subcommand stack and the log file paths (set by set_subcommand_file).
subcommand_list = []
subcommand_file = None
session_file = None
def set_subcommand_file(output_dir):
    """Point the module-level log paths at ``output_dir``.

    Updates the ``subcommand_file`` and ``session_file`` globals used
    by the resume machinery.
    """
    global subcommand_file, session_file

    def _log_path(basename):
        # Normalized path of a log file inside the output directory.
        return os.path.normpath(os.path.join(output_dir, basename))

    subcommand_file = _log_path(SUBCOMMAND_LOG)
    session_file = _log_path(SESSIONS_LOG)
def retrieve_subcommands():
    """Retrieves the executed subcommands in inverse order

    Reads the subcommand log written by previous runs into the
    module-level ``subcommand_list``, last command first, so resume can
    pop() them in execution order.
    """
    global subcommand_list
    # Use a context manager so the log handle is closed promptly
    # (the original left the open file to the garbage collector).
    with open(subcommand_file, u.open_mode("r")) as subcommands_handler:
        subcommand_list = subcommands_handler.readlines()
    subcommand_list.reverse()
def rebuild_command(args):
    """Rebuilds a unicode command string prepared to be stored in a file

    Joins the argument list into one line, doubling backslashes so the
    command survives a round-trip through the log file.
    """
    joined = " ".join(args)
    return joined.replace("\\", "\\\\") + "\n"
def different_command(next_command, command):
    """Check whether two logged commands are effectively different.

    Returns False when the commands match exactly, or when they differ
    only in the timestamp embedded in the default ``name=BigMLer_...``
    parameter. Returns True for any other difference.
    """
    if next_command == command:
        return False
    if 'name=BigMLer_' in command:
        # the difference may be due to the timestamp of default name
        # parameter, so compare with the name parameter stripped out.
        # Fix: the pattern used the wrong case ('Bigmler_'), so it never
        # matched the checked 'BigMLer_' marker and the comparison was a
        # no-op; the equality was also inverted.
        pattern = re.compile(r'name=BigMLer_[^\s]+')
        return re.sub(pattern, "", next_command) != re.sub(pattern,
                                                           "", command)
    # Fix: commands that differ for any other reason are different (the
    # original returned False here, letting resume replay mismatched
    # commands).
    return True
def create_prediction_dataset(base_path, folder, args, resume):
    """Creates batch prediction datasets and a multidataset with the prediction
    results for the best scoring model in the folder set by the argument

    :param base_path: directory containing the k-fold artifacts.
    :param folder: subdirectory holding the best-scoring model.
    :param args: parsed command-line options namespace.
    :param resume: whether to replay previously logged subcommands.
    :return: the (possibly updated) resume flag.
    """
    args.output_dir = os.path.join(base_path, "%s_pred" % folder)
    folder = os.path.join(base_path, folder)
    # Ensembles and single models are stored under different subdirs.
    model_type = "ensembles" if hasattr(args, "number_of_models") and \
        args.number_of_models > 1 else "models"
    global subcommand_list
    # creating the predictions CSV file
    command = COMMANDS["prediction"] % (base_path, model_type, folder,
                                        model_type, folder)
    command_args = command.split()
    if resume:
        # Pop the next logged command; if it no longer matches, fall out
        # of resume mode and execute/log this command normally.
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            # Log exhausted: let the dispatcher resume its own pending work.
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    return resume
def create_kfold_cv(args, api, command_obj, resume=False):
    """Build the k-fold datasets and run the cross-validation
    evaluations.

    ``resume`` replays previously logged subcommands instead of
    re-creating resources.
    """
    set_subcommand_file(args.output_dir)
    if resume:
        retrieve_subcommands()
    datasets_file, objective_name, resume = create_kfold_datasets_file(
        args, api, command_obj, resume=resume)
    # Nothing to evaluate when no dataset source could be determined.
    if datasets_file is None:
        return
    args.output_dir = os.path.normpath(
        os.path.join(u.check_dir(datasets_file), KFOLD_SUBDIR))
    u.log_message('Creating the kfold evaluations.........\n',
                  log_file=session_file, console=args.verbosity)
    args.objective_field = objective_name
    create_kfold_evaluations(datasets_file, args, command_obj,
                             resume=resume)
def create_features_analysis(args, api, command_obj, resume=False):
    """Analyzes the features in the dataset to find the ones that
    optimize the model performance

    Runs a best-first search over feature subsets, then logs the
    bigmler commands the user can run to build the final model and to
    clean up the generated resources.
    """
    set_subcommand_file(args.output_dir)
    output_dir = args.output_dir
    if resume:
        retrieve_subcommands()
    datasets_file, objective_name, resume = create_kfold_datasets_file(
        args, api, command_obj, resume=resume)
    args.objective_field = objective_name
    message = ('Creating the best features set..........\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    # Best-first search over feature subsets scored by k-fold evaluation.
    model_fields = best_first_search(
        datasets_file, api, args, command_obj,
        staleness=args.staleness,
        penalty=args.penalty,
        objective_name=objective_name, resume=resume)
    # showing the instruction to create the complete model with the
    # selected feature subset
    bigmler_command = 'bigmler --dataset %s --model-fields="%s"' % ( \
        args.dataset, ",".join(model_fields))
    if args.number_of_models > 1:
        bigmler_command = "%s --number-of-models %s" % ( \
            bigmler_command, args.number_of_models)
    # Carry over connection-related flags (credentials, endpoints).
    connection_args = command_obj.propagate([], connection_only=True)
    bigmler_command = "%s %s" % (bigmler_command, " ".join(connection_args))
    message = ('To create the final model with the entire dataset using '
               'the selected feature subset use:\n%s\n\n' %
               bigmler_command)
    u.log_message(message, log_file=session_file, console=1)
    # showing the instruction to delete the user-given output-dir
    bigmler_command = ('bigmler delete --from-dir %s') % (
        output_dir)
    bigmler_command = "%s %s" % (bigmler_command, " ".join(connection_args))
    message = ('To delete all the resources generated by this analyze'
               ' subcommand use:\n%s\n\n') % bigmler_command
    u.log_message(message, log_file=session_file, console=1)
def create_nodes_analysis(args, api, command_obj, resume=False):
    """Analyzes the model performance as a function of node threshold.

    Searches for the best node threshold via k-fold evaluation, then
    logs the bigmler commands to build the final model and to clean up
    the generated resources.
    """
    set_subcommand_file(args.output_dir)
    output_dir = args.output_dir
    if resume:
        retrieve_subcommands()
    datasets_file, objective_name, resume = create_kfold_datasets_file(
        args, api, command_obj, resume=resume)
    args.objective_field = objective_name
    message = ('Creating the node threshold set..........\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    # Search over node thresholds scored by k-fold evaluation.
    node_threshold = best_node_threshold(
        datasets_file, args, command_obj,
        staleness=args.staleness,
        penalty=args.penalty,
        resume=resume)
    # showing the instruction to create the complete model with the
    # chosen node threshold
    bigmler_command = ('bigmler --dataset %s --node-threshold %s') % (
        args.dataset, node_threshold)
    message = ('To create the final model with the entire dataset using '
               'the selected maximum number of nodes use:\n%s\n\n' %
               bigmler_command)
    u.log_message(message, log_file=session_file, console=1)
    # showing the instruction to delete the user-given output-dir
    bigmler_command = ('bigmler delete --from-dir %s') % (
        output_dir)
    message = ('To delete all the resources generated by this analyze'
               ' subcommand use:\n%s\n\n') % bigmler_command
    u.log_message(message, log_file=session_file, console=1)
def create_kfold_datasets_file(args, api, command_obj, resume=False):
    """Create the kfold dataset resources and store their ids in a file
    one per line

    Returns a ``(datasets_file, objective_name, resume)`` tuple, or
    ``(None, None, None)`` when no source dataset can be determined.
    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    csv_properties = {}
    fields = None
    dataset = None
    datasets = []
    # Fix: dataset_id must always be bound before the `if dataset_id`
    # check below; the original left it undefined on some paths.
    dataset_id = None
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
        # Fix: this assignment was nested under the `else` branch above,
        # raising NameError later when only --dataset-file was given and
        # TypeError (dataset is None) when --datasets was also given.
        dataset_id = (dataset['resource'] if dataset is not None
                      else datasets[0])
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]
    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        # Objective given as a column number arrives as a string.
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in the
        # dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args, kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      command_obj,
                                                      resume=resume)
        return datasets_file, objective_name, resume
    return None, None, None
def create_kfold_json(args, kfold_field=DEFAULT_KFOLD_FIELD,
                      objective_field=None, resume=False):
    """Create the files to generate a new field with a random integer from
    0 to k-1, and a filter file for each of these indexes.

    Returns the list of generated file paths and the (possibly updated)
    resume flag.
    """
    output_dir = args.output_dir
    k = args.k_folds if args.k_folds else DEFAULT_KFOLDS
    selecting_file_list = []
    try:
        for index in range(k):
            new_field = NEW_FIELD % (index, k, kfold_field,
                                     index, objective_field)
            selecting_file = os.path.normpath(
                os.path.join(output_dir, TEST_DATASET % index))
            selecting_file_list.append(selecting_file)
            # When resuming, skip files that already exist; any missing
            # file drops us out of resume mode.
            if resume and os.path.isfile(selecting_file):
                continue
            resume = False
            with open(selecting_file, u.open_mode("w")) as test_dataset:
                test_dataset.write(new_field)
        return selecting_file_list, resume
    except IOError:
        sys.exit("Could not create the necessary files.")
def avoid_duplicates(field_name, fields, affix="_"):
    """Checks if a field name already exists in a fields structure.

    Keeps wrapping the candidate name in ``affix`` until it no longer
    collides with any existing field name, then returns it.
    """
    taken = [field['name'] for field in fields.fields.values()]
    while field_name in taken:
        field_name = "%s%s%s" % (affix, field_name, affix)
    return field_name
def create_kfold_datasets(dataset, args,
                          selecting_file_list,
                          command_obj, resume=False):
    """Calling the bigmler procedure to create the k-fold datasets

    Runs one "selection" subcommand per fold spec file, honoring the
    resume log, and returns the path of the generated datasets file
    plus the (possibly updated) resume flag.
    """
    args.output_dir = os.path.normpath(os.path.join(args.output_dir, "test"))
    output_dir = args.output_dir
    global subcommand_list
    # creating the selecting datasets
    for index in range(0, len(selecting_file_list)):
        command = COMMANDS["selection"] % (
            dataset, selecting_file_list[index],
            output_dir)
        command_args = command.split()
        command_obj.propagate(command_args)
        command = rebuild_command(command_args)
        if resume:
            # Pop the next logged command; a mismatch drops us out of
            # resume mode and the command is executed/logged normally.
            next_command = subcommand_list.pop()
            if different_command(next_command, command):
                resume = False
                u.sys_log_message(command, log_file=subcommand_file)
                main_dispatcher(args=command_args)
            elif not subcommand_list:
                # Log exhausted: let the dispatcher resume its own work.
                main_dispatcher(args=['main', '--resume'])
                resume = False
        else:
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
    datasets_file = os.path.normpath(os.path.join(output_dir, "dataset_gen"))
    return datasets_file, resume
def add_model_options(command_args, args):
    """Adds the command options used to configure models or ensembles

    Appends the relevant flags (and their values, when any) to
    ``command_args`` in place and returns the same list.
    """
    def _add(option, value=None):
        # Append a flag, optionally followed by its value.
        command_args.append(option)
        if value is not None:
            command_args.append(value)

    if args.balance:
        _add("--balance")
    if args.missing_splits:
        _add("--missing-splits")
    if args.pruning:
        _add("--pruning", args.pruning)
    if args.weight_field:
        _add("--weight-field", args.weight_field)
    if args.objective_weights:
        _add("--objective-weights", args.objective_weights)
    if args.model_attributes:
        _add("--model-attributes", args.model_attributes)
    if args.number_of_models > 1:
        _add("--number-of-models", str(args.number_of_models))
    # ensembles options
    if args.sample_rate < 1:
        _add("--sample-rate", str(args.sample_rate))
    if args.replacement:
        _add("--replacement")
    if not args.ensemble_sample_replacement:
        _add("--ensemble-sample-no-replacement")
    if args.ensemble_sample_seed:
        _add("--ensemble-sample-seed", args.ensemble_sample_seed)
    if args.ensemble_sample_rate < 1:
        _add("--ensemble-sample-rate", str(args.ensemble_sample_rate))
    if args.randomize:
        _add("--randomize")
    if args.ensemble_attributes:
        _add("--ensemble-attributes", args.ensemble_attributes)
    return command_args
def create_kfold_evaluations(datasets_file, args, command_obj,
                             resume=False, counter=0):
    """ Create k-fold cross-validation from a datasets file

    Builds and dispatches one "create_cv" subcommand (honoring the
    resume log) and returns the parsed evaluation JSON plus the
    (possibly updated) resume flag. Exits on failure to read the
    resulting evaluation file.
    """
    global subcommand_list
    output_dir = os.path.normpath(
        u.check_dir(os.path.join("%s%s" % (args.output_dir, counter),
                                 "evaluation.json")))
    model_fields = args.model_fields
    # Truncate the user-given name so the suffixed result stays within
    # the API's name length limit.
    name_suffix = "_subset_%s" % counter
    name_max_length = NAME_MAX_LENGTH - len(name_suffix)
    name = "%s%s" % (args.name[0: name_max_length], name_suffix)
    dataset_id = u.read_datasets(datasets_file)[0]
    model_dataset = os.path.normpath(
        os.path.join(u.check_dir(datasets_file), dataset_id.replace("/", "_")))
    command = COMMANDS["create_cv"] % (datasets_file, output_dir, name,
                                       model_dataset)
    command_args = command.split()

    if model_fields:
        command_args.append("--model-fields")
        command_args.append(model_fields)
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    # NOTE(review): the following triple-quoted block is disabled code
    # kept as a bare string literal, not a docstring.
    """
    common_options_list = u.get_options_list(args, command_obj.common_options,
                                             prioritary=command_args)
    command_args.extend(common_options_list)
    """
    command_obj.propagate(command_args, exclude=["--dataset",
                                                 "--datasets",
                                                 "--dataset-file"])
    command = rebuild_command(command_args)
    if resume:
        # Pop the next logged command; a mismatch drops us out of
        # resume mode and the command is executed/logged normally.
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            # Log exhausted: let the dispatcher resume its own work.
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(os.path.join(output_dir,
                                                    "evaluation.json"))
    try:
        with open(evaluation_file) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")
def find_max_state(states_list):
    """Return the (state, score, metric_value, counter) tuple with the
    highest score.

    Ties keep the earliest entry; an empty list yields the sentinel
    ``(None, -inf, -inf, 0)``.
    """
    best = (None, - float('inf'), - float('inf'), 0)
    for candidate in states_list:
        # Replace on a strictly better score, or take the first entry
        # even when its score equals the -inf sentinel.
        if candidate[1] > best[1] or (best[0] is None and
                                      best[1] == candidate[1]):
            best = candidate
    return best
def expand_state(parent):
    """Get all connected states

    Returns every state reachable from ``parent`` by toggling exactly
    one of its boolean positions.
    """
    return [parent[:pos] + [not parent[pos]] + parent[pos + 1:]
            for pos in range(len(parent))]
def best_first_search(datasets_file, api, args, command_obj,
                      staleness=None, penalty=None, objective_name=None,
                      resume=False):
    """Selecting the fields to be used in the model construction

    Best-first search over boolean feature-inclusion states, scoring
    each candidate subset with a k-fold evaluation. Stops after
    ``staleness`` consecutive non-improving expansions. Returns the
    list of field names in the best subset found.
    """
    counter = 0
    loop_counter = 0
    features_file = os.path.normpath(os.path.join(args.output_dir,
                                                  FEATURES_LOG))
    features_writer = UnicodeWriter(features_file).open_writer()
    features_header = FEATURES_HEADER
    if staleness is None:
        staleness = DEFAULT_STALENESS
    if penalty is None:
        penalty = DEFAULT_PENALTY
    # retrieving the first dataset in the file
    try:
        with open(datasets_file, u.open_mode("r")) as datasets_handler:
            dataset_id = datasets_handler.readline().strip()
    except IOError as exc:
        sys.exit("Could not read the generated datasets file: %s" %
                 str(exc))
    # Prefer the locally stored dataset JSON; fall back to the API.
    try:
        stored_dataset = u.storage_file_name(args.output_dir, dataset_id)
        with open(stored_dataset, u.open_mode("r")) as dataset_handler:
            dataset = json.loads(dataset_handler.read())
    except IOError:
        dataset = api.check_resource(dataset_id,
                                     query_string=ALL_FIELDS_QS)
    # initial feature set
    fields = Fields(dataset)
    excluded_features = ([] if args.exclude_features is None else
                         args.exclude_features.split(
                             args.args_separator))
    try:
        excluded_ids = [fields.field_id(feature) for
                        feature in excluded_features]
        objective_id = fields.field_id(objective_name)
    except ValueError as exc:
        sys.exit(exc)
    # Candidate fields: preferred, not the objective, not excluded.
    field_ids = [field_id for field_id in fields.preferred_fields()
                 if field_id != objective_id and
                 not field_id in excluded_ids]
    field_ids.sort()
    # headers are extended with a column per field
    fields_names = [fields.field_name(field_id) for field_id in field_ids]
    features_header.extend(fields_names)
    features_writer.writerow(features_header)
    # A state is a list of booleans parallel to field_ids; start empty.
    initial_state = [False for field_id in field_ids]
    open_list = [(initial_state, - float('inf'), -float('inf'), 0)]
    closed_list = []
    best_state, best_score, best_metric_value, best_counter = open_list[0]
    best_unchanged_count = 0
    metric = args.optimize
    while best_unchanged_count < staleness and open_list:
        loop_counter += 1
        # Expand the highest-scoring unexplored state next.
        features_set = find_max_state(open_list)
        state, score, metric_value, _ = features_set
        if loop_counter > 1:
            # Record the previous step's outcome in the CSV log.
            csv_results = [loop_counter - 1, \
                [int(in_set) for in_set in state], \
                score, metric_value, best_score]
            csv_results.extend([int(in_set) for in_set in state])
            features_writer.writerow(csv_results)
        try:
            state_fields = [fields.field_name(field_ids[index])
                            for (index, in_set) in enumerate(state)
                            if in_set]
        except ValueError as exc:
            sys.exit(exc)
        closed_list.append(features_set)
        open_list.remove(features_set)
        # Require an improvement larger than EPSILON to reset staleness.
        if (score - EPSILON) > best_score:
            best_state, best_score, best_metric_value, best_counter = \
                features_set
            best_unchanged_count = 0
            if state_fields:
                message = 'New best state: %s\n' % (state_fields)
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                if metric in PERCENT_EVAL_METRICS:
                    message = '%s = %0.2f%% (score = %s)\n' % (
                        metric.capitalize(), metric_value * 100, score)
                else:
                    message = '%s = %f (score = %s)\n' % (
                        metric.capitalize(), metric_value, score)
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        else:
            best_unchanged_count += 1

        # Evaluate every unseen neighbor (one feature toggled).
        children = expand_state(state)
        for child in children:
            if (child not in [state for state, _, _, _ in open_list] and
                    child not in [state for state, _, _, _ in closed_list]):
                try:
                    # we need to keep names instead of IDs because
                    # IDs can change for different datasets
                    input_fields = [fields.field_name(field_id)
                                    for (i, field_id)
                                    in enumerate(field_ids) if child[i]]
                except ValueError as exc:
                    sys.exit(exc)
                # create models and evaluation with input_fields
                args.model_fields = args.args_separator.join(input_fields)
                counter += 1
                (score,
                 metric_value,
                 metric,
                 resume) = kfold_evaluate(datasets_file,
                                          args, counter, command_obj,
                                          penalty=penalty, resume=resume,
                                          metric=metric)
                open_list.append((child, score, metric_value, counter))

    try:
        best_features = [fields.field_name(field_ids[i]) for (i, score)
                         in enumerate(best_state) if score]
    except ValueError as exc:
        sys.exit(exc)
    message = ('The best feature subset is: %s \n'
               % ", ".join(best_features))
    u.log_message(message, log_file=session_file, console=1)
    if metric in PERCENT_EVAL_METRICS:
        message = ('%s = %0.2f%%\n' % (metric.capitalize(),
                                       (best_metric_value * 100)))
    else:
        message = ('%s = %f\n' % (metric.capitalize(), best_metric_value))
    u.log_message(message, log_file=session_file, console=1)
    output_dir = os.path.normpath(u.check_dir(datasets_file))
    if args.predictions_csv:
        # Build the prediction dataset for the best-scoring model.
        resume = create_prediction_dataset(output_dir,
                                           "kfold%s" % best_counter,
                                           args, resume)
    message = ('Evaluated %d/%d feature subsets\n\n' %
               ((len(open_list) + len(closed_list) - 1),
                2 ** len(field_ids) - 1))
    u.log_message(message, log_file=session_file, console=1)
    features_writer.close_writer()
    return best_features
def extract_evaluation_info(evaluation, category):
    """Return the evaluation metrics for the chosen category.

    Falls back to the model-wide (average) metrics when no category is
    given or the requested category is not found among the per-class
    evaluations.
    """
    model_evaluation = evaluation.get("model", {})
    if category and PER_CLASS in model_evaluation:
        # Pick the first per-class entry whose class name matches.
        match = next(
            (class_eval for class_eval in model_evaluation[PER_CLASS]
             if class_eval["class_name"] == category),
            None)
        if match is not None:
            return match
    return model_evaluation
def kfold_evaluate(datasets_file, args, counter, command_obj,
                   penalty=DEFAULT_PENALTY,
                   metric=ACCURACY, resume=False):
    """Score one k-fold cross-validation round for the given feature subset.

    Creates the evaluations under a "kfold" subdirectory, extracts the
    averaged metric (falling back to r-squared for regressions) and builds
    a score penalized by the number of fields used.

    Returns:
        (score, metric_value, metric_literal, resume) tuple.
    """
    # Evaluations are created with the input_fields already stored in args.
    args.output_dir = os.path.normpath(
        os.path.join(u.check_dir(datasets_file), "kfold"))
    evaluation, resume = create_kfold_evaluations(
        datasets_file, args, command_obj, resume=resume, counter=counter)
    evaluation = extract_evaluation_info(evaluation, args.optimize_category)
    metric_literal = metric
    avg_metric = AVG_PREFIX % metric
    if avg_metric not in evaluation:
        # Classification metric missing: assume regression and use r-squared.
        metric_literal = R_SQUARED
        avg_metric = AVG_PREFIX % R_SQUARED
        if avg_metric not in evaluation:
            sys.exit("Failed to find %s or r-squared in the evaluation"
                     % metric)
    # Metrics that should be minimized flip the sign of the score.
    invert = -1 if metric in MINIMIZE_OPTIONS else 1
    metric_value = evaluation[avg_metric]
    number_of_fields = len(args.model_fields.split(args.args_separator))
    score = invert * (metric_value - invert * penalty * number_of_fields)
    return score, metric_value, metric_literal, resume
def best_node_threshold(datasets_file, args, command_obj,
                        staleness=None, penalty=None,
                        resume=False):
    """Selecting the node_limit to be used in the model construction.

    Walks node thresholds from args.min_nodes up to args.max_nodes in
    args.nodes_step increments, evaluating each candidate and stopping
    early after `staleness` consecutive non-improving rounds.

    Args:
        datasets_file: Path to the file listing the k-fold datasets.
        args: Parsed command-line options (mutated: output_dir is moved to
            a "node_th" subdirectory; min_nodes/nodes_step defaults filled).
        command_obj: Command object used to propagate options.
        staleness: Number of non-improving rounds before stopping.
        penalty: Per-node score penalty.
        resume: Whether a previous run is being resumed.

    Returns:
        The node threshold achieving the best penalized score.
    """
    loop_counter = 0
    nodes_file = os.path.normpath(os.path.join(args.output_dir,
                                               NODES_LOG))
    nodes_writer = UnicodeWriter(nodes_file).open_writer()
    nodes_writer.writerow(NODES_HEADER)
    args.output_dir = os.path.normpath(os.path.join(args.output_dir,
                                                    "node_th"))
    max_nodes = args.max_nodes + 1
    if args.min_nodes is None:
        args.min_nodes = DEFAULT_MIN_NODES
    if args.nodes_step is None:
        args.nodes_step = DEFAULT_NODES_STEP
    node_threshold = args.min_nodes
    if staleness is None:
        staleness = DEFAULT_STALENESS
    if penalty is None:
        penalty = DEFAULT_NODES_PENALTY
    best_score = - float('inf')
    best_unchanged_count = 0
    metric = args.optimize
    score = best_score
    best_counter = 0
    # Fix: initialize so the function cannot raise NameError when the loop
    # never runs (e.g. min_nodes already exceeds max_nodes).
    best_threshold = node_threshold
    while best_unchanged_count < staleness and node_threshold < max_nodes:
        loop_counter += 1
        (score,
         metric_value,
         metric,
         resume) = node_threshold_evaluate(datasets_file, args,
                                           node_threshold, command_obj,
                                           penalty=penalty, resume=resume,
                                           metric=metric)
        nodes_writer.writerow([
            loop_counter - 1, node_threshold, score, metric_value, best_score])
        if (score - EPSILON) > best_score:
            best_threshold = node_threshold
            best_score = score
            best_unchanged_count = 0
            best_counter = loop_counter
            message = 'New best node threshold: %s\n' % (best_threshold)
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            if metric in PERCENT_EVAL_METRICS:
                message = '%s = %0.2f%% (score = %s)\n' % (
                    metric.capitalize(), metric_value * 100, score)
            else:
                message = '%s = %f (score = %s)\n' % (metric.capitalize(),
                                                      metric_value,
                                                      score)
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
        else:
            best_unchanged_count += 1
        node_threshold += args.nodes_step
    if args.predictions_csv:
        resume = create_prediction_dataset(args.output_dir,
                                           "node_th%s" % best_counter,
                                           args, resume)
    message = ('The best node threshold is: %s \n'
               % best_threshold)
    u.log_message(message, log_file=session_file, console=1)
    if metric in PERCENT_EVAL_METRICS:
        message = ('%s = %0.2f%%\n' % (metric.capitalize(),
                                       (best_score * 100)))
    else:
        message = ('%s = %f\n' % (metric.capitalize(), best_score))
    u.log_message(message, log_file=session_file, console=1)
    nodes_writer.close_writer()
    return best_threshold
def node_threshold_evaluate(datasets_file, args, node_threshold,
                            command_obj, penalty=DEFAULT_NODES_PENALTY,
                            metric=ACCURACY, resume=False):
    """Score the models created with the given node_threshold.

    Returns:
        (score, metric_value, metric_literal, resume) tuple, where the
        score is the averaged metric penalized by the node threshold.
    """
    # The evaluation is created reusing the input_fields stored in args.
    evaluation, resume = create_node_th_evaluations(
        datasets_file, args, command_obj, resume=resume,
        node_threshold=node_threshold)
    evaluation = extract_evaluation_info(evaluation, args.optimize_category)
    metric_literal = metric
    avg_metric = AVG_PREFIX % metric
    if avg_metric not in evaluation:
        # Classification metric missing: assume regression and use r-squared.
        metric_literal = R_SQUARED
        avg_metric = AVG_PREFIX % R_SQUARED
        if avg_metric not in evaluation:
            sys.exit("Failed to find %s or r-squared in the evaluation"
                     % metric)
    # Metrics that should be minimized flip the sign of the score.
    invert = -1 if metric in MINIMIZE_OPTIONS else 1
    metric_value = evaluation[avg_metric]
    score = invert * (metric_value - invert * penalty * node_threshold)
    return score, metric_value, metric_literal, resume
def create_node_th_evaluations(datasets_file, args, command_obj,
                               resume=False,
                               node_threshold=DEFAULT_MIN_NODES):
    """Create the evaluations for a given node_threshold value.

    Builds the corresponding bigmler subcommand, dispatches it (replaying
    logged subcommands when resuming) and reads back the generated
    evaluation.json file.

    Returns:
        (evaluation, resume) tuple with the parsed evaluation contents.
    """
    global subcommand_list
    output_dir = os.path.normpath(u.check_dir(
        os.path.join("%s%s" % (args.output_dir, node_threshold),
                     "evaluation.json")))
    command_args = (COMMANDS["node_threshold"] % (
        datasets_file, node_threshold, output_dir)).split()
    command_obj.propagate(command_args, exclude=["--dataset",
                                                 "--datasets",
                                                 "--dataset-file"])
    command = rebuild_command(command_args)
    if not resume:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    else:
        # Resuming: replay the logged subcommands until one differs.
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    evaluation_file = os.path.normpath(os.path.join(output_dir,
                                                    "evaluation.json"))
    try:
        with open(evaluation_file, u.open_mode("r")) as evaluation_handler:
            return json.loads(evaluation_handler.read()), resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")
def default_candidates_limits(args, fields):
    """Set the default limits for the random candidates analysis.

    The minimum is a fixed default; the maximum is the number of
    preferred fields in the dataset.
    """
    args.min_candidates = DEFAULT_MIN_CANDIDATES
    args.max_candidates = len(fields.preferred_fields())
def create_candidates_analysis(args, api, command_obj, resume=False):
    """Analyzes the model performance as a function of the number of
    random candidates.

    Creates the k-fold datasets, searches for the best random candidates
    number and logs the bigmler commands to reproduce the final ensemble
    and to delete the generated resources.
    """
    set_subcommand_file(args.output_dir)
    output_dir = args.output_dir
    if resume:
        # Reload the subcommands logged by the interrupted run.
        retrieve_subcommands()
    datasets_file, objective_name, resume = create_kfold_datasets_file(
        args, api, command_obj, resume=resume)
    args.objective_field = objective_name
    if args.number_of_models == 1:
        # A single model is not an ensemble; use the default ensemble size.
        args.number_of_models = DEFAULT_NUMBER_OF_MODELS
    message = ('Creating the random candidates set..........\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    random_candidates = best_candidates_number(
        datasets_file, args, command_obj,
        penalty=args.penalty,
        resume=resume)
    # showing the instruction to create the complete model with the
    # chosen random candidates number
    bigmler_command = ('bigmler --dataset %s --number-of-models %s'
                       ' --randomize --random-candidates %s') % ( \
        args.dataset, args.number_of_models, random_candidates)
    message = ('To create the final ensemble with the entire dataset using '
               'the selected number of random candidates use:\n%s\n\n' %
               bigmler_command)
    u.log_message(message, log_file=session_file, console=1)
    # showing the instruction to delete the user-given output-dir
    bigmler_command = ('bigmler delete --from-dir %s') % (
        output_dir)
    message = ('To delete all the resources generated by this analyze'
               ' subcommand use:\n%s\n\n') % bigmler_command
    u.log_message(message, log_file=session_file, console=1)
def best_candidates_number(datasets_file, args, command_obj,
                           penalty=None,
                           resume=False):
    """Selecting the best number of random candidates
    to be used in the ensemble construction.

    Evaluates each candidates number from args.min_candidates up to
    args.max_candidates (in args.nodes_step increments) and returns the
    one achieving the best penalized score.

    Args:
        datasets_file: Path to the file listing the k-fold datasets.
        args: Parsed command-line options (mutated: output_dir is moved to
            a "random" subdirectory; the nodes_step default is filled in).
        command_obj: Command object used to propagate options.
        penalty: Per-candidate score penalty.
        resume: Whether a previous run is being resumed.

    Returns:
        The random candidates number with the best penalized score.
    """
    loop_counter = 0
    candidates_file = os.path.normpath(os.path.join(args.output_dir,
                                                    CANDIDATES_LOG))
    candidates_writer = UnicodeWriter(candidates_file).open_writer()
    candidates_writer.writerow(CANDIDATES_HEADER)
    args.output_dir = os.path.normpath(os.path.join(args.output_dir,
                                                    "random"))
    max_candidates = args.max_candidates + 1
    if args.nodes_step is None:
        args.nodes_step = DEFAULT_CANDIDATES_STEP
    random_candidates = args.min_candidates
    if penalty is None:
        penalty = DEFAULT_CANDIDATES_PENALTY
    best_score = - float('inf')
    metric = args.optimize
    score = best_score
    best_counter = 0
    # Fix: initialize so the function cannot raise NameError when the loop
    # never runs (e.g. min_candidates already exceeds max_candidates).
    best_candidates = random_candidates
    while random_candidates < max_candidates:
        loop_counter += 1
        (score,
         metric_value,
         metric,
         resume) = candidates_evaluate(datasets_file, args,
                                       random_candidates, command_obj,
                                       penalty=penalty, resume=resume,
                                       metric=metric)
        candidates_writer.writerow([
            loop_counter, random_candidates, score, metric_value,
            best_score])
        if (score - EPSILON) > best_score:
            best_candidates = random_candidates
            best_score = score
            best_counter = loop_counter
            message = 'New best random candidates number is: %s\n' % \
                best_candidates
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            if metric in PERCENT_EVAL_METRICS:
                message = '%s = %0.2f%% (score = %s)\n' % (
                    metric.capitalize(), metric_value * 100, score)
            else:
                message = '%s = %f (score = %s)\n' % (metric.capitalize(),
                                                      metric_value,
                                                      score)
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
        # Fix: honor the configured step; previously the default step was
        # hard-coded here, ignoring the args.nodes_step set above.
        random_candidates += args.nodes_step
    if args.predictions_csv:
        resume = create_prediction_dataset(args.output_dir,
                                           "random%s" % best_counter,
                                           args, resume)
    message = ('The best random candidates number is: %s \n'
               % best_candidates)
    u.log_message(message, log_file=session_file, console=1)
    if metric in PERCENT_EVAL_METRICS:
        message = ('%s = %0.2f%%\n' % (metric.capitalize(),
                                       (best_score * 100)))
    else:
        message = ('%s = %f\n' % (metric.capitalize(), best_score))
    u.log_message(message, log_file=session_file, console=1)
    candidates_writer.close_writer()
    return best_candidates
def candidates_evaluate(datasets_file, args, random_candidates,
                        command_obj, penalty=DEFAULT_CANDIDATES_PENALTY,
                        metric=ACCURACY, resume=False):
    """Score the ensembles built with the given number of random candidates.

    Returns:
        (score, metric_value, metric_literal, resume) tuple, where the
        score is the averaged metric penalized by the candidates number.
    """
    # The evaluation is created reusing the input_fields stored in args.
    evaluation, resume = create_candidates_evaluations(
        datasets_file, args, command_obj, resume=resume,
        random_candidates=random_candidates)
    evaluation = extract_evaluation_info(evaluation, args.optimize_category)
    metric_literal = metric
    avg_metric = AVG_PREFIX % metric
    if avg_metric not in evaluation:
        # Classification metric missing: assume regression and use r-squared.
        metric_literal = R_SQUARED
        avg_metric = AVG_PREFIX % R_SQUARED
        if avg_metric not in evaluation:
            sys.exit("Failed to find %s or r-squared in the evaluation"
                     % metric)
    # Metrics that should be minimized flip the sign of the score.
    invert = -1 if metric in MINIMIZE_OPTIONS else 1
    metric_value = evaluation[avg_metric]
    score = invert * (metric_value - invert * penalty * random_candidates)
    return score, metric_value, metric_literal, resume
def create_candidates_evaluations(datasets_file, args, command_obj,
                                  resume=False,
                                  random_candidates=DEFAULT_MIN_CANDIDATES):
    """ Create random candidates ensembles evaluations

    Builds the bigmler subcommand for the given random candidates number,
    dispatches it (replaying logged subcommands when resuming) and reads
    back the generated evaluation.json file.

    Returns:
        (evaluation, resume) tuple with the parsed evaluation contents.
    """
    global subcommand_list
    output_dir = os.path.normpath(u.check_dir(
        os.path.join("%s%s" % (args.output_dir, random_candidates),
                     "evaluation.json")))
    command = COMMANDS["random_candidates"] % (
        datasets_file, random_candidates, output_dir)
    command_args = command.split()
    # (removed a dead, commented-out common-options block that was kept as
    # a bare string literal with no effect)
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    command_obj.propagate(command_args, exclude=["--dataset",
                                                 "--datasets",
                                                 "--dataset-file"])
    command = rebuild_command(command_args)
    if resume:
        # Resuming: replay the logged subcommands until one differs.
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(os.path.join(output_dir,
                                                    "evaluation.json"))
    try:
        with open(evaluation_file, u.open_mode("r")) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
            return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")
| |
#! /usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement training process including curriculum and hard negative mining."""
import argparse
import collections
import copy
from ..tqdm_utils import tqdm_for
from difflogic.dataset.utils import RandomlyIterDataset
from difflogic.thutils import monitor_gradrms
import jacinle.random as random
from jacinle.logging import get_logger
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_pbar
from jactorch.train.env import TrainerEnv
from jactorch.utils.meta import as_cuda
__all__ = ['TrainerBase', 'CurriculumTrainerBase', 'MiningTrainerBase']
logger = get_logger(__file__)
class TrainerBase(TrainerEnv):
  """The base Trainer class supports basic training and testing interfaces.

  Implement the basic training and testing procedure. The training have multiple
  epochs, with multiple iterations in each epoch. A list defined by
  [begin:step:end] represents the argument (number of objects) for testing.

  Args:
    model: The model for both training and evaluation, the mode is turned by
      calling model.eval() or model.train().
    optimizer: The optimizer for the model when being optimized.
    epochs: The number of epochs for training.
    epoch_size: The number of iterations per epoch during training.
    test_epoch_size: The number of iterations per epoch during testing.
    test_number_begin: The begin number of the list.
    test_number_step: The step size of the list.
    test_number_end: The end number of the list.
  """
  def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
               test_number_begin, test_number_step, test_number_end):
    super().__init__(model, optimizer)
    self.epochs = epochs
    self.epoch_size = epoch_size
    self.test_epoch_size = test_epoch_size
    self.test_number_begin = test_number_begin
    self.test_number_step = test_number_step
    self.test_number_end = test_number_end
  # Names of constructor arguments that are exposed as command-line options.
  __hyperparams__ = ('epochs', 'epoch_size', 'test_epoch_size',
                     'test_number_begin', 'test_number_step', 'test_number_end')
  # Fallback values applied to `defaults` when the caller omits them.
  __hyperparam_defaults__ = {'test_number_step': 0}
  @classmethod
  def _get_hyperparams(cls):
    # Reference TrainerBase explicitly so subclasses can append their own
    # hyperparameter tuples via super() chaining.
    return TrainerBase.__hyperparams__
  @classmethod
  def make_trainer_parser(cls, parser, defaults, prefix=None):
    """Register the trainer hyperparameters as command-line options."""
    for k, v in TrainerBase.__hyperparam_defaults__.items():
      defaults.setdefault(k, v)
    # Options get a prefix (e.g. --foo-epochs) when several trainers coexist.
    prefix = '--' if prefix is None else '--' + str(prefix) + '-'
    if not isinstance(parser, argparse._ArgumentGroup):
      parser = parser.add_argument_group('Trainer')
    parser.add_argument(
        prefix + 'epochs',
        type=int,
        default=defaults['epochs'],
        metavar='N',
        help='number of total epochs to run')
    parser.add_argument(
        prefix + 'epoch-size',
        type=int,
        default=defaults['epoch_size'],
        metavar='N',
        help='number of iterations per epoch')
    parser.add_argument(
        prefix + 'test-epoch-size',
        type=int,
        default=defaults['test_epoch_size'],
        metavar='N',
        help='number of iterations per test epoch')
    parser.add_argument(
        prefix + 'test-number-begin',
        type=int,
        default=defaults['test_number_begin'],
        metavar='N',
        help='begin number of nodes for test')
    parser.add_argument(
        prefix + 'test-number-step',
        type=int,
        default=defaults['test_number_step'],
        metavar='N',
        help='step number of nodes for test')
    parser.add_argument(
        prefix + 'test-number-end',
        type=int,
        default=defaults['test_number_end'],
        metavar='N',
        help='end number of nodes for test')
  @classmethod
  def from_args(cls, model, optimizer, args, prefix=None, **kwargs):
    """Build a trainer from parsed command-line args; kwargs override."""
    prefix = '' if prefix is None else str(prefix) + '_'
    init_params = {k: getattr(args, prefix + k) for k in cls._get_hyperparams()}
    init_params.update(kwargs)
    return cls(model, optimizer, **init_params)
  def _dump_meters(self, meters, mode):
    """Provide ways to dump the statistics (stored in meters)
    for plotting or analysing.
    """
    pass
  def _prepare_dataset(self, epoch_size, mode):
    """Prepare dataset for getting training/testing data.

    Args:
      epoch_size: The number of iterations in each epoch.
      mode: 'train' or 'test' for training or testing.

    Returns:
      None, this is a hook function before getting training/testing data.
    """
    raise NotImplementedError()
  def _get_train_data(self, index, meters):
    """Get training data can be directly fed into the train_step function.

    Args:
      index: The current iteration index in current epoch.
      meters: a stats collector to collect information.

    Returns:
      feed_dict, A dict can be directly fed into the train_step function.
    """
    raise NotImplementedError()
  def _get_result(self, index, meters, mode):
    """Include two steps, get testing data from dataset & evaluate the model.

    Args:
      index: The current iteration index in current epoch.
      meters: a stats collector to collect information.
      mode: 'train' or 'test' or others.

    Returns:
      message: The message to be shown on tqdm progress bar.
      extra_info: An extra variable to give extra information.
    """
    raise NotImplementedError()
  def _train_step(self, feed_dict, meters):
    """Run one optimization step and fold loss/monitor stats into meters."""
    ret = self.step(feed_dict)
    loss, monitors, output_dict, extras = ret
    meters.update(monitor_gradrms(self.model))
    meters.update(monitors)
    meters.update(loss=loss)
    return 'Train: loss={loss:.4f}'.format(loss=loss), ret
  def _train_epoch(self, epoch_size):
    """Train for one epoch of `epoch_size` iterations; return the meters."""
    model = self.model
    meters = GroupMeters()
    self._prepare_dataset(epoch_size, mode='train')
    def train_func(index):
      # Data acquisition runs with the model in eval mode; the gradient
      # step below is done back in train mode.
      model.eval()
      feed_dict = self._get_train_data(index, meters)
      model.train()
      message, _ = self._train_step(feed_dict, meters)
      return message
    # For $epoch_size times, do train_func with tqdm progress bar.
    tqdm_for(epoch_size, train_func)
    logger.info(
        meters.format_simple(
            '> Train Epoch {:5d}: '.format(self.current_epoch),
            compressed=False))
    self._dump_meters(meters, 'train')
    return meters
  def _test_epoch(self, epoch_size):
    """Evaluate for one epoch of `epoch_size` iterations; return the meters."""
    meters = GroupMeters()
    self._prepare_dataset(epoch_size, mode='test')
    def test_func(index):
      message, _ = self._get_result(index, meters, mode='test')
      return message
    tqdm_for(epoch_size, test_func)
    logger.info(meters.format_simple('> Evaluation: ', compressed=False))
    self._dump_meters(meters, 'test')
    return meters
  def _early_stop(self, meters):
    """A hook function to enable early_stop checking."""
    return False
  def train(self):
    """Run up to self.epochs training epochs, honoring early stopping."""
    self.early_stopped = False
    # NOTE(review): assumes self.epochs >= 1; `meters` is unbound otherwise.
    for i in range(1, 1 + self.epochs):
      self.current_epoch = i
      meters = self._train_epoch(self.epoch_size)
      if self._early_stop(meters):
        self.early_stopped = True
        break
    return meters
  def test(self):
    """Evaluate the model on every test number in [begin:step:end]."""
    self.model.eval()
    results = []
    self.test_number = self.test_number_begin
    while self.test_number <= self.test_number_end:
      meters = self._test_epoch(self.test_epoch_size)
      results.append(meters)
      # A non-positive step means testing a single number only.
      if self.test_number_step <= 0:
        break
      self.test_number += self.test_number_step
    return results
class CurriculumTrainerBase(TrainerBase):
  """A base trainer class supports curriculum learning w.r.t an integer argument.

  The lessons in the curriculum are defined by an integer argument: The number
  of object. The lessons are defined by a list of the form [start:step:graduate].
  """
  def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
               test_number_begin, test_number_step, test_number_end,
               curriculum_start, curriculum_step, curriculum_graduate,
               enable_candidate, curriculum_thresh, curriculum_thresh_relax,
               curriculum_force_upgrade_epochs, sample_array_capacity,
               enhance_epochs):
    super().__init__(model, optimizer, epochs, epoch_size, test_epoch_size,
                     test_number_begin, test_number_step, test_number_end)
    self.curriculum_start = curriculum_start
    self.curriculum_step = curriculum_step
    self.curriculum_graduate = curriculum_graduate
    self.enable_candidate = enable_candidate
    self.curriculum_thresh = curriculum_thresh
    self.curriculum_thresh_relax = curriculum_thresh_relax
    self.curriculum_force_upgrade_epochs = curriculum_force_upgrade_epochs
    self.sample_array_capacity = sample_array_capacity
    self.enhance_epochs = enhance_epochs
  # Extra hyperparameters (appended to TrainerBase's) parsed from the CLI.
  __hyperparams__ = ('curriculum_start', 'curriculum_step',
                     'curriculum_graduate', 'enable_candidate',
                     'curriculum_thresh', 'curriculum_thresh_relax',
                     'curriculum_force_upgrade_epochs', 'sample_array_capacity',
                     'enhance_epochs')
  __hyperparam_defaults__ = {
      'curriculum_step': 1,
      'enable_candidate': True,
      'curriculum_thresh': 1.0,
      'curriculum_thresh_relax': 0.0,
      'curriculum_force_upgrade_epochs': None,
      'sample_array_capacity': 1,
      'enhance_epochs': 0
  }
  @classmethod
  def _get_hyperparams(cls):
    return super()._get_hyperparams() + CurriculumTrainerBase.__hyperparams__
  @classmethod
  def make_trainer_parser(cls, parser, defaults, prefix=None):
    """Register the curriculum hyperparameters as command-line options."""
    super().make_trainer_parser(parser, defaults, prefix)
    for k, v in CurriculumTrainerBase.__hyperparam_defaults__.items():
      defaults.setdefault(k, v)
    prefix = '--' if prefix is None else '--' + str(prefix) + '-'
    if not isinstance(parser, argparse._ArgumentGroup):
      parser = parser.add_argument_group('CurriculumTrainer')
    parser.add_argument(
        prefix + 'curriculum-start',
        type=int,
        default=defaults['curriculum_start'],
        metavar='N',
        help='starting number of nodes for curriculum')
    parser.add_argument(
        prefix + 'curriculum-step',
        type=int,
        default=defaults['curriculum_step'],
        metavar='N',
        help='number of nodes difference between lessons in curriculum')
    parser.add_argument(
        prefix + 'curriculum-graduate',
        type=int,
        default=defaults['curriculum_graduate'],
        metavar='N',
        help='graduate number of nodes for curriculum')
    parser.add_argument(
        prefix + 'enable-candidate',
        type='bool',
        default=defaults['enable_candidate'],
        metavar='B',
        help='enable candidate stage in curriculum')
    parser.add_argument(
        prefix + 'curriculum-thresh',
        type=float,
        default=defaults['curriculum_thresh'],
        metavar='F',
        help='threshold for curriculum lessons')
    parser.add_argument(
        prefix + 'curriculum-thresh-relax',
        type=float,
        default=defaults['curriculum_thresh_relax'],
        metavar='F',
        help='threshold = 1 - (graduate_number - current_number) * relax')
    parser.add_argument(
        prefix + 'curriculum-force-upgrade-epochs',
        type=int,
        default=defaults['curriculum_force_upgrade_epochs'],
        metavar='N',
        help='maximum number of epochs to force upgrade lesson')
    parser.add_argument(
        prefix + 'sample-array-capacity',
        type=int,
        default=defaults['sample_array_capacity'],
        metavar='N',
        help='the capacity of the sample array for numbers')
    parser.add_argument(
        prefix + 'enhance-epochs',
        type=int,
        default=defaults['enhance_epochs'],
        metavar='N',
        help='The number of training epochs even after graduation.')
  def _get_accuracy(self, meters):
    """return the statistics to be compared with the threshold."""
    raise NotImplementedError()
  def _get_threshold(self):
    """Return the pass threshold, relaxed by the distance to graduation."""
    return self.curriculum_thresh - self.curriculum_thresh_relax * \
        (self.curriculum_graduate - self.current_number)
  def _pass_lesson(self, meters):
    """Check whether the current performance is enough to pass current lesson."""
    acc = self._get_accuracy(meters)
    thresh = self._get_threshold()
    if acc >= thresh:
      return True
    # Force upgrade to next lesson if used too much epochs.
    t = self.curriculum_force_upgrade_epochs
    if t is not None and self.current_epoch - self.last_upgrade_epoch >= t:
      return True
    return False
  def _upgrade_lesson(self):
    """Upgrade to next lesson."""
    self.nr_upgrades += 1
    self.last_upgrade_epoch = self.current_epoch
    if self.enable_candidate:
      # When all lessons finished, it becomes candidate before graduated.
      if self.current_number < self.curriculum_graduate:
        self.current_number += self.curriculum_step
        # sample_array records the lessons the model recently studied.
        self.sample_array.append(self.current_number)
      elif self.is_candidate:
        self.is_graduated = True
      else:
        self.is_candidate = True
    else:
      # Without the candidate stage, graduation happens immediately after
      # the last lesson is passed.
      if self.current_number < self.curriculum_graduate:
        self.current_number += self.curriculum_step
        self.sample_array.append(self.current_number)
      else:
        self.is_graduated = True
  def _take_exam(self, train_meters=None):
    """Use training results as exam result, upgrade to next lesson if pass."""
    if self._pass_lesson(train_meters):
      self._upgrade_lesson()
  def _train_epoch(self, epoch_size):
    """Add an exam session after each training epoch."""
    meters = super()._train_epoch(epoch_size)
    if not self.is_graduated:
      # A copy is passed so the exam cannot mutate the epoch's stats.
      self._take_exam(train_meters=copy.copy(meters))
    return meters
  def _early_stop(self, meters):
    """Early stop the training when the model graduated from the curriculum."""
    return self.is_graduated and \
        self.current_epoch - self.last_upgrade_epoch >= self.enhance_epochs
  def _sample_number(self, mode):
    """Sample an integer argument from choices defined by an array."""
    if mode == 'test':
      return self.test_number
    # review (sample training data) from recently studied lessons.
    return random.choice(self.sample_array)
  def train(self):
    """Run curriculum training; return whether the model graduated."""
    self.is_candidate = False
    self.is_graduated = False
    self.nr_upgrades = 0
    self.last_upgrade_epoch = 0
    self.current_number = self.curriculum_start
    # The deque keeps only the most recent lessons for review sampling.
    self.sample_array = collections.deque(maxlen=self.sample_array_capacity)
    self.sample_array.append(self.current_number)
    super().train()
    return self.is_graduated
class MiningTrainerBase(CurriculumTrainerBase):
"""A trainer class supports both curriculum learning and hard negative mining.
Targeted on RL cases (with environment provided). Maintain two list of data
represents positive and negative ones. The environment instance is regarded as
positive if the agent can successfully accomplish the task.
Based on the curriculum schedule, there are periodically mining process (also
used as exams to determine the upgrade to next lesson or not). During the
mining process, random environment instances are being sampled, and collected
into positive and negative ones according to the outcome. Dur training, the
data are being balanced sampled from the positive and negative examples.
"""
pos_data = None
neg_data = None
def __init__(self, model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end,
curriculum_start, curriculum_step, curriculum_graduate,
enable_candidate, curriculum_thresh, curriculum_thresh_relax,
curriculum_force_upgrade_epochs, sample_array_capacity,
enhance_epochs, enable_mining, repeat_mining, candidate_mul,
mining_interval, mining_epoch_size, mining_dataset_size,
inherit_neg_data, disable_balanced_sample, prob_pos_data):
super().__init__(model, optimizer, epochs, epoch_size, test_epoch_size,
test_number_begin, test_number_step, test_number_end,
curriculum_start, curriculum_step, curriculum_graduate,
enable_candidate, curriculum_thresh,
curriculum_thresh_relax, curriculum_force_upgrade_epochs,
sample_array_capacity, enhance_epochs)
self.enable_mining = enable_mining
self.repeat_mining = repeat_mining
self.candidate_mul = candidate_mul
self.mining_interval = mining_interval
self.mining_epoch_size = mining_epoch_size
self.mining_dataset_size = mining_dataset_size
self.inherit_neg_data = inherit_neg_data
self.disable_balanced_sample = disable_balanced_sample
self.prob_pos_data = prob_pos_data
__hyperparams__ = ('enable_mining', 'repeat_mining', 'candidate_mul',
'mining_interval', 'mining_epoch_size',
'mining_dataset_size', 'inherit_neg_data',
'disable_balanced_sample', 'prob_pos_data')
__hyperparam_defaults__ = {
'repeat_mining': True,
'candidate_mul': 2,
'inherit_neg_data': False,
'disable_balanced_sample': False,
'prob_pos_data': 0.5
}
@classmethod
def _get_hyperparams(cls):
return super()._get_hyperparams() + MiningTrainerBase.__hyperparams__
@classmethod
def make_trainer_parser(cls, parser, defaults, prefix=None):
super().make_trainer_parser(parser, defaults, prefix)
for k, v in MiningTrainerBase.__hyperparam_defaults__.items():
defaults.setdefault(k, v)
prefix = '--' if prefix is None else '--' + str(prefix) + '-'
if not isinstance(parser, argparse._ArgumentGroup):
parser = parser.add_argument_group('MiningTrainer')
parser.add_argument(
prefix + 'enable-mining',
type='bool',
default=defaults['enable_mining'],
metavar='B',
help='enable hard-env mining')
parser.add_argument(
prefix + 'repeat-mining',
type='bool',
default=defaults['repeat_mining'],
metavar='B',
help='repeat mining until failing on a lesson')
parser.add_argument(
prefix + 'candidate-mul',
type=int,
default=defaults['candidate_mul'],
metavar='N',
help='x times more mining iters when being candidate')
parser.add_argument(
prefix + 'mining-interval',
type=int,
default=defaults['mining_interval'],
metavar='N',
help='the interval(number of epochs) of the mining')
parser.add_argument(
prefix + 'mining-epoch-size',
type=int,
default=defaults['mining_epoch_size'],
metavar='N',
help='number of iterations per epoch of mining')
parser.add_argument(
prefix + 'mining-dataset-size',
type=int,
default=defaults['mining_dataset_size'],
metavar='N',
help='size of the dataset collected during mining')
parser.add_argument(
prefix + 'inherit-neg-data',
type='bool',
default=defaults['inherit_neg_data'],
metavar='B',
help='recompute the negative data from last mining')
parser.add_argument(
prefix + 'disable-balanced-sample',
type='bool',
default=defaults['disable_balanced_sample'],
metavar='B',
help='use random samples instead of balanced samples when enable mining'
)
parser.add_argument(
prefix + 'prob-pos-data',
type=float,
default=defaults['prob_pos_data'],
metavar='F',
help='the probability of use positive data during training')
def _get_player(self, number, mode):
"""Get an environment to be interact with, with nr_obj & mode specified."""
raise NotImplementedError()
def _balanced_sample(self, meters):
"""Balanced sample positive and negative data with $prob_pos_data."""
nr_pos, nr_neg = self.pos_data.size, self.neg_data.size
assert nr_pos + nr_neg > 0
if nr_neg == 0:
use_pos_data = True
elif nr_pos == 0:
use_pos_data = False
else:
use_pos_data = random.rand() < self.prob_pos_data
meters.update(pos_data_ratio=int(use_pos_data))
pool = self.pos_data if use_pos_data else self.neg_data
return pool.get()
def _get_number_and_player(self, meters, mode):
"""Sample both the number of objects and the environment."""
balanced_sample = mode == 'train' and self.enable_mining and (
not self.disable_balanced_sample and self.pos_data is not None)
if balanced_sample:
number, player = self._balanced_sample(meters)
else:
number = self._sample_number(mode)
player = self._get_player(number, mode)
if mode == 'train':
meters.update(train_number=number)
return number, player
def _get_result_given_player(self, index, meters, number, player, mode):
"""Compute the result given player, upon the mode.
Args:
index: Current episode id.
meters: Used to collect stats.
number: The number of objects/blocks.
player: Environment for player to interact.
mode: 'train'/'test'/'mining'/'inherit'
Returns('train' mode):
feed_dict: feed_dict for train_step
Returns(other modes):
message: The message shown on the progress bar.
result: necessary extra information, see also _extract_info
"""
raise NotImplementedError()
def _get_result(self, index, meters, mode):
number, player = self._get_number_and_player(meters, mode)
return self._get_result_given_player(index, meters, number, player, mode)
def _extract_info(self, extra):
"""Extract necessary information from extra variable.
Args:
extra: An extra variable returned by _get_result_given_player.
Returns:
succ: The result of the episode, success or not, to classify as pos/neg.
number: The number of objects/blocks
backup: The clone of the environment, for interacting multiple times.
"""
raise NotImplementedError()
def _get_train_data(self, index, meters):
return self._get_result(index, meters, mode='train')
def _inherit_neg_data(self, neg_data, old_neg_data, meters,
                      mining_dataset_size):
    """Re-examine previously collected negative data so it is not wasted.

    Each old negative sample is replayed ('inherit' mode); samples the
    model still fails on are appended to the fresh ``neg_data``.
    """
    # Nothing to inherit when the feature is off or the previous round
    # produced no negative examples.
    if not self.inherit_neg_data or \
            (old_neg_data is None or old_neg_data.size == 0):
        return
    original_size = neg_data.size
    old_neg_data.reset()
    # Never re-examine more entries than the dataset is allowed to hold.
    maximum_inherit_size = min(old_neg_data.size, mining_dataset_size)

    def inherit_func(index):
        # Replay one old negative sample; keep it only if it still fails.
        number, player = old_neg_data.get()
        message, result = self._get_result_given_player(
            index, meters, number, player, mode='inherit')
        positive, number, backup = self._extract_info(result)
        if not positive:
            neg_data.append((number, backup))
        return message

    tqdm_for(maximum_inherit_size, inherit_func)
    logger.info(
        meters.format_simple(
            '> Inherit: new_size:{}, old_size:{}'.format(
                neg_data.size - original_size, old_neg_data.size),
            compressed=False))
def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
    """Take exam, collect and update positive dataset and negative dataset.

    Runs up to ``mining_epoch_size`` episodes in 'mining' mode with the
    model in eval mode, splitting (number, backup) pairs into positive
    and negative datasets capped at ``mining_dataset_size`` each.
    Returns the GroupMeters gathered during mining (used as the exam
    outcome by ``_take_exam``).
    """
    pos_data = RandomlyIterDataset()
    neg_data = RandomlyIterDataset()
    # Mining evaluates the current model; no gradient updates here.
    self.model.eval()
    meters = GroupMeters()
    with tqdm_pbar(total=mining_epoch_size) as pbar:
        for i in range(mining_epoch_size):
            message, result = self._get_result(i, meters, mode='mining')
            positive, number, backup = self._extract_info(result)
            dataset = pos_data if positive else neg_data
            # Each dataset is capped; extra samples are dropped.
            if dataset.size < mining_dataset_size:
                dataset.append((number, backup))
            pbar.set_description(message)
            pbar.update()
            # When both positive and negative dataset are full, break.
            if pos_data.size >= mining_dataset_size and \
                    neg_data.size >= mining_dataset_size:
                break
    logger.info(meters.format_simple('> Mining: ', compressed=False))
    # Salvage still-failing samples from the previous negative dataset.
    self._inherit_neg_data(neg_data, self.neg_data, meters, mining_dataset_size)
    self.pos_data = pos_data
    self.neg_data = neg_data
    self._dump_meters(meters, 'mining')
    return meters
def _upgrade_lesson(self):
    """Advance the curriculum; drop the mining datasets once graduated."""
    super()._upgrade_lesson()
    if self.is_graduated:
        self.pos_data = None
        self.neg_data = None
def _take_exam(self, train_meters=None):
    """Decide whether to run a mining exam this epoch and act on it.

    Without mining enabled this defers to the parent implementation.
    Otherwise an exam (a mining epoch) runs either when one is already
    pending (``need_mining``) or when ``mining_interval`` epochs have
    elapsed since the last one; passing may upgrade the lesson and,
    depending on ``repeat_mining``, immediately trigger another exam.
    """
    if not self.enable_mining:
        super()._take_exam(train_meters)
        return
    # The mining process, as well as the examing time,
    # only taken at a certain rate.
    if self.need_mining or (self.mining_interval <=
                            self.current_epoch - self.last_mining_epoch):
        self.last_mining_epoch = self.current_epoch
        mining_epoch_size = self.mining_epoch_size
        # The exam elapses longer when in candidate status.
        if self.is_candidate:
            mining_epoch_size *= self.candidate_mul
        meters = self._mining_epoch(mining_epoch_size, self.mining_dataset_size)
        # Use the performance during mining as the outcome for the exam.
        if self._pass_lesson(meters):
            self._upgrade_lesson()
            if self.is_graduated or (not self.repeat_mining and self.need_mining):
                self.need_mining = False
            else:
                # Can take exam consecutively if repeat_mining=True.
                self.need_mining = True
                # Recurse: re-enter with need_mining set, running another exam.
                self._take_exam()
        else:
            self.need_mining = False
def train(self):
    """Reset mining bookkeeping, then run the regular training loop."""
    self.last_mining_epoch = 0
    self.need_mining = False
    return super().train()
| |
import hmac
import json
import urllib
import hashlib
import requests
from urllib import parse
from datetime import datetime
class LocalBitcoin:
    """Minimal HMAC-authenticated client for the LocalBitcoins REST API.

    Every public method wraps a single API endpoint and delegates to
    :meth:`sendRequest`, which signs the request with the account's HMAC
    key/secret pair and returns the ``data`` payload of the JSON response.

    Note: several parameter/key spellings ('ammount') and method names
    ('getWalletBallance') are kept as-is for backward compatibility with
    existing callers.
    """

    baseurl = 'https://localbitcoins.com'

    def __init__(self, hmac_auth_key, hmac_auth_secret, debug=False):
        # Credentials issued from the LocalBitcoins API dashboard.
        self.hmac_auth_key = hmac_auth_key
        self.hmac_auth_secret = hmac_auth_secret
        # When true, every request/response pair is printed to stdout.
        self.debug = debug

    def getAccountInfo(self, username):
        """Return public profile information for *username*."""
        return self.sendRequest('/api/account_info/' + username + '/', '', 'get')

    def getNotifications(self):
        """Return recent notifications."""
        return self.sendRequest('/api/notifications/', '', 'get')

    def getMyself(self):
        """Return information on the currently logged in user (the token owner)."""
        return self.sendRequest('/api/myself/', '', 'get')

    def checkPinCode(self, code):
        """Check *code* against the user's currently active PIN code.

        Use this to ensure the person using the session is the legitimate user.
        """
        return self.sendRequest('/api/pincode/', {'code': code}, 'post')

    def getDashboard(self):
        """Return open and active contacts."""
        return self.sendRequest('/api/dashboard/', '', 'get')

    def getDashboardReleased(self):
        """Return released (successful) contacts."""
        return self.sendRequest('/api/dashboard/released/', '', 'get')

    def getDashboardCanceled(self):
        """Return canceled contacts."""
        return self.sendRequest('/api/dashboard/canceled/', '', 'get')

    def getDashboardClosed(self):
        """Return closed contacts, both released and canceled."""
        return self.sendRequest('/api/dashboard/closed/', '', 'get')

    def contactRelease(self, contact_id):
        """Release the escrow of the contact with ID *contact_id*.

        On success there's a complimentary message on the data key.
        """
        return self.sendRequest('/api/contact_release/' + contact_id + '/', '', 'post')

    def contactReleasePin(self, contact_id, pincode):
        """Release the escrow of *contact_id*, authorizing with *pincode*."""
        return self.sendRequest('/api/contact_release_pin/' + contact_id + '/', {'pincode': pincode}, 'post')

    def getContactMessages(self, contact_id):
        """Read all messaging from the contact (on the message_list key).

        attachment_* fields exist only if there is an attachment.
        """
        return self.sendRequest('/api/contact_messages/' + contact_id + '/', '', 'get')

    def markContactAsPaid(self, contact_id):
        """Mark a contact as paid."""
        # NOTE(review): this endpoint is issued as GET here; the API docs
        # describe it as an action (POST) -- verify before relying on it.
        return self.sendRequest('/api/contact_mark_as_paid/' + contact_id + '/', '', 'get')

    def postMessageToContact(self, contact_id, message, document=None):
        """Post *message* to the contact.

        NOTE(review): *document* is accepted but currently ignored
        (attachment upload is not implemented).
        """
        return self.sendRequest('/api/contact_message_post/' + contact_id + '/', {'msg': message}, 'post')

    def startDispute(self, contact_id, topic=None):
        """Start a dispute with the contact, if possible.

        A short *topic* helps support deal with the problem.

        Bug fix: the previous implementation overwrote the ``topic``
        argument with '' before testing it, so a caller-supplied topic
        was never sent and an empty topic was always posted.
        """
        post = ''
        if topic is not None:
            post = {'topic': topic}
        return self.sendRequest('/api/contact_dispute/' + contact_id + '/', post, 'post')

    def cancelContact(self, contact_id):
        """Cancel the contact, if possible."""
        return self.sendRequest('/api/contact_cancel/' + contact_id + '/', '', 'post')

    def fundContact(self, contact_id):
        """Attempt to fund an unfunded local contact from the seller's wallet."""
        return self.sendRequest('/api/contact_fund/' + contact_id + '/', '', 'post')

    def createContact(self, contact_id, ammount, message=None):
        """Attempt to create a contact to trade bitcoins.

        *ammount* is a number in the advertisement's fiat currency.
        Returns the API URL to the newly created contact at
        actions.contact_url; data.funded indicates automatic funding.
        Only non-floating LOCAL_SELL may return unfunded; all other trade
        types either fund or fail.
        """
        # NOTE(review): 'ammount' is sent verbatim; confirm against current
        # API docs whether the endpoint expects 'amount' instead.
        if message is None:
            post = {'ammount': ammount}
        else:
            post = {'ammount': ammount, 'message': message}
        return self.sendRequest('/api/contact_create/' + contact_id + '/', post, 'post')

    def getContactInfo(self, contact_id):
        """Get information on a single contact you are involved in.

        Same fields as in /api/contacts/.
        """
        return self.sendRequest('/api/contact_info/' + contact_id + '/', '', 'get')

    def getContactsInfo(self, contacts):
        """Bulk-fetch contact info.

        *contacts* is a comma-separated list of contact IDs (max 50).
        The token owner must be buyer or seller in each contact; others
        are silently omitted. No particular order is guaranteed.
        """
        return self.sendRequest('/api/contact_info/', {'contacts': contacts}, 'get')

    def getRecentMessages(self):
        """Return up to 50 newest trade messages, newest first.

        Same format as /api/contact_messages/, plus a contact_id field.
        """
        return self.sendRequest('/api/recent_messages/', '', 'get')

    def postFeedbackToUser(self, username, feedback, message=None):
        """Give feedback to a user.

        *feedback* is one of: trust, positive, neutral, block,
        block_without_feedback. block_without_feedback clears the message;
        with block the message is mandatory.
        """
        post = {'feedback': feedback}
        if message is not None:
            post = {'feedback': feedback, 'msg': message}
        return self.sendRequest('/api/feedback/' + username + '/', post, 'post')

    def getWallet(self):
        """Get information about the token owner's wallet balance."""
        return self.sendRequest('/api/wallet/', '', 'get')

    def getWalletBallance(self):
        """Like getWallet, but only message, receiving_address_list and total.

        Use this if you don't care about transactions at the moment.
        """
        return self.sendRequest('/api/wallet-balance/', '', 'get')

    def walletSend(self, ammount, address):
        """Send *ammount* bitcoins from the token owner's wallet to *address*.

        Requires the Money API permission. Minimize the lifetime of such
        tokens; call /api/logout/ to expire the current token instantly.
        """
        return self.sendRequest('/api/wallet-send/', {'ammount': ammount, 'address': address}, 'post')

    def walletSendWithPin(self, ammount, address, pincode):
        """As walletSend, but additionally requires the active PIN code.

        Validate a PIN without sending via /api/pincode/. Do not retain
        the PIN beyond a short user session.
        """
        return self.sendRequest('/api/wallet-send-pin/', {'ammount': ammount, 'address': address, 'pincode': pincode}, 'post')

    def getWalletAddress(self):
        """Get an unused receiving address for the token owner's wallet.

        May keep returning the same (unused) address if called repeatedly.
        """
        return self.sendRequest('/api/wallet-addr/', '', 'post')

    def logout(self):
        """Expire the current access token immediately."""
        return self.sendRequest('/api/logout/', '', 'post')

    def getOwnAds(self):
        """List the token owner's ads (data key ad_list), paginated if many."""
        # NOTE(review): issued as POST here; the docs describe /api/ads/ as
        # a listing endpoint -- confirm the expected verb.
        return self.sendRequest('/api/ads/', '', 'post')

    def editAd(self, ad_id, lat, bank_name, price_equation, lon, countrycode, opening_hours, msg, max_amount, track_max_amount, visible):
        """Edit an ad, supplying all required fields.

        To update only the equation, prefer changeEquation -- this endpoint
        is much heavier on the API side.
        """
        return self.sendRequest('/api/ad/' + ad_id + '/', {'lat': lat, 'bank_name': bank_name, 'price_equation': price_equation, 'lon': lon, 'countrycode': countrycode, 'opening_hours': opening_hours, 'msg': msg, 'max_amount': max_amount, 'track_max_amount': track_max_amount, 'visible': visible}, 'post')

    def newInvoice(self, currency, amount, description):
        """Create a new invoice under the LBC merchant services page."""
        return self.sendRequest('/api/merchant/new_invoice/', {'currency': currency, 'amount': amount, 'description': description,}, 'post')

    def markIdentityVerified(self, contact_id):
        """Mark a user's identity as verified based on an open contact id."""
        return self.sendRequest('/api/contact_mark_identified/' + contact_id + '/', '', 'post')

    def getAd(self, ad_id):
        """Get all details of any ad by its ID."""
        return self.sendRequest('/api/ad-get/' + ad_id + '/', '', 'get')

    def changeEquation(self, ad_id, equation):
        """Change an ad's pricing equation."""
        return self.sendRequest('/api/ad-equation/{ad_id}/'.format(ad_id=ad_id), {'price_equation': equation}, 'post')

    def sendRequest(self, endpoint, params, method):
        """Sign and dispatch one API request; return the JSON 'data' payload.

        The signature is HMAC-SHA256 over nonce + key + endpoint + encoded
        params, per the LocalBitcoins HMAC authentication scheme.

        :param endpoint: API path, e.g. '/api/myself/'.
        :param params: dict of parameters, or '' for none.
        :param method: 'get' or 'post'.
        """
        params_encoded = ''
        if params != '':
            params_encoded = parse.urlencode(params)
            if method == 'get':
                # GET params enter the signed message as a query string.
                params_encoded = '?' + params_encoded
        # Millisecond UTC timestamp doubles as a monotonically increasing nonce.
        now = datetime.utcnow()
        epoch = datetime.utcfromtimestamp(0)
        delta = now - epoch
        nonce = int(delta.total_seconds() * 1000)
        message = str(nonce) + self.hmac_auth_key + endpoint + params_encoded
        signature = hmac.new(
            bytes(self.hmac_auth_secret, 'latin-1'),
            msg=bytes(message, 'latin-1'),
            digestmod=hashlib.sha256).hexdigest().upper()
        headers = {
            'Apiauth-key': self.hmac_auth_key,
            'Apiauth-Nonce': str(nonce),
            'Apiauth-Signature': signature,
        }
        if method == 'get':
            response = requests.get(self.baseurl + endpoint, headers=headers, params=params)
        else:
            response = requests.post(self.baseurl + endpoint, headers=headers, data=params)
        if self.debug:
            print('REQUEST: ' + self.baseurl + endpoint)
            print('PARAMS: ' + str(params))
            print('METHOD: ' + method)
            print('RESPONSE: ' + response.text)
        return json.loads(response.text)['data']
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic payload type for deserialized responses.
T = TypeVar('T')
# Signature of the optional ``cls`` response hook accepted by every operation:
# it receives the raw pipeline response, the deserialized model, and the
# response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesOperations:
    """VpnSitesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Expose the generated model module so callers can reach the models
    # used by this operation group.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def get(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    **kwargs: Any
) -> "_models.VpnSite":
    """Retrieves the details of a VPN site.

    :param resource_group_name: The resource group name of the VpnSite.
    :type resource_group_name: str
    :param vpn_site_name: The name of the VpnSite being retrieved.
    :type vpn_site_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VpnSite, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_11_01.models.VpnSite
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnSite"]
    # Map auth/not-found/conflict status codes to specific azure-core
    # exceptions; callers may extend or override via the error_map kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 is the only success code for this GET.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnSite', pipeline_response)

    if cls:
        # Hand the caller's hook the raw response, the model, and (empty) headers.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    vpn_site_parameters: "_models.VpnSite",
    **kwargs: Any
) -> "_models.VpnSite":
    """Issue the initial PUT of the create-or-update long-running operation.

    Called by ``begin_create_or_update``; returns the deserialized VpnSite
    (or the result of the ``cls`` hook) from the first response.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnSite"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_site_parameters, 'VpnSite')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated existing resource, 201 = created new resource.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('VpnSite', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VpnSite', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    vpn_site_parameters: "_models.VpnSite",
    **kwargs: Any
) -> AsyncLROPoller["_models.VpnSite"]:
    """Creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.

    :param resource_group_name: The resource group name of the VpnSite.
    :type resource_group_name: str
    :param vpn_site_name: The name of the VpnSite being created or updated.
    :type vpn_site_name: str
    :param vpn_site_parameters: Parameters supplied to create or update VpnSite.
    :type vpn_site_parameters: ~azure.mgmt.network.v2020_11_01.models.VpnSite
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VpnSite or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VpnSite]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnSite"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT; the ``cls`` lambda keeps
        # the raw pipeline response for the poller instead of a model.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            vpn_site_name=vpn_site_name,
            vpn_site_parameters=vpn_site_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; the polling method
    # must not see them again.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the LRO into a VpnSite.
        deserialized = self._deserialize('VpnSite', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new LRO.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    vpn_site_parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.VpnSite":
    """Updates VpnSite tags.

    :param resource_group_name: The resource group name of the VpnSite.
    :type resource_group_name: str
    :param vpn_site_name: The name of the VpnSite being updated.
    :type vpn_site_name: str
    :param vpn_site_parameters: Parameters supplied to update VpnSite tags.
    :type vpn_site_parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VpnSite, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_11_01.models.VpnSite
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnSite"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_site_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    # Tag updates are a PATCH: only the tags are replaced, not the resource.
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnSite', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE of the delete long-running operation.

    Called by ``begin_delete``; returns None (or the ``cls`` hook result).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = accepted for async deletion, 204 = already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    vpn_site_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes a VpnSite.

    :param resource_group_name: The resource group name of the VpnSite.
    :type resource_group_name: str
    :param vpn_site_name: The name of the VpnSite being deleted.
    :type vpn_site_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial DELETE; keep the raw response
        # for the poller via the ``cls`` lambda.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            vpn_site_name=vpn_site_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call; must not reach the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deletion yields no body; only the optional ``cls`` hook produces a value.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new LRO.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ListVpnSitesResult"]:
    """Lists all the vpnSites in a resource group.

    :param resource_group_name: The resource group name of the VpnSite.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVpnSitesResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnSitesResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: build the URL from the metadata template.
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service-provided next_link already
            # embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites'}  # type: ignore
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ListVpnSitesResult"]:
    """Lists all the VpnSites in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnSitesResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVpnSitesResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnSitesResult"]
    # Map well-known HTTP statuses to azure-core exceptions; callers may
    # extend/override the mapping via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the templated metadata URL for the first
        # page, or the service-provided ``next_link`` for continuation pages.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # ``next_link`` already carries its own query string, so no
            # additional parameters are attached.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page items).
        deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page; raise HttpResponseError for any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites'}  # type: ignore
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rnat(base_resource) :
    """ Configuration for RNAT configured route resource.

    Thin NITRO resource wrapper: each public property simply exposes a
    backing ``_field`` attribute, and the classmethods translate into
    clear/update/unset/get requests against the NetScaler appliance.
    """
    def __init__(self) :
        # Backing fields for the NITRO-exposed properties below.
        self._network = ""
        self._netmask = ""
        self._aclname = ""
        self._redirectport = False
        self._natip = ""
        self._td = 0
        self._natip2 = ""
        self._srcippersistency = ""
        # Triple underscore: name-mangled to _rnat___count. Populated by the
        # payload formatter when a count request is issued (see count()).
        self.___count = 0

    # The property pairs below are generated accessors: the getter returns
    # the backing field and the setter stores the value unchanged. The
    # docstrings mirror the NITRO API documentation for each attribute.
    @property
    def network(self) :
        ur"""The network address defined for the RNAT entry.<br/>Minimum length = 1.
        """
        try :
            return self._network
        except Exception as e:
            raise e

    @network.setter
    def network(self, network) :
        ur"""The network address defined for the RNAT entry.<br/>Minimum length = 1
        """
        try :
            self._network = network
        except Exception as e:
            raise e

    @property
    def netmask(self) :
        ur"""The subnet mask for the network address.<br/>Minimum length = 1.
        """
        try :
            return self._netmask
        except Exception as e:
            raise e

    @netmask.setter
    def netmask(self, netmask) :
        ur"""The subnet mask for the network address.<br/>Minimum length = 1
        """
        try :
            self._netmask = netmask
        except Exception as e:
            raise e

    @property
    def aclname(self) :
        ur"""An extended ACL defined for the RNAT entry.<br/>Minimum length = 1.
        """
        try :
            return self._aclname
        except Exception as e:
            raise e

    @aclname.setter
    def aclname(self, aclname) :
        ur"""An extended ACL defined for the RNAT entry.<br/>Minimum length = 1
        """
        try :
            self._aclname = aclname
        except Exception as e:
            raise e

    @property
    def redirectport(self) :
        ur"""The port number to which the packets are redirected.
        """
        try :
            return self._redirectport
        except Exception as e:
            raise e

    @redirectport.setter
    def redirectport(self, redirectport) :
        ur"""The port number to which the packets are redirected.
        """
        try :
            self._redirectport = redirectport
        except Exception as e:
            raise e

    @property
    def natip(self) :
        ur"""The NAT IP address defined for the RNAT entry. .<br/>Minimum length = 1.
        """
        try :
            return self._natip
        except Exception as e:
            raise e

    @natip.setter
    def natip(self, natip) :
        ur"""The NAT IP address defined for the RNAT entry. .<br/>Minimum length = 1
        """
        try :
            self._natip = natip
        except Exception as e:
            raise e

    @property
    def td(self) :
        ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
        """
        try :
            return self._td
        except Exception as e:
            raise e

    @td.setter
    def td(self, td) :
        ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
        """
        try :
            self._td = td
        except Exception as e:
            raise e

    @property
    def natip2(self) :
        ur"""The NAT IP(s) assigned to the RNAT.<br/>Minimum length = 1.
        """
        try :
            return self._natip2
        except Exception as e:
            raise e

    @natip2.setter
    def natip2(self, natip2) :
        ur"""The NAT IP(s) assigned to the RNAT.<br/>Minimum length = 1
        """
        try :
            self._natip2 = natip2
        except Exception as e:
            raise e

    @property
    def srcippersistency(self) :
        ur"""Enables the NetScaler appliance to use the same NAT IP address for all RNAT sessions initiated from a particular server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
        """
        try :
            return self._srcippersistency
        except Exception as e:
            raise e

    @srcippersistency.setter
    def srcippersistency(self, srcippersistency) :
        ur"""Enables the NetScaler appliance to use the same NAT IP address for all RNAT sessions initiated from a particular server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
        """
        try :
            self._srcippersistency = srcippersistency
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(rnat_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 indicates an expired session: clear it
                # locally before deciding whether to raise.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.rnat
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            # rnat has no single identifying attribute, so 0 is returned.
            return 0
        except Exception as e :
            raise e

    @classmethod
    def clear(cls, client, resource) :
        ur""" Use this API to clear rnat.
        """
        try :
            if type(resource) is not list :
                # Single resource: copy the identifying fields onto a fresh
                # rnat object and issue one "clear" operation.
                clearresource = rnat()
                clearresource.network = resource.network
                clearresource.netmask = resource.netmask
                clearresource.aclname = resource.aclname
                clearresource.redirectport = resource.redirectport
                clearresource.natip = resource.natip
                clearresource.td = resource.td
                return clearresource.perform_operation(client,"clear")
            else :
                # List of resources: clear them all with one bulk request.
                if (resource and len(resource) > 0) :
                    clearresources = [ rnat() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        clearresources[i].network = resource[i].network
                        clearresources[i].netmask = resource[i].netmask
                        clearresources[i].aclname = resource[i].aclname
                        clearresources[i].redirectport = resource[i].redirectport
                        clearresources[i].natip = resource[i].natip
                        clearresources[i].td = resource[i].td
                result = cls.perform_operation_bulk_request(client, clearresources,"clear")
            return result
        except Exception as e :
            raise e

    @classmethod
    def update(cls, client, resource) :
        ur""" Use this API to update rnat.
        """
        try :
            if type(resource) is not list :
                updateresource = rnat()
                updateresource.network = resource.network
                updateresource.netmask = resource.netmask
                updateresource.natip = resource.natip
                updateresource.td = resource.td
                updateresource.aclname = resource.aclname
                updateresource.redirectport = resource.redirectport
                updateresource.natip2 = resource.natip2
                updateresource.srcippersistency = resource.srcippersistency
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ rnat() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].network = resource[i].network
                        updateresources[i].netmask = resource[i].netmask
                        updateresources[i].natip = resource[i].natip
                        updateresources[i].td = resource[i].td
                        updateresources[i].aclname = resource[i].aclname
                        updateresources[i].redirectport = resource[i].redirectport
                        updateresources[i].natip2 = resource[i].natip2
                        updateresources[i].srcippersistency = resource[i].srcippersistency
                result = cls.update_bulk_request(client, updateresources)
            return result
        except Exception as e :
            raise e

    @classmethod
    def unset(cls, client, resource, args) :
        ur""" Use this API to unset the properties of rnat resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = rnat()
                unsetresource.network = resource.network
                unsetresource.netmask = resource.netmask
                unsetresource.td = resource.td
                unsetresource.aclname = resource.aclname
                unsetresource.redirectport = resource.redirectport
                unsetresource.natip = resource.natip
                return unsetresource.unset_resource(client, args)
            else :
                if (resource and len(resource) > 0) :
                    unsetresources = [ rnat() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].network = resource[i].network
                        unsetresources[i].netmask = resource[i].netmask
                        unsetresources[i].td = resource[i].td
                        unsetresources[i].aclname = resource[i].aclname
                        unsetresources[i].redirectport = resource[i].redirectport
                        unsetresources[i].natip = resource[i].natip
                result = cls.unset_bulk_request(client, unsetresources, args)
            return result
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        ur""" Use this API to fetch all the rnat resources that are configured on netscaler.
        """
        try :
            if not name :
                obj = rnat()
                response = obj.get_resources(client, option_)
                return response
            # NOTE(review): when a non-empty ``name`` is supplied this method
            # falls through and implicitly returns None — confirm whether a
            # per-name fetch branch was intended here.
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_) :
        ur""" Use this API to fetch filtered set of rnat resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = rnat()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client) :
        ur""" Use this API to count the rnat resources configured on NetScaler.
        """
        try :
            obj = rnat()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            # The count arrives on the first element's mangled ___count slot.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_) :
        ur""" Use this API to count the filtered set of rnat resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = rnat()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    class Srcippersistency:
        # Allowed values for the srcippersistency attribute.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"
class rnat_response(base_response) :
    """ NITRO response wrapper for rnat: holds the deserialized rnat
    resources plus the standard response metadata fields. """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate `length` empty rnat objects for the payload formatter
        # to populate. (A redundant earlier `self.rnat = []` assignment,
        # immediately overwritten by this one, was removed.)
        self.rnat = [rnat() for _ in range(length)]
| |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import threading
import time
from contextlib import contextmanager
from whoosh.compat import abstractmethod
from whoosh.store import LockError
from whoosh.util import synchronized
# Exceptions
class IndexingError(Exception):
    """Generic exception type for errors raised during indexing."""
    pass
# Document grouping context manager
@contextmanager
def groupmanager(writer):
    # Bracket the managed block with start_group()/end_group() so that
    # documents added inside the ``with`` body are indexed as one
    # hierarchical group.
    # NOTE(review): end_group() is NOT called if the body raises (no
    # try/finally here) — confirm whether that is intentional.
    writer.start_group()
    yield
    writer.end_group()
# Base class
class IndexWriter(object):
    """High-level object for writing to an index.

    To get a writer for a particular index, call
    :meth:`~whoosh.index.Index.writer` on the Index object.

    >>> writer = myindex.writer()

    You can use this object as a context manager. If an exception is thrown
    from within the context it calls :meth:`~IndexWriter.cancel` to clean up
    temporary files, otherwise it calls :meth:`~IndexWriter.commit` when the
    context exits.

    >>> with myindex.writer() as w:
    ...     w.add_document(title="First document", content="Hello there.")
    ...     w.add_document(title="Second document", content="This is easy!")
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit on clean exit; roll back if the ``with`` body raised.
        if exc_type:
            self.cancel()
        else:
            self.commit()

    def group(self):
        """Returns a context manager that calls
        :meth:`~IndexWriter.start_group` and :meth:`~IndexWriter.end_group` for
        you, allowing you to use a ``with`` statement to group hierarchical
        documents::

            with myindex.writer() as w:
                with w.group():
                    w.add_document(kind="class", name="Accumulator")
                    w.add_document(kind="method", name="add")
                    w.add_document(kind="method", name="get_result")
                    w.add_document(kind="method", name="close")

                with w.group():
                    w.add_document(kind="class", name="Calculator")
                    w.add_document(kind="method", name="add")
                    w.add_document(kind="method", name="multiply")
                    w.add_document(kind="method", name="get_result")
                    w.add_document(kind="method", name="close")
        """
        return groupmanager(self)

    def start_group(self):
        """Start indexing a group of hierarchical documents. The backend should
        ensure that these documents are all added to the same segment::

            with myindex.writer() as w:
                w.start_group()
                w.add_document(kind="class", name="Accumulator")
                w.add_document(kind="method", name="add")
                w.add_document(kind="method", name="get_result")
                w.add_document(kind="method", name="close")
                w.end_group()

                w.start_group()
                w.add_document(kind="class", name="Calculator")
                w.add_document(kind="method", name="add")
                w.add_document(kind="method", name="multiply")
                w.add_document(kind="method", name="get_result")
                w.add_document(kind="method", name="close")
                w.end_group()

        A more convenient way to group documents is to use the
        :meth:`~IndexWriter.group` method and the ``with`` statement.
        """
        # Default implementation is a no-op; backends may override.
        pass

    def end_group(self):
        """Finish indexing a group of hierarchical documents. See
        :meth:`~IndexWriter.start_group`.
        """
        pass

    def add_field(self, fieldname, fieldtype, **kwargs):
        """Adds a field to the index's schema.

        :param fieldname: the name of the field to add.
        :param fieldtype: an instantiated :class:`whoosh.fields.FieldType`
            object.
        """
        self.schema.add(fieldname, fieldtype, **kwargs)

    def remove_field(self, fieldname, **kwargs):
        """Removes the named field from the index's schema. Depending on the
        backend implementation, this may or may not actually remove existing
        data for the field from the index. Optimizing the index should always
        clear out existing data for a removed field.
        """
        self.schema.remove(fieldname, **kwargs)

    @abstractmethod
    def reader(self, **kwargs):
        """Returns a reader for the existing index.
        """
        raise NotImplementedError

    def searcher(self, **kwargs):
        # Imported here to avoid a circular import at module load time.
        from whoosh.searching import Searcher

        return Searcher(self.reader(), **kwargs)

    def delete_by_term(self, fieldname, text, searcher=None):
        """Deletes any documents containing "term" in the "fieldname" field.
        This is useful when you have an indexed field containing a unique ID
        (such as "pathname") for each document.

        :returns: the number of documents deleted.
        """
        from whoosh.query import Term

        q = Term(fieldname, text)
        return self.delete_by_query(q, searcher=searcher)

    def delete_by_query(self, q, searcher=None):
        """Deletes any documents matching a query object.

        :returns: the number of documents deleted.
        """
        # Use the caller's searcher when given; otherwise open (and later
        # close) a temporary one.
        if searcher:
            s = searcher
        else:
            s = self.searcher()

        try:
            count = 0
            for docnum in s.docs_for_query(q, for_deletion=True):
                if not self.is_deleted(docnum):
                    self.delete_document(docnum)
                    count += 1
        finally:
            if not searcher:
                s.close()

        return count

    @abstractmethod
    def delete_document(self, docnum, delete=True):
        """Deletes a document by number.
        """
        raise NotImplementedError

    @abstractmethod
    def add_document(self, **fields):
        """The keyword arguments map field names to the values to index/store::

            w = myindex.writer()
            w.add_document(path=u"/a", title=u"First doc", text=u"Hello")
            w.commit()

        Depending on the field type, some fields may take objects other than
        unicode strings. For example, NUMERIC fields take numbers, and DATETIME
        fields take ``datetime.datetime`` objects::

            from datetime import datetime, timedelta
            from whoosh import index
            from whoosh.fields import *

            schema = Schema(date=DATETIME, size=NUMERIC(float), content=TEXT)
            myindex = index.create_in("indexdir", schema)

            w = myindex.writer()
            w.add_document(date=datetime.now(), size=5.5, content=u"Hello")
            w.commit()

        Instead of a single object (i.e., unicode string, number, or datetime),
        you can supply a list or tuple of objects. For unicode strings, this
        bypasses the field's analyzer. For numbers and dates, this lets you add
        multiple values for the given field::

            date1 = datetime.now()
            date2 = datetime(2005, 12, 25)
            date3 = datetime(1999, 1, 1)
            w.add_document(date=[date1, date2, date3], size=[9.5, 10],
                           content=[u"alfa", u"bravo", u"charlie"])

        For fields that are both indexed and stored, you can specify an
        alternate value to store using a keyword argument in the form
        "_stored_<fieldname>". For example, if you have a field named "title"
        and you want to index the text "a b c" but store the text "e f g", use
        keyword arguments like this::

            writer.add_document(title=u"a b c", _stored_title=u"e f g")

        You can boost the weight of all terms in a certain field by specifying
        a ``_<fieldname>_boost`` keyword argument. For example, if you have a
        field named "content", you can double the weight of this document for
        searches in the "content" field like this::

            writer.add_document(content="a b c", _content_boost=2.0)

        You can boost every field at once using the ``_boost`` keyword. For
        example, to boost fields "a" and "b" by 2.0, and field "c" by 3.0::

            writer.add_document(a="alfa", b="bravo", c="charlie",
                                _boost=2.0, _c_boost=3.0)

        Note that some scoring algorithms, including Whoosh's default BM25F,
        do not work with term weights less than 1, so you should generally not
        use a boost factor less than 1.

        See also :meth:`Writer.update_document`.
        """
        raise NotImplementedError

    def _doc_boost(self, fields, default=1.0):
        # Whole-document boost taken from the "_boost" pseudo-field.
        if "_boost" in fields:
            return float(fields["_boost"])
        else:
            return default

    def _field_boost(self, fields, fieldname, default=1.0):
        # Per-field boost taken from the "_<fieldname>_boost" pseudo-field.
        boostkw = "_%s_boost" % fieldname
        if boostkw in fields:
            return float(fields[boostkw])
        else:
            return default

    def _unique_fields(self, fields):
        # Check which of the supplied fields are unique
        unique_fields = [name for name, field in self.schema.items()
                         if name in fields and field.unique]
        return unique_fields

    def update_document(self, **fields):
        """The keyword arguments map field names to the values to index/store.

        This method adds a new document to the index, and automatically deletes
        any documents with the same values in any fields marked "unique" in the
        schema::

            schema = fields.Schema(path=fields.ID(unique=True, stored=True),
                                   content=fields.TEXT)
            myindex = index.create_in("index", schema)

            w = myindex.writer()
            w.add_document(path=u"/", content=u"Mary had a lamb")
            w.commit()

            w = myindex.writer()
            w.update_document(path=u"/", content=u"Mary had a little lamb")
            w.commit()

            assert myindex.doc_count() == 1

        It is safe to use ``update_document`` in place of ``add_document``; if
        there is no existing document to replace, it simply does an add.

        You cannot currently pass a list or tuple of values to a "unique"
        field.

        Because this method has to search for documents with the same unique
        fields and delete them before adding the new document, it is slower
        than using ``add_document``.

        * Marking more fields "unique" in the schema will make each
          ``update_document`` call slightly slower.

        * When you are updating multiple documents, it is faster to batch
          delete all changed documents and then use ``add_document`` to add
          the replacements instead of using ``update_document``.

        Note that this method will only replace a *committed* document;
        currently it cannot replace documents you've added to the IndexWriter
        but haven't yet committed. For example, if you do this:

        >>> writer.update_document(unique_id=u"1", content=u"Replace me")
        >>> writer.update_document(unique_id=u"1", content=u"Replacement")

        ...this will add two documents with the same value of ``unique_id``,
        instead of the second document replacing the first.

        See :meth:`Writer.add_document` for information on
        ``_stored_<fieldname>``, ``_<fieldname>_boost``, and ``_boost`` keyword
        arguments.
        """
        # Delete the set of documents matching the unique terms
        unique_fields = self._unique_fields(fields)
        if unique_fields:
            with self.searcher() as s:
                for docnum in s._find_unique([(name, fields[name])
                                              for name in unique_fields]):
                    self.delete_document(docnum)

        # Add the given fields
        self.add_document(**fields)

    def commit(self):
        """Finishes writing and unlocks the index.
        """
        pass

    def cancel(self):
        """Cancels any documents/deletions added by this object
        and unlocks the index.
        """
        pass
class AsyncWriter(threading.Thread, IndexWriter):
    """Convenience wrapper for a writer object that might fail due to locking
    (i.e. the ``filedb`` writer). This object will attempt once to obtain the
    underlying writer, and if it's successful, will simply pass method calls on
    to it.

    If this object *can't* obtain a writer immediately, it will *buffer*
    delete, add, and update method calls in memory until you call ``commit()``.
    At that point, this object will start running in a separate thread, trying
    to obtain the writer over and over, and once it obtains it, "replay" all
    the buffered method calls on it.

    In a typical scenario where you're adding a single or a few documents to
    the index as the result of a Web transaction, this lets you just create the
    writer, add, and commit, without having to worry about index locks,
    retries, etc.

    For example, to get an asynchronous writer, instead of this:

    >>> writer = myindex.writer()

    Do this:

    >>> from whoosh.writing import AsyncWriter
    >>> writer = AsyncWriter(myindex)
    """

    def __init__(self, index, delay=0.25, writerargs=None):
        """
        :param index: the :class:`whoosh.index.Index` to write to.
        :param delay: the delay (in seconds) between attempts to instantiate
            the actual writer.
        :param writerargs: an optional dictionary specifying keyword arguments
            to be passed to the index's ``writer()`` method.
        """
        threading.Thread.__init__(self)
        self.running = False
        self.index = index
        self.writerargs = writerargs or {}
        self.delay = delay
        self.events = []
        # Fix: give the commit arguments safe defaults here. Previously they
        # were only assigned inside commit(), so run() would raise
        # AttributeError if the thread was ever started any other way.
        self.commitargs, self.commitkwargs = (), {}
        try:
            self.writer = self.index.writer(**self.writerargs)
        except LockError:
            # Could not get the lock now; buffer calls and retry in run().
            self.writer = None

    def reader(self):
        return self.index.reader()

    def searcher(self, **kwargs):
        from whoosh.searching import Searcher

        return Searcher(self.reader(), fromindex=self.index, **kwargs)

    def _record(self, method, args, kwargs):
        # Forward the call immediately when we hold the writer; otherwise
        # buffer it for replay by run().
        if self.writer:
            getattr(self.writer, method)(*args, **kwargs)
        else:
            self.events.append((method, args, kwargs))

    def run(self):
        # Retry acquiring the writer until the lock is free, then replay all
        # buffered calls and commit with the arguments saved by commit().
        self.running = True
        writer = self.writer
        while writer is None:
            try:
                writer = self.index.writer(**self.writerargs)
            except LockError:
                time.sleep(self.delay)
        for method, args, kwargs in self.events:
            getattr(writer, method)(*args, **kwargs)
        writer.commit(*self.commitargs, **self.commitkwargs)

    def delete_document(self, *args, **kwargs):
        self._record("delete_document", args, kwargs)

    def add_document(self, *args, **kwargs):
        self._record("add_document", args, kwargs)

    def update_document(self, *args, **kwargs):
        self._record("update_document", args, kwargs)

    def add_field(self, *args, **kwargs):
        self._record("add_field", args, kwargs)

    def remove_field(self, *args, **kwargs):
        self._record("remove_field", args, kwargs)

    def delete_by_term(self, *args, **kwargs):
        self._record("delete_by_term", args, kwargs)

    def commit(self, *args, **kwargs):
        # If we hold the writer, commit synchronously; otherwise remember the
        # commit arguments and start the background replay thread.
        if self.writer:
            self.writer.commit(*args, **kwargs)
        else:
            self.commitargs, self.commitkwargs = args, kwargs
            self.start()

    def cancel(self, *args, **kwargs):
        # NOTE: buffered (unreplayed) events are simply discarded when no
        # writer was obtained, matching the original behavior.
        if self.writer:
            self.writer.cancel(*args, **kwargs)
class BufferedWriter(IndexWriter):
"""Convenience class that acts like a writer but buffers added documents to
a :class:`~whoosh.ramindex.RamIndex` before dumping the buffered documents
as a batch into the actual index.
In scenarios where you are continuously adding single documents very
rapidly (for example a web application where lots of users are adding
content simultaneously), using a BufferedWriter is *much* faster than
opening and committing a writer for each document you add.
(This class may also be useful for batches of ``update_document`` calls. In
a normal writer, ``update_document`` calls cannot update documents you've
added *in that writer*. With ``BufferedWriter``, this will work.)
If you're adding a batches of documents at a time, you can just use a
regular writer -- you're already committing a "batch" of documents, so you
don't need this class.
To use this class, create it from your index and *keep it open*, sharing
it between threads.
>>> from whoosh.writing import BufferedWriter
>>> writer = BufferedWriter(myindex, period=120, limit=100)
You can control how often the ``BufferedWriter`` flushes the in-memory
index to disk using the ``period`` and ``limit`` arguments. ``period`` is
the maximum number of seconds between commits. ``limit`` is the maximum
number of additions to buffer between commits.
You can read/search the combination of the on-disk index and the buffered
documents in memory by calling ``BufferedWriter.reader()`` or
``BufferedWriter.searcher()``. This allows quasi-real-time search, where
documents are available for searching as soon as they are buffered in
memory, before they are committed to disk.
>>> searcher = writer.searcher()
.. tip::
By using a searcher from the shared writer, multiple *threads* can
search the buffered documents. Of course, other *processes* will only
see the documents that have been written to disk. If you want indexed
documents to become available to other processes as soon as possible,
you have to use a traditional writer instead of a ``BufferedWriter``.
Calling ``commit()`` on the ``BufferedWriter`` manually commits any batched
up changes. You can continue to make changes after calling ``commit()``,
and you can call ``commit()`` multiple times.
.. note::
This object keeps an underlying writer open and stores documents in
memory, so you must explicitly call the :meth:`~BufferedWriter.close()`
method on this object before it goes out of scope to release the
write lock and make sure any uncommitted changes are saved.
"""
def __init__(self, index, period=60, limit=10, writerargs=None,
commitargs=None, tempixclass=None):
"""
:param index: the :class:`whoosh.index.Index` to write to.
:param period: the maximum amount of time (in seconds) between commits.
Set this to ``0`` or ``None`` to not use a timer. Do not set this
any lower than a few seconds.
:param limit: the maximum number of documents to buffer before
committing.
:param writerargs: dictionary specifying keyword arguments to be passed
to the index's ``writer()`` method when creating a writer.
:param commitargs: dictionary specifying keyword arguments to be passed
to the writer's ``commit()`` method when committing a writer.
"""
self.index = index
self.period = period
self.limit = limit
self.writerargs = writerargs or {}
self.commitargs = commitargs or {}
self._sync_lock = threading.RLock()
self._write_lock = threading.Lock()
if tempixclass is None:
from whoosh.ramindex import RamIndex as tempixclass
self.tempixclass = tempixclass
self.writer = None
self.base = self.index.doc_count_all()
self.bufferedcount = 0
self.commitcount = 0
self.ramindex = self._create_ramindex()
if self.period:
self.timer = threading.Timer(self.period, self.commit)
def __del__(self):
if hasattr(self, "writer") and self.writer:
if not self.writer.is_closed:
try:
self.writer.cancel()
except:
pass
del self.writer
def _create_ramindex(self):
return self.tempixclass(self.index.schema)
def _get_writer(self):
if self.writer is None:
self.writer = self.index.writer(**self.writerargs)
self.schema = self.writer.schema
self.base = self.index.doc_count_all()
self.bufferedcount = 0
return self.writer
@synchronized
def reader(self, **kwargs):
from whoosh.reading import MultiReader
writer = self._get_writer()
ramreader = self.ramindex
if self.index.is_empty():
return ramreader
else:
reader = writer.reader(**kwargs)
if reader.is_atomic():
reader = MultiReader([reader, ramreader])
else:
reader.add_reader(ramreader)
return reader
def searcher(self, **kwargs):
from whoosh.searching import Searcher
return Searcher(self.reader(), fromindex=self.index, **kwargs)
    def close(self):
        # Flush and commit any buffered documents without scheduling
        # another periodic commit.
        self.commit(restart=False)
def commit(self, restart=True):
if self.period:
self.timer.cancel()
# Replace the RAM index
with self._sync_lock:
oldramindex = self.ramindex
self.ramindex = self._create_ramindex()
with self._write_lock:
if self.bufferedcount:
self._get_writer().add_reader(oldramindex.reader())
if self.writer:
self.writer.commit(**self.commitargs)
self.writer = None
self.commitcount += 1
if restart:
if self.period:
self.timer = threading.Timer(self.period, self.commit)
    def add_reader(self, reader):
        # Bulk-import documents from an existing reader. These bypass the
        # RAM buffer and go straight to the on-disk writer.
        with self._write_lock:
            self._get_writer().add_reader(reader)
    def add_document(self, **fields):
        # Buffer the document in the RAM index; once `limit` documents have
        # accumulated, flush everything to disk. The sync lock is an RLock,
        # so re-entering it inside commit() is safe.
        with self._sync_lock:
            self.ramindex.add_document(**fields)
            self.bufferedcount += 1
            if self.bufferedcount >= self.limit:
                self.commit()
    @synchronized
    def update_document(self, **fields):
        # Ensure an on-disk writer exists, then delegate to the superclass.
        # NOTE(review): presumably the base class's update_document deletes
        # by unique field and re-adds via our add_document -- confirm
        # against the (off-screen) base class.
        self._get_writer()
        super(BufferedWriter, self).update_document(**fields)
    @synchronized
    def delete_document(self, docnum, delete=True):
        # Docnums below `base` live in the on-disk index; docnums at or
        # above it refer to documents still sitting in the RAM buffer, which
        # numbers its documents from 0 (hence the offset subtraction).
        if docnum < self.base:
            return self._get_writer().delete_document(docnum, delete=delete)
        else:
            return self.ramindex.delete_document(docnum - self.base,
                                                 delete=delete)
@synchronized
def is_deleted(self, docnum):
if docnum < self.base:
return self.writer.is_deleted(docnum)
else:
return self.ramindex.is_deleted(docnum - self.base)
# Backwards compatibility with old name: this class used to be published
# as BatchWriter, so keep the alias for existing callers.
BatchWriter = BufferedWriter
| |
# Copyright 2013 Google Inc. All Rights Reserved.
"""Generate usage text for displaying to the user.
"""
import argparse
import re
import StringIO
import sys
import textwrap
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.core.console import console_io
# Total width (columns) at which help output is wrapped.
LINE_WIDTH = 80
# Column where the second (description) column of two-column output starts.
HELP_INDENT = 25
# Decoration characters used when rendering help text as markdown.
MARKDOWN_BOLD = '*'
MARKDOWN_ITALIC = '_'
MARKDOWN_CODE = '`'
class HelpInfo(object):
  """Holds the pieces of information needed to generate help text."""

  def __init__(self, help_text, is_hidden, release_track):
    """Initialize a HelpInfo record.

    Args:
      help_text: str, The text of the help message.
      is_hidden: bool, True if this command or group has been marked as hidden.
      release_track: calliope.base.ReleaseTrack, The maturity level of this
        command.
    """
    # Normalize a missing help message to the empty string.
    self.help_text = help_text if help_text else ''
    self.is_hidden = is_hidden
    self.release_track = release_track
class CommandChoiceSuggester(object):
  """Utility to suggest mistyped commands."""

  # Budget of recursive expansions allowed per candidate comparison.
  TEST_QUOTA = 5000
  # Suggestions farther than this edit distance are rejected.
  MAX_DISTANCE = 5

  def __init__(self):
    self.cache = {}
    self.inf = float('inf')
    self._quota = self.TEST_QUOTA

  def Deletions(self, s):
    """Return every string formed by deleting exactly one character of s."""
    variants = []
    for i in range(len(s)):
      variants.append(s[:i] + s[i + 1:])
    return variants

  def GetDistance(self, longer, shorter):
    """Get the edit distance between two words.

    They must be in the correct order, since deletions and mutations only
    happen from 'longer'.

    Args:
      longer: str, The longer of the two words.
      shorter: str, The shorter of the two words.

    Returns:
      int, The number of substitutions or deletions on longer required to
      get to shorter.
    """
    if longer == shorter:
      return 0
    key = (longer, shorter)
    if key in self.cache:
      return self.cache[key]
    # Seed the cache with infinity so recursive calls on the same pair
    # short-circuit instead of looping.
    self.cache[key] = self.inf
    best = self.inf
    if len(longer) > len(shorter):
      if self._quota < 0:
        # Out of budget: give up on this branch without caching a result.
        return self.inf
      self._quota -= 1
      for trimmed in self.Deletions(longer):
        candidate = self.GetDistance(trimmed, shorter) + 1
        if candidate < best:
          best = candidate
    elif len(longer) == len(shorter):
      # Same length: the distance is just the number of differing positions.
      best = sum(1 for a, b in zip(longer, shorter) if a != b)
    self.cache[key] = best
    return best

  def SuggestCommandChoice(self, arg, choices):
    """Find the item that is closest to what was attempted.

    Args:
      arg: str, The argument provided.
      choices: [str], The list of valid arguments.

    Returns:
      str, The closest match.
    """
    closest = None
    closest_distance = self.inf
    for candidate in choices:
      # Every candidate gets a fresh computation budget.
      self._quota = self.TEST_QUOTA
      longer, shorter = arg, candidate
      if len(longer) < len(shorter):
        longer, shorter = shorter, longer
      if len(longer) - len(shorter) > self.MAX_DISTANCE:
        # Don't bother if they're too different.
        continue
      distance = self.GetDistance(longer.lower(), shorter.lower())
      if distance < closest_distance:
        closest_distance = distance
        closest = candidate
    if closest_distance > self.MAX_DISTANCE:
      return None
    return closest
def WrapMessageInNargs(msg, nargs):
  """Create the display help string for a positional arg.

  Args:
    msg: str, The metavar text for the argument.
    nargs: The argparse repetition specifier ('+', '*', '?',
      argparse.REMAINDER, an int, or None).

  Returns:
    str, The string representation for printing.
  """
  # Map each repetition specifier to its display template; anything not
  # listed (including None and integer nargs) displays the bare message.
  templates = {
      '+': '{msg} [{msg} ...]',
      '*': '[{msg} ...]',
      argparse.REMAINDER: '[{msg} ...]',
      '?': '[{msg}]',
  }
  return templates.get(nargs, '{msg}').format(msg=msg)
def GetFlagMetavar(metavar, flag):
  """Return the display metavar for a flag, decorating list-valued flags.

  For an ArgList-typed flag the metavar is shown as a comma-separated list,
  with `min_length` mandatory leading occurrences when one is configured.
  """
  if not isinstance(flag.type, arg_parsers.ArgList):
    return metavar
  tail = '[{metavar},...]'.format(metavar=metavar)
  min_len = flag.type.min_length
  if min_len:
    # Prepend the required occurrences: METAVAR,METAVAR,[METAVAR,...]
    tail = ','.join([metavar] * min_len + [tail])
  return tail
def PositionalDisplayString(arg, markdown=False):
  """Create the display help string for a positional arg.

  Args:
    arg: argparse.Argument, The argument object to be displayed.
    markdown: bool, If true add markdowns.

  Returns:
    str, The string representation for printing.
  """
  text = arg.metavar or arg.dest.upper()
  if markdown:
    # Italicize each identifier-like word in the metavar.
    text = re.sub(r'(\b[a-zA-Z][-a-zA-Z_0-9]*)',
                  r'%s\1%s' % (MARKDOWN_ITALIC, MARKDOWN_ITALIC), text)
  return ' ' + WrapMessageInNargs(text, arg.nargs)
def FlagDisplayString(arg, brief=False, markdown=False):
  """Create the display help string for a flag arg.

  Args:
    arg: argparse.Argument, The argument object to be displayed.
    brief: bool, If true, only display one version of a flag that has
        multiple versions, and do not display the default value.
    markdown: bool, If true add markdowns.

  Returns:
    str, The string representation for printing.
  """
  metavar = arg.metavar or arg.dest.upper()
  if brief:
    # Brief form: just the (alphabetically) first spelling of the flag.
    long_string = sorted(arg.option_strings)[0]
    if arg.nargs == 0:
      return long_string
    return '{flag} {metavar}'.format(
        flag=long_string,
        metavar=GetFlagMetavar(metavar, arg))
  if arg.nargs == 0:
    # Boolean flag: list every spelling, no metavar.
    if markdown:
      display_string = ', '.join([MARKDOWN_BOLD + x + MARKDOWN_BOLD
                                  for x in arg.option_strings])
    else:
      display_string = ', '.join(arg.option_strings)
  else:
    if markdown:
      # Italicize each identifier-like word in the metavar.
      metavar = re.sub('(\\b[a-zA-Z][-a-zA-Z_0-9]*)',
                       MARKDOWN_ITALIC + '\\1' + MARKDOWN_ITALIC, metavar)
    display_string = ', '.join(
        ['{bb}{flag}{be} {metavar}'.format(
            bb=MARKDOWN_BOLD if markdown else '',
            flag=option_string,
            be=MARKDOWN_BOLD if markdown else '',
            metavar=GetFlagMetavar(metavar, arg))
         for option_string in arg.option_strings])
  # NOTE(review): a falsy default (0, False, '') is not displayed here --
  # confirm that suppressing such defaults is intentional.
  if not arg.required and arg.default:
    display_string += '; default="{val}"'.format(val=arg.default)
  return display_string
def WrapWithPrefix(prefix, message, indent, length, spacing,
                   writer=sys.stdout):
  """Helper function that does two-column writing.

  If the first column is too long, the second column begins on the next line.

  Args:
    prefix: str, Text for the first column.
    message: str, Text for the second column.
    indent: int, Width of the first column.
    length: int, Width of both columns, added together.
    spacing: str, Space to put on the front of prefix.
    writer: file-like, Receiver of the written output.
  """
  def W(s):
    writer.write(s)
  def Wln(s):
    W(s + '\n')
  # Reformat the message to be of rows of the correct width, which is what's
  # left-over from length when you subtract indent. The first line also needs
  # to begin with the indent, but that will be taken care of conditionally.
  # The expression ('%%%ds' % indent % ' ') first builds the format string
  # '%<indent>s' and then applies it to a single space, yielding `indent`
  # spaces -- i.e. the continuation-line indentation.
  message = ('\n%%%ds' % indent % ' ').join(
      textwrap.wrap(message, length - indent))
  if len(prefix) > indent - len(spacing) - 2:
    # If the prefix is too long to fit in the indent width, start the message
    # on a new line after writing the prefix by itself.
    Wln('%s%s' % (spacing, prefix))
    # The message needs to have the first line indented properly.
    W('%%%ds' % indent % ' ')
    Wln(message)
  else:
    # If the prefix fits comfortably within the indent (2 spaces left-over),
    # print it out and start the message after adding enough whitespace to make
    # up the rest of the indent.
    W('%s%s' % (spacing, prefix))
    Wln('%%%ds %%s'
        % (indent - len(prefix) - len(spacing) - 1)
        % (' ', message))
def GenerateUsage(command, argument_interceptor, topic=False):
  """Generate a usage string for a calliope command, group or help topic.

  Args:
    command: calliope._CommandCommon, The command, group or help topic object
      that we're generating usage for.
    argument_interceptor: calliope._ArgumentInterceptor, the object that tracks
      all of the flags for this command or group.
    topic: True if this is a supplementary help topic command.

  Returns:
    str, The usage string.
  """
  command.LoadAllSubElements()
  buf = StringIO.StringIO()
  command_path = ' '.join(command.GetPath())
  command_id = 'topic' if topic else 'command'
  usage_parts = []
  optional_messages = False
  flag_messages = []
  if not topic:
    # Do positional args first, since flag args taking lists can mess them
    # up otherwise.
    # Explicitly not sorting here - order matters.
    # Make a copy, and we'll pop items off. Once we get to a REMAINDER, that
    # goes after the flags so we'll stop and finish later.
    positional_args = argument_interceptor.positional_args[:]
    while positional_args:
      arg = positional_args[0]
      if arg.nargs == argparse.REMAINDER:
        break
      positional_args.pop(0)
      usage_parts.append(PositionalDisplayString(arg))
    for arg in argument_interceptor.flag_args:
      # Suppressed flags never show up in usage.
      if arg.help == argparse.SUPPRESS:
        continue
      if not arg.required:
        optional_messages = True
        continue
      # and add it to the usage
      msg = FlagDisplayString(arg, brief=True)
      flag_messages.append(msg)
    usage_parts.extend(sorted(flag_messages))
    if optional_messages:
      # If there are any optional flags, add a simple message to the usage.
      usage_parts.append('[optional flags]')
    # positional_args will only be non-empty if we had some REMAINDER left.
    for arg in positional_args:
      usage_parts.append(PositionalDisplayString(arg))
  group_helps = command.GetSubGroupHelps()
  command_helps = command.GetSubCommandHelps()
  # Hidden children are listed only when the parent itself is hidden.
  groups = sorted([name for (name, help_info) in group_helps.iteritems()
                   if command.IsHidden() or not help_info.is_hidden])
  commands = sorted([name for (name, help_info) in command_helps.iteritems()
                     if command.IsHidden() or not help_info.is_hidden])
  all_subtypes = []
  if groups:
    all_subtypes.append('group')
  if commands:
    all_subtypes.append(command_id)
  if groups or commands:
    usage_parts.append('<%s>' % ' | '.join(all_subtypes))
  usage_msg = ' '.join(usage_parts)
  non_option = '{command} '.format(command=command_path)
  buf.write(non_option + usage_msg + '\n')
  if groups:
    WrapWithPrefix('group may be', ' | '.join(
        groups), HELP_INDENT, LINE_WIDTH, spacing=' ', writer=buf)
  if commands:
    WrapWithPrefix('%s may be' % command_id, ' | '.join(
        commands), HELP_INDENT, LINE_WIDTH, spacing=' ', writer=buf)
  return buf.getvalue()
def ExpandHelpText(command, text):
  """Expand command {...} references in text.

  Args:
    command: calliope._CommandCommon, The command object that we're helping.
    text: str, The text chunk to expand.

  Returns:
    str, The expanded help text.
  """
  # Expand the command's long help once so it can be substituted for
  # {description}; when the text being expanded IS the long help, use ''
  # instead to avoid infinite recursion.
  expanded_description = ('' if text == command.long_help
                          else ExpandHelpText(command, command.long_help))
  path = command.GetPath()
  return console_io.LazyFormat(
      text or '',
      command=' '.join(path),
      man_name='_'.join(path),
      top_command=path[0],
      parent_command=' '.join(path[:-1]),
      index=command.short_help,
      description=expanded_description)
def ShortHelpText(command, argument_interceptor):
  """Get a command's short help text.

  Args:
    command: calliope._CommandCommon, The command object that we're helping.
    argument_interceptor: calliope._ArgumentInterceptor, the object that tracks
      all of the flags for this command or group.

  Returns:
    str, The short help text.
  """
  command.LoadAllSubElements()
  # Help topics live under the reserved second path element 'topic'.
  topic = len(command.GetPath()) >= 2 and command.GetPath()[1] == 'topic'
  buf = StringIO.StringIO()
  required_messages = []
  optional_messages = []
  # Sorting for consistency and readability.
  for arg in (argument_interceptor.flag_args +
              argument_interceptor.ancestor_flag_args):
    if arg.help == argparse.SUPPRESS:
      continue
    message = (FlagDisplayString(arg), arg.help or '')
    if arg.required:
      required_messages.append(message)
    else:
      optional_messages.append(message)
  positional_messages = []
  # Explicitly not sorting here - order matters.
  for arg in argument_interceptor.positional_args:
    positional_messages.append(
        (PositionalDisplayString(arg), arg.help or ''))
  group_helps = command.GetSubGroupHelps()
  command_helps = command.GetSubCommandHelps()
  # Hidden children are listed only when the parent itself is hidden.
  group_messages = [(name, help_info.help_text) for (name, help_info)
                    in group_helps.iteritems()
                    if command.IsHidden() or not help_info.is_hidden]
  command_messages = [(name, help_info.help_text) for (name, help_info)
                      in command_helps.iteritems()
                      if command.IsHidden() or not help_info.is_hidden]
  buf.write('Usage: ' + GenerateUsage(command, argument_interceptor, topic) +
            '\n')
  # Second, print out the long help.
  buf.write('\n'.join(textwrap.wrap(ExpandHelpText(command, command.long_help),
                                    LINE_WIDTH)))
  buf.write('\n\n')
  # Third, print out the short help for everything that can come on
  # the command line, grouped into required flags, optional flags,
  # sub groups, sub commands, and positional arguments.
  # This printing is done by collecting a list of rows. If the row is just
  # a string, that means print it without decoration. If the row is a tuple,
  # use WrapWithPrefix to print that tuple in aligned columns.
  def TextIfExists(title, messages):
    # Render one titled two-column section, or None when there is nothing
    # to show (filtered out by the final join below).
    if not messages:
      return None
    textbuf = StringIO.StringIO()
    textbuf.write('%s\n' % title)
    for (arg, helptxt) in messages:
      WrapWithPrefix(arg, helptxt, HELP_INDENT, LINE_WIDTH,
                     spacing=' ', writer=textbuf)
    return textbuf.getvalue()
  if topic:
    all_messages = [
        TextIfExists('topics:', sorted(command_messages)),
    ]
  else:
    # NOTE(review): command_messages is computed but not rendered in this
    # branch (there is no 'commands:' section) -- confirm this omission is
    # intentional.
    all_messages = [
        TextIfExists('required flags:', sorted(required_messages)),
        TextIfExists('optional flags:', sorted(optional_messages)),
        TextIfExists('positional arguments:', positional_messages),
        TextIfExists('command groups:', sorted(group_messages)),
    ]
  buf.write('\n'.join([msg for msg in all_messages if msg]))
  return buf.getvalue()
def ExtractHelpStrings(docstring):
  """Extracts short help and long help from a docstring.

  If the docstring contains a blank line (i.e., a line consisting of zero or
  more spaces), everything before the first blank line is taken as the short
  help string and everything after it is taken as the long help string. The
  short help is flowing text with no line breaks, while the long help may
  consist of multiple lines, each line beginning with an amount of whitespace
  determined by dedenting the docstring.

  If the docstring does not contain a blank line, the sequence of words in the
  docstring is used as both the short help and the long help.

  Corner cases: If the first line of the docstring is empty, everything
  following it forms the long help, and the sequence of words of in the long
  help (without line breaks) is used as the short help. If the short help
  consists of zero or more spaces, None is used instead. If the long help
  consists of zero or more spaces, the short help (which might or might not be
  None) is used instead.

  Args:
    docstring: The docstring from which short and long help are to be taken

  Returns:
    a tuple consisting of a short help string and a long help string
  """
  if not docstring:
    return ('', '')
  raw_lines = docstring.splitlines()
  stripped_lines = [line.strip() for line in raw_lines]
  if '' in stripped_lines:
    split_at = stripped_lines.index('')
    short_help = ' '.join(stripped_lines[:split_at])
    long_help = textwrap.dedent('\n'.join(raw_lines[split_at + 1:])).strip()
    if not short_help:
      # The docstring started with a blank line: reuse the words of the
      # long help (without line breaks) as the short help.
      short_help = ' '.join(stripped_lines[split_at + 1:]).strip()
  else:
    # No blank line anywhere: the whole docstring is the short help.
    # (It cannot be empty here, since an all-whitespace docstring would
    # have produced an empty stripped line above.)
    short_help = ' '.join(stripped_lines).strip()
    long_help = ''
  return (short_help, long_help or short_help)
| |
# -*- coding: utf-8 -*-
"""In this file we have all the top level commands for the transifex client.
Since we're using a way to automatically list them and execute them, when
adding code to this file you must take care of the following:
* Added functions must begin with 'cmd_' followed by the actual name of the
command being used in the command line (eg cmd_init)
* The description for each function that we display to the user is read from
the func_doc attribute which reads the doc string. So, when adding
docstring to a new function make sure you add an oneliner which is
descriptive and is meant to be seen by the user.
* When including libraries, it's best if you include modules instead of
functions because that way our function resolution will work faster and the
chances of overlapping are minimal
* All functions should use the OptionParser and should have a usage and
      description field.
"""
import os
import re
import shutil
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
from six.moves import input
from txclib import utils, project
from txclib.config import OrderedRawConfigParser
from txclib.exceptions import UnInitializedError
from txclib.parsers import delete_parser, help_parser, parse_csv_option, \
status_parser, pull_parser, set_parser, push_parser, init_parser
from txclib.paths import posix_path
from txclib.log import logger
def cmd_init(argv, path_to_tx):
    """Initialize a new transifex project.

    Creates a .tx/config skeleton (asking the user for the Transifex host)
    and stores host credentials via the Project object.
    """
    parser = init_parser()
    (options, args) = parser.parse_args(argv)
    if len(args) > 1:
        parser.error("Too many arguments were provided. Aborting...")
    if args:
        path_to_tx = args[0]
    else:
        path_to_tx = os.getcwd()
    save = options.save
    # if we already have a config file and we are not told to override it
    # in the args we have to ask
    if os.path.isdir(os.path.join(path_to_tx, ".tx")) and not save:
        logger.info("tx: There is already a tx folder!")
        if not utils.confirm(
            prompt='Do you want to delete it and reinit the project?',
            default=False
        ):
            return
        # The user confirmed: wipe the old settings and re-create from
        # scratch, remembering credentials as if --save had been given.
        # FIXME: take a backup before deleting
        save = True
        shutil.rmtree(os.path.join(path_to_tx, ".tx"))
    logger.info("Creating .tx folder...")
    os.mkdir(os.path.join(path_to_tx, ".tx"))
    default_transifex = "https://www.transifex.com"
    transifex_host = options.host or input("Transifex instance [%s]: " %
                                           default_transifex)
    if not transifex_host:
        transifex_host = default_transifex
    if not transifex_host.startswith(('http://', 'https://')):
        transifex_host = 'https://' + transifex_host
    config_file = os.path.join(path_to_tx, ".tx", "config")
    if not os.path.exists(config_file):
        # The path to the config file (.tx/config)
        logger.info("Creating skeleton...")
        # Handle the credentials through transifexrc
        config = OrderedRawConfigParser()
        config.add_section('main')
        config.set('main', 'host', transifex_host)
        logger.info("Creating config file...")
        # Use a context manager so the handle is closed even if writing
        # fails (the original open()/close() pair leaked on error).
        with open(config_file, 'w') as fh:
            config.write(fh)
    prj = project.Project(path_to_tx)
    prj.getset_host_credentials(transifex_host, username=options.user,
                                password=options.password,
                                token=options.token, save=save)
    prj.save()
    logger.info("Done.")
def cmd_set(argv, path_to_tx):
    """Add local or remote files under transifex"""
    parser = set_parser()
    (options, args) = parser.parse_args(argv)
    # Implement options/args checks
    # TODO !!!!!!!
    # --auto-local: derive source + translation files from an expression.
    if options.local:
        try:
            expression = args[0]
        except IndexError:
            parser.error("Please specify an expression.")
        if not options.resource:
            parser.error("Please specify a resource")
        if not options.source_language:
            parser.error("Please specify a source language.")
        if '<lang>' not in expression:
            parser.error("The expression you have provided is not valid.")
        if not utils.valid_slug(options.resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include"
                         " [_-\w].")
        _auto_local(path_to_tx, options.resource,
                    source_language=options.source_language,
                    expression=expression, source_file=options.source_file,
                    execute=options.execute, regex=False)
        # Extra options are only persisted when actually executing.
        if options.execute:
            _set_minimum_perc(options.resource, options.minimum_perc,
                              path_to_tx)
            _set_mode(options.resource, options.mode, path_to_tx)
            _set_type(options.resource, options.i18n_type, path_to_tx)
        return
    # --auto-remote: mirror a remote project/release/resource URL.
    if options.remote:
        try:
            url = args[0]
        except IndexError:
            parser.error("Please specify an remote url")
        _auto_remote(path_to_tx, url)
        _set_minimum_perc(options.resource, options.minimum_perc, path_to_tx)
        _set_mode(options.resource, options.mode, path_to_tx)
        return
    # --source: register a single file as the resource's source file.
    if options.is_source:
        resource = options.resource
        if not resource:
            parser.error("You must specify a resource name with the "
                         "-r|--resource flag.")
        lang = options.language
        if not lang:
            parser.error("Please specify a source language.")
        if len(args) != 1:
            parser.error("Please specify a file.")
        if not utils.valid_slug(resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include "
                         "[_-\w].")
        file = args[0]
        # Calculate relative path
        path_to_file = os.path.relpath(file, path_to_tx)
        _set_source_file(path_to_tx, resource, options.language, path_to_file)
    # Otherwise: register a single translation file for a language.
    elif options.resource or options.language:
        resource = options.resource
        lang = options.language
        if len(args) != 1:
            parser.error("Please specify a file")
        # Calculate relative path
        path_to_file = os.path.relpath(args[0], path_to_tx)
        try:
            _go_to_dir(path_to_tx)
        except UnInitializedError as e:
            utils.logger.error(e)
            return
        if not utils.valid_slug(resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include "
                         "[_-\w].")
        _set_translation(path_to_tx, resource, lang, path_to_file)
    _set_mode(options.resource, options.mode, path_to_tx)
    _set_type(options.resource, options.i18n_type, path_to_tx)
    _set_minimum_perc(options.resource, options.minimum_perc, path_to_tx)
    logger.info("Done.")
    return
def _auto_local(path_to_tx, resource, source_language, expression,
                execute=False, source_file=None, regex=False):
    """Auto configure local project.

    Scans the project tree for files matching `expression`, picks the
    source-language file and translation files, and either applies the
    configuration (execute=True) or prints the tx commands that would do so.
    """
    # The path everything will be relative to
    curpath = os.path.abspath(os.curdir)
    # Force expr to be a valid regex expr (escaped) but keep <lang> intact
    expr_re = utils.regex_from_filefilter(expression, curpath)
    expr_rec = re.compile(expr_re)
    if not execute:
        logger.info("Only printing the commands which will be run if the "
                    "--execute switch is specified.")
    # First, let's construct a dictionary of all matching files.
    # Note: Only the last matching file of a language will be stored.
    translation_files = {}
    for f_path in utils.files_in_project(curpath):
        match = expr_rec.match(posix_path(f_path))
        if match:
            # NOTE(review): assumes the first capture group of the filter
            # regex is the language code -- confirm regex_from_filefilter.
            lang = match.group(1)
            if lang == source_language and not source_file:
                source_file = f_path
            else:
                translation_files[lang] = f_path
    if not source_file:
        raise Exception("Could not find a source language file. Please run "
                        "set --source manually and then re-run this command "
                        "or provide the source file with the -s flag.")
    if execute:
        logger.info("Updating source for resource %s ( %s -> %s )." % (
            resource, source_language, os.path.relpath(
                source_file, path_to_tx)
        ))
        _set_source_file(path_to_tx, resource, source_language,
                         os.path.relpath(source_file, path_to_tx))
    else:
        logger.info('\ntx set --source -r %(res)s -l %(lang)s %(file)s\n' % {
            'res': resource,
            'lang': source_language,
            'file': os.path.relpath(source_file, curpath)})
    prj = project.Project(path_to_tx)
    if execute:
        try:
            prj.config.get("%s" % resource, "source_file")
        except configparser.NoSectionError:
            # Bug fix: the message has two %s placeholders but was formatted
            # with a single argument, raising TypeError instead of the
            # intended Exception.
            raise Exception("No resource with slug \"%s\" was found.\nRun "
                            "'tx set --auto-local -r %s \"expression\"' to "
                            "do the initial configuration."
                            % (resource, resource))
    # Now let's handle the translation files.
    if execute:
        logger.info("Updating file expression for resource %s ( %s )." % (
            resource, expression))
        # Eval file_filter relative to root dir
        file_filter = posix_path(
            os.path.relpath(os.path.join(curpath, expression), path_to_tx)
        )
        prj.config.set("%s" % resource, "file_filter", file_filter)
    else:
        for (lang, f_path) in sorted(translation_files.items()):
            logger.info('tx set -r %(res)s -l %(lang)s %(file)s' % {
                'res': resource,
                'lang': lang,
                'file': os.path.relpath(f_path, curpath)})
    if execute:
        prj.save()
def _auto_remote(path_to_tx, url):
    """Initialize a remote project/resource to the current directory.

    Args:
        path_to_tx: Root directory of the local project (contains .tx).
        url: A Transifex project, release or resource URL to mirror.
    """
    logger.info("Auto configuring local project from remote URL...")
    # Renamed from `type`/`vars`: never shadow the builtins.
    url_type, url_vars = utils.parse_tx_url(url)
    prj = project.Project(path_to_tx)
    username, password = prj.getset_host_credentials(url_vars['hostname'])
    if url_type.startswith('project'):
        logger.info("Getting details for project %s" % url_vars['project'])
        proj_info = utils.get_details(
            'project_details',
            username, password,
            hostname=url_vars['hostname'],
            project=url_vars['project'])
        resources = ['.'.join([url_vars['project'],
                               r['slug']]) for r in proj_info['resources']]
        logger.info("%s resources found. Configuring..." % len(resources))
    elif url_type == 'release':
        logger.info("Getting details for release %s" % url_vars['release'])
        rel_info = utils.get_details(
            'release_details',
            username, password,
            hostname=url_vars['hostname'],
            project=url_vars['project'],
            release=url_vars['release'])
        resources = []
        for r in rel_info['resources']:
            # Release resources may belong to other projects.
            if 'project' in r:
                resources.append('.'.join([r['project']['slug'], r['slug']]))
            else:
                resources.append('.'.join([url_vars['project'], r['slug']]))
        logger.info("%s resources found. Configuring..." % len(resources))
    elif url_type.startswith('resource'):
        logger.info("Getting details for resource %s" % url_vars['resource'])
        resources = ['.'.join([url_vars['project'], url_vars['resource']])]
    else:
        raise Exception("Url '%s' is not recognized." % url)
    for resource in resources:
        logger.info("Configuring resource %s." % resource)
        proj, res = resource.split('.')
        res_info = utils.get_details(
            'resource_details',
            username, password,
            hostname=url_vars['hostname'],
            project=proj, resource=res)
        try:
            source_lang = res_info['source_language_code']
            i18n_type = res_info['i18n_type']
        except KeyError:
            # Typo fix: "software of fallback" -> "software or fall back".
            raise Exception("Remote server seems to be running an unsupported "
                            "version of Transifex. Either update your server "
                            "software or fall back to a previous version "
                            "of transifex-client.")
        prj.set_remote_resource(
            resource=resource,
            host=url_vars['hostname'],
            source_lang=source_lang,
            i18n_type=i18n_type)
    prj.save()
def cmd_push(argv, path_to_tx):
    """Push local files to remote server"""
    parser = push_parser()
    (options, args) = parser.parse_args(argv)
    # Expand the comma-separated filters up front.
    languages = parse_csv_option(options.languages)
    resources = parse_csv_option(options.resources)
    prj = project.Project(path_to_tx)
    if not (options.push_source or options.push_translations):
        parser.error("You need to specify at least one of the -s|--source, "
                     "-t|--translations flags with the push command.")
    prj.push(
        force=options.force_creation,
        resources=resources,
        languages=languages,
        skip=options.skip_errors,
        source=options.push_source,
        translations=options.push_translations,
        no_interactive=options.no_interactive,
        xliff=options.xliff
    )
    logger.info("Done.")
def cmd_pull(argv, path_to_tx):
    """Pull files from remote server to local repository"""
    parser = pull_parser()
    (options, args) = parser.parse_args(argv)
    if options.fetchall and options.languages:
        # Typo fix in the user-facing message ("user" -> "use").
        parser.error("You can't use a language filter along with the "
                     "-a|--all option")
    languages = parse_csv_option(options.languages)
    resources = parse_csv_option(options.resources)
    pseudo = options.pseudo
    # Should we download as xliff?
    xliff = options.xliff
    skip = options.skip_errors
    minimum_perc = options.minimum_perc or None
    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return
    # instantiate the project.Project
    prj = project.Project(path_to_tx)
    prj.pull(
        languages=languages, resources=resources, overwrite=options.overwrite,
        fetchall=options.fetchall, fetchsource=options.fetchsource,
        force=options.force, skip=skip, minimum_perc=minimum_perc,
        mode=options.mode, pseudo=pseudo, xliff=xliff
    )
    logger.info("Done.")
def _set_source_file(path_to_tx, resource, lang, path_to_file):
    """Reusable method to set source file.

    Args:
        path_to_tx: Root directory of the local project.
        resource: Identifier in the form project_slug.resource_slug.
        lang: The source language code.
        path_to_file: The source file path (relative to the project root).
    """
    proj, res = resource.split('.')
    if not proj or not res:
        raise Exception("\"%s.%s\" is not a valid resource identifier. "
                        "It should be in the following format "
                        "project_slug.resource_slug." %
                        (proj, res))
    if not lang:
        raise Exception("You haven't specified a source language.")
    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return
    if not os.path.exists(path_to_file):
        raise Exception("tx: File ( %s ) does not exist." %
                        os.path.join(path_to_tx, path_to_file))
    root_dir = os.path.abspath(path_to_tx)
    abs_file = os.path.normpath(os.path.abspath(path_to_file))
    # Bug fix: a real prefix check. The old `root_dir in abs_file` substring
    # test accepted files outside the project whenever the root path
    # happened to appear anywhere inside the file's path.
    if abs_file != root_dir and not abs_file.startswith(root_dir + os.sep):
        raise Exception("File must be under the project root directory.")
    logger.info("Setting source file for resource %s.%s ( %s -> %s )." % (
        proj, res, lang, path_to_file))
    path_to_file = os.path.relpath(path_to_file, root_dir)
    # A single Project instance is enough (the original built it twice).
    prj = project.Project(path_to_tx)
    # FIXME: Check also if the path to source file already exists.
    try:
        try:
            prj.config.get("%s.%s" % (proj, res), "source_file")
        except configparser.NoSectionError:
            prj.config.add_section("%s.%s" % (proj, res))
        except configparser.NoOptionError:
            pass
    finally:
        prj.config.set(
            "%s.%s" % (proj, res), "source_file", posix_path(path_to_file)
        )
        prj.config.set("%s.%s" % (proj, res), "source_lang", lang)
        prj.save()
def _set_translation(path_to_tx, resource, lang, path_to_file):
    """Reusable method to set translation file.

    Args:
        path_to_tx: Root directory of the local project.
        resource: Identifier in the form project_slug.resource_slug.
        lang: The translation language code.
        path_to_file: The translation file path.
    """
    proj, res = resource.split('.')
    # Bug fix: validate the split parts. The original tested the imported
    # `project` module and the full `resource` string, which are always
    # truthy, so malformed identifiers like "proj." slipped through.
    if not proj or not res:
        raise Exception("\"%s\" is not a valid resource identifier. "
                        "It should be in the following format "
                        "project_slug.resource_slug." %
                        resource)
    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return
    # Warn the user if the file doesn't exist
    if not os.path.exists(path_to_file):
        logger.info("Warning: File '%s' doesn't exist." % path_to_file)
    # instantiate the project.Project
    prj = project.Project(path_to_tx)
    root_dir = os.path.abspath(path_to_tx)
    abs_file = os.path.normpath(os.path.abspath(path_to_file))
    # Bug fix: a real prefix check instead of the old substring test.
    if abs_file != root_dir and not abs_file.startswith(root_dir + os.sep):
        raise Exception("File must be under the project root directory.")
    if lang == prj.config.get("%s.%s" % (proj, res), "source_lang"):
        raise Exception("tx: You cannot set translation file for "
                        "the source language. Source languages contain "
                        "the strings which will be translated!")
    logger.info("Updating translations for resource %s ( %s -> %s )." % (
        resource, lang, path_to_file))
    path_to_file = os.path.relpath(path_to_file, root_dir)
    prj.config.set(
        "%s.%s" % (proj, res), "trans.%s" % lang, posix_path(path_to_file)
    )
    prj.save()
def cmd_status(argv, path_to_tx):
    """Print status of current project"""
    parser = status_parser()
    (options, args) = parser.parse_args(argv)
    resources = parse_csv_option(options.resources)
    prj = project.Project(path_to_tx)
    # Narrow down to the resources the user asked for (or all of them).
    resources = prj.get_chosen_resources(resources)
    resources_num = len(resources)
    for idx, res in enumerate(resources):
        p, r = res.split('.')
        logger.info("%s -> %s (%s of %s)" % (p, r, idx + 1, resources_num))
        logger.info("Translation Files:")
        slang = prj.get_resource_option(res, 'source_lang')
        sfile = prj.get_resource_option(res, 'source_file') or "N/A"
        lang_map = prj.get_resource_lang_mapping(res)
        logger.info(" - %s: %s (%s)" % (utils.color_text(slang, "RED"),
                    sfile, utils.color_text("source", "YELLOW")))
        files = prj.get_resource_files(res)
        fkeys = list(files.keys())
        fkeys.sort()
        for lang in fkeys:
            local_lang = lang
            if lang in list(lang_map.values()):
                # NOTE(review): lang_map.flip presumably maps the remote
                # language code back to the locally configured one --
                # confirm against txclib.project.
                local_lang = lang_map.flip[lang]
            logger.info(" - %s: %s" % (utils.color_text(local_lang, "RED"),
                        files[lang]))
        logger.info("")
def cmd_help(argv, path_to_tx):
    """List all available commands"""
    parser = help_parser()
    (options, args) = parser.parse_args(argv)
    if len(args) > 1:
        parser.error("Multiple arguments received. Exiting...")
    # Get all commands
    fns = utils.discover_commands()
    # Print help for specific command
    if len(args) == 1:
        try:
            # NOTE(review): this indexes the raw argv, not the parsed args;
            # if an option ever precedes the command name, the wrong key is
            # looked up -- confirm against how dispatch builds argv.
            fns[argv[0]](['--help'], path_to_tx)
        except KeyError:
            utils.logger.error("Command %s not found" % argv[0])
    # or print summary of all commands
    # the code below will only be executed if the KeyError exception is thrown
    # because in all other cases the function called with --help will exit
    # instead of return here
    keys = list(fns.keys())
    keys.sort()
    logger.info("Transifex command line client.\n")
    logger.info("Available commands are:")
    for key in keys:
        logger.info("  %-15s\t%s" % (key, getattr(fns[key], '__doc__')))
    logger.info("\nFor more information run %s command --help" % sys.argv[0])
def cmd_delete(argv, path_to_tx):
    """Delete an accessible resource or translation in a remote server."""
    # Parse command-line options and expand the comma-separated lists.
    options, args = delete_parser().parse_args(argv)
    languages = parse_csv_option(options.languages)
    resources = parse_csv_option(options.resources)
    # Hand the actual deletion off to the project layer.
    prj = project.Project(path_to_tx)
    prj.delete(resources, languages, options.skip_errors,
               options.force_delete)
    logger.info("Done.")
def _go_to_dir(path):
"""Change the current working directory to the directory specified as
argument.
Args:
path: The path to chdor to.
Raises:
UnInitializedError, in case the directory has not been initialized.
"""
if path is None:
raise UnInitializedError(
"Directory has not been initialzied. "
"Did you forget to run 'tx init' first?"
)
os.chdir(path)
def _set_minimum_perc(resource, value, path_to_tx):
    """Set the minimum percentage in the .tx/config file."""
    _set_project_option(resource, 'minimum_perc', value,
                        path_to_tx, 'set_min_perc')
def _set_mode(resource, value, path_to_tx):
    """Set the mode in the .tx/config file."""
    _set_project_option(resource, 'mode', value,
                        path_to_tx, 'set_default_mode')
def _set_type(resource, value, path_to_tx):
    """Set the i18n type in the .tx/config file."""
    _set_project_option(resource, 'type', value,
                        path_to_tx, 'set_i18n_type')
def _set_project_option(resource, name, value, path_to_tx, func_name):
    """Save the option to the project config file."""
    if value is None:
        # Nothing supplied on the command line; leave the config untouched.
        return
    if resource:
        logger.debug("Setting the %s for resource %s." % (name, resource))
        resources = [resource, ]
    else:
        # An empty list tells the project layer to apply to everything.
        logger.debug("Setting the %s for all resources." % name)
        resources = []
    prj = project.Project(path_to_tx)
    getattr(prj, func_name)(resources, value)
    prj.save()
| |
"""Create a conduit from the available information.
Can try to examine './.arcconfig' and '~/.arcrc' if not enough
information is provided.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_makeconduit
#
# Public Classes:
# InsufficientInfoException
#
# Public Functions:
# add_argparse_arguments
# make_conduit
# obscured_cert
# get_uri_user_cert_explanation
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import difflib
import phlsys_arcconfig
import phlsys_arcrc
import phlsys_conduit
class InsufficientInfoException(Exception):

    """Raised when there isn't enough information to build a conduit."""

    def __init__(self, message):
        super(InsufficientInfoException, self).__init__(message)
def _make_exception(*args):
    # Join the explanation paragraphs with blank lines, led by a newline.
    message = "\n" + "\n\n".join(args)
    return InsufficientInfoException(message)
def add_argparse_arguments(parser):
    """Add a 'connection arguments' group to the supplied argparse.parser."""
    group = parser.add_argument_group(
        'connection arguments',
        'use these optional parameters to override settings present in your\n'
        '"~/.arcrc" or ".arcconfig" files')
    # (flag, metavar, help) for each str-typed connection option, in the
    # order they should appear in --help output.
    specs = [
        ("--uri",
         "ADDRESS",
         "address of the phabricator instance to connect to."),
        ("--user",
         "NAME",
         "name of the user to connect as."),
        ("--cert",
         "HEX",
         "long certificate string of the user to connect as, you can find "
         "this string here: "
         "http://your.phabricator/settings/panel/conduit/. generally you "
         "wouldn't expect to enter this on the command-line and would "
         "make an ~/.arcrc file by using '$ arc install-certificate'."),
        ('--act-as-user',
         "NAME",
         "name of the user to impersonate (admin only).\n"),
    ]
    for flag, metavar, help_text in specs:
        group.add_argument(flag, type=str, metavar=metavar, help=help_text)
def make_conduit(uri=None, user=None, cert=None, act_as_user=None):
    # Fill in any missing connection details from .arcconfig / ~/.arcrc
    # (raises InsufficientInfoException if they can't be determined),
    # then build the conduit; the explanation string is discarded here.
    uri, user, cert, _ = get_uri_user_cert_explanation(uri, user, cert)
    return phlsys_conduit.Conduit(uri, user, cert, act_as_user)
def obscured_cert(cert):
    """Return an obscured version of the supplied 'cert' suitable for display.

    Only the first and last four characters are kept; everything in
    between is replaced with an ellipsis.

    :cert: a string of a conduit certificate
    :returns: a string of an obscured conduit certificate

    """
    head = cert[:4]
    tail = cert[-4:]
    return '{0}...{1}'.format(head, tail)
def get_uri_user_cert_explanation(uri, user, cert):
    """Return (uri, user, cert, explanation), filling in missing values.

    Values not supplied by the caller are discovered from '.arcconfig'
    (uri) and '~/.arcrc' (user, cert); 'explanation' describes where each
    value came from.  Raises InsufficientInfoException when a required
    value cannot be determined.
    """
    if uri and user and cert:
        # Nothing to discover; just normalize the uri.
        explanations = ["all parameters were supplied"]
        uri = _fix_uri(explanations, uri)
        return uri, user, cert, '\n\n'.join(explanations)
    arcrc, arcrc_path = _load_arcrc()
    arcconfig_path, arcconfig = _load_arcconfig()
    install_arc_url = str(
        "http://www.phabricator.com/docs/phabricator/article/"
        "Arcanist_User_Guide.html#installing-arcanist")
    # Pre-composed message fragments for the possible failure modes below.
    no_uri = "no uri to a Phabricator instance was specified."
    no_user = "no username for the Phabricator instance was specified."
    no_cert = "no certificate for the Phabricator instance was specified."
    no_arcconfig = (
        "couldn't find an .arcconfig, this file should contain "
        "the uri for the phabricator instance you wish to connect "
        "to.\n"
        "we search for it in the current working directory and in "
        "the parent directories\n"
        "here is an example .arcconfig:\n"
        "{\n"
        " \"conduit_uri\" : \"https://your.phabricator/\"\n"
        "}")
    no_arcrc = (
        "couldn't find ~/.arcrc, this file should contain "
        "usernames and certificates which will allow us to authenticate with "
        "Phabricator.\n"
        "To generate a valid ~/.arcrc for a particular instance, you may "
        "run:\n"
        "\n"
        "$ arc install-certificate [URI]\n"
        "N.B. to install arc:\n" + install_arc_url)
    bad_arcrc = (
        "can't load .arcrc, it may be invalid json or not permissioned\n"
        "path used: " + str(arcrc_path))
    bad_arcconfig = (
        "can't load .arcconfig, it may be invalid json or not permissioned\n"
        "path used: " + str(arcconfig_path))
    arcrc_no_default = (
        "no default uri was discovered in .arcrc, you may add one like so:\n"
        "$ arc set-config default https://your.phabricator/\n"
        "N.B. to install arc:\n" + install_arc_url)
    arcconfig_no_uri = (
        ".arcconfig doesn't seem to contain a conduit_uri entry\n"
        "path used: " + str(arcconfig_path))
    explanations = []
    # try to discover conduit uri first
    if uri is None:
        if not arcconfig_path:
            # no .arcconfig; fall back to the 'default' entry in ~/.arcrc
            if not arcrc_path:
                raise _make_exception(no_uri, no_arcconfig, no_arcrc)
            if arcrc is None:
                raise _make_exception(no_uri, no_arcconfig, bad_arcrc)
            if "config" in arcrc:
                uri = arcrc["config"].get("default", None)
            if uri is None:
                raise _make_exception(no_uri, no_arcconfig, arcrc_no_default)
            explanations.append(
                "got uri from 'default' entry in arcrc\n"
                "  path: {0}\n"
                "  uri: {1}".format(arcrc_path, uri))
        else:  # if arcconfig_path
            if arcconfig is None:
                raise _make_exception(no_uri, bad_arcconfig)
            uri = arcconfig.get("conduit_uri", None)
            if uri is None:
                raise _make_exception(no_uri, arcconfig_no_uri)
            explanations.append(
                "got uri from .arcconfig\n"
                "  path: {0}\n"
                "  uri: {1}".format(arcconfig_path, uri))
    # normalize the uri before using it to look up arcrc host entries
    uri = _fix_uri(explanations, uri)
    arcrc_no_entry = (
        "no entry for the uri was found in .arcrc, you may add one like so:\n"
        "$ arc install-certificate " + uri + "\n"
        "N.B. to install arc:\n" + install_arc_url)
    # try to discover user
    if user is None:
        if not arcrc_path:
            raise _make_exception(no_user, no_arcrc)
        if arcrc is None:
            raise _make_exception(no_user, bad_arcrc)
        if "hosts" in arcrc:
            host = phlsys_arcrc.get_host(arcrc, uri)
            if host is None:
                raise _make_exception(no_user, arcrc_no_entry)
            user = host.get("user", None)
            explanations.append(
                "got user from uri's entry in .arcrc\n"
                "  path: {0}\n"
                "  user: {1}".format(arcrc_path, user))
            if cert is None:
                # opportunistically pick up the cert from the same host entry
                cert = host.get("cert", None)
                explanations.append(
                    "got cert from uri's entry in .arcrc\n"
                    "  path: {0}\n"
                    "  cert: {1}".format(arcrc_path, obscured_cert(cert)))
            # the host entry existed but carried no 'user' key
            if user is None:
                raise _make_exception(no_user, arcrc_no_entry)
        # the arcrc had no 'hosts' section at all, so no user was found
        if user is None:
            raise _make_exception(no_user, arcrc_no_entry)
    # try to discover cert
    if cert is None:
        if not arcrc_path:
            raise _make_exception(no_cert, no_arcrc)
        if arcrc is None:
            raise _make_exception(no_cert, bad_arcrc)
        if "hosts" in arcrc:
            host = phlsys_arcrc.get_host(arcrc, uri)
            if host is None:
                raise _make_exception(no_cert, arcrc_no_entry)
            cert = host.get("cert", None)
            explanations.append(
                "got cert from uri's entry in .arcrc\n"
                "  path: {0}\n"
                "  cert: {1}".format(arcrc_path, obscured_cert(cert)))
        if cert is None:
            raise _make_exception(no_cert, arcrc_no_entry)
    # make a generic statement if we've missed an error case
    if not (uri and user and cert) or arcrc_path is None:
        # NOTE(review): message contains the typo 'determinining'.
        raise Exception("unexpected error determinining uri, user or cert")
    return uri, user, cert, '\n\n'.join(explanations)
def _load_arcconfig():
    """Return (path, config) for the nearest .arcconfig, if any.

    Either element may be None: the path when no .arcconfig was found,
    the config when the file exists but could not be read or parsed.
    """
    arcconfig_path = phlsys_arcconfig.find_arcconfig()
    arcconfig = None
    if arcconfig_path is not None:
        try:
            arcconfig = phlsys_arcconfig.load(arcconfig_path)
        except (ValueError, EnvironmentError):
            # invalid json or unreadable file; report as 'not loaded'
            arcconfig = None
    return arcconfig_path, arcconfig
def _load_arcrc():
    """Return (arcrc, path) for the user's ~/.arcrc, if any.

    Either element may be None: the path when no .arcrc was found, the
    arcrc when the file exists but could not be read or parsed.
    """
    arcrc_path = phlsys_arcrc.find_arcrc()
    arcrc = None
    if arcrc_path is not None:
        try:
            arcrc = phlsys_arcrc.load(arcrc_path)
        except (ValueError, EnvironmentError):
            # invalid json or unreadable file; report as 'not loaded'
            arcrc = None
    return arcrc, arcrc_path
def _fix_uri(explanations, uri):
    """Normalize 'uri' to a conduit uri, recording any change made."""
    fixed = phlsys_conduit.make_conduit_uri(uri)
    if fixed != uri:
        # show the user a diff of what was assumed about their uri
        delta = list(difflib.Differ().compare([uri], [fixed]))
        delta = '\n'.join(' ' + line.strip() for line in delta)
        explanations.append("assumed uri to conduit:\n{0}".format(delta))
    return fixed
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| |
import sys
import shutil
import os
import stat
import re
import posixpath
import pkg_resources
import zipfile
import tarfile
from pip.exceptions import InstallationError, BadCommand
from pip.backwardcompat import WindowsError, string_types, raw_input
from pip.locations import site_packages, running_under_virtualenv
from pip.log import logger
# Names re-exported by 'from pip.util import *'.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'find_command', 'ask', 'Inf',
           'normalize_name', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'make_path_relative', 'normalize_path',
           'renames', 'get_terminal_size',
           'unzip_file', 'untar_file', 'create_download_cache_folder',
           'cache_download', 'unpack_file']
def rmtree(dir, ignore_errors=False):
    """Remove a directory tree, retrying read-only files via the handler."""
    shutil.rmtree(
        dir,
        ignore_errors=ignore_errors,
        onerror=rmtree_errorhandler,
    )
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown.  We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    # Only handle the specific "access denied" errors described below;
    # anything else is re-raised untouched.
    exctype, value = exc_info[:2]
    # On Python 2.4, it will be OSError number 13
    # On all more recent Pythons, it'll be WindowsError number 5
    if not ((exctype is WindowsError and value.args[0] == 5) or
            (exctype is OSError and value.args[0] == 13)):
        raise
    # file type should currently be read only
    if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
        # not actually a read-only problem; re-raise the original error
        raise
    # convert to read/write
    os.chmod(path, stat.S_IWRITE)
    # use the original function to repeat the operation
    func(path)
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    path = os.path.normcase(os.path.abspath(path))
    cwd = os.getcwd()
    if path.startswith(cwd + os.path.sep):
        # shorten paths beneath the working directory to './...'
        path = '.' + path[len(cwd):]
    return path
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to
    (adding .bak, .bak2, etc)"""
    n = 1
    candidate = dir + ext
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def find_command(cmd, paths=None, pathext=None):
    """Searches the PATH for the given command and returns its path"""
    if paths is None:
        paths = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(paths, string_types):
        # a single directory was passed in; treat it as a one-element list
        paths = [paths]
    # Windows-style executable extensions (.exe, .bat, ...), lower-cased.
    if pathext is None:
        pathext = get_pathext()
    extensions = pathext.lower().split(os.pathsep)
    if os.path.splitext(cmd)[1].lower() in extensions:
        # the command already carries one of the extensions; don't add more
        extensions = ['']
    for directory in paths:
        base = os.path.join(directory, cmd)
        # try each extension first, then the bare name, as before
        for candidate in [base + ext for ext in extensions] + [base]:
            if os.path.isfile(candidate):
                return candidate
    raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
    """Returns the path extensions from environment or a default"""
    if default_pathext is None:
        # classic Windows executable extensions
        default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
    return os.environ.get('PATHEXT', default_pathext)
def ask(message, options):
    """Ask the message interactively, with the given possible responses"""
    while True:
        # refuse to block for input when the user asked for non-interactive
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
        response = raw_input(message).strip().lower()
        if response in options:
            return response
        print('Your response (%r) was not one of the expected responses: %s' % (
            response, ', '.join(options)))
class _Inf(object):
    """I am bigger than everything!"""

    def __cmp__(self, other):
        # equal only to itself; strictly greater than anything else
        if self is other:
            return 0
        return 1

    def __repr__(self):
        return 'Inf'

# Single shared sentinel; the class itself is not part of the public API.
Inf = _Inf()
del _Inf
# Matches any single character outside a-z (case-insensitively).
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
    # Lower-case 'name' and replace each non-letter character with '-'.
    return _normalize_re.sub('-', name.lower())
def format_size(bytes):
    """Return a human-readable string for a byte count."""
    if bytes > 1000*1000:
        return '%.1fMb' % (bytes/1000.0/1000)
    if bytes > 10*1000:
        return '%iKb' % (bytes/1000)
    if bytes > 1000:
        return '%.1fKb' % (bytes/1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path)
            and os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an svn repository"""
    revision = re.search(r'<title>[^<]*Revision \d+:', html)
    powered = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    # preserve the 'match-object and match-object' truthy return value
    return revision and powered
def file_contents(filename):
    """Return the contents of 'filename' decoded as UTF-8."""
    # 'with' guarantees the handle is closed even if read/decode raises.
    with open(filename, 'rb') as fp:
        return fp.read().decode('utf-8')
def split_leading_dir(path):
    """Split 'path' into its first component and the remainder."""
    path = str(path).lstrip('/').lstrip('\\')
    has_fwd = '/' in path
    has_back = '\\' in path
    # split on whichever separator appears first
    if has_fwd and (not has_back or path.find('/') < path.find('\\')):
        return path.split('/', 1)
    elif has_back:
        return path.split('\\', 1)
    # no separator at all: the whole path is the leading component
    return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    first = None
    for p in paths:
        head = split_leading_dir(p)[0]
        if not head:
            # a path with no leading component breaks the invariant
            return False
        if first is None:
            first = head
        elif head != first:
            return False
    return True
def make_path_relative(path, rel_to):
    """
    Return 'path' rewritten relative to the directory 'rel_to'.

    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../../../something/a-file.pth'
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../usr/share/something/a-file.pth'
    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
    'a-file.pth'
    """
    filename = os.path.basename(path)
    directory = os.path.normpath(os.path.abspath(os.path.dirname(path)))
    base = os.path.normpath(os.path.abspath(rel_to))
    dir_parts = directory.strip(os.path.sep).split(os.path.sep)
    base_parts = base.strip(os.path.sep).split(os.path.sep)
    # drop the shared leading components
    while dir_parts and base_parts and dir_parts[0] == base_parts[0]:
        dir_parts.pop(0)
        base_parts.pop(0)
    parts = ['..'] * len(base_parts) + dir_parts + [filename]
    if parts == ['']:
        return '.' + os.path.sep
    return os.path.sep.join(parts)
def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    resolved = os.path.realpath(path)
    return os.path.normcase(resolved)
def splitext(path):
    """Like os.path.splitext, but treats '.tar.*' as a single extension."""
    stem, ext = posixpath.splitext(path)
    if stem.lower().endswith('.tar'):
        # fold the '.tar' into the extension: 'x.tar.gz' -> ('x', '.tar.gz')
        stem, tar = stem[:-4], stem[-4:]
        ext = tar + ext
    return stem, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    dest_dir, dest_name = os.path.split(new)
    if dest_dir and dest_name and not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    # shutil.move works across filesystems, unlike a bare os.rename
    shutil.move(old, new)
    src_dir, src_name = os.path.split(old)
    if src_dir and src_name:
        try:
            # prune any now-empty parent directories of the old location
            os.removedirs(src_dir)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.
    If we're not in a virtualenv, all paths are considered "local."
    """
    if not running_under_virtualenv():
        return True
    prefix = normalize_path(sys.prefix)
    return normalize_path(path).startswith(prefix)
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.
    """
    # Delegates: resolve the dist's on-disk location, then apply is_local().
    return is_local(dist_location(dist))
def get_installed_distributions(local_only=True, skip=('setuptools', 'pip', 'python')):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` is an iterable of lower-case project names to ignore.
    """
    results = []
    for dist in pkg_resources.working_set:
        if dist.key in skip:
            continue
        if local_only and not dist_is_local(dist):
            continue
        results.append(dist)
    return results
def egg_link_path(dist):
    """
    Return the path where we'd expect to find a .egg-link file for
    this distribution. (There doesn't seem to be any metadata in the
    Distribution object for a develop egg that points back to its
    .egg-link and easy-install.pth files).

    This won't find a globally-installed develop egg if we're in a
    virtualenv.
    """
    link_name = dist.project_name + '.egg-link'
    return os.path.join(site_packages, link_name)
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    egg_link = egg_link_path(dist)
    # develop installs are identified by the presence of the egg-link file
    return egg_link if os.path.exists(egg_link) else dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window."""
    def ioctl_GWINSZ(fd):
        # Ask the terminal driver for (rows, cols); None on any failure.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                 '1234'))
        except Exception:
            return None
        # a (0, 0) answer means the fd isn't a usable terminal
        # (the original repeated this check twice; once is enough)
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdin/stdout/stderr may be redirected; try the controlling tty
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        # final fallback: environment variables or the classic 80x25
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
def unzip_file(filename, location, flatten=True):
    """Unzip the zip archive at 'filename' into the directory 'location'.

    When 'flatten' is true and every member lives under a single
    top-level directory, that leading directory is stripped.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    archive_fp = open(filename, 'rb')
    try:
        archive = zipfile.ZipFile(archive_fp)
        names = archive.namelist()
        strip_leading = flatten and has_leading_dir(names)
        for name in names:
            data = archive.read(name)
            member = split_leading_dir(name)[1] if strip_leading else name
            target = os.path.join(location, member)
            parent = os.path.dirname(target)
            if not os.path.exists(parent):
                os.makedirs(parent)
            if target.endswith('/') or target.endswith('\\'):
                # directory entry: just make sure it exists
                if not os.path.exists(target):
                    os.makedirs(target)
            else:
                out = open(target, 'wb')
                try:
                    out.write(data)
                finally:
                    out.close()
    finally:
        archive_fp.close()
def untar_file(filename, location):
    """Untar the file (tar file located at filename) to the destination location"""
    if not os.path.exists(location):
        os.makedirs(location)
    # Pick the tarfile mode from the extension; fall back to transparent
    # compression detection ('r:*') when the extension is unknown.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warn('Cannot determine compression type for file %s' % filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                # strip the single shared top-level directory
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError):
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
    finally:
        tar.close()
def create_download_cache_folder(folder):
    """Create the download-cache directory, logging the action."""
    # Temporarily dedent so the notice lines up with top-level log output.
    logger.indent -= 2
    logger.notify('Creating supposed download cache at %s' % folder)
    logger.indent += 2
    os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
    """Move a completed download into the cache, recording its content-type."""
    logger.notify('Storing download in cache at %s' % display_path(target_file))
    shutil.copyfile(temp_location, target_file)
    # remember the HTTP content-type in a sidecar file next to the download
    meta = open(target_file + '.content-type', 'w')
    meta.write(content_type)
    meta.close()
    os.unlink(temp_location)
def unpack_file(filename, location, content_type, link):
    """Unpack an archive at 'filename' into 'location', picking the
    unpacker from the content-type, filename extension, or sniffing."""
    if (content_type == 'application/zip'
        or filename.endswith('.zip')
        or filename.endswith('.pybundle')
        or zipfile.is_zipfile(filename)):
        # .pybundle archives keep their internal layout, so don't flatten
        unzip_file(filename, location, flatten=not filename.endswith('.pybundle'))
    elif (content_type == 'application/x-gzip'
          or tarfile.is_tarfile(filename)
          or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html')
          and is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        ## FIXME: handle?
        ## FIXME: magic signatures?
        logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
                     % (filename, location, content_type))
        raise InstallationError('Cannot determine archive format of %s' % location)
| |
# Some code has been reused and modified from the original by Mathieu Fenniak:
# Parameters management in Flate and LZW algorithms, asciiHexDecode and ascii85Decode
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By Jose Miguel Esparza <jesparza AT eternal-todo.com>
#
# Copyright (C) 2011 Jose Miguel Esparza
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#
'''
Module to manage encoding/decoding in PDF files
'''
import sys, zlib, lzw, struct
from PDFUtils import getNumsFromBytes, getBytesFromBits, getBitsFromNum
from ccitt import CCITTFax
def decodeStream(stream, filter, parameters = {}):
    '''
    Decode the given stream

    @param stream: Stream to be decoded (string)
    @param filter: Filter to apply to decode the stream
    @param parameters: List of PDFObjects containing the parameters for the filter
    @return: A tuple (status,statusContent), where statusContent is the decoded stream in case status = 0 or an error in case status = -1
    '''
    if filter == '/ASCIIHexDecode' or filter == 'AHx':
        ret = asciiHexDecode(stream)
    elif filter == '/ASCII85Decode' or filter == 'A85':
        ret = ascii85Decode(stream)
    elif filter == '/LZWDecode' or filter == 'LZW':
        ret = lzwDecode(stream, parameters)
    elif filter == '/FlateDecode' or filter == 'Fl':
        ret = flateDecode(stream, parameters)
    elif filter == '/RunLengthDecode' or filter == 'RL':
        ret = runLengthDecode(stream)
    elif filter == '/CCITTFaxDecode' or filter == 'CCF':
        ret = ccittFaxDecode(stream, parameters)
    elif filter == '/JBIG2Decode':
        ret = jbig2Decode(stream, parameters)
    elif filter == '/DCTDecode' or filter == 'DCT':
        ret = dctDecode(stream, parameters)
    elif filter == '/JPXDecode':
        ret = jpxDecode(stream)
    elif filter == '/Crypt':
        ret = crypt(stream, parameters)
    else:
        # Previously an unrecognized filter left 'ret' unbound and raised
        # NameError; report it through the module's (-1, error) convention.
        ret = (-1, 'Unsupported filter')
    return ret
def encodeStream(stream, filter, parameters = {}):
    '''
    Encode the given stream

    @param stream: Stream to be decoded (string)
    @param filter: Filter to apply to decode the stream
    @param parameters: List of PDFObjects containing the parameters for the filter
    @return: A tuple (status,statusContent), where statusContent is the encoded stream in case status = 0 or an error in case status = -1
    '''
    if filter == '/ASCIIHexDecode':
        ret = asciiHexEncode(stream)
    elif filter == '/ASCII85Decode':
        ret = ascii85Encode(stream)
    elif filter == '/LZWDecode':
        ret = lzwEncode(stream, parameters)
    elif filter == '/FlateDecode':
        ret = flateEncode(stream, parameters)
    elif filter == '/RunLengthDecode':
        ret = runLengthEncode(stream)
    elif filter == '/CCITTFaxDecode':
        ret = ccittFaxEncode(stream, parameters)
    elif filter == '/JBIG2Decode':
        ret = jbig2Encode(stream, parameters)
    elif filter == '/DCTDecode':
        ret = dctEncode(stream, parameters)
    elif filter == '/JPXDecode':
        ret = jpxEncode(stream)
    elif filter == '/Crypt':
        ret = crypt(stream, parameters)
    else:
        # Previously an unrecognized filter left 'ret' unbound and raised
        # NameError; report it through the module's (-1, error) convention.
        ret = (-1, 'Unsupported filter')
    return ret
'''
The ascii85Decode code is part of pdfminer (http://pypi.python.org/pypi/pdfminer/)
Copyright (c) 2004-2010 Yusuke Shinyama <yusuke at cs dot nyu dot edu>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
In ASCII85 encoding, every four bytes are encoded with five ASCII
letters, using 85 different types of characters (as 256**4 < 85**5).
When the length of the original bytes is not a multiple of 4, a special
rule is used for round up.
Adobe's ASCII85 implementation differs slightly from
the original in its handling of the final characters.
The sample string is taken from:
http://en.wikipedia.org/w/index.php?title=Ascii85
>>> ascii85decode('9jqo^BlbD-BleB1DJ+*+F(f,q')
'Man is distinguished'
>>> ascii85decode('E,9)oF*2M7/c~>')
'pleasure.'
"""
'''
def ascii85Decode(stream):
    '''
    Method to decode streams using ASCII85

    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    group_len = 0
    value = 0
    decodedStream = ''
    try:
        for char in stream:
            if '!' <= char <= 'u':
                # accumulate base-85 digits; every 5 make a 4-byte word
                group_len += 1
                value = value * 85 + (ord(char) - 33)
                if group_len == 5:
                    decodedStream += struct.pack('>L', value)
                    group_len = 0
                    value = 0
            elif char == 'z':
                # 'z' abbreviates four zero bytes; only legal between groups
                assert group_len == 0
                decodedStream += '\0\0\0\0'
            elif char == '~':
                if group_len:
                    # pad the trailing partial group with 'u' (84) digits
                    for _ in range(5 - group_len):
                        value = value * 85 + 84
                    decodedStream += struct.pack('>L', value)[:group_len - 1]
                break
    except:
        return (-1, 'Unspecified error')
    return (0, decodedStream)
def ascii85Encode(stream):
    '''
    Method to encode streams using ASCII85 (NOT SUPPORTED YET)

    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Encoding has not been implemented; always report failure.
    return (-1, 'Ascii85Encode not supported yet')
def asciiHexDecode(stream):
    '''
    Method to decode streams using hexadecimal encoding

    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    eod = '>'
    decodedStream = ''
    char = ''
    index = 0
    while index < len(stream):
        c = stream[index]
        if c == eod:
            # End-of-data marker.  Per the PDF spec, a single pending hex
            # digit is treated as if a '0' followed it.  (The previous
            # version tested 'len(decodedStream) % 2' -- the decoded length
            # rather than the pending digit -- and then unconditionally
            # called int('', 16), so any even-digit input ending in '>'
            # wrongly returned an error.)
            if char:
                char += '0'
                try:
                    decodedStream += chr(int(char, base=16))
                except:
                    return (-1,'Error in hexadecimal conversion')
            break
        elif c.isspace():
            # whitespace between hex digits is ignored
            index += 1
            continue
        char += c
        if len(char) == 2:
            # two hex digits accumulated: emit one decoded byte
            try:
                decodedStream += chr(int(char, base=16))
            except:
                return (-1,'Error in hexadecimal conversion')
            char = ''
        index += 1
    return (0,decodedStream)
def asciiHexEncode(stream):
    '''
    Method to encode streams using hexadecimal encoding

    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Relies on the Python 2 'hex' string codec; any failure is reported
    # through the module's (-1, error) convention.
    try:
        return (0, stream.encode('hex'))
    except:
        return (-1, 'Error in hexadecimal conversion')
def flateDecode(stream, parameters):
    '''
    Method to decode streams using the Flate algorithm

    @param stream: A PDF stream
    @param parameters: Dictionary of PDFObjects with the filter parameters
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    decodedStream = ''
    try:
        decodedStream = zlib.decompress(stream)
    except:
        return (-1,'Error decompressing string')
    if parameters == None or parameters == {}:
        # no filter parameters: the raw inflated data is the result
        return (0,decodedStream)
    else:
        # NOTE: dict.has_key() is Python 2 only.
        # Predictor 1 means no prediction; >=2 requires post-processing.
        if parameters.has_key('/Predictor'):
            predictor = parameters['/Predictor'].getRawValue()
        else:
            predictor = 1
        # Columns = number of samples per row
        if parameters.has_key('/Columns'):
            columns = parameters['/Columns'].getRawValue()
        else:
            columns = 1
        # Colors = number of components per sample
        if parameters.has_key('/Colors'):
            colors = parameters['/Colors'].getRawValue()
            if colors < 1:
                colors = 1
        else:
            colors = 1
        # BitsPerComponent: number of bits per color component
        if parameters.has_key('/BitsPerComponent'):
            bits = parameters['/BitsPerComponent'].getRawValue()
            if bits not in [1,2,4,8,16]:
                bits = 8
        else:
            bits = 8
        if predictor != None and predictor != 1:
            # undo PNG/TIFF predictor encoding on the inflated data
            ret = post_prediction(decodedStream, predictor, columns, colors, bits)
            return ret
        else:
            return (0,decodedStream)
def flateEncode(stream, parameters):
    '''
    Method to encode streams using the Flate algorithm
    @param stream: A PDF stream
    @param parameters: Optional dictionary of encode parameters (PDF objects exposing getRawValue)
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    if parameters is None or parameters == {}:
        try:
            return (0,zlib.compress(stream))
        except:
            return (-1,'Error compressing string')
    # /Predictor: prediction algorithm to apply before compressing (1 = none)
    # NOTE: 'key in dict' replaces the Python-2-only dict.has_key()
    if '/Predictor' in parameters:
        predictor = parameters['/Predictor'].getRawValue()
    else:
        predictor = 1
    # /Columns: number of samples per row
    if '/Columns' in parameters:
        columns = parameters['/Columns'].getRawValue()
    else:
        columns = 1
    # /Colors: number of color components per sample (minimum 1)
    colors = 1
    if '/Colors' in parameters:
        colors = parameters['/Colors'].getRawValue()
        if colors < 1:
            colors = 1
    # /BitsPerComponent: number of bits per color component (default 8)
    bits = 8
    if '/BitsPerComponent' in parameters:
        bits = parameters['/BitsPerComponent'].getRawValue()
        if bits not in [1,2,4,8,16]:
            bits = 8
    if predictor is not None and predictor != 1:
        # Apply the predictor before compressing to improve the ratio
        ret = pre_prediction(stream, predictor, columns, colors, bits)
        if ret[0] == -1:
            return ret
        output = ret[1]
    else:
        output = stream
    try:
        return (0,zlib.compress(output))
    except:
        return (-1,'Error compressing string')
def lzwDecode(stream, parameters):
    '''
    Method to decode streams using the LZW algorithm
    @param stream: A PDF stream
    @param parameters: Optional dictionary of decode parameters (PDF objects exposing getRawValue)
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    try:
        decodedStream = lzw.lzwdecode(stream)
    except:
        return (-1,'Error decompressing string')
    if parameters is None or parameters == {}:
        return (0,decodedStream)
    # /Predictor: prediction algorithm applied before compression (1 = none)
    # NOTE: 'key in dict' replaces the Python-2-only dict.has_key()
    if '/Predictor' in parameters:
        predictor = parameters['/Predictor'].getRawValue()
    else:
        predictor = 1
    # /Columns: number of samples per row
    if '/Columns' in parameters:
        columns = parameters['/Columns'].getRawValue()
    else:
        columns = 1
    # /Colors: number of color components per sample (minimum 1)
    colors = 1
    if '/Colors' in parameters:
        colors = parameters['/Colors'].getRawValue()
        if colors < 1:
            colors = 1
    # /BitsPerComponent: number of bits per color component (default 8)
    bits = 8
    if '/BitsPerComponent' in parameters:
        bits = parameters['/BitsPerComponent'].getRawValue()
        if bits not in [1,2,4,8,16]:
            bits = 8
    # /EarlyChange: code-length switch timing; read for completeness but not
    # currently honored by the underlying lzw decoder
    if '/EarlyChange' in parameters:
        earlyChange = parameters['/EarlyChange'].getRawValue()
    else:
        earlyChange = 1
    if predictor is not None and predictor != 1:
        # Undo the predictor applied before compression
        return post_prediction(decodedStream, predictor, columns, colors, bits)
    return (0,decodedStream)
def lzwEncode(stream, parameters):
    '''
    Method to encode streams using the LZW algorithm
    @param stream: A PDF stream
    @param parameters: Optional dictionary of encode parameters (PDF objects exposing getRawValue)
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    encodedStream = ''
    if parameters is None or parameters == {}:
        try:
            generator = lzw.compress(stream)
            for c in generator:
                encodedStream += c
            return (0,encodedStream)
        except:
            return (-1,'Error compressing string')
    # /Predictor: prediction algorithm to apply before compressing (1 = none)
    # NOTE: 'key in dict' replaces the Python-2-only dict.has_key()
    if '/Predictor' in parameters:
        predictor = parameters['/Predictor'].getRawValue()
    else:
        predictor = 1
    # /Columns: number of samples per row
    if '/Columns' in parameters:
        columns = parameters['/Columns'].getRawValue()
    else:
        columns = 1
    # /Colors: number of color components per sample (minimum 1)
    colors = 1
    if '/Colors' in parameters:
        colors = parameters['/Colors'].getRawValue()
        if colors < 1:
            colors = 1
    # /BitsPerComponent: number of bits per color component (default 8)
    bits = 8
    if '/BitsPerComponent' in parameters:
        bits = parameters['/BitsPerComponent'].getRawValue()
        if bits not in [1,2,4,8,16]:
            bits = 8
    # /EarlyChange: code-length switch timing; read for completeness but not
    # currently honored by the underlying lzw encoder
    if '/EarlyChange' in parameters:
        earlyChange = parameters['/EarlyChange'].getRawValue()
    else:
        earlyChange = 1
    if predictor is not None and predictor != 1:
        # Apply the predictor before compressing to improve the ratio
        ret = pre_prediction(stream, predictor, columns, colors, bits)
        if ret[0] == -1:
            return ret
        output = ret[1]
    else:
        output = stream
    try:
        generator = lzw.compress(output)
        for c in generator:
            encodedStream += c
        return (0,encodedStream)
    except:
        # Bug fix: this is the encoder, the old message said 'decompressing'
        return (-1,'Error compressing string')
def pre_prediction(stream, predictor, columns, colors, bits):
    '''
    Predictor function to make the stream more predictable and improve compression (PDF Specification)
    @param stream: The stream to be modified
    @param predictor: The type of predictor to apply
    @param columns: Number of samples per row
    @param colors: Number of colors per sample
    @param bits: Number of bits per color
    @return: A tuple (status,statusContent), where statusContent is the modified stream in case status = 0 or an error in case status = -1
    '''
    output = ''
    #TODO: TIFF and more PNG predictions
    # PNG prediction: predictor values 10-15 select PNG filter types 0-5
    if predictor >= 10 and predictor <= 15:
        # PNG prediction can vary from row to row; each output row is
        # prefixed with its filter-type byte
        for row in range(len(stream) // columns):
            rowdata = [ord(x) for x in stream[(row*columns):((row+1)*columns)]]
            filterByte = predictor - 10
            rowdata = [filterByte]+rowdata
            if filterByte == 0:
                # None filter: raw bytes pass through
                pass
            elif filterByte == 1:
                # Sub filter: each byte becomes the difference with its left
                # neighbour (walk right-to-left so originals are still intact)
                for i in range(len(rowdata)-1,1,-1):
                    if rowdata[i] < rowdata[i-1]:
                        rowdata[i] = rowdata[i] + 256 - rowdata[i-1]
                    else:
                        rowdata[i] = rowdata[i] - rowdata[i-1]
            else:
                # Up/Average/Paeth filters are not implemented.
                # Bug fix: the filterByte == 2 branch used to build the error
                # tuple WITHOUT returning it, silently emitting unfiltered data.
                return (-1,'Unsupported predictor')
            output += (''.join([chr(x) for x in rowdata]))
        return (0,output)
    else:
        return (-1,'Unsupported predictor')
def post_prediction(decodedStream, predictor, columns, colors, bits):
    '''
    Predictor function to obtain the real stream, removing the prediction (PDF Specification)
    @param decodedStream: The decoded stream to be modified
    @param predictor: The type of predictor to apply
    @param columns: Number of samples per row
    @param colors: Number of colors per sample
    @param bits: Number of bits per color
    @return: A tuple (status,statusContent), where statusContent is the modified decoded stream in case status = 0 or an error in case status = -1
    '''
    output = ''
    # Bytes needed to hold one row of samples, rounded up to whole bytes
    bytesPerRow = (colors * bits * columns + 7) // 8
    # TIFF - 2
    # http://www.gnupdf.org/PNG_and_TIFF_Predictors_Filter#TIFF
    if predictor == 2:
        numRows = len(decodedStream) // bytesPerRow
        bitmask = 2 ** bits - 1
        outputBitsStream = ''
        for rowIndex in range(numRows):
            row = decodedStream[rowIndex*bytesPerRow:rowIndex*bytesPerRow+bytesPerRow]
            ret,colorNums = getNumsFromBytes(row, bits)
            if ret == -1:
                return (ret,colorNums)
            # Horizontal differencing: each sample stores the difference with
            # the same component of the previous pixel, so accumulate per
            # component across the row
            pixel = [0 for x in range(colors)]
            for i in range(columns):
                for j in range(colors):
                    # Sample i, component j.
                    # Bug fix: was colorNums[i+j], which indexes the wrong
                    # sample whenever colors > 1 (row is colors components
                    # per pixel, laid out pixel-major).
                    diffPixel = colorNums[i*colors+j]
                    pixel[j] = (pixel[j] + diffPixel) & bitmask
                    ret, outputBits = getBitsFromNum(pixel[j],bits)
                    if ret == -1:
                        return (ret,outputBits)
                    outputBitsStream += outputBits
        output = getBytesFromBits(outputBitsStream)
        return output
    # PNG prediction
    # http://www.libpng.org/pub/png/spec/1.2/PNG-Filters.html
    # http://www.gnupdf.org/PNG_and_TIFF_Predictors_Filter#TIFF
    elif predictor >= 10 and predictor <= 15:
        # Each encoded row carries one extra leading filter-type byte
        bytesPerRow += 1
        numRows = (len(decodedStream) + bytesPerRow - 1) // bytesPerRow
        numSamplesPerRow = columns + 1
        bytesPerSample = (colors * bits + 7) // 8
        # Previous (already reconstructed) row; all zeros above the first row
        upRowdata = (0,) * numSamplesPerRow
        for row in range(numRows):
            rowdata = [ord(x) for x in decodedStream[(row*bytesPerRow):((row+1)*bytesPerRow)]]
            # PNG prediction can vary from row to row
            filterByte = rowdata[0]
            rowdata[0] = 0
            if filterByte == 0:
                # None
                pass
            elif filterByte == 1:
                # Sub - 11: add the reconstructed byte one sample to the left
                for i in range(1, numSamplesPerRow):
                    if i < bytesPerSample:
                        prevSample = 0
                    else:
                        prevSample = rowdata[i-bytesPerSample]
                    rowdata[i] = (rowdata[i] + prevSample) % 256
            elif filterByte == 2:
                # Up - 12: add the byte directly above
                for i in range(1, numSamplesPerRow):
                    upSample = upRowdata[i]
                    rowdata[i] = (rowdata[i] + upSample) % 256
            elif filterByte == 3:
                # Average - 13: add the floor average of left and above
                for i in range(1, numSamplesPerRow):
                    upSample = upRowdata[i]
                    if i < bytesPerSample:
                        prevSample = 0
                    else:
                        prevSample = rowdata[i-bytesPerSample]
                    rowdata[i] = (rowdata[i] + ((prevSample+upSample)//2)) % 256
            elif filterByte == 4:
                # Paeth - 14: add the neighbour (left/up/up-left) closest to
                # the Paeth linear estimate
                for i in range(1, numSamplesPerRow):
                    upSample = upRowdata[i]
                    if i < bytesPerSample:
                        prevSample = 0
                        upPrevSample = 0
                    else:
                        prevSample = rowdata[i-bytesPerSample]
                        upPrevSample = upRowdata[i-bytesPerSample]
                    p = prevSample + upSample - upPrevSample
                    pa = abs(p - prevSample)
                    pb = abs(p - upSample)
                    pc = abs(p - upPrevSample)
                    if pa <= pb and pa <= pc:
                        nearest = prevSample
                    elif pb <= pc:
                        nearest = upSample
                    else:
                        nearest = upPrevSample
                    rowdata[i] = (rowdata[i] + nearest) % 256
            else:
                # Optimum - 15: unknown per-row choice; pass the row through
                #return (-1,'Unsupported predictor')
                pass
            upRowdata = rowdata
            # Strip the filter-type placeholder before emitting the row
            output += (''.join([chr(x) for x in rowdata[1:]]))
        return (0,output)
    else:
        return (-1,'Wrong value for predictor')
def runLengthDecode(stream):
    '''
    Method to decode streams using the Run-Length algorithm
    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    decodedStream = ''
    pos = 0
    try:
        while pos < len(stream):
            runLength = ord(stream[pos])
            if runLength < 128:
                # Literal run: copy the next runLength+1 bytes verbatim
                decodedStream += stream[pos+1:pos+runLength+2]
                pos += runLength+2
            elif runLength > 128:
                # Replicated run: repeat the following byte 257-runLength times
                decodedStream += stream[pos+1] * (257 - runLength)
                pos += 2
            else:
                # 128 marks end of data
                break
    except:
        return (-1,'Error decoding string')
    return (0,decodedStream)
def runLengthEncode(stream):
    '''
    Method to encode streams using the Run-Length algorithm (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'RunLengthEncode not supported yet')
def ccittFaxDecode(stream, parameters):
    '''
    Method to decode streams using the CCITT facsimile standard
    (only "Group 3, 1-D" encoding, /K = 0, is supported)
    @param stream: A PDF stream
    @param parameters: Optional dictionary of decode parameters (PDF objects exposing getRawValue)
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    if parameters is None or parameters == {}:
        try:
            decodedStream = CCITTFax().decode(stream)
            return (0, decodedStream)
        except:
            return (-1,'Error decompressing string')
    # K = A code identifying the encoding scheme used
    # NOTE: 'key in dict' replaces the Python-2-only dict.has_key()
    if '/K' in parameters:
        k = parameters['/K'].getRawValue()
        if type(k) != int:
            k = 0
        elif k != 0:
            # Only supported "Group 3, 1-D" encoding (Pure one-dimensional encoding)
            return (-1,'CCITT encoding scheme not supported')
    else:
        k = 0
    # EndOfLine = A flag indicating whether end-of-line bit patterns are required to be present in the encoding
    eol = False
    if '/EndOfLine' in parameters:
        eol = parameters['/EndOfLine'].getRawValue() == 'true'
    # EncodedByteAlign = A flag indicating whether the filter expects extra 0 bits before each encoded line so that the line begins on a byte boundary
    byteAlign = False
    if '/EncodedByteAlign' in parameters:
        byteAlign = parameters['/EncodedByteAlign'].getRawValue() == 'true'
    # Columns = The width of the image in pixels (default 1728)
    columns = 1728
    if '/Columns' in parameters:
        columns = parameters['/Columns'].getRawValue()
        if type(columns) != int:
            columns = 1728
    # Rows = The height of the image in scan lines
    rows = 0
    if '/Rows' in parameters:
        rows = parameters['/Rows'].getRawValue()
        if type(rows) != int:
            rows = 0
    # EndOfBlock = A flag indicating whether the data end with an end-of-block pattern (default True)
    eob = True
    if '/EndOfBlock' in parameters:
        eob = parameters['/EndOfBlock'].getRawValue() != 'false'
    # BlackIs1 = A flag indicating whether 1 bits are to be interpreted as black pixels and 0 bits as white pixels
    blackIs1 = False
    if '/BlackIs1' in parameters:
        blackIs1 = parameters['/BlackIs1'].getRawValue() == 'true'
    # DamagedRowsBeforeError = The number of damaged rows of data to be tolerated before an error occurs
    damagedRowsBeforeError = 0
    if '/DamagedRowsBeforeError' in parameters:
        damagedRowsBeforeError = parameters['/DamagedRowsBeforeError'].getRawValue()
    try:
        decodedStream = CCITTFax().decode(stream, k, eol, byteAlign, columns, rows, eob, blackIs1, damagedRowsBeforeError)
        return (0, decodedStream)
    except:
        return (-1,'Error decompressing string')
def ccittFaxEncode(stream, parameters):
    '''
    Method to encode streams using the CCITT facsimile standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of encode parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'CcittFaxEncode not supported yet')
def crypt(stream, parameters):
    '''
    Method to encrypt streams using a PDF security handler (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of encryption parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the encrypted PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'Crypt not supported yet')
def decrypt(stream, parameters):
    '''
    Method to decrypt streams using a PDF security handler (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of decryption parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the decrypted PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'Decrypt not supported yet')
def dctDecode(stream, parameters):
    '''
    Method to decode streams using a DCT technique based on the JPEG standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of decode parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'DctDecode not supported yet')
def dctEncode(stream, parameters):
    '''
    Method to encode streams using a DCT technique based on the JPEG standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of encode parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'DctEncode not supported yet')
def jbig2Decode(stream, parameters):
    '''
    Method to decode streams using the JBIG2 standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of decode parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'Jbig2Decode not supported yet')
def jbig2Encode(stream, parameters):
    '''
    Method to encode streams using the JBIG2 standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @param parameters: A dictionary of encode parameters (currently ignored)
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'Jbig2Encode not supported yet')
def jpxDecode(stream):
    '''
    Method to decode streams using the JPEG2000 standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'JpxDecode not supported yet')
def jpxEncode(stream):
    '''
    Method to encode streams using the JPEG2000 standard (NOT IMPLEMENTED YET)
    @param stream: A PDF stream
    @return: A tuple (status,statusContent), where statusContent is the encoded PDF stream in case status = 0 or an error in case status = -1
    '''
    # Not implemented: always reports failure (dead local removed)
    return (-1,'JpxEncode not supported yet')
| |
# coding: utf-8
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.fsl as fsl
import os
def create_dmri_preprocessing(name='dMRI_preprocessing', use_fieldmap=True, fieldmap_registration=False):
    """Creates a workflow that chains the necessary pipelines to
    correct for motion, eddy currents, and, if selected, susceptibility
    artifacts in EPI dMRI sequences.

    .. warning:: IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be advised
      that not all the dicom converters ensure the consistency between the resulting
      nifti orientation and the b matrix table (e.g. dcm2nii checks it).

    Example
    -------

    >>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep')
    >>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii'
    >>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec'
    >>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0
    >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
    >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii'
    >>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46
    >>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77
    >>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False
    >>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True
    >>> nipype_dmri_preprocess.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file - The diffusion data
        inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation
        inputnode.ref_num - The reference volume (a b=0 volume in dMRI)
        inputnode.fieldmap_mag - The magnitude of the fieldmap
        inputnode.fieldmap_pha - The phase difference of the fieldmap
        inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners)
        inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.)
        inputnode.epi_rev_encoding - True if reverse encoding was used (generally False)
        inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor)
        inputnode.vsm_sigma - Sigma (in mm.) of the gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm)

    Outputs::

        outputnode.dmri_corrected
        outputnode.bvec_rotated

    Optional arguments::

        use_fieldmap - True if there are fieldmap files that should be used (default True)
        fieldmap_registration - True if registration to fieldmap should be performed (default False)
    """
    pipeline = pe.Workflow(name=name)

    # Single entry point exposing every user-supplied parameter
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_file', 'in_bvec', 'ref_num', 'fieldmap_mag',
                'fieldmap_pha', 'te_diff', 'epi_echospacing',
                'epi_rev_encoding', 'pi_accel_factor', 'vsm_sigma']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['dmri_corrected', 'bvec_rotated']),
        name='outputnode')

    # Motion correction also rotates the b-vectors; eddy-current correction
    # runs afterwards on the motion-corrected data and leaves the b-matrix alone
    motion = create_motion_correct_pipeline()
    eddy = create_eddy_correct_pipeline()

    if use_fieldmap:  # we have a fieldmap, so lets use it (yay!)
        # Append fieldmap-based susceptibility correction after eddy correction
        susceptibility = create_epidewarp_pipeline(
            fieldmap_registration=fieldmap_registration)

        pipeline.connect([
            (inputnode, motion, [('in_file', 'inputnode.in_file'),
                                 ('in_bvec', 'inputnode.in_bvec'),
                                 ('ref_num', 'inputnode.ref_num')]),
            (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
            (motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
            (eddy, susceptibility, [('outputnode.eddy_corrected', 'inputnode.in_file')]),
            (inputnode, susceptibility, [('ref_num', 'inputnode.ref_num'),
                                         ('fieldmap_mag', 'inputnode.fieldmap_mag'),
                                         ('fieldmap_pha', 'inputnode.fieldmap_pha'),
                                         ('te_diff', 'inputnode.te_diff'),
                                         ('epi_echospacing', 'inputnode.epi_echospacing'),
                                         ('epi_rev_encoding', 'inputnode.epi_rev_encoding'),
                                         ('pi_accel_factor', 'inputnode.pi_accel_factor'),
                                         ('vsm_sigma', 'inputnode.vsm_sigma')]),
            (motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
            (susceptibility, outputnode, [('outputnode.epi_corrected', 'dmri_corrected')])
        ])
    else:  # we don't have a fieldmap, so we just carry on without it :(
        pipeline.connect([
            (inputnode, motion, [('in_file', 'inputnode.in_file'),
                                 ('in_bvec', 'inputnode.in_bvec'),
                                 ('ref_num', 'inputnode.ref_num')]),
            (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
            (motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
            (motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
            (eddy, outputnode, [('outputnode.eddy_corrected', 'dmri_corrected')])
        ])

    return pipeline
def create_motion_correct_pipeline(name='motion_correct'):
    """Creates a pipeline that corrects for motion artifact in dMRI sequences.
    It takes a series of diffusion weighted images and rigidly co-registers
    them to one reference image. Finally, the b-matrix is rotated accordingly
    (Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973),
    making use of the rotation matrix obtained by FLIRT.

    .. warning:: IMPORTANT NOTICE: this workflow rotates the b-vectors, so please be advised
      that not all the dicom converters ensure the consistency between the resulting
      nifti orientation and the b matrix table (e.g. dcm2nii checks it).

    Example
    -------

    >>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect')
    >>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii'
    >>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec'
    >>> nipype_motioncorrect.inputs.inputnode.ref_num = 0
    >>> nipype_motioncorrect.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file
        inputnode.ref_num
        inputnode.in_bvec

    Outputs::

        outputnode.motion_corrected
        outputnode.out_bvec
    """
    wf = pe.Workflow(name=name)

    # Entry / exit points of the pipeline
    in_node = pe.Node(
        niu.IdentityInterface(fields=['in_file', 'ref_num', 'in_bvec']),
        name='inputnode')
    out_node = pe.Node(
        niu.IdentityInterface(fields=['motion_corrected', 'out_bvec']),
        name='outputnode')

    # Split the 4D series, pick the reference volume, and rigidly (6 dof)
    # register every volume to it with FLIRT
    split_vols = pe.Node(fsl.Split(dimension='t'), name='split')
    select_ref = pe.Node(niu.Select(), name='pick_ref')
    flirt = pe.MapNode(
        fsl.FLIRT(no_search=True, interp='spline', padding_size=1, dof=6),
        iterfield=['in_file'], name='coregistration')

    # Rotate the b-matrix with the per-volume FLIRT rotation matrices
    rotate = pe.Node(
        niu.Function(input_names=['in_bvec', 'in_matrix'],
                     output_names=['out_file'], function=_rotate_bvecs),
        name='rotate_b_matrix')

    # Merge the corrected volumes back along time
    merge_vols = pe.Node(fsl.Merge(dimension='t'), name='merge')

    wf.connect([
        (in_node, split_vols, [('in_file', 'in_file')]),
        (split_vols, select_ref, [('out_files', 'inlist')]),
        (in_node, select_ref, [('ref_num', 'index')]),
        (split_vols, flirt, [('out_files', 'in_file')]),
        (in_node, rotate, [('in_bvec', 'in_bvec')]),
        (flirt, rotate, [('out_matrix_file', 'in_matrix')]),
        (select_ref, flirt, [('out', 'reference')]),
        (flirt, merge_vols, [('out_file', 'in_files')]),
        (merge_vols, out_node, [('merged_file', 'motion_corrected')]),
        (rotate, out_node, [('out_file', 'out_bvec')]),
    ])
    return wf
def create_eddy_correct_pipeline(name='eddy_correct'):
    """Creates a pipeline that replaces eddy_correct script in FSL. It takes a
    series of diffusion weighted images and linearly co-registers them to one
    reference image. No rotation of the B-matrix is performed, so this pipeline
    should be executed after the motion correction pipeline.

    Example
    -------

    >>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect')
    >>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii'
    >>> nipype_eddycorrect.inputs.inputnode.ref_num = 0
    >>> nipype_eddycorrect.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file
        inputnode.ref_num

    Outputs::

        outputnode.eddy_corrected
    """
    wf = pe.Workflow(name=name)

    # Entry / exit points of the pipeline
    in_node = pe.Node(
        niu.IdentityInterface(fields=['in_file', 'ref_num']),
        name='inputnode')
    out_node = pe.Node(
        niu.IdentityInterface(fields=['eddy_corrected']),
        name='outputnode')

    # Split the 4D series, pick the reference volume, register every volume
    # to it with a full affine (12 dof) FLIRT, and merge back along time
    split_vols = pe.Node(fsl.Split(dimension='t'), name='split')
    select_ref = pe.Node(niu.Select(), name='pick_ref')
    flirt = pe.MapNode(
        fsl.FLIRT(no_search=True, padding_size=1, dof=12, interp='spline'),
        iterfield=['in_file'], name='coregistration')
    merge_vols = pe.Node(fsl.Merge(dimension='t'), name='merge')

    wf.connect([
        (in_node, split_vols, [('in_file', 'in_file')]),
        (split_vols, select_ref, [('out_files', 'inlist')]),
        (in_node, select_ref, [('ref_num', 'index')]),
        (split_vols, flirt, [('out_files', 'in_file')]),
        (select_ref, flirt, [('out', 'reference')]),
        (flirt, merge_vols, [('out_file', 'in_files')]),
        (merge_vols, out_node, [('merged_file', 'eddy_corrected')]),
    ])
    return wf
def fieldmap_correction(name='fieldmap_correction', nocheck=False):
    """
    Fieldmap-based retrospective correction of EPI images for the susceptibility distortion
    artifact (Jezzard et al., 1995). Fieldmap images are assumed to be already registered
    to EPI data, and a brain mask is required.

    Replaces the former workflow, still available as create_epidewarp_pipeline(). The difference
    with respect the epidewarp pipeline is that now the workflow uses the new fsl_prepare_fieldmap
    available as of FSL 5.0.

    Example
    -------

    >>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp')
    >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
    >>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii'
    >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
    >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
    >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
    >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
    >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y'
    >>> nipype_epicorrect.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file - The volume acquired with EPI sequence
        inputnode.in_mask - A brain mask
        inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file
        inputnode.fieldmap_mag - The magnitude maps (usually 4D, one magnitude per GRE scan)
                                 from the fieldmapping, registered to in_file
        inputnode.te_diff - Time difference in msec. between TE in ms of the fieldmapping (usually a GRE sequence).
        inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If
                                    EPI was acquired with parallel imaging, then the effective echo spacing is
                                    eff_es = es / acc_factor.
        inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y)
        inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)

    Outputs::

        outputnode.epi_corrected
        outputnode.out_vsm
    """
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_file',
                'in_mask',
                'fieldmap_pha',
                'fieldmap_mag',
                'te_diff',
                'epi_echospacing',
                'vsm_sigma',
                'encoding_direction'
                ]), name='inputnode'
    )

    pipeline = pe.Workflow(name=name)

    # Keep first frame from magnitude
    select_mag = pe.Node(fsl.utils.ExtractROI(
        t_size=1, t_min=0), name='select_magnitude')

    # Mask magnitude (it is required by PrepareFieldmap)
    mask_mag = pe.Node( fsl.maths.ApplyMask(), name='mask_magnitude' )

    # Run fsl_prepare_fieldmap to turn the phase difference into a fieldmap
    fslprep = pe.Node( fsl.PrepareFieldmap(), name='prepare_fieldmap' )

    if nocheck:
        # Skip fsl_prepare_fieldmap's sanity checks on the input images
        fslprep.inputs.nocheck = True

    # Use FUGUE to generate the voxel shift map (vsm)
    vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')

    # VSM demean is not anymore present in the epi_reg script
    #vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
    #    'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')

    # Split the EPI series so FUGUE can unwarp each volume independently
    dwi_split = pe.Node(niu.Function(input_names=[
        'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')

    # Equivalent to: 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
    dwi_applyxfm = pe.MapNode(fsl.FUGUE(
        icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')

    # Merge back all volumes
    dwi_merge = pe.Node(fsl.utils.Merge(
        dimension='t'), name='dwi_merge')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_corrected','out_vsm']),
        name='outputnode')

    # NOTE: _ms2sec converts the msec. inputs into the seconds FUGUE expects
    pipeline.connect([
        (inputnode, select_mag, [('fieldmap_mag', 'in_file')])
        ,(inputnode, fslprep, [('fieldmap_pha', 'in_phase'),('te_diff', 'delta_TE') ])
        ,(inputnode, mask_mag, [('in_mask', 'mask_file' )])
        ,(select_mag, mask_mag, [('roi_file', 'in_file')])
        ,(mask_mag, fslprep, [('out_file', 'in_magnitude')])
        ,(fslprep, vsm, [('out_fieldmap', 'phasemap_file')])
        ,(inputnode, vsm, [('fieldmap_mag', 'in_file'),
                           ('encoding_direction','unwarp_direction'),
                           (('te_diff', _ms2sec), 'asym_se_time'),
                           ('vsm_sigma', 'smooth2d'),
                           (('epi_echospacing', _ms2sec), 'dwell_time')])
        ,(mask_mag, vsm, [('out_file', 'mask_file')])
        ,(inputnode, dwi_split, [('in_file', 'in_file')])
        ,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')])
        ,(mask_mag, dwi_applyxfm, [('out_file', 'mask_file')])
        ,(vsm, dwi_applyxfm, [('shift_out_file', 'shift_in_file')])
        ,(inputnode, dwi_applyxfm, [('encoding_direction','unwarp_direction')])
        ,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')])
        ,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')])
        ,(vsm, outputnode, [('shift_out_file','out_vsm') ])
    ])
    return pipeline
def topup_correction( name='topup_correction' ):
    """
    Corrects for susceptibilty distortion of EPI images when one reverse encoding dataset has
    been acquired

    Example
    -------

    >>> nipype_epicorrect = topup_correction('nipype_topup')
    >>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii'
    >>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii'
    >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y'
    >>> nipype_epicorrect.inputs.inputnode.ref_num = 0
    >>> nipype_epicorrect.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding
        inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding
        inputnode.encoding_direction - Direction encoding of in_file_dir
        inputnode.ref_num - Identifier of the reference volumes (usually B0 volume)

    Outputs::

        outputnode.epi_corrected
    """
    pipeline = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_file_dir',
                'in_file_rev',
                'encoding_direction',
                'readout_times',
                'ref_num'
                ]), name='inputnode'
    )

    outputnode = pe.Node( niu.IdentityInterface(
        fields=['out_fieldcoef',
                'out_movpar',
                'out_topup',
                'out_enc_file',
                'epi_corrected'
                ]), name='outputnode'
    )

    # Extract the reference (usually b=0) volume from each encoding direction
    b0_dir = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_1' )
    b0_rev = pe.Node( fsl.ExtractROI( t_size=1 ), name='b0_2' )

    # combin gathers the two reference volumes for topup; combin2 gathers the
    # two full series for applytopup
    combin = pe.Node( niu.Merge(2), name='merge' )
    combin2 = pe.Node( niu.Merge(2), name='merge2' )

    merged = pe.Node( fsl.Merge( dimension='t' ), name='b0_comb' )

    # Estimate the off-resonance field from the opposed-encoding pair, then
    # apply the correction to both full series
    topup = pe.Node( fsl.TOPUP(), name='topup' )
    applytopup = pe.Node( fsl.ApplyTOPUP(in_index=[1,2] ), name='applytopup' )

    pipeline.connect([
        (inputnode, b0_dir, [('in_file_dir','in_file'),('ref_num','t_min')] )
        ,(inputnode, b0_rev, [('in_file_rev','in_file'),('ref_num','t_min')] )
        ,(inputnode, combin2, [('in_file_dir','in1'),('in_file_rev','in2') ] )
        ,(b0_dir, combin, [('roi_file','in1')] )
        ,(b0_rev, combin, [('roi_file','in2')] )
        ,(combin, merged, [('out', 'in_files')] )
        ,(merged, topup, [('merged_file','in_file')])
        ,(inputnode, topup, [('encoding_direction','encoding_direction'),('readout_times','readout_times') ])
        ,(topup, applytopup, [('out_topup','in_topup'),('out_enc_file','encoding_file')])
        ,(combin2, applytopup, [('out','in_files')] )
        ,(topup, outputnode, [('out_fieldcoef','out_fieldcoef'),('out_movpar','out_movpar'),
                              ('out_topup','out_topup'),('out_enc_file','out_enc_file') ])
        ,(applytopup,outputnode, [('out_corrected','epi_corrected')])
    ])
    return pipeline
def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False):
    """ Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl)
    for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap
    information (Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap
    (strictly following the original script) is available using fieldmap_registration=True.
    Example
    -------
    >>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False)
    >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
    >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
    >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
    >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
    >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
    >>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False
    >>> nipype_epicorrect.inputs.inputnode.ref_num = 0
    >>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0
    >>> nipype_epicorrect.run() # doctest: +SKIP
    Inputs::
        inputnode.in_file - The volume acquired with EPI sequence
        inputnode.fieldmap_mag - The magnitude of the fieldmap
        inputnode.fieldmap_pha - The phase difference of the fieldmap
        inputnode.te_diff - Time difference between TE in ms.
        inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence
        inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y)
        inputnode.epi_rev_encoding - True if it is acquired with reverse encoding
        inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA)
        inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)
        inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI)
    Outputs::
        outputnode.epi_corrected
    Optional arguments::
        fieldmap_registration - True if registration to fieldmap should be done (default False)
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file',
                                                      'fieldmap_mag',
                                                      'fieldmap_pha',
                                                      'te_diff',
                                                      'epi_echospacing',
                                                      'epi_ph_encoding_dir',
                                                      'epi_rev_encoding',
                                                      'pi_accel_factor',
                                                      'vsm_sigma',
                                                      'ref_num',
                                                      'unwarp_direction'
                                                      ]), name='inputnode')
    pipeline = pe.Workflow(name=name)
    # Keep first frame from magnitude
    select_mag = pe.Node(fsl.utils.ExtractROI(
        t_size=1, t_min=0), name='select_magnitude')
    # mask_brain
    mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude')
    mask_mag_dil = pe.Node(niu.Function(input_names=[
        'in_file'], output_names=['out_file'], function=_dilate_mask), name='mask_dilate')
    # Compute dwell time
    dwell_time = pe.Node(niu.Function(input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], output_names=[
        'dwell_time'], function=_compute_dwelltime), name='dwell_time')
    # Normalize phase diff to be [-pi, pi)
    norm_pha = pe.Node(niu.Function(input_names=['in_file'], output_names=[
        'out_file'], function=_prepare_phasediff), name='normalize_phasediff')
    # Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s
    prelude = pe.Node(fsl.PRELUDE(
        process3d=True), name='phase_unwrap')
    fill_phase = pe.Node(niu.Function(input_names=['in_file'], output_names=[
        'out_file'], function=_fill_phase), name='fill_phasediff')
    # to assure that vsm is same dimension as mag. The input only affects the output dimension.
    # The content of the input has no effect on the vsm. The de-warped mag volume is
    # meaningless and will be thrown away
    # fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s %
    # ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name)
    vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
    # Demean the voxel shift map within the (dilated) brain mask.
    vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
        'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
    # fugue_epi
    dwi_split = pe.Node(niu.Function(input_names=[
        'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')
    # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
    dwi_applyxfm = pe.MapNode(fsl.FUGUE(
        icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')
    # Merge back all volumes
    dwi_merge = pe.Node(fsl.utils.Merge(
        dimension='t'), name='dwi_merge')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_corrected']),
        name='outputnode')
    # Main graph: fieldmap -> unwrapped phase -> voxel shift map (VSM) ->
    # per-volume FUGUE unwarping -> merged, corrected 4D image.
    pipeline.connect([
        (inputnode, dwell_time, [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), ('epi_rev_encoding', 'is_reverse_encoding')])
        ,(inputnode, select_mag, [('fieldmap_mag', 'in_file')])
        ,(inputnode, norm_pha, [('fieldmap_pha', 'in_file')])
        ,(select_mag, mask_mag, [('roi_file', 'in_file')])
        ,(mask_mag, mask_mag_dil, [('mask_file', 'in_file')])
        ,(select_mag, prelude, [('roi_file', 'magnitude_file')])
        ,(norm_pha, prelude, [('out_file', 'phase_file')])
        ,(mask_mag_dil, prelude, [('out_file', 'mask_file')])
        ,(prelude, fill_phase, [('unwrapped_phase_file', 'in_file')])
        ,(inputnode, vsm, [('fieldmap_mag', 'in_file')])
        ,(fill_phase, vsm, [('out_file', 'phasemap_file')])
        ,(inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), ('vsm_sigma', 'smooth2d')])
        ,(dwell_time, vsm, [(('dwell_time', _ms2sec), 'dwell_time')])
        ,(mask_mag_dil, vsm, [('out_file', 'mask_file')])
        ,(mask_mag_dil, vsm_mean, [('out_file', 'mask_file')])
        ,(vsm, vsm_mean, [('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file')])
        ,(inputnode, dwi_split, [('in_file', 'in_file')])
        ,(dwi_split, dwi_applyxfm, [('out_files', 'in_file')])
        ,(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')])
        ,(dwi_merge, outputnode, [('merged_file', 'epi_corrected')])
        ])
    if fieldmap_registration:
        """ Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag
            Pre-condition: forward warp the mag in order to reg with func. What does mask do here?
        """
        # Select reference volume from EPI (B0 in dMRI and a middle frame in
        # fMRI)
        select_epi = pe.Node(fsl.utils.ExtractROI(
            t_size=1), name='select_epi')
        # fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name,
        # vsmmag_name, mask_name ), log ) # Forward Map
        vsm_fwd = pe.Node(fsl.FUGUE(
            save_warped=True), name='vsm_fwd')
        vsm_reg = pe.Node(fsl.FLIRT(bins=256, cost='corratio', dof=6, interp='spline', searchr_x=[
            -10, 10], searchr_y=[-10, 10], searchr_z=[-10, 10]), name='vsm_registration')
        # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out )
        vsm_applyxfm = pe.Node(fsl.ApplyXfm(
            interp='spline'), name='vsm_apply_xfm')
        # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out )
        msk_applyxfm = pe.Node(fsl.ApplyXfm(
            interp='nearestneighbour'), name='msk_apply_xfm')
        # When registering, the VSM and mask are resampled into the EPI
        # reference space before FUGUE uses them.
        pipeline.connect([
            (inputnode, select_epi, [('in_file', 'in_file'), ('ref_num', 't_min')])
            ,(select_epi, vsm_reg, [('roi_file', 'reference')])
            ,(vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')])
            ,(mask_mag_dil, vsm_fwd, [('out_file', 'mask_file')])
            ,(inputnode, vsm_fwd, [('fieldmap_mag', 'in_file')])
            ,(vsm_fwd, vsm_reg, [('warped_file', 'in_file')])
            ,(vsm_reg, msk_applyxfm, [('out_matrix_file', 'in_matrix_file')])
            ,(select_epi, msk_applyxfm, [('roi_file', 'reference')])
            ,(mask_mag_dil, msk_applyxfm, [('out_file', 'in_file')])
            ,(vsm_reg, vsm_applyxfm, [('out_matrix_file', 'in_matrix_file')])
            ,(select_epi, vsm_applyxfm, [('roi_file', 'reference')])
            ,(vsm_mean, vsm_applyxfm, [('out_file', 'in_file')])
            ,(msk_applyxfm, dwi_applyxfm, [('out_file', 'mask_file')])
            ,(vsm_applyxfm, dwi_applyxfm, [('out_file', 'shift_in_file')])
            ])
    else:
        # Without registration the native-space mask and demeaned VSM feed
        # FUGUE directly.
        pipeline.connect([
            (mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')])
            ,( vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')])
            ])
    return pipeline
def _rotate_bvecs(in_bvec, in_matrix):
import os
import numpy as np
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_rotated.bvec' % name)
bvecs = np.loadtxt(in_bvec)
new_bvecs = np.zeros(shape=bvecs.T.shape) #pre-initialise array, 3 col format
for i, vol_matrix in enumerate(in_matrix[0::]): #start index at 0
bvec = np.matrix(bvecs[:, i])
rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3])
new_bvecs[i] = (np.array(rot * bvec.T).T)[0] #fill each volume with x,y,z as we go along
np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
return out_file
def _cat_logs(in_files):
import shutil
import os
name, fext = os.path.splitext(os.path.basename(in_files[0]))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_ecclog.log' % name)
out_str = ''
with open(out_file, 'wb') as totallog:
for i, fname in enumerate(in_files):
totallog.write('\n\npreprocessing %d\n' % i)
with open(fname) as inlog:
for line in inlog:
totallog.write(line)
return out_file
def _compute_dwelltime(dwell_time=0.68, pi_factor=1.0, is_reverse_encoding=False):
dwell_time *= (1.0/pi_factor)
if is_reverse_encoding:
dwell_time *= -1.0
return dwell_time
def _effective_echospacing( dwell_time, pi_factor=1.0 ):
dwelltime = 1.0e-3 * dwell_time * ( 1.0/pi_factor )
return dwelltime
def _prepare_phasediff(in_file):
    """Linearly rescale a phase-difference image onto [-pi, pi].

    The global maximum maps to +pi and the global minimum to -pi. The
    rescaled image is saved as '<name>_2pi.nii.gz' and its path returned.
    """
    import nibabel as nib
    import os
    import numpy as np
    img = nib.load(in_file)
    data = img.get_data()
    flat = data.reshape(-1)
    vmax = np.max(flat)
    vmin = np.min(flat)
    # Affine map: scale so the value range spans 2*pi, shift so vmax -> pi.
    scale = (2.0 * np.pi) / (vmax - vmin)
    offset = np.pi - (scale * vmax)
    rescaled = data * scale + offset
    base, ext = os.path.splitext(os.path.basename(in_file))
    if ext == '.gz':
        base, _ = os.path.splitext(base)
    out_file = os.path.abspath('./%s_2pi.nii.gz' % base)
    nib.save(nib.Nifti1Image(rescaled, img.get_affine(), img.get_header()),
             out_file)
    return out_file
def _dilate_mask(in_file, iterations=4):
    """Binary-dilate a mask image and save it as '<name>_dil.nii.gz'."""
    import nibabel as nib
    import scipy.ndimage as ndimage
    import os
    img = nib.load(in_file)
    # Overwrite the in-memory data with the dilated binary mask before
    # saving the same image object back out.
    img._data = ndimage.binary_dilation(img.get_data(), iterations=iterations)
    base, ext = os.path.splitext(os.path.basename(in_file))
    if ext == '.gz':
        base, _ = os.path.splitext(base)
    out_file = os.path.abspath('./%s_dil.nii.gz' % base)
    nib.save(img, out_file)
    return out_file
def _fill_phase(in_file):
    """Concatenate an all-zero volume of the same shape onto the image.

    Saves the result as '<name>_fill.nii.gz' and returns its path.
    """
    import nibabel as nib
    import os
    import numpy as np
    img = nib.load(in_file)
    zeros = np.zeros(img.get_shape())
    dummy = nib.Nifti1Image(zeros, img.get_affine(), img.get_header())
    filled = nib.funcs.concat_images((img, dummy))
    base, ext = os.path.splitext(os.path.basename(in_file))
    if ext == '.gz':
        base, _ = os.path.splitext(base)
    out_file = os.path.abspath('./%s_fill.nii.gz' % base)
    nib.save(filled, out_file)
    return out_file
def _vsm_remove_mean(in_file, mask_file, in_unwarped):
    """Subtract the within-mask mean from a voxel shift map.

    Voxels outside the mask are zeroed and excluded from the mean via a
    masked array. ``in_unwarped`` is accepted for pipeline wiring but not
    used here. Saves and returns '<name>_demeaned.nii.gz'.
    """
    import nibabel as nib
    import os
    import numpy as np
    import numpy.ma as ma
    img = nib.load(in_file)
    mask = nib.load(mask_file).get_data()
    data = img.get_data()
    data[mask == 0] = 0
    # Mask the zeros so they do not contribute to the mean.
    masked = ma.masked_values(data.reshape(-1), 0.0)
    demeaned = masked - masked.mean()
    img._data = demeaned.reshape(img.get_shape())
    base, ext = os.path.splitext(os.path.basename(in_file))
    if ext == '.gz':
        base, _ = os.path.splitext(base)
    out_file = os.path.abspath('./%s_demeaned.nii.gz' % base)
    nib.save(img, out_file)
    return out_file
def _ms2sec(val):
return val*1e-3;
def _split_dwi(in_file):
    """Split a 4D image into 3D volumes saved as '<name>_NNN.nii.gz'.

    Returns the list of output paths in frame order.
    """
    import nibabel as nib
    import os
    volumes = nib.funcs.four_to_three(nib.load(in_file))
    base, ext = os.path.splitext(os.path.basename(in_file))
    if ext == '.gz':
        base, _ = os.path.splitext(base)
    out_files = []
    for idx, volume in enumerate(volumes):
        out_path = os.path.abspath('./%s_%03d.nii.gz' % (base, idx))
        nib.save(volume, out_path)
        out_files.append(out_path)
    return out_files
| |
from django.urls import reverse
from django.shortcuts import redirect, get_object_or_404,render
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import logout as Signout
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect,HttpResponse
from userena.forms import (SignupForm, SignupFormOnlyEmail, AuthenticationForm,
ChangeEmailForm, EditProfileForm,InviteForm)
from userena.models import UserenaSignup
from userena.decorators import secure_required
from userena.utils import signin_redirect, get_profile_model, get_user_profile
from userena import signals as userena_signals
from userena import settings as userena_settings
from guardian.decorators import permission_required_or_403
from django.contrib.auth.decorators import login_required
import warnings
class ExtraContextTemplateView(TemplateView):
    """Template view that merges ``extra_context`` into the context."""
    extra_context = None

    def get_context_data(self, *args, **kwargs):
        """Extend the default context with ``extra_context`` when set."""
        ctx = super(ExtraContextTemplateView, self).get_context_data(*args, **kwargs)
        if self.extra_context:
            ctx.update(self.extra_context)
        return ctx

    # this view is used in POST requests, e.g. signup when the form is not valid
    post = TemplateView.get
class InvitedUsersListView(ListView):
    """List the users invited by the currently signed-in user."""
    context_object_name = 'invited_user_list'
    page = 1
    paginate_by = 50
    template_name = 'userena/list_invited_users.html'
    extra_context = None

    def get_context_data(self, **kwargs):
        """Add paging info and the remaining invitation ticket count."""
        context = super(InvitedUsersListView, self).get_context_data(**kwargs)
        try:
            current_page = int(self.request.GET.get('page', None))
        except (TypeError, ValueError):
            current_page = self.page
        self.extra_context = self.extra_context or dict()
        context['page'] = current_page
        context['paginate_by'] = self.paginate_by
        context['extra_context'] = self.extra_context
        profile = get_profile_model().objects.get(user=self.request.user)
        context['numOfRemainingInvitationTicket'] = profile.get_remaining_invite_tickets_number()
        return context

    def get_queryset(self):
        """Return the users invited by the requesting user's profile."""
        profile = get_profile_model().objects.get(user=self.request.user)
        return profile.invited_users.all()
class ProfileListView(ListView):
    """List all profiles visible to the requesting user."""
    context_object_name = 'profile_list'
    page = 1
    paginate_by = 50
    template_name = userena_settings.USERENA_PROFILE_LIST_TEMPLATE
    extra_context = None

    def get_context_data(self, **kwargs):
        """Add paging info; 404 when listing is disabled for non-staff."""
        context = super(ProfileListView, self).get_context_data(**kwargs)
        try:
            current_page = int(self.request.GET.get('page', None))
        except (TypeError, ValueError):
            current_page = self.page
        if userena_settings.USERENA_DISABLE_PROFILE_LIST \
           and not self.request.user.is_staff:
            raise Http404
        self.extra_context = self.extra_context or dict()
        context['page'] = current_page
        context['paginate_by'] = self.paginate_by
        context['extra_context'] = self.extra_context
        return context

    def get_queryset(self):
        """Return the profiles visible to the requesting user."""
        return get_profile_model().objects.get_visible_profiles(
            self.request.user).select_related()
@secure_required
@login_required
def invite_new_user(request, invite_form=InviteForm,
                    template_name='userena/invite_new_user.html',
                    success_url='userena_list_invited_users',
                    extra_context=None):
    """Let a privileged user send an invitation to a new user.

    Requires the ``invite_user`` permission; raises ``PermissionDenied``
    otherwise. On a valid POST the invitation is saved and the request
    is redirected to ``success_url``; a failed save yields HTTP 500.
    """
    if not request.user.has_perm('invite_user'):
        raise PermissionDenied
    if not extra_context:
        extra_context = dict()
    if request.method == 'POST':
        form = invite_form(request.user, request.POST, request.FILES)
        if form.is_valid():
            # form.save() returns True when the invitation went through.
            if form.save():
                return redirect(success_url)
            return HttpResponse(status=500)
        extra_context['form'] = form
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
    extra_context['form'] = invite_form(request.user)
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
@secure_required
@login_required
def list_invited_users(request, template_name='userena/list_invited_users.html'):
    """Render the list of users invited by the signed-in user."""
    view = InvitedUsersListView.as_view(template_name=template_name)
    return view(request)
@secure_required
def signup(request, signup_form=SignupForm,
           template_name='userena/signup_form.html', success_url=None,
           extra_context=None):
    """
    Signup of an account.
    Signup requiring a username, email and password. After signup a user gets
    an email with an activation link used to activate their account. After
    successful signup redirects to ``success_url``.
    :param signup_form:
        Form that will be used to sign a user. Defaults to userena's
        :class:`SignupForm`.
    :param template_name:
        String containing the template name that will be used to display the
        signup form. Defaults to ``userena/signup_form.html``.
    :param success_url:
        String containing the URI which should be redirected to after a
        successful signup. If not supplied will redirect to
        ``userena_signup_complete`` view.
    :param extra_context:
        Dictionary containing variables which are added to the template
        context. Defaults to a dictionary with a ``form`` key containing the
        ``signup_form``.
    **Context**
    ``form``
        Form supplied by ``signup_form``.
    """
    # If signup is disabled, return 403
    if userena_settings.USERENA_DISABLE_SIGNUP:
        raise PermissionDenied
    # If no usernames are wanted and the default form is used, fallback to the
    # default form that doesn't display to enter the username.
    if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
        signup_form = SignupFormOnlyEmail
    form = signup_form()
    if request.method == 'POST':
        # Re-bind the form with the submitted data and files.
        form = signup_form(request.POST, request.FILES)
        if form.is_valid():
            user = form.save()
            # Send the signup complete signal
            userena_signals.signup_complete.send(sender=None,
                                                 user=user)
            if success_url: redirect_to = success_url
            else: redirect_to = reverse('userena_signup_complete',
                                        kwargs={'username': user.username})
            # A new signed user should logout the old one.
            if request.user.is_authenticated():
                logout(request)
            # Sign the fresh user in immediately when no activation step is
            # required and auto-signin is enabled.
            if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
                not userena_settings.USERENA_ACTIVATION_REQUIRED):
                user = authenticate(identification=user.email, check_password=False)
                login(request, user)
            return redirect(redirect_to)
    # GET, or invalid POST: render the (possibly bound) form.
    if not extra_context: extra_context = dict()
    extra_context['form'] = form
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
@secure_required
def activate(request, activation_key,
             template_name='userena/activate_fail.html',
             retry_template_name='userena/activate_retry.html',
             success_url=None, extra_context=None):
    """
    Activate a user with an activation key.
    The key is a SHA1 string. When the SHA1 is found with an
    :class:`UserenaSignup`, the :class:`User` of that account will be
    activated. After a successful activation the view will redirect to
    ``success_url``. If the SHA1 is not found, the user will be shown the
    ``template_name`` template displaying a fail message.
    If the SHA1 is found but expired, ``retry_template_name`` is used instead,
    so the user can proceed to :func:`activate_retry` to get a new activation key.
    :param activation_key:
        String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
        long, with 4 bits per character this makes it --160/4-- 40 characters
        long.
    :param template_name:
        String containing the template name that is used when the
        ``activation_key`` is invalid and the activation fails. Defaults to
        ``userena/activate_fail.html``.
    :param retry_template_name:
        String containing the template name that is used when the
        ``activation_key`` is expired. Defaults to
        ``userena/activate_retry.html``.
    :param success_url:
        String containing the URL where the user should be redirected to after
        a successful activation. Will replace ``%(username)s`` with string
        formatting if supplied. If ``success_url`` is left empty, will direct
        to ``userena_profile_detail`` view.
    :param extra_context:
        Dictionary containing variables which could be added to the template
        context. Default to an empty dictionary.
    """
    try:
        # Key not expired (or retries disabled): attempt activation now.
        if (not UserenaSignup.objects.check_expired_activation(activation_key)
            or not userena_settings.USERENA_ACTIVATION_RETRY):
            user = UserenaSignup.objects.activate_user(activation_key)
            if user:
                # Sign the user in.
                auth_user = authenticate(identification=user.email,
                                         check_password=False)
                login(request, auth_user)
                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(request, _('Your account has been activated and you have been signed in.'),
                                     fail_silently=True)
                if success_url: redirect_to = success_url % {'username': user.username }
                else: redirect_to = reverse('userena_profile_detail',
                                            kwargs={'username': user.username})
                return redirect(redirect_to)
            else:
                # Activation failed: render the failure template.
                if not extra_context: extra_context = dict()
                return ExtraContextTemplateView.as_view(template_name=template_name,
                                                        extra_context=extra_context)(
                    request)
        else:
            # Expired key with retries enabled: offer to reissue a new key.
            if not extra_context: extra_context = dict()
            extra_context['activation_key'] = activation_key
            return ExtraContextTemplateView.as_view(template_name=retry_template_name,
                                                    extra_context=extra_context)(request)
    except UserenaSignup.DoesNotExist:
        # Unknown key: render the failure template.
        if not extra_context: extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
@secure_required
def activate_retry(request, activation_key,
                   template_name='userena/activate_retry_success.html',
                   extra_context=None):
    """Reissue a new activation key for an expired ``activation_key``.

    When ``USERENA_ACTIVATION_RETRY`` is disabled, the key is unknown,
    not actually expired, or reissuing fails, the user is redirected to
    the :func:`activate` view so the error can be displayed there.

    :param activation_key:
        String of a SHA1 string of 40 characters long.
    :param template_name:
        Template rendered once a fresh key has been issued. Defaults to
        ``userena/activate_retry_success.html``.
    :param extra_context:
        Dictionary of extra template variables. Defaults to empty.
    """
    def _back_to_activate():
        # Let the activate view handle the error display.
        return redirect(reverse('userena_activate', args=(activation_key,)))

    if not userena_settings.USERENA_ACTIVATION_RETRY:
        return _back_to_activate()
    try:
        if not UserenaSignup.objects.check_expired_activation(activation_key):
            return _back_to_activate()
        if not UserenaSignup.objects.reissue_activation(activation_key):
            return _back_to_activate()
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
    except UserenaSignup.DoesNotExist:
        return _back_to_activate()
@secure_required
def activate_invited_user(request, invitation_key,
                          template_name='userena/invite_fail.html',
                          retry_template_name='userena/invite_retry.html',
                          success_url=None, extra_context=None):
    """
    Activate an invited user with an invitation key.
    The key is a SHA1 string. When the SHA1 is found with an
    :class:`UserenaSignup`, the :class:`User` of that account will be
    activated. After a successful activation the view will redirect to
    ``success_url``. If the SHA1 is not found, the user will be shown the
    ``template_name`` template displaying a fail message.
    If the SHA1 is found but expired, ``retry_template_name`` is used instead,
    so the user can proceed to :func:`activate_retry` to get a new activation key.
    :param invitation_key:
        String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
        long, with 4 bits per character this makes it --160/4-- 40 characters
        long.
    :param template_name:
        String containing the template name that is used when the
        ``activation_key`` is invalid and the activation fails. Defaults to
        ``userena/activate_fail.html``.
    :param retry_template_name:
        String containing the template name that is used when the
        ``activation_key`` is expired. Defaults to
        ``userena/activate_retry.html``.
    :param success_url:
        String containing the URL where the user should be redirected to after
        a successful activation. Will replace ``%(username)s`` with string
        formatting if supplied. If ``success_url`` is left empty, will direct
        to ``userena_profile_detail`` view.
    :param extra_context:
        Dictionary containing variables which could be added to the template
        context. Default to an empty dictionary.
    """
    try:
        # Key not expired (or retries disabled): attempt activation now.
        if (not UserenaSignup.objects.check_expired_invitation(invitation_key)
            or not userena_settings.USERENA_ACTIVATION_RETRY):
            user = UserenaSignup.objects.activate_invited_user(invitation_key)
            if user:
                # Sign the user in.
                auth_user = authenticate(identification=user.email,
                                         check_password=False)
                login(request, auth_user)
                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(request, _('Your account has been activated and you have been signed in.'),
                                     fail_silently=True)
                if success_url: redirect_to = success_url % {'username': user.username }
                else: redirect_to = reverse('userena_profile_detail',
                                            kwargs={'username': user.username})
                return redirect(redirect_to)
            else:
                # Activation failed: render the failure template.
                if not extra_context: extra_context = dict()
                return ExtraContextTemplateView.as_view(template_name=template_name,
                                                        extra_context=extra_context)(
                    request)
        else:
            # Expired key with retries enabled: offer to reissue a new key.
            if not extra_context: extra_context = dict()
            extra_context['invitation_key'] = invitation_key
            return ExtraContextTemplateView.as_view(template_name=retry_template_name,
                                                    extra_context=extra_context)(request)
    except UserenaSignup.DoesNotExist:
        # Unknown key: render the failure template.
        if not extra_context: extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
@secure_required
def email_confirm(request, confirmation_key,
                  template_name='userena/email_confirm_fail.html',
                  success_url=None, extra_context=None):
    """Confirm a new email address using ``confirmation_key``.

    Runs :func:`UserenaSignup.objects.confirm_email`; when a user is
    returned their new address is set and the request is redirected to
    ``success_url`` (or the ``userena_email_confirm_complete`` view).
    An unmatched key renders ``template_name`` instead.

    :param confirmation_key:
        SHA1 string verifying the new email address.
    :param template_name:
        Template rendered when confirmation fails.
    :param success_url:
        URL redirected to after a successful confirmation.
    :param extra_context:
        Extra variables passed to ``template_name``.
    """
    user = UserenaSignup.objects.confirm_email(confirmation_key)
    if not user:
        # Unknown or stale key: show the failure page.
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
    if userena_settings.USERENA_USE_MESSAGES:
        messages.success(request, _('Your email address has been changed.'),
                         fail_silently=True)
    if success_url:
        redirect_to = success_url
    else:
        redirect_to = reverse('userena_email_confirm_complete',
                              kwargs={'username': user.username})
    return redirect(redirect_to)
def direct_to_user_template(request, username, template_name,
                            extra_context=None):
    """Render ``template_name`` for the user looked up by ``username``.

    Adds ``viewed_user`` and ``profile`` to the template context; used
    after e.g. signup, activation and e-mail confirmation. Raises 404
    when no user matches ``username`` (case-insensitive match).

    :param username:
        Username of the user that made the action.
    :param template_name:
        Name of the template to render.
    :param extra_context:
        Extra variables passed to the template.
    """
    viewed_user = get_object_or_404(get_user_model(), username__iexact=username)
    context = extra_context if extra_context else dict()
    context['viewed_user'] = viewed_user
    context['profile'] = get_user_profile(user=viewed_user)
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=context)(request)
def disabled_account(request, username, template_name, extra_context=None):
    """Show the disabled-account template for an inactive user.

    Raises 404 when no user matches ``username`` or when the matched
    user is still active (the page only applies to disabled accounts).
    Adds ``viewed_user`` and ``profile`` to the template context.

    :param username:
        Username of the user that made the action.
    :param template_name:
        Name of the template to render.
    :param extra_context:
        Extra variables passed to the template.
    """
    viewed_user = get_object_or_404(get_user_model(), username__iexact=username)
    if viewed_user.is_active:
        raise Http404
    context = extra_context if extra_context else dict()
    context['viewed_user'] = viewed_user
    context['profile'] = get_user_profile(user=viewed_user)
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=context)(request)
@secure_required
def signin(request, auth_form=AuthenticationForm,
           template_name='userena/signin_form.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           redirect_signin_function=signin_redirect, extra_context=None):
    """
    Signin using email or username with password.
    Signs a user in by combining email/username with password. If the
    combination is correct and the user :func:`is_active` the
    :func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is is
    trying the login. The returned value of the function will be the URL that
    is redirected to.
    A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``.
    :param auth_form:
        Form to use for signing the user in. Defaults to the
        :class:`AuthenticationForm` supplied by userena.
    :param template_name:
        String defining the name of the template to use. Defaults to
        ``userena/signin_form.html``.
    :param redirect_field_name:
        Form field name which contains the value for a redirect to the
        succeeding page. Defaults to ``next`` and is set in
        ``REDIRECT_FIELD_NAME`` setting.
    :param redirect_signin_function:
        Function which handles the redirect. This functions gets the value of
        ``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
        must return a string which specifies the URI to redirect to.
    :param extra_context:
        A dictionary containing extra variables that should be passed to the
        rendered template. The ``form`` key is always the ``auth_form``.
    **Context**
    ``form``
        Form used for authentication supplied by ``auth_form``.
    """
    form = auth_form()
    if request.method == 'POST':
        form = auth_form(request.POST, request.FILES)
        if form.is_valid():
            identification, password, remember_me = (form.cleaned_data['identification'],
                                                     form.cleaned_data['password'],
                                                     form.cleaned_data['remember_me'])
            # NOTE(review): authenticate() is assumed to succeed because the
            # form already validated the credentials; a None return would
            # raise AttributeError on 'user.is_active' below -- confirm the
            # form guarantees this.
            user = authenticate(identification=identification,
                                password=password)
            if user.is_active:
                login(request, user)
                # Extend the session when "remember me" was ticked, otherwise
                # expire it at browser close (set_expiry(0)).
                if remember_me:
                    request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
                else: request.session.set_expiry(0)
                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(request, _('You have been signed in.'),
                                     fail_silently=True)
                #send a signal that a user has signed in
                userena_signals.account_signin.send(sender=None, user=user)
                # Whereto now?
                redirect_to = redirect_signin_function(
                    request.GET.get(redirect_field_name,
                                    request.POST.get(redirect_field_name)), user)
                return redirect(redirect_to)
            else:
                # Inactive accounts are sent to the disabled-account page.
                return redirect(reverse('userena_disabled',
                                        kwargs={'username': user.username}))
    # GET, or invalid POST: render the (possibly bound) form.
    if not extra_context: extra_context = dict()
    extra_context.update({
        'form': form,
        'next': request.GET.get(redirect_field_name,
                                request.POST.get(redirect_field_name)),
    })
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
@secure_required
def signout(request, next_page=userena_settings.USERENA_REDIRECT_ON_SIGNOUT,
            template_name='userena/signout.html', *args, **kwargs):
    """
    Sign the current user out.

    Flashes the message ``You have been signed out.`` when messaging is
    enabled and the user is authenticated, emits the ``account_signout``
    signal, and then delegates the actual logout (and redirect/template
    rendering) to Django's logout view.

    :param next_page:
        A string which specifies the URI to redirect to.

    :param template_name:
        String defining the name of the template to use. Defaults to
        ``userena/signout.html``.

    """
    if userena_settings.USERENA_USE_MESSAGES and request.user.is_authenticated():  # pragma: no cover
        messages.success(request, _('You have been signed out.'), fail_silently=True)
    userena_signals.account_signout.send(sender=None, user=request.user)
    return Signout(request, next_page=next_page, template_name=template_name, *args, **kwargs)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def email_change(request, username, email_form=ChangeEmailForm,
                 template_name='userena/email_form.html', success_url=None,
                 extra_context=None):
    """
    Change email address

    :param username:
        String of the username which specifies the current account.

    :param email_form:
        Form that will be used to change the email address. Defaults to
        :class:`ChangeEmailForm` supplied by userena.

    :param template_name:
        String containing the template to be used to display the email form.
        Defaults to ``userena/email_form.html``.

    :param success_url:
        Named URL where the user will get redirected to when successfully
        changing their email address. When not supplied will redirect to
        ``userena_email_complete`` URL.

    :param extra_context:
        Dictionary containing extra variables that can be used to render the
        template. The ``form`` key is always the form supplied by the keyword
        argument ``form`` and the ``user`` key by the user whose email address
        is being changed.

    **Context**

    ``form``
        Form that is used to change the email address supplied by ``form``.

    ``account``
        Instance of the ``Account`` whose email address is about to be changed.

    **Todo**

    Need to have per-object permissions, which enables users with the correct
    permissions to alter the email address of others.

    """
    user = get_object_or_404(get_user_model(), username__iexact=username)
    # Remember the old address so the signal can report the transition.
    prev_email = user.email
    form = email_form(user)

    if request.method == 'POST':
        form = email_form(user, request.POST, request.FILES)

        if form.is_valid():
            form.save()

            # Send a signal that the email has changed. This fires for every
            # successful change — previously it was only emitted when a
            # custom ``success_url`` was supplied, silently skipping
            # listeners on the default redirect path.
            userena_signals.email_change.send(sender=None,
                                              user=user,
                                              prev_email=prev_email,
                                              new_email=user.email)

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse('userena_email_change_complete',
                                      kwargs={'username': user.username})
            return redirect(redirect_to)

    if not extra_context: extra_context = dict()
    extra_context['form'] = form
    extra_context['profile'] = get_user_profile(user=user)
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def password_change(request, username, template_name='userena/password_form.html',
                    pass_form=PasswordChangeForm, success_url=None, extra_context=None):
    """ Change password of user.

    Mirror of :func:`contrib.auth.views.password_change`, except that the
    username selects whose password is changed. This keeps the URLs logical
    (and REST) across the application and lets administrators change other
    users' passwords later on.

    :param username:
        String supplying the username of the user who's password is about to be
        changed.

    :param template_name:
        String of the name of the template that is used to display the password
        change form. Defaults to ``userena/password_form.html``.

    :param pass_form:
        Form used to change password. Default is the form supplied by Django
        itself named ``PasswordChangeForm``.

    :param success_url:
        Named URL that is passed onto a :func:`reverse` function with
        ``username`` of the active user. Defaults to the
        ``userena_password_complete`` URL.

    :param extra_context:
        Dictionary of extra variables that are passed on to the template. The
        ``form`` key is always used by the form supplied by ``pass_form``.

    **Context**

    ``form``
        Form used to change the password.

    """
    user = get_object_or_404(get_user_model(), username__iexact=username)

    form = pass_form(user=user)

    if request.method == "POST":
        form = pass_form(user=user, data=request.POST)
        if form.is_valid():
            form.save()

            # Notify listeners that the password was successfully changed.
            userena_signals.password_complete.send(sender=None, user=user)

            # A caller-supplied URL wins over the default completion page.
            redirect_to = success_url or reverse(
                'userena_password_change_complete',
                kwargs={'username': user.username})
            return redirect(redirect_to)

    if not extra_context: extra_context = dict()
    extra_context['form'] = form
    extra_context['profile'] = get_user_profile(user=user)
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_profile', (get_profile_model(), 'user__username', 'username'))
def profile_edit(request, username, edit_profile_form=EditProfileForm,
                 template_name='userena/profile_form.html', success_url=None,
                 extra_context=None, **kwargs):
    """
    Edit profile.

    Edits a profile selected by the supplied username. First checks
    permissions if the user is allowed to edit this profile, if denied will
    show a 404. When the profile is successfully edited will redirect to
    ``success_url``.

    :param username:
        Username of the user which profile should be edited.

    :param edit_profile_form:
        Form that is used to edit the profile. The :func:`EditProfileForm.save`
        method of this form will be called when the form
        :func:`EditProfileForm.is_valid`.  Defaults to :class:`EditProfileForm`
        from userena.

    :param template_name:
        String of the template that is used to render this view. Defaults to
        ``userena/edit_profile_form.html``.

    :param success_url:
        Named URL which will be passed on to a django ``reverse`` function after
        the form is successfully saved. Defaults to the ``userena_detail`` url.

    :param extra_context:
        Dictionary containing variables that are passed on to the
        ``template_name`` template. ``form`` key will always be the form used
        to edit the profile, and the ``profile`` key is always the edited
        profile.

    **Context**

    ``form``
        Form that is used to alter the profile.

    ``profile``
        Instance of the ``Profile`` that is edited.

    """
    user = get_object_or_404(get_user_model(), username__iexact=username)

    profile = get_user_profile(user=user)

    # First/last name live on the User model, so they are seeded into the
    # profile form via ``initial``.
    user_initial = {'first_name': user.first_name,
                    'last_name': user.last_name}

    form = edit_profile_form(instance=profile, initial=user_initial)

    if request.method == 'POST':
        form = edit_profile_form(request.POST, request.FILES, instance=profile,
                                 initial=user_initial)

        if form.is_valid():
            profile = form.save()

            if userena_settings.USERENA_USE_MESSAGES:
                messages.success(request, _('Your profile has been updated.'),
                                 fail_silently=True)

            # Send a signal that the profile has changed. Emit it for every
            # successful save — previously it was only sent when a custom
            # ``success_url`` was supplied, so listeners missed edits that
            # used the default redirect.
            userena_signals.profile_change.send(sender=None, user=user)

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse('userena_profile_detail',
                                      kwargs={'username': username})
            return redirect(redirect_to)

    if not extra_context: extra_context = dict()
    extra_context['form'] = form
    extra_context['profile'] = profile
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
def profile_detail(request, username,
                   template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE,
                   extra_context=None, **kwargs):
    """
    Detailed view of an user.

    :param username:
        String of the username of which the profile should be viewed.

    :param template_name:
        String representing the template name that should be used to display
        the profile.

    :param extra_context:
        Dictionary of variables which should be supplied to the template. The
        ``profile`` key is always the current profile.

    **Context**

    ``profile``
        Instance of the currently viewed ``Profile``.

    """
    user = get_object_or_404(get_user_model(), username__iexact=username)
    profile = get_user_profile(user=user)

    # Only expose the profile when the requesting user is allowed to see it.
    if not profile.can_view_profile(request.user):
        raise PermissionDenied

    if not extra_context:
        extra_context = dict()
    extra_context['profile'] = profile
    extra_context['hide_email'] = userena_settings.USERENA_HIDE_EMAIL
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
def profile_list(request, page=1, template_name='userena/profile_list.html',
                 paginate_by=50, extra_context=None, **kwargs):  # pragma: no cover
    """
    Returns a list of all profiles that are public.

    It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
    to ``True`` in your settings.

    :param page:
        Integer of the active page used for pagination. Defaults to the first
        page.

    :param template_name:
        String defining the name of the template that is used to render the
        list of all users. Defaults to ``userena/list.html``.

    :param paginate_by:
        Integer defining the amount of displayed profiles per page. Defaults to
        50 profiles per page.

    :param extra_context:
        Dictionary of variables that are passed on to the ``template_name``
        template.

    **Context**

    ``profile_list``
        A list of profiles.

    ``is_paginated``
        A boolean representing whether the results are paginated.

    If the result is paginated. It will also contain the following variables.

    ``paginator``
        An instance of ``django.core.paginator.Paginator``.

    ``page_obj``
        An instance of ``django.core.paginator.Page``.

    """
    warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)

    # Prefer the page number from the query string; keep the ``page``
    # argument when it is absent or not an integer.
    try:
        page = int(request.GET.get('page', None))
    except (TypeError, ValueError):
        pass

    if userena_settings.USERENA_DISABLE_PROFILE_LIST \
       and not request.user.is_staff:
        raise Http404

    queryset = get_profile_model().objects.get_visible_profiles(request.user)

    if not extra_context:
        extra_context = dict()
    return ProfileListView.as_view(queryset=queryset,
                                   paginate_by=paginate_by,
                                   page=page,
                                   template_name=template_name,
                                   extra_context=extra_context,
                                   **kwargs)(request)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT train function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _squared_loss(label, unused_weights, predictions):
  """Unweighted loss implementation.

  Returns the per-example sum of squared residuals, keeping the reduced
  axis so the result is a column tensor.
  """
  return math_ops.reduce_sum(
      math_ops.square(predictions - label), 1, keep_dims=True)
class GbdtTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Delegates to the base TensorFlowTestCase set-up; no extra fixtures."""
    super(GbdtTest, self).setUp()
  def testExtractFeatures(self):
    """Tests feature extraction without feature columns (pass-through)."""
    with self.test_session():
      # One feature of each supported kind: dense float, sparse float,
      # sparse int.
      features = {}
      features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
      features["sparse_float"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.float32),
          array_ops.zeros([2], dtypes.int64))
      features["sparse_int"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.int64),
          array_ops.zeros([2], dtypes.int64))
      # feature_columns=None: each feature is routed to its bucket unchanged.
      (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
       sparse_float_shapes, sparse_int_indices, sparse_int_values,
       sparse_int_shapes) = (gbdt_batch.extract_features(features, None))
      self.assertEqual(len(fc_names), 3)
      self.assertAllEqual(fc_names,
                          ["dense_float", "sparse_float", "sparse_int"])
      # Exactly one feature landed in each bucket.
      self.assertEqual(len(dense_floats), 1)
      self.assertEqual(len(sparse_float_indices), 1)
      self.assertEqual(len(sparse_float_values), 1)
      self.assertEqual(len(sparse_float_shapes), 1)
      self.assertEqual(len(sparse_int_indices), 1)
      self.assertEqual(len(sparse_int_values), 1)
      self.assertEqual(len(sparse_int_shapes), 1)
      # The extracted tensors must equal the inputs component-wise.
      self.assertAllEqual(dense_floats[0].eval(),
                          features["dense_float"].eval())
      self.assertAllEqual(sparse_float_indices[0].eval(),
                          features["sparse_float"].indices.eval())
      self.assertAllEqual(sparse_float_values[0].eval(),
                          features["sparse_float"].values.eval())
      self.assertAllEqual(sparse_float_shapes[0].eval(),
                          features["sparse_float"].dense_shape.eval())
      self.assertAllEqual(sparse_int_indices[0].eval(),
                          features["sparse_int"].indices.eval())
      self.assertAllEqual(sparse_int_values[0].eval(),
                          features["sparse_int"].values.eval())
      self.assertAllEqual(sparse_int_shapes[0].eval(),
                          features["sparse_int"].dense_shape.eval())
  def testExtractFeaturesWithTransformation(self):
    """Tests feature extraction when transformed through feature columns."""
    with self.test_session():
      features = {}
      features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
      features["sparse_float"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.float32),
          array_ops.zeros([2], dtypes.int64))
      features["sparse_categorical"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros(
              [2], dtypes.string), array_ops.zeros([2], dtypes.int64))
      # Matching feature columns; the categorical column hashes its string
      # values into integer buckets.
      feature_columns = set()
      feature_columns.add(layers.real_valued_column("dense_float"))
      feature_columns.add(
          layers.feature_column._real_valued_var_len_column(
              "sparse_float", is_sparse=True))
      feature_columns.add(
          feature_column_lib.sparse_column_with_hash_bucket(
              "sparse_categorical", hash_bucket_size=1000000))
      (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
       sparse_float_shapes, sparse_int_indices, sparse_int_values,
       sparse_int_shapes) = (gbdt_batch.extract_features(
           features, feature_columns))
      self.assertEqual(len(fc_names), 3)
      self.assertAllEqual(fc_names,
                          ["dense_float", "sparse_float", "sparse_categorical"])
      self.assertEqual(len(dense_floats), 1)
      self.assertEqual(len(sparse_float_indices), 1)
      self.assertEqual(len(sparse_float_values), 1)
      self.assertEqual(len(sparse_float_shapes), 1)
      self.assertEqual(len(sparse_int_indices), 1)
      self.assertEqual(len(sparse_int_values), 1)
      self.assertEqual(len(sparse_int_shapes), 1)
      self.assertAllEqual(dense_floats[0].eval(),
                          features["dense_float"].eval())
      self.assertAllEqual(sparse_float_indices[0].eval(),
                          features["sparse_float"].indices.eval())
      self.assertAllEqual(sparse_float_values[0].eval(),
                          features["sparse_float"].values.eval())
      self.assertAllEqual(sparse_float_shapes[0].eval(),
                          features["sparse_float"].dense_shape.eval())
      self.assertAllEqual(sparse_int_indices[0].eval(),
                          features["sparse_categorical"].indices.eval())
      # 397263 is the fixed hash bucket of the empty string for a bucket
      # size of 1000000 — pins the hashing behavior of the column.
      self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
      self.assertAllEqual(sparse_int_shapes[0].eval(),
                          features["sparse_categorical"].dense_shape.eval())
  def testTrainFnChiefNoBiasCentering(self):
    """Tests the train function running on chief without bias centering."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      # Stamp guards the ensemble against stale updates; must match the
      # ensemble's stamp for a train step to take effect.
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 1)
      # Tree weight equals the configured learning rate.
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEquals(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
  def testTrainFnChiefScalingNumberOfExamples(self):
    """Tests chief training with a callable examples_per_layer schedule."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      # examples_per_layer may be a function of the layer index: here
      # 2^layer examples are required per layer.
      num_examples_fn = (
          lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=num_examples_fn,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEquals(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
  def testTrainFnChiefWithBiasCentering(self):
    """Tests the train function running on chief with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      # center_bias=True: the first tree is a single bias leaf.
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect bias to be centered.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())

      expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
      self.assertEquals(len(output.trees), 1)
      # Bias tree always carries full weight (no learning-rate shrinkage).
      self.assertAllEqual(output.tree_weights, [1.0])
      self.assertProtoEquals(expected_tree, output.trees[0])
      self.assertEquals(stamp_token.eval(), 1)
  def testTrainFnNonChiefNoBiasCentering(self):
    """Tests the train function running on worker without bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      # is_chief=False: this replica may only accumulate statistics.
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 0)
  def testTrainFnNonChiefWithCentering(self):
    """Tests the train function running on worker with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      # Even with center_bias=True, a non-chief replica must not mutate
      # the ensemble.
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 0)
  def testPredictFn(self):
    """Tests the predict function."""
    with self.test_session() as sess:
      # Create ensemble with one bias node.
      ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      text_format.Merge("""
          trees {
            nodes {
              leaf {
                vector {
                  value: 0.25
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_tree_weight_updates: 1
            num_layers_grown: 1
            is_finalized: true
          }""", ensemble_config)
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=3,
          tree_ensemble_config=ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)

      # Create predict op.
      mode = model_fn.ModeKeys.EVAL
      predictions_dict = sess.run(gbdt_model.predict(mode))
      # The ensemble was built with stamp_token=3; predictions echo it back.
      self.assertEquals(predictions_dict["ensemble_stamp"], 3)
      # The single bias leaf yields 0.25 for every example.
      self.assertAllClose(predictions_dict["predictions"], [[0.25], [0.25],
                                                            [0.25], [0.25]])
      self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
def testTrainFnMulticlassFullHessian(self):
"""Tests the GBDT train for multiclass full hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
batch_size = 3
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]
expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassDiagonalHessian(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassTreePerClass(self):
"""Tests the GBDT train for multiclass tree per class strategy."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {
"dense_float": array_ops.constant(
[[1.0], [1.5], [2.0]], dtypes.float32),
}
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
batch_size = 3
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 2.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
# This should result in a tree built for a class 2.
"num_trees": 13,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# One node for a split, two children nodes.
self.assertEqual(3, len(output.trees[0].nodes))
# Leafs will have a sparse vector for class 3.
self.assertEqual(1,
len(output.trees[0].nodes[1].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
-1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])
self.assertEqual(1,
len(output.trees[0].nodes[2].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
self.assertAllClose(
0.893284678459,
output.trees[0].nodes[2].leaf.sparse_vector.value[0],
atol=1e-4, rtol=1e-4)
# Standard TensorFlow test entry point; runs all tests in this file.
if __name__ == "__main__":
  googletest.main()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
These are the base tools for working with FSL.
Preprocessing tools are found in fsl/preprocess.py
Model tools are found in fsl/model.py
DTI tools are found in fsl/dti.py
XXX Make this doc current!
Currently these tools are supported:
* BET v2.1: brain extraction
* FAST v4.1: segmentation and bias correction
* FLIRT v5.5: linear registration
* MCFLIRT: motion correction
* FNIRT v1.0: non-linear warp
Examples
--------
See the docstrings of the individual classes for examples.
"""
from glob import glob
import os
import warnings
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (CommandLine, traits, CommandLineInputSpec,
isdefined)
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class Info(object):
    """Handle fsl output type and version information.

    version refers to the version of fsl on the system
    output type refers to the type of file fsl defaults to writing
    eg, NIFTI, NIFTI_GZ
    """
    # Map FSLOUTPUTTYPE environment values to their file extensions.
    ftypes = {'NIFTI': '.nii',
              'NIFTI_PAIR': '.img',
              'NIFTI_GZ': '.nii.gz',
              'NIFTI_PAIR_GZ': '.img.gz'}

    @staticmethod
    def version():
        """Check for fsl version on system

        Parameters
        ----------
        None

        Returns
        -------
        version : str
           Version number as string or None if FSL not found
        """
        # find which fsl being used....and get version from
        # /path/to/fsl/etc/fslversion
        try:
            basedir = os.environ['FSLDIR']
        except KeyError:
            return None
        clout = CommandLine(command='cat',
                            args='%s/etc/fslversion' % (basedir),
                            terminal_output='allatonce').run()
        out = clout.runtime.stdout
        return out.strip('\n')

    @classmethod
    def output_type_to_ext(cls, output_type):
        """Get the file extension for the given output type.

        Parameters
        ----------
        output_type : {'NIFTI', 'NIFTI_GZ', 'NIFTI_PAIR', 'NIFTI_PAIR_GZ'}
            String specifying the output type.

        Returns
        -------
        extension : str
            The file extension for the output type.

        Raises
        ------
        KeyError
            If `output_type` is not one of the supported types.
        """
        try:
            return cls.ftypes[output_type]
        except KeyError:
            # Bug fix: the original assigned a *tuple*
            # ('Invalid FSLOUTPUTTYPE: ', output_type) to msg instead of a
            # formatted string, producing a garbled exception message.
            raise KeyError('Invalid FSLOUTPUTTYPE: %s' % output_type)

    @classmethod
    def output_type(cls):
        """Get the global FSL output file type FSLOUTPUTTYPE.

        This returns the value of the environment variable
        FSLOUTPUTTYPE. If it is not defined, a warning is issued and
        'NIFTI' is returned as a default. (The previous docstring
        incorrectly claimed an exception was raised.)

        Returns
        -------
        fsl_ftype : string
            Represents the current environment setting of FSLOUTPUTTYPE
        """
        try:
            return os.environ['FSLOUTPUTTYPE']
        except KeyError:
            warnings.warn(('FSL environment variables not set. setting output '
                           'type to NIFTI'))
            return 'NIFTI'

    @staticmethod
    def standard_image(img_name=None):
        '''Grab an image from the standard location.

        Returns a list of standard images if called without arguments.

        Could be made more fancy to allow for more relocatability'''
        try:
            fsldir = os.environ['FSLDIR']
        except KeyError:
            raise Exception('FSL environment variables not set')
        stdpath = os.path.join(fsldir, 'data', 'standard')
        if img_name is None:
            return [filename.replace(stdpath + '/', '')
                    for filename in glob(os.path.join(stdpath, '*nii*'))]
        return os.path.join(stdpath, img_name)
class FSLCommandInputSpec(CommandLineInputSpec):
    """
    Base Input Specification for all FSL Commands

    All command support specifying FSLOUTPUTTYPE dynamically
    via output_type.

    Example
    -------
    fsl.ExtractRoi(tmin=42, tsize=1, output_type='NIFTI')
    """
    # Selects the FSL output file format; valid choices come from
    # Info.ftypes, with 'NIFTI' as the default.
    output_type = traits.Enum('NIFTI', Info.ftypes.keys(),
                              desc='FSL output type')
class FSLCommand(CommandLine):
    """Base support for FSL commands.
    """
    input_spec = FSLCommandInputSpec
    # Class-level default output type, shared by all FSL subclasses.
    _output_type = None

    def __init__(self, **inputs):
        super(FSLCommand, self).__init__(**inputs)
        self.inputs.on_trait_change(self._output_update, 'output_type')
        if self._output_type is None:
            # Fall back to the FSLOUTPUTTYPE environment setting.
            self._output_type = Info.output_type()
        if not isdefined(self.inputs.output_type):
            self.inputs.output_type = self._output_type
        else:
            self._output_update()

    def _output_update(self):
        # Keep the class-level default and the subprocess environment in
        # sync with the currently selected output type.
        self._output_type = self.inputs.output_type
        self.inputs.environ.update({'FSLOUTPUTTYPE': self.inputs.output_type})

    @classmethod
    def set_default_output_type(cls, output_type):
        """Set the default output type for FSL classes.

        This method is used to set the default output type for all FSL
        subclasses. However, setting this will not update the output
        type for any existing instances. For these, assign the
        <instance>.inputs.output_type.
        """
        if output_type in Info.ftypes:
            cls._output_type = output_type
        else:
            raise AttributeError('Invalid FSL output_type: %s' % output_type)

    @property
    def version(self):
        """Version of FSL found on the system, or None if not found.

        Bug fix: this property was defined twice in the original class
        (identically); the redundant second definition has been removed.
        """
        return Info.version()

    def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,
                   ext=None):
        """Generate a filename based on the given parameters.

        The filename will take the form: cwd/basename<suffix><ext>.
        If change_ext is True, it will use the extensions specified in
        <instance>.inputs.output_type.

        Parameters
        ----------
        basename : str
            Filename to base the new filename on.
        cwd : str
            Path to prefix to the new filename. (default is os.getcwd())
        suffix : str
            Suffix to add to the `basename`. (defaults is '' )
        change_ext : bool
            Flag to change the filename extension to the FSL output type.
            (default True)
        ext : str
            Explicit extension to use instead of the one derived from
            the FSL output type. (default None)

        Returns
        -------
        fname : str
            New filename based on given parameters.
        """
        if basename == '':
            msg = 'Unable to generate filename for command %s. ' % self.cmd
            msg += 'basename is not set!'
            raise ValueError(msg)
        if cwd is None:
            cwd = os.getcwd()
        if ext is None:
            ext = Info.output_type_to_ext(self.inputs.output_type)
        if change_ext:
            # Fold the output-type extension into the suffix so that
            # fname_presuffix does not also keep the original extension.
            if suffix:
                suffix = ''.join((suffix, ext))
            else:
                suffix = ext
        if suffix is None:
            suffix = ''
        fname = fname_presuffix(basename, suffix=suffix,
                                use_ext=False, newpath=cwd)
        return fname
def check_fsl():
    """Return 0 when FSL is found on the system, 1 otherwise."""
    return 0 if Info.version() else 1
def no_fsl():
    """Checks if FSL is NOT installed
    used with skipif to skip tests that will
    fail if FSL is not installed"""
    return Info.version() is None
def no_fsl_course_data():
    """check if fsl_course data is present"""
    data_dir = os.path.abspath('fsl_course_data')
    return not os.path.isdir(data_dir)
| |
# Minimal support for git commands on an hg repository
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''browse the repository in a graphical way
The hgk extension allows browsing the history of a repository in a
graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
distributed with Mercurial.)
hgk consists of two parts: a Tcl script that does the displaying and
querying of information, and an extension to Mercurial named hgk.py,
which provides hooks for hgk to get information. hgk can be found in
the contrib directory, and the extension is shipped in the hgext
repository, and needs to be enabled.
The hg view command will launch the hgk Tcl script. For this command
to work, hgk must be in your search path. Alternately, you can specify
the path to hgk in your .hgrc file::
[hgk]
path=/location/of/hgk
hgk can make use of the extdiff extension to visualize revisions.
Assuming you had already configured extdiff vdiff command, just add::
[hgk]
vdiff=vdiff
Revisions context menu will now display additional entries to fire
vdiff on hovered and selected revisions.
'''
import os
from mercurial import commands, util, patch, revlog, cmdutil
from mercurial.node import nullid, nullrev, short
from mercurial.i18n import _
def difftree(ui, repo, node1=None, node2=None, *files, **opts):
    """diff trees from two commits"""
    def __difftree(repo, node1, node2, files=None):
        # Bug fix: avoid a mutable default argument (files=[]); None now
        # stands in for "match all files".
        if files is None:
            files = []
        assert node2 is not None
        mmap = repo[node1].manifest()
        mmap2 = repo[node2].manifest()
        m = cmdutil.match(repo, files)
        modified, added, removed = repo.status(node1, node2, m)[:3]
        empty = short(nullid)
        for f in modified:
            # TODO get file permissions
            ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
                     (short(mmap[f]), short(mmap2[f]), f, f))
        for f in added:
            ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
                     (empty, short(mmap2[f]), f, f))
        for f in removed:
            ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
                     (short(mmap[f]), empty, f, f))
    ##
    while True:
        # In stdin mode, read "node1 [node2]" pairs until EOF; otherwise
        # process the command-line nodes once.
        if opts['stdin']:
            try:
                line = raw_input().split(' ')
                node1 = line[0]
                if len(line) > 1:
                    node2 = line[1]
                else:
                    node2 = None
            except EOFError:
                break
        node1 = repo.lookup(node1)
        if node2:
            node2 = repo.lookup(node2)
        else:
            # With a single node, diff it against its first parent.
            node2 = node1
            node1 = repo.changelog.parents(node1)[0]
        if opts['patch']:
            if opts['pretty']:
                catcommit(ui, repo, node2, "")
            m = cmdutil.match(repo, files)
            chunks = patch.diff(repo, node1, node2, match=m,
                                opts=patch.diffopts(ui, {'git': True}))
            for chunk in chunks:
                ui.write(chunk)
        else:
            __difftree(repo, node1, node2, files=files)
        if not opts['stdin']:
            break
def catcommit(ui, repo, n, prefix, ctx=None):
    """Write commit *n* in a git cat-file-like format, with every output
    line prefixed by *prefix*.  In stdin mode a trailing NUL terminates
    the record."""
    nlprefix = '\n' + prefix
    if ctx is None:
        ctx = repo[n]
    ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
    for p in ctx.parents():
        ui.write("parent %s\n" % p)
    date = ctx.date()
    # NUL bytes would break the record framing used in stdin mode.
    description = ctx.description().replace("\0", "")
    lines = description.splitlines()
    # A trailing "committer: ..." line in the description overrides the
    # changeset user as the committer.
    if lines and lines[-1].startswith('committer:'):
        committer = lines[-1].split(': ')[1].rstrip()
    else:
        committer = ctx.user()
    ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
    ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
    ui.write("revision %d\n" % ctx.rev())
    ui.write("branch %s\n\n" % ctx.branch())
    if prefix != "":
        # Indent every description line with the prefix.
        ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
    else:
        ui.write(description + "\n")
    if prefix:
        ui.write('\0')
def base(ui, repo, node1, node2):
    """output common ancestor information"""
    # Resolve both revisions, then print the short hash of their
    # common ancestor in the changelog.
    ancestor = repo.changelog.ancestor(repo.lookup(node1), repo.lookup(node2))
    ui.write(short(ancestor) + "\n")
def catfile(ui, repo, type=None, r=None, **opts):
    """cat a specific revision"""
    # in stdin mode, every line except the commit is prefixed with four
    # spaces. This way our caller can find the commit without magic
    # strings
    #
    prefix = ""
    if opts['stdin']:
        try:
            # Each stdin line is "<type> <revision>".
            (type, r) = raw_input().split(' ')
            prefix = "    "
        except EOFError:
            return
    else:
        if not type or not r:
            ui.warn(_("cat-file: type or revision not supplied\n"))
            commands.help_(ui, 'cat-file')
    while r:
        # Only the "commit" object type is supported.
        if type != "commit":
            ui.warn(_("aborting hg cat-file only understands commits\n"))
            return 1
        n = repo.lookup(r)
        catcommit(ui, repo, n, prefix)
        if opts['stdin']:
            # Keep reading "<type> <revision>" pairs until EOF.
            try:
                (type, r) = raw_input().split(' ')
            except EOFError:
                break
        else:
            break
# git rev-tree is a confusing thing. You can supply a number of
# commit sha1s on the command line, and it walks the commit history
# telling you which commits are reachable from the supplied ones via
# a bitmask based on arg position.
# you can specify a commit to stop at by starting the sha1 with ^
def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
    """Emulate git rev-tree: print commits reachable from the revisions in
    *args*, each tagged with a bitmask of which args reach it.  Args
    starting with '^' mark commits to stop walking at."""
    # Walk the changelog newest-first in chunks of 100 revisions, yielding
    # (rev, changectx-or-None) pairs; contexts are only loaded when output
    # beyond the bare hashes is needed (full is not None).
    def chlogwalk():
        count = len(repo)
        i = count
        l = [0] * 100
        chunk = 100
        while True:
            if chunk > i:
                chunk = i
                i = 0
            else:
                i -= chunk
            for x in xrange(chunk):
                if i + x >= count:
                    l[chunk - x:] = [0] * (chunk - x)
                    break
                if full != None:
                    l[x] = repo[i + x]
                    l[x].changeset() # force reading
                else:
                    l[x] = 1
            for x in xrange(chunk - 1, -1, -1):
                if l[x] != 0:
                    yield (i + x, full != None and l[x] or None)
            if i == 0:
                break
    # calculate and return the reachability bitmask for sha
    def is_reachable(ar, reachable, sha):
        if len(ar) == 0:
            return 1
        mask = 0
        for i in xrange(len(ar)):
            if sha in reachable[i]:
                mask |= 1 << i
        return mask
    reachable = []
    stop_sha1 = []
    want_sha1 = []
    count = 0
    # figure out which commits they are asking for and which ones they
    # want us to stop on
    for i, arg in enumerate(args):
        if arg.startswith('^'):
            s = repo.lookup(arg[1:])
            stop_sha1.append(s)
            want_sha1.append(s)
        elif arg != 'HEAD':
            want_sha1.append(repo.lookup(arg))
    # calculate the graph for the supplied commits
    # (breadth-first ancestor walk per wanted commit, bounded by stop_sha1)
    for i, n in enumerate(want_sha1):
        reachable.append(set())
        visit = [n]
        reachable[i].add(n)
        while visit:
            n = visit.pop(0)
            if n in stop_sha1:
                continue
            for p in repo.changelog.parents(n):
                if p not in reachable[i]:
                    reachable[i].add(p)
                    visit.append(p)
                if p in stop_sha1:
                    continue
    # walk the repository looking for commits that are in our
    # reachability graph
    for i, ctx in chlogwalk():
        n = repo.changelog.node(i)
        mask = is_reachable(want_sha1, reachable, n)
        if mask:
            parentstr = ""
            if parents:
                pp = repo.changelog.parents(n)
                if pp[0] != nullid:
                    parentstr += " " + short(pp[0])
                if pp[1] != nullid:
                    parentstr += " " + short(pp[1])
            if not full:
                ui.write("%s%s\n" % (short(n), parentstr))
            elif full == "commit":
                ui.write("%s%s\n" % (short(n), parentstr))
                catcommit(ui, repo, n, '    ', ctx)
            else:
                (p1, p2) = repo.changelog.parents(n)
                (h, h1, h2) = map(short, (n, p1, p2))
                (i1, i2) = map(repo.changelog.rev, (p1, p2))
                date = ctx.date()[0]
                ui.write("%s %s:%s" % (date, h, mask))
                mask = is_reachable(want_sha1, reachable, p1)
                if i1 != nullrev and mask > 0:
                    ui.write("%s:%s " % (h1, mask)),
                mask = is_reachable(want_sha1, reachable, p2)
                if i2 != nullrev and mask > 0:
                    ui.write("%s:%s " % (h2, mask))
                ui.write("\n")
            if maxnr and count >= maxnr:
                break
            count += 1
def revparse(ui, repo, *revs, **opts):
    """parse given revisions"""
    # 'HEAD' is git-speak for Mercurial's 'tip'.
    def revstr(rev):
        return revlog.hex(repo.lookup('tip' if rev == 'HEAD' else rev))
    for rev in revs:
        # A "start:stop" range prints the start hash and a '^'-prefixed
        # stop hash on separate lines.
        parts = rev.split(':', 1)
        ui.write('%s\n' % revstr(parts[0]))
        if len(parts) == 2:
            ui.write('^%s\n' % revstr(parts[1]))
# git rev-list tries to order things by date, and has the ability to stop
# at a given commit without walking the whole repo. TODO add the stop
# parameter
def revlist(ui, repo, *revs, **opts):
    """print revisions"""
    # With --header, ask revtree for full commit output; otherwise hashes only.
    full = "commit" if opts['header'] else None
    revtree(ui, list(revs), repo, full, opts['max_count'], opts['parents'])
def config(ui, repo, **opts):
    """print extension options"""
    # Emit each hgk option as a "k=<name>\nv=<value>\n" pair.
    vdiff = ui.config('hgk', 'vdiff', '')
    ui.write('k=%s\nv=%s\n' % ('vdiff', vdiff))
def view(ui, repo, *etc, **opts):
    "start interactive history viewer"
    # hgk expects to run from the repository root.
    os.chdir(repo.root)
    # Turn every set option into a "--name value" flag.
    flags = []
    for name, value in opts.iteritems():
        if value:
            flags.append('--%s %s' % (name, value))
    optstr = ' '.join(flags)
    cmd = '%s %s %s' % (ui.config("hgk", "path", "hgk"), optstr, " ".join(etc))
    ui.debug("running %s\n" % cmd)
    util.system(cmd)
# Command table registering hgk's commands with Mercurial: maps each
# command name to (implementation, option list, usage synopsis).  The
# '^' prefix on "view" makes it show up in short help; the debug-*
# commands are the plumbing hgk's Tcl script calls for information.
cmdtable = {
    "^view":
        (view,
         [('l', 'limit', '', _('limit number of changes displayed'))],
         _('hg view [-l LIMIT] [REVRANGE]')),
    "debug-diff-tree":
        (difftree,
         [('p', 'patch', None, _('generate patch')),
          ('r', 'recursive', None, _('recursive')),
          ('P', 'pretty', None, _('pretty')),
          ('s', 'stdin', None, _('stdin')),
          ('C', 'copy', None, _('detect copies')),
          ('S', 'search', "", _('search'))],
         _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
    "debug-cat-file":
        (catfile,
         [('s', 'stdin', None, _('stdin'))],
         _('hg debug-cat-file [OPTION]... TYPE FILE')),
    "debug-config":
        (config, [], _('hg debug-config')),
    "debug-merge-base":
        (base, [], _('hg debug-merge-base REV REV')),
    "debug-rev-parse":
        (revparse,
         [('', 'default', '', _('ignored'))],
         _('hg debug-rev-parse REV')),
    "debug-rev-list":
        (revlist,
         [('H', 'header', None, _('header')),
          ('t', 'topo-order', None, _('topo-order')),
          ('p', 'parents', None, _('parents')),
          ('n', 'max-count', 0, _('max-count'))],
         _('hg debug-rev-list [OPTION]... REV...')),
    }
| |
"""
Script that analyzes MPG Ranch archive logs of commands that add clip
start indices.
"""
from collections import defaultdict
from pathlib import Path
import csv
import re
# 2016
# ARCHIVE_DIR_PATH = Path('/Volumes/2012_2015_2016/2016_NFC/2016_NFC_All')
# JOB_NUMS = (88, 100, 118, 122, 123, 125, 126)
# OUTPUT_FILE_PATH = \
# ARCHIVE_DIR_PATH / 'MPG Ranch 2016 Add Clip Start Index Clip Counts.csv'
# 2015
# ARCHIVE_DIR_PATH = Path('/Volumes/2012_2015_2016/2015_NFC/2015_NFC_All')
# JOB_NUMS = (42, 43, 44, 45)
# OUTPUT_FILE_PATH = \
# ARCHIVE_DIR_PATH / 'MPG Ranch 2015 Add Clip Start Index Clip Counts.csv'
# 2014
ARCHIVE_DIR_PATH = Path('/Volumes/2012_2015_2016/2014_NFC/2014_NFC_All')
JOB_NUMS = (24, 25, 26, 32)
OUTPUT_FILE_PATH = \
ARCHIVE_DIR_PATH / 'MPG Ranch 2014 Add Clip Start Index Clip Counts.csv'
LOG_DIR_PATH = ARCHIVE_DIR_PATH / 'Logs' / 'Jobs'
LOG_FILE_NAME_FORMAT = 'Job {}.log'
OUTPUT_COLUMN_NAMES = (
'Station Channel',
'Start Time',
'Duration',
'Detector',
'Clips',
'Not Found',
'Short',
'All-Zero',
'Zero-Padded',
)
SHORT_DETECTOR_NAMES = {
'Old Bird Thrush Detector': 'Thrush',
'Old Bird Tseep Detector': 'Tseep'
}
# We make the final double quote optional in the following since that was
# accidentally omitted for a while from the logs we analyze.
CHANNEL_START_RE = re.compile(
r'INFO Processing (\d+) clips for recording channel "(.*) / '
r'.* / start (.*) / duration (.*) h / Channel (\d)" and detector '
r'"(.*)"\.\.\.')
SHORT_CLIP_RE = re.compile(
r'WARNING Found \d+ copies of length-(\d+) clip')
'''
2020-11-21 21:52:20,845 INFO Processing 1816 clips for recording channel "Ridge / SM2+ 010798 / start 2016-05-27 02:47:00+00:00 / duration 6.763 h / Channel 0" and detector "Old Bird Thrush Detector...
2020-11-21 21:52:21,751 WARNING Could not find samples of clip "Ridge / SMX-NFC RD Output / Old Bird Thrush Detector / start 2016-05-27 02:50:25+00:00 / duration 0.408 s" in recording channel.
2020-11-21 21:52:52,794 INFO For clip 4367378 at end of recording, found 462 of 13820 clip samples, including 0 trailing zeros.
2020-11-21 22:49:15,427 WARNING Found 25 copies of length-2 clip "Ridge / SMX-NFC RD Output / Old Bird Tseep Detector / start 2016-07-03 10:58:56+00:00 / duration 0.000 s".
2020-11-22 04:38:19,370 WARNING Encountered unexpected all-zero clip "Ridge / SMX-NFC RD Output / Old Bird Thrush Detector / start 2016-09-16 13:46:03+00:00 / duration 0.113 s".
'''
def main():
    """Parse the configured job logs, then write the CSV summary and
    print short-clip statistics."""
    logs = [JobLog(num) for num in JOB_NUMS]
    write_clip_count_csv_file(logs)
    show_short_clip_counts(logs)
class JobLog:
    """Clip counts parsed from one "add clip start indices" job log.

    Reads the job's log file and accumulates, per recording channel,
    counts of processed clips and of the various warning categories
    (exposed via `counts`), plus a histogram of short clip lengths
    (exposed via `short_clip_counts`).
    """

    def __init__(self, job_num):
        print(f'Parsing log for job {job_num}...')
        self.job_num = job_num
        # One count dict per recording channel, in log order.
        self._channel_clip_counts = []
        # Maps short clip length (in samples) to occurrence count.
        self._short_clip_counts = defaultdict(int)
        # Counts for the channel currently being parsed, or None before
        # the first channel start line is seen.
        self._current_clip_counts = None
        lines = read_job_log(job_num)
        for line in lines:
            # Try each line parser in turn; the first that recognizes the
            # line consumes it. Unrecognized WARNING lines are reported.
            if self._parse_channel_start_line(line):
                pass
            elif self._parse_not_found_clip_line(line):
                pass
            elif self._parse_short_clip_line(line):
                pass
            elif self._parse_all_zero_clip_line(line):
                pass
            elif self._parse_zero_padded_clip_line(line):
                pass
            else:
                self._check_for_warning_line(line)
        self._complete_channel_counts_if_needed()

    @property
    def counts(self):
        # List of per-channel count dicts, in the order parsed.
        return self._channel_clip_counts

    @property
    def short_clip_counts(self):
        # defaultdict mapping clip length to occurrence count.
        return self._short_clip_counts

    def _parse_channel_start_line(self, line):
        # A channel start line opens a new per-channel count dict and
        # completes the previous one, if any.
        m = CHANNEL_START_RE.search(line)
        if m is not None:
            (clip_count, station_name, start_time, duration, channel_num,
             detector_name) = m.groups()
            clip_count = int(clip_count)
            self._complete_channel_counts_if_needed()
            counts = defaultdict(int)
            counts['Station Channel'] = f'{station_name} {channel_num}'
            counts['Start Time'] = start_time
            counts['Duration'] = duration
            counts['Detector'] = SHORT_DETECTOR_NAMES[detector_name]
            counts['Clips'] = clip_count
            self._current_clip_counts = counts
            return True
        else:
            return False

    def _complete_channel_counts_if_needed(self):
        # Bug fix: clear `_current_clip_counts` after appending it. The
        # original left it set, so any extra call would have appended the
        # same channel's counts a second time.
        if self._current_clip_counts is not None:
            self._channel_clip_counts.append(self._current_clip_counts)
            self._current_clip_counts = None

    def _parse_not_found_clip_line(self, line):
        # NOTE(review): the warning-line parsers assume a channel start
        # line has already been seen; otherwise `_current_clip_counts`
        # is None and indexing it would raise.
        if line.find('WARNING Could not find samples of clip') != -1:
            self._current_clip_counts['Not Found'] += 1
            return True
        else:
            return False

    def _parse_short_clip_line(self, line):
        # Short clips also count as not found, and their lengths are
        # tallied in the per-job histogram.
        m = SHORT_CLIP_RE.search(line)
        if m is not None:
            self._current_clip_counts['Short'] += 1
            self._current_clip_counts['Not Found'] += 1
            clip_length = int(m.group(1))
            self._short_clip_counts[clip_length] += 1
            return True
        else:
            return False

    def _parse_all_zero_clip_line(self, line):
        # All-zero clips also count as not found.
        if line.find(' WARNING Encountered unexpected all-zero clip ') \
                != -1:
            self._current_clip_counts['All-Zero'] += 1
            self._current_clip_counts['Not Found'] += 1
            return True
        else:
            return False

    def _parse_zero_padded_clip_line(self, line):
        # Clips truncated at the end of a recording were zero-padded.
        if line.find(' at end of recording, found ') != -1:
            self._current_clip_counts['Zero-Padded'] += 1
            return True
        else:
            return False

    def _check_for_warning_line(self, line):
        # Surface WARNING lines that none of the parsers recognized.
        if line.find('WARNING') != -1:
            print(f'    Unhandled WARNING line: {line}')
def read_job_log(job_num):
    """Return the lines of the specified job's log file."""
    return get_log_file_path(job_num).read_text().split('\n')
def get_log_file_path(job_num):
    """Return the path of the log file for the specified job number."""
    return LOG_DIR_PATH / LOG_FILE_NAME_FORMAT.format(job_num)
def write_clip_count_csv_file(job_logs):
    """Write the per-channel clip counts of all jobs, sorted, to the
    output CSV file."""
    rows = []
    for log in job_logs:
        for c in log.counts:
            rows.append(tuple(c[n] for n in OUTPUT_COLUMN_NAMES))
    rows.sort()
    # Bug fix: files handed to csv.writer must be opened with newline=''
    # (per the csv module docs); otherwise Windows output gets an extra
    # blank line after every row and embedded newlines are mistranslated.
    with open(OUTPUT_FILE_PATH, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(OUTPUT_COLUMN_NAMES)
        writer.writerows(rows)
def show_short_clip_counts(job_logs):
    """Print aggregate short-clip counts across all job logs, keyed by length."""
    totals = defaultdict(int)
    for log in job_logs:
        for length, count in log.short_clip_counts.items():
            totals[length] += count
    print('Short clip counts:')
    for length in sorted(totals):
        print(f' {length}: {totals[length]}')
# Script entry point.
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2013,2014 Donald Talton
# All rights reserved.
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Donald Talton nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from collections import defaultdict
from socket import gethostbyaddr
from django.http import JsonResponse
from django.shortcuts import render_to_response
from django.conf import settings
from cephclient import wrapper
from humanize import suffixes
from rgwadmin import RGWAdmin, exceptions
from multiprocessing import Pool
def home(request):
"""
Main dashboard, Overall cluster health and status
"""
response = {}
ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
cresp, response['cluster_health'] = ceph.health(body='json')
sresp, cluster_status = ceph.status(body='json')
# Monitors
all_mons = cluster_status['output']['monmap']['mons']
up_mons = (cluster_status['output']['health']['health']
['health_services'][0]['mons'])
total_mon_count = len(all_mons)
response['mons'] = {'ok': 0, 'warn': 0, 'crit': 0}
for mon in up_mons:
if mon['health'] == "HEALTH_OK":
response['mons']['ok'] += 1
else:
response['mons']['warn'] += 1
response['mons']['crit'] = total_mon_count - (
response['mons']['ok'] + response['mons']['crit']
)
# Get a rough estimate of cluster free space. Is this accurate ?
bytes_total = cluster_status['output']['pgmap']['bytes_total']
bytes_used = cluster_status['output']['pgmap']['bytes_used']
def filesize(value):
value = float(value)
if value == 1:
return 0, 1, 'Byte'
elif value < 1024 ** 2:
return 0, str(value), 'Bytes'
for i, s in enumerate(suffixes['decimal']):
unit = 1024 ** (i + 2)
if value < unit * 1024:
return i, (1024 * value / unit), s
return i, (1024 * value / unit), s
(response['scale'],
response['data_avail'],
response['data_scale']) = filesize(bytes_total)
response['data_used'] = round(float(bytes_used) /
(1024.0 ** (response['scale'] + 1)), 1)
# pgs
pg_statuses = cluster_status['output']['pgmap']
response['pg'] = {'ok': 0, 'warn': 0, 'crit': 0}
# pg states
pg_warn_status = re.compile("(creating|degraded|replay|splitting|"
"scrubbing|repair|recovering|backfill"
"|wait-backfill|remapped)")
pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")
for state in pg_statuses['pgs_by_state']:
if state['state_name'] == "active+clean":
response['pg']['ok'] += state['count']
elif pg_warn_status.search(state['state_name']):
response['pg']['warn'] += state['count']
elif pg_crit_status.search(state['state_name']):
response['pg']['crit'] += state['count']
# pg statuses
response['pg']['stat'] = dict()
for state in pg_statuses['pgs_by_state']:
response['pg']['stat'][state['state_name']] = state['count']
# osds
dresp, osd_dump = ceph.osd_dump(body='json')
osd_stat = {}
for osd in osd_dump['output']['osds']:
try:
hostname = gethostbyaddr(
osd["public_addr"].split(":")[0]
)[0].split(".")[0]
except:
continue
if hostname in osd_stat:
osd_stat[hostname].append(osd)
else:
osd_stat[hostname] = [osd,]
response['osd'] = {'state': sorted(osd_stat.items()),
'ok': 0, 'warn': 0, 'crit': 0}
for hostname, storages in response['osd']['state']:
for osd_status in storages:
if osd_status["in"] and osd_status["up"]:
response['osd']['ok'] += 1
elif osd_status["in"] == 0 and osd_status["up"] == 0:
response['osd']['crit'] += 1
else:
response['osd']['warn'] += 1
# RGW statuses
pool = Pool(len(settings.S3_SERVERS))
response['radosgw'] = {'stat': dict(
pool.map(get_rgw_stat, settings.S3_SERVERS)
)}
response['radosgw']['ok'] = len(
filter(lambda (i,v): v > 0, response['radosgw']['stat'].items())
)
response['radosgw']['fail'] = len(
settings.S3_SERVERS) - response['radosgw']['ok']
# Users and stats
for s3server, s3server_stat in response['radosgw']['stat'].items():
if s3server_stat:
response['users'] = get_users_stat(s3server)
break
_, df = ceph.df(body="json")
response['cephfs'] = round(
float(filter(
lambda pool: pool['name'] == 'data',
df['output']['pools']
)[0]['stats']['bytes_used']) /
(1024.0 ** (response['scale'] + 1)), 1)
if 'json' in request.GET:
return JsonResponse(response)
else:
return render_to_response('dashboard.html', response)
def get_rgw_stat(server):
    """Probe one RGW server; returns (server, 1) if usable, else (server, 0)."""
    try:
        admin = RGWAdmin(settings.S3_CRED['access_key'],
                         settings.S3_CRED['secret_key'],
                         server, secure=False)
        return (server, 1) if admin.get_users() else (server, 0)
    except:
        # Best-effort health probe: any failure marks the server as down.
        return server, 0
def get_users_stat(s3_server):
    """Collect per-group usage statistics from one RGW server.

    Returns a dict mapping group name -> [total_bytes, {owner: {bucket: usage}}].
    The group name is the upper-cased prefix before ':' in the owner's display
    name, or "_" when there is no prefix.  Best-effort: on any failure the
    statistics accumulated so far are returned.
    """
    # Value shape per group: [size total (in kB until converted below),
    # nested owner -> bucket -> usage mapping].
    users_stat = defaultdict(lambda: [0, defaultdict(lambda: defaultdict(dict))])
    try:
        rgwAdmin = RGWAdmin(settings.S3_CRED['access_key'],
                            settings.S3_CRED['secret_key'],
                            s3_server, secure=False)
        buckets_list = rgwAdmin.get_bucket()
        for bucket in buckets_list:
            try:
                bucket_stat = rgwAdmin.get_bucket(bucket)
                username = rgwAdmin.get_user(
                    bucket_stat["owner"]
                )["display_name"]
                if ':' in username:
                    group_name = username.split(":")[0].upper()
                else:
                    group_name = "_"
                users_stat[group_name][1][bucket_stat["owner"]][bucket] = (
                    bucket_stat["usage"]["rgw.main"]
                )
                users_stat[group_name][0] += bucket_stat[
                    "usage"
                ]["rgw.main"]["size_kb"]
            except:
                # Best-effort: skip buckets whose stats cannot be fetched.
                pass
        for i in users_stat:
            # Convert the accumulated kB totals to bytes.
            users_stat[i][0] *= 1024
            # Freeze the nested defaultdicts so later missing-key accesses
            # raise instead of silently inserting empty entries.
            for j in users_stat[i][1]:
                users_stat[i][1][j].default_factory = None
            users_stat[i][1].default_factory = None
        return dict(users_stat)
    except:
        # Connection/API failure: return whatever was accumulated.
        return dict(users_stat)
def osd_details(request, osd_num):
    """Render details and perf counters for a single OSD."""
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    osd_num = int(osd_num)
    _dump_resp, osd_dump = ceph.osd_dump(body='json')
    osd_disk_details = [o for o in osd_dump['output']['osds']
                        if o['osd'] == int(osd_num)][0]
    # Show the short hostname rather than the raw public address.
    osd_disk_details["name"] = gethostbyaddr(
        osd_disk_details["public_addr"].split(":")[0]
    )[0].split(".")[0]
    _perf_resp, osd_perf = ceph.osd_perf(body='json')
    osd_disk_perf = [p for p in osd_perf['output']['osd_perf_infos']
                     if p['id'] == int(osd_num)][0]
    return render_to_response('osd_details.html',
                              {'osd_disk': {'details': osd_disk_details,
                                            'perf': osd_disk_perf}})
def activity(_):
    """Return current cluster throughput/recovery activity figures as JSON."""
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    _status_resp, cluster_status = ceph.status(body='json')
    pgmap = cluster_status['output']['pgmap']

    activities = {}
    # Only emit the metrics the cluster is currently reporting.
    metric_keys = (('Read', 'read_bytes_sec'),
                   ('Write', 'write_bytes_sec'),
                   ('Ops', 'op_per_sec'),
                   ('Recovering_Objects', 'recovering_objects_per_sec'),
                   ('Recovery_Speed', 'recovering_bytes_per_sec'),
                   ('Recovering_Keys', 'recovering_keys_per_sec'))
    for label, key in metric_keys:
        if key in pgmap:
            activities[label] = pgmap.get(key)

    # Free size
    activities['Used'] = pgmap.get('bytes_used')
    activities['Total'] = pgmap.get('bytes_total')
    return JsonResponse(activities)
def api(_):
    """Render the static API documentation page."""
    return render_to_response("api.html")
| |
#!/usr/bin/python3
import argparse
import os
import random
import sys
import yaml
from time import sleep
from scoring import Scoring
from random_search import RandomSearch
from incremental_genetic import IncrementalGenetic
import output
from mqtt import MQTTPublisher, MQTTClient, MQTTConfig
from util import new_scenario_id, extant_file, MapNamespace, hyphen_to_underscore
from sessions import Scenario, SessionCache
from driver import StrategyDriver
from worker import Worker
from tracker import Tracker
from importer import ImportSource, MQTTImporter
# Seed the PRNG from the OS entropy source (no fixed seed).
random.seed()

# Baseline settings; overridden by the config file, then by CLI arguments
# (see load_config).
default_args = {'cars': 4,
                'sessions': 12,
                'strategy': 'incrementalGenetic',
                'population': 10000,
                'mutation_rate': 100,
                'fittest_selections': 5,
                'weakest_selections': 5,
                'import_frequency': 0,
                'initial_pause': 1,
                'delay': 5}
# NOTE(review): not referenced in the code visible here -- presumably
# consumed by the strategy classes; confirm before removing.
status_frequency = 1000
# Strategy name -> implementation class.
strategies = {'randomSearch' : RandomSearch, 'incrementalGenetic' : IncrementalGenetic}
# Sorted names, used for argparse choices and help output.
strategy_names = sorted(list(strategies.keys()))
def build_parser():
    """Construct the command-line parser.

    Returns:
        (parser, command_parsers): the root ArgumentParser plus a dict
        mapping sub-command name -> its sub-parser, used by `help` to print
        per-command help.
    """
    parser = argparse.ArgumentParser(description='Generate best driver dance card')
    parser.add_argument('--config', type=extant_file, help='YAML file containing configuration. Other command-line args will override setting from this file.')
    parser.add_argument('--cars', metavar='N', type=int, help='number of cars (default is 4)')
    parser.add_argument('--sessions', metavar='N', type=int, help='number of sessions (default is 12)')
    parser.add_argument('--strategy', choices=strategy_names, help='strategy to use (default is incrementalGenetic)')
    parser.add_argument('--cache', metavar='dir', help='optional directory to cache session permutations in order to improve startup time (default "./cache")')
    # Knobs consumed by the strategy implementations (see `strategies`).
    group = parser.add_argument_group('strategy-specific parameters')
    group.add_argument('--population', metavar='N', type=int, help='size of population for genetic strategies (default 10000)')
    group.add_argument('--mutation-rate', metavar='N', type=int, help='mutation rate, expressed as 1 in N (default 100)')
    group.add_argument('--fittest-selections', metavar='N', type=int, help='number of individuals to sample for best-of-n fittest selection strategy (default 5)')
    group.add_argument('--weakest-selections', metavar='N', type=int, help='number of individuals to sample for worst-of-n weakest selection strategy (default 5)')
    group.add_argument('--import-frequency', metavar='N', type=int, help='if specified, will import a candidate from a remote worker every N iterations.')
    group = parser.add_argument_group('output controls')
    group.add_argument('--output-stats', action='store_true', help='outputs ongoing statistics to stderr')
    group.add_argument('--score-details', action='store_true', help='outputs details of score calculation with each dance card')
    # MQTT connection options are contributed by the MQTTConfig helper.
    MQTTConfig.add_argument_group(parser)
    # Sub-commands; each parser is kept so `help` can re-print its usage.
    command_parsers = {}
    subparsers = parser.add_subparsers(dest='command', help='sub-commands')
    help_parser = subparsers.add_parser('help', help='show help for a strategy or a sub-command')
    help_parser.add_argument('command_name', nargs='?', help='either a sub-command or a strategy name')
    command_parsers['help'] = help_parser
    start_parser = subparsers.add_parser('start', help='start a new distributed calc')
    start_parser.add_argument('-f', '--follow', action='store_true', help='continues to monitor status')
    command_parsers['start'] = start_parser
    stop_parser = subparsers.add_parser('stop', help='stop the current distributed calc')
    command_parsers['stop'] = stop_parser
    status_parser = subparsers.add_parser('status', help='show current status of a distributed calc')
    status_parser.add_argument('-f', '--follow', action='store_true', help='continues to monitor status')
    status_parser.add_argument('--initial-pause', metavar='N', type=int, help='number of seconds to wait for data when not following')
    status_parser.add_argument('--delay', metavar='N', type=int, help='number of seconds to wait between status output')
    status_parser.add_argument('--csv', action='store_true', help='output as CSV for import into Excel or similar')
    command_parsers['status'] = status_parser
    worker_parser = subparsers.add_parser('worker', help='run as a worker node')
    worker_parser.add_argument('--name', metavar='abc', help='optional human-friendly name to give to a worker node so identify it')
    command_parsers['worker'] = worker_parser
    return parser, command_parsers
def main():
    """Parse arguments, merge configuration, and dispatch to a sub-command."""
    parser, command_parsers = build_parser()
    args = parser.parse_args()
    config = load_config(args)
    handlers = {
        'help': lambda: help(config, command_parsers),
        'start': lambda: start(config),
        'stop': lambda: stop(config),
        'status': lambda: status(config),
        'worker': lambda: worker(config),
    }
    # No (or unknown) sub-command runs the calculation locally.
    handlers.get(config.command, lambda: standalone(config))()
def start(args):
    """Publish a new scenario over MQTT, starting a distributed calculation."""
    mqtt_client = get_mqtt(args, force=True)
    scenario = get_scenario(args)
    mqtt_client.publish_yaml('control/active_scenario', scenario.to_dict(), retain=True, as_root=True)
    mqtt_client.stop_loop()
    # BUG FIX: the original used '+', printing the literal text
    # 'Published scenario %s<id>'; '%' interpolates the id correctly.
    print('Published scenario %s' % scenario.id)
def stop(args):
    """Publish an empty scenario, signalling all workers to stop."""
    client = get_mqtt(args, force=True)
    client.publish_yaml('control/active_scenario', {}, retain=True, as_root=True)
    client.stop_loop()
    print('Published stop command.')
def status(args):
    """Show the status of a distributed calc, optionally following updates."""
    tracker = Tracker(get_mqtt(args, force=True), args.initial_pause, args.csv)
    tracker.listen()
    tracker.print()
    if not args.follow:
        return
    try:
        # Keep printing until interrupted.
        while True:
            sleep(args.delay)
            if not args.csv:
                print()
            tracker.print()
            sys.stdout.flush()
    except KeyboardInterrupt:
        print('Keyboard interrupt. Stopping.')
def standalone(args):
    """Run the chosen strategy locally in this process until interrupted."""
    mqtt_client = get_mqtt(args)
    publishers = get_publishers(args, mqtt_client)

    def strategy_factory(generator, scoring):
        return strategies[args.strategy](args, generator, scoring)

    cache = SessionCache(args.cache or './cache')
    driver = StrategyDriver(get_scenario(args), strategy_factory, publishers,
                            ImportSource(), args.import_frequency, cache)
    try:
        driver.run_strategy()
    except KeyboardInterrupt:
        print('Interrupted', file=sys.stderr)
def worker(args):
    """Run as a worker node, processing MQTT work items until interrupted."""
    client = get_mqtt(args, force=True, advertise_node=True)
    publishers = get_publishers(args, client)

    def strategy_factory(generator, scoring):
        return strategies[args.strategy](args, generator, scoring)

    node = Worker(client, strategy_factory, publishers, MQTTImporter(client),
                  args.import_frequency, args.name,
                  SessionCache(args.cache or './cache'))
    node.listen()
    wait_for_interrupt()
    node.stop()
def load_config(args):
    """Build the effective config: defaults < first config file < CLI args."""
    config = MapNamespace(default_args)
    candidates = [args.config,
                  './dancecard-config.yaml',
                  '/etc/dancecard/dancecard-config.yaml']
    merge_config_from_first_file(config, candidates)
    config.merge(vars(args))
    return config
def merge_config_from_first_file(config, filenames):
    """Merge settings from the first existing file in `filenames` into config.

    Falsy entries (e.g. an unset --config argument) are skipped; once a file
    is found and merged, the remaining candidates are ignored.
    """
    for filename in filenames:
        if filename and os.path.exists(filename):
            with open(filename) as stream:
                # SECURITY/COMPAT FIX: yaml.load() without an explicit Loader
                # can construct arbitrary Python objects and is an error in
                # PyYAML >= 6; safe_load is the right call for a config file.
                config.merge(hyphen_to_underscore(yaml.safe_load(stream)))
            break
def wait_for_interrupt():
    """Block forever, returning cleanly when the user presses Ctrl-C."""
    try:
        while True:
            sleep(100)
    except KeyboardInterrupt:
        print('Keyboard interrupt. Stopping.')
def get_scenario(args, scenario_id=None):
    """Build a Scenario, generating a fresh id unless one is supplied.

    Drivers are fixed at twice the car count.
    """
    sid = scenario_id or new_scenario_id()
    return Scenario(sid, args.cars, args.cars * 2, args.sessions)
def get_mqtt(args, force=False, advertise_node=False):
    """Return an MQTTClient when MQTT is configured, else None.

    When `force` is set and no MQTT settings are present, exit with an error
    (the sub-command cannot work without a broker).
    """
    config = MQTTConfig.from_args(args)
    if config:
        return MQTTClient(config, advertise_node)
    if force:
        print("Must specify MQTT settings for %s command." % args.command, file=sys.stderr)
        exit(1)
    return None
def get_publishers(args, mqtt_client=None):
    """Assemble the outputters selected by the command-line flags."""
    publishers = output.Multipublisher()
    publishers.add(output.FileBestOutputter(sys.stdout))
    if args.score_details:
        publishers.add(output.FileDetailedScoresOutputter(sys.stdout))
    if args.output_stats:
        # Stats mode also echoes the scenario and settings to stderr.
        for outputter_cls in (output.FileStatsOutputter,
                              output.FileScenarioOutputter,
                              output.FileSettingsOutputter):
            publishers.add(outputter_cls(sys.stderr))
    if mqtt_client:
        publishers.add(MQTTPublisher(mqtt_client))
    return publishers
def help(args, command_parsers):
    """Show help for a sub-command, a single strategy, or every strategy.

    NOTE: intentionally shadows the builtin `help` (dispatched from main).
    """
    name = args.command_name
    if name in command_parsers:
        command_parsers[name].print_help()
        return
    targets = [name] if name else strategy_names
    for target in targets:
        print('Help for %s:' % target)
        print(strategies[target].__doc__)
main()
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Adaptation of the extern.tclib classes for our needs.
'''
import re
import types
from grit import exception
from grit import lazy_re
import grit.extern.tclib
# Matches whitespace sequences which can be folded into a single whitespace
# character. This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = re.compile(r'\s+')
def Identity(i):
  '''Trivial escaping function: returns its argument unchanged.'''
  return i
class BaseMessage(object):
  '''Base class with methods shared by Message and Translation.

  A message is stored as a list of "parts": translateable strings
  interleaved with Placeholder objects.  NOTE: this is Python 2 code
  (types.StringTypes, dict.has_key, list.sort(cmp=...)).
  '''

  def __init__(self, text='', placeholders=[], description='', meaning=''):
    self.parts = []
    self.placeholders = []
    self.meaning = meaning
    self.dirty = True  # True if self.id is (or might be) wrong
    self.id = 0
    self.SetDescription(description)

    if text != '':
      if not placeholders or placeholders == []:
        self.AppendText(text)
      else:
        # Track each placeholder and how many times it is used in the text.
        tag_map = {}
        for placeholder in placeholders:
          tag_map[placeholder.GetPresentation()] = [placeholder, 0]
        # This creates a regexp like '(TAG1|TAG2|TAG3)'.
        # The tags have to be sorted in order of decreasing length, so that
        # longer tags are substituted before shorter tags that happen to be
        # substrings of the longer tag.
        # E.g. "EXAMPLE_FOO_NAME" must be matched before "EXAMPLE_FOO",
        # otherwise "EXAMPLE_FOO" splits "EXAMPLE_FOO_NAME" too.
        tags = tag_map.keys()
        tags.sort(cmp=lambda x,y: len(x) - len(y) or cmp(x, y), reverse=True)
        tag_re = '(' + '|'.join(tags) + ')'
        chunked_text = re.split(tag_re, text)
        for chunk in chunked_text:
          if chunk: # ignore empty chunk
            if tag_map.has_key(chunk):
              self.AppendPlaceholder(tag_map[chunk][0])
              tag_map[chunk][1] += 1 # increase placeholder use count
            else:
              self.AppendText(chunk)
        # Every supplied placeholder must occur at least once in the text.
        for key in tag_map.keys():
          assert tag_map[key][1] != 0

  def GetRealContent(self, escaping_function=Identity):
    '''Returns the original content, i.e. what your application and users
    will see.

    Specify a function to escape each translateable bit, if you like.
    '''
    bits = []
    for item in self.parts:
      if isinstance(item, types.StringTypes):
        bits.append(escaping_function(item))
      else:
        # Placeholder parts contribute their original text, unescaped.
        bits.append(item.GetOriginal())
    return ''.join(bits)

  def GetPresentableContent(self):
    '''Returns the message with placeholders replaced by their presentation
    names (e.g. USERNAME) instead of their original text.
    '''
    presentable_content = []
    for part in self.parts:
      if isinstance(part, Placeholder):
        presentable_content.append(part.GetPresentation())
      else:
        presentable_content.append(part)
    return ''.join(presentable_content)

  def AppendPlaceholder(self, placeholder):
    '''Appends a Placeholder part.  A placeholder whose presentation has been
    seen before must carry the same original text.
    '''
    assert isinstance(placeholder, Placeholder)
    dup = False
    for other in self.GetPlaceholders():
      if other.presentation == placeholder.presentation:
        assert other.original == placeholder.original
        dup = True

    if not dup:
      self.placeholders.append(placeholder)
    self.parts.append(placeholder)
    self.dirty = True

  def AppendText(self, text):
    '''Appends a non-empty translateable text part.'''
    assert isinstance(text, types.StringTypes)
    assert text != ''

    self.parts.append(text)
    self.dirty = True

  def GetContent(self):
    '''Returns the parts of the message. You may modify parts if you wish.
    Note that you must not call GetId() on this object until you have
    finished modifying the contents.
    '''
    self.dirty = True # user might modify content
    return self.parts

  def GetDescription(self):
    return self.description

  def SetDescription(self, description):
    # Fold whitespace runs so equivalent descriptions compare equal.
    self.description = _FOLD_WHITESPACE.sub(' ', description)

  def GetMeaning(self):
    return self.meaning

  def GetId(self):
    '''Returns the message id, regenerating it if the content has changed.'''
    if self.dirty:
      self.id = self.GenerateId()
      self.dirty = False
    return self.id

  def GenerateId(self):
    # Must use a UTF-8 encoded version of the presentable content, along with
    # the meaning attribute, to match the TC.
    return grit.extern.tclib.GenerateMessageId(
        self.GetPresentableContent().encode('utf-8'), self.meaning)

  def GetPlaceholders(self):
    return self.placeholders

  def FillTclibBaseMessage(self, msg):
    '''Copies description, text parts and placeholders into msg (a
    grit.extern.tclib message), encoding everything as UTF-8.
    '''
    msg.SetDescription(self.description.encode('utf-8'))

    for part in self.parts:
      if isinstance(part, Placeholder):
        ph = grit.extern.tclib.Placeholder(
            part.presentation.encode('utf-8'),
            part.original.encode('utf-8'),
            part.example.encode('utf-8'))
        msg.AppendPlaceholder(ph)
      else:
        msg.AppendText(part.encode('utf-8'))
class Message(BaseMessage):
  '''A translateable message, optionally carrying a pre-assigned id.'''

  def __init__(self, text='', placeholders=[], description='', meaning='',
               assigned_id=None):
    super(Message, self).__init__(text, placeholders, description, meaning)
    self.assigned_id = assigned_id

  def ToTclibMessage(self):
    '''Returns this message converted to a grit.extern.tclib.Message.'''
    tclib_msg = grit.extern.tclib.Message('utf-8', meaning=self.meaning)
    self.FillTclibBaseMessage(tclib_msg)
    return tclib_msg

  def GetId(self):
    '''Use the assigned id if we have one, else the generated id.'''
    return self.assigned_id if self.assigned_id else super(Message, self).GetId()

  def HasAssignedId(self):
    '''Returns True if this message has an assigned id.'''
    return bool(self.assigned_id)
class Translation(BaseMessage):
  '''A translation.'''

  def __init__(self, text='', id='', placeholders=[], description='', meaning=''):
    super(Translation, self).__init__(text, placeholders, description, meaning)
    self.id = id

  def GetId(self):
    # BUG FIX: the original asserted `id != ''`, which tested the *builtin*
    # id function (always truthy), so an unset id was never caught.
    assert self.id != '', "ID has not been set."
    return self.id

  def SetId(self, id):
    self.id = id

  def ToTclibMessage(self):
    '''Returns this translation converted to a grit.extern.tclib.Message.'''
    msg = grit.extern.tclib.Message(
        'utf-8', id=self.id, meaning=self.meaning)
    self.FillTclibBaseMessage(msg)
    return msg
class Placeholder(grit.extern.tclib.Placeholder):
  '''Modifies constructor to accept a Unicode string.
  '''

  # Placeholder presentation names must match this pattern.
  _NAME_RE = lazy_re.compile('^[A-Za-z0-9_]+$')

  def __init__(self, presentation, original, example):
    '''Creates a new placeholder.

    Args:
      presentation: 'USERNAME'
      original: '%s'
      example: 'Joi'
    '''
    for value in (presentation, original, example):
      assert value != ''
    if not self._NAME_RE.match(presentation):
      raise exception.InvalidPlaceholderName(presentation)
    self.presentation = presentation
    self.original = original
    self.example = example

  def GetPresentation(self):
    return self.presentation

  def GetOriginal(self):
    return self.original

  def GetExample(self):
    return self.example
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset container."""
import os
import gzip
import tarfile
import struct
import warnings
import numpy as np
from . import dataset
from ..utils import download, check_sha1
from ... import nd, image, recordio
class _DownloadedDataset(dataset.Dataset):
    """Base class for MNIST, cifar10, etc.

    Subclasses implement _get_data() to populate self._data / self._label.
    """
    def __init__(self, root, train, transform):
        self._root = os.path.expanduser(root)
        self._train = train
        self._transform = transform
        self._data = None
        self._label = None

        if not os.path.isdir(self._root):
            os.makedirs(self._root)
        self._get_data()

    def __getitem__(self, idx):
        sample, label = self._data[idx], self._label[idx]
        if self._transform is None:
            return sample, label
        return self._transform(sample, label)

    def __len__(self):
        return len(self._label)

    def _get_data(self):
        # Subclasses must download and decode the data here.
        raise NotImplementedError
class MNIST(_DownloadedDataset):
    """MNIST handwritten digits dataset from `http://yann.lecun.com/exdb/mnist`_.

    Each sample is an image (in 3D NDArray) with shape (28, 28, 1).

    Parameters
    ----------
    root : str
        Path to temp folder for storing data.
    train : bool
        Whether to load the training or testing set.
    transform : function
        A user defined callback that transforms each instance. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root='~/.mxnet/datasets/mnist', train=True,
                 transform=None):
        self._base_url = 'https://apache-mxnet.s3.amazonaws.com/gluon/dataset/mnist/'
        self._train_data = ('train-images-idx3-ubyte.gz',
                            '6c95f4b05d2bf285e1bfb0e7960c31bd3b3f8a7d')
        self._train_label = ('train-labels-idx1-ubyte.gz',
                             '2a80914081dc54586dbdf242f9805a6b8d2a15fc')
        self._test_data = ('t10k-images-idx3-ubyte.gz',
                           'c3a25af1f52dad7f726cce8cacb138654b760d48')
        self._test_label = ('t10k-labels-idx1-ubyte.gz',
                            '763e7fa3757d93b0cdec073cef058b2004252c17')
        super(MNIST, self).__init__(root, train, transform)

    def _get_data(self):
        """Download (if needed) and decode the idx-format images and labels."""
        if self._train:
            data, label = self._train_data, self._train_label
        else:
            data, label = self._test_data, self._test_label

        data_file = download(self._base_url + data[0], self._root,
                             sha1_hash=data[1])
        label_file = download(self._base_url + label[0], self._root,
                              sha1_hash=label[1])

        with gzip.open(label_file, 'rb') as fin:
            struct.unpack(">II", fin.read(8))  # skip magic number and count
            # FIX: np.fromstring is deprecated for binary input;
            # np.frombuffer is the documented drop-in replacement.
            label = np.frombuffer(fin.read(), dtype=np.uint8).astype(np.int32)

        with gzip.open(data_file, 'rb') as fin:
            struct.unpack(">IIII", fin.read(16))  # skip idx3 header
            data = np.frombuffer(fin.read(), dtype=np.uint8)
            data = data.reshape(len(label), 28, 28, 1)

        self._data = nd.array(data, dtype=data.dtype)
        self._label = label
class FashionMNIST(MNIST):
    """A dataset of Zalando's article images consisting of fashion products,
    a drop-in replacement of the original MNIST dataset from
    `https://github.com/zalandoresearch/fashion-mnist`_.

    Each sample is an image (in 3D NDArray) with shape (28, 28, 1).

    Parameters
    ----------
    root : str
        Path to temp folder for storing data.
    train : bool
        Whether to load the training or testing set.
    transform : function
        A user defined callback that transforms each instance. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root='~/.mxnet/datasets/fashion-mnist', train=True,
                 transform=None):
        self._base_url = 'https://apache-mxnet.s3.amazonaws.com/gluon/dataset/fashion-mnist/'
        self._train_data = ('train-images-idx3-ubyte.gz',
                            '0cf37b0d40ed5169c6b3aba31069a9770ac9043d')
        self._train_label = ('train-labels-idx1-ubyte.gz',
                             '236021d52f1e40852b06a4c3008d8de8aef1e40b')
        self._test_data = ('t10k-images-idx3-ubyte.gz',
                           '626ed6a7c06dd17c0eec72fa3be1740f146a2863')
        self._test_label = ('t10k-labels-idx1-ubyte.gz',
                            '17f9ab60e7257a1620f4ad76bbbaf857c3920701')
        # Deliberately skip MNIST.__init__ (it would clobber the URLs set
        # above) and initialise via the shared _DownloadedDataset base.
        super(MNIST, self).__init__(root, train, transform)  # pylint: disable=bad-super-call
class CIFAR10(_DownloadedDataset):
    """CIFAR10 image classification dataset from `https://www.cs.toronto.edu/~kriz/cifar.html`_.

    Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
    (DOC FIX: the images are 3-channel RGB -- see _read_batch, which reshapes
    to (N, 3, 32, 32) and transposes to NHWC -- not single-channel.)

    Parameters
    ----------
    root : str
        Path to temp folder for storing data.
    train : bool
        Whether to load the training or testing set.
    transform : function
        A user defined callback that transforms each instance. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root='~/.mxnet/datasets/cifar10', train=True,
                 transform=None):
        self._file_hashes = {'data_batch_1.bin': 'aadd24acce27caa71bf4b10992e9e7b2d74c2540',
                             'data_batch_2.bin': 'c0ba65cce70568cd57b4e03e9ac8d2a5367c1795',
                             'data_batch_3.bin': '1dd00a74ab1d17a6e7d73e185b69dbf31242f295',
                             'data_batch_4.bin': 'aab85764eb3584312d3c7f65fd2fd016e36a258e',
                             'data_batch_5.bin': '26e2849e66a845b7f1e4614ae70f4889ae604628',
                             'test_batch.bin': '67eb016db431130d61cd03c7ad570b013799c88c'}
        super(CIFAR10, self).__init__(root, train, transform)

    def _read_batch(self, filename):
        """Read one CIFAR-10 binary batch; returns (images NHWC uint8, labels int32)."""
        with open(filename, 'rb') as fin:
            # FIX: np.fromstring is deprecated; np.frombuffer is the
            # documented drop-in replacement for bytes input.
            data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3072+1)

        return data[:, 1:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1), \
               data[:, 0].astype(np.int32)

    def _get_data(self):
        """Download/extract the archive if any batch is missing or corrupt,
        then load the requested split."""
        file_paths = [(name, os.path.join(self._root, 'cifar-10-batches-bin/', name))
                      for name in self._file_hashes]
        if any(not os.path.exists(path) or not check_sha1(path, self._file_hashes[name])
               for name, path in file_paths):
            url = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
            filename = download(url, self._root,
                                sha1_hash='e8aa088b9774a44ad217101d2e2569f823d2d491')

            with tarfile.open(filename) as tar:
                tar.extractall(self._root)

        if self._train:
            filename = os.path.join(self._root, 'cifar-10-batches-bin/data_batch_%d.bin')
            data, label = zip(*[self._read_batch(filename%i) for i in range(1, 6)])
            data = np.concatenate(data)
            label = np.concatenate(label)
        else:
            filename = os.path.join(self._root, 'cifar-10-batches-bin/test_batch.bin')
            data, label = self._read_batch(filename)

        self._data = nd.array(data, dtype=data.dtype)
        self._label = label
class ImageRecordDataset(dataset.RecordFileDataset):
    """A dataset wrapping over a RecordIO file containing images.

    Each sample is an image and its corresponding label.

    Parameters
    ----------
    filename : str
        Path to rec file.
    flag : {0, 1}, default 1
        If 0, always convert images to greyscale.
        If 1, always convert images to colored (RGB).
    transform : function
        A user defined callback that transforms each instance. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, filename, flag=1, transform=None):
        super(ImageRecordDataset, self).__init__(filename)
        self._flag = flag
        self._transform = transform

    def __getitem__(self, idx):
        raw_record = super(ImageRecordDataset, self).__getitem__(idx)
        header, img = recordio.unpack(raw_record)
        decoded = image.imdecode(img, self._flag)
        if self._transform is None:
            return decoded, header.label
        return self._transform(decoded, header.label)
class ImageFolderDataset(dataset.Dataset):
    """A dataset for loading image files stored in a folder structure like::

        root/car/0001.jpg
        root/car/xxxa.jpg
        root/car/yyyb.jpg
        root/bus/123.jpg
        root/bus/023.jpg
        root/bus/wwww.jpg

    Parameters
    ----------
    root : str
        Path to root directory.
    flag : {0, 1}, default 1
        If 0, always convert loaded images to greyscale (1 channel).
        If 1, always convert loaded images to colored (3 channels).
    transform : callable
        A function that takes data and label and transforms them::

            transform = lambda data, label: (data.astype(np.float32)/255, label)

    Attributes
    ----------
    synsets : list
        List of class names. `synsets[i]` is the name for the integer label `i`
    items : list of tuples
        List of all images in (filename, label) pairs.
    """
    def __init__(self, root, flag=1, transform=None):
        self._root = os.path.expanduser(root)
        self._flag = flag
        self._transform = transform
        self._exts = ['.jpg', '.jpeg', '.png']
        self._list_images(self._root)

    def _list_images(self, root):
        """Scan root's subfolders, populating self.synsets and self.items."""
        # FIX: renamed from the misspelled `_list_iamges`; an alias is kept
        # below for backward compatibility.
        self.synsets = []
        self.items = []

        for folder in sorted(os.listdir(root)):
            path = os.path.join(root, folder)
            if not os.path.isdir(path):
                warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)
                continue
            label = len(self.synsets)
            self.synsets.append(folder)
            for filename in sorted(os.listdir(path)):
                filename = os.path.join(path, filename)
                ext = os.path.splitext(filename)[1]
                if ext.lower() not in self._exts:
                    warnings.warn('Ignoring %s of type %s. Only support %s'%(
                        filename, ext, ', '.join(self._exts)))
                    continue
                self.items.append((filename, label))

    # Backward-compatible alias for the old misspelled name.
    _list_iamges = _list_images

    def __getitem__(self, idx):
        img = image.imread(self.items[idx][0], self._flag)
        label = self.items[idx][1]
        if self._transform is not None:
            return self._transform(img, label)
        return img, label

    def __len__(self):
        return len(self.items)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tarfile
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
# Public API of this evaluation module.
__all__ = [
    'get_graph_def_from_disk',
    'get_graph_def_from_resource',
    'get_graph_def_from_url_tarball',
    'preprocess_image',
    'run_image_classifier',
    'run_inception',
    'inception_score',
    'classifier_score',
    'frechet_inception_distance',
    'frechet_classifier_distance',
    'INCEPTION_DEFAULT_IMAGE_SIZE',
]
# Location and filename of the frozen InceptionV1 graph used for the
# Inception score and Frechet Inception distance.
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
# Tensor names inside the frozen graph.
INCEPTION_INPUT = 'Mul:0'
INCEPTION_OUTPUT = 'logits:0'
INCEPTION_FINAL_POOL = 'pool_3:0'
# Input resolution the frozen graph expects.
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
  """Convert `images` to a Tensor and validate its shape.

  The tensor must be rank 4 and compatible with
  [batch, image_size, image_size, channels].
  """
  tensor = ops.convert_to_tensor(images)
  tensor.shape.with_rank(4)
  expected = [None, image_size, image_size, None]
  tensor.shape.assert_is_compatible_with(expected)
  return tensor
def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.

  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.

  Also note that this method **only** works for symmetric matrices.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0. For singular values below eps the raw value is
  # kept instead of its square root: both are ~0, so the result is effectively
  # the same while avoiding sqrt's instability near zero.
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
def preprocess_image(
    images, height=INCEPTION_DEFAULT_IMAGE_SIZE,
    width=INCEPTION_DEFAULT_IMAGE_SIZE, scope=None):
  """Prepare a batch of images for evaluation.

  This is the preprocessing portion of the graph from
  http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.

  Note that it expects Tensors in [0, 255]. This function maps pixel values to
  [-1, 1] and resizes to match the InceptionV1 network.

  Args:
    images: 3-D or 4-D Tensor of images. Values are in [0, 255].
    height: Integer. Height of resized output image.
    width: Integer. Width of resized output image.
    scope: Optional scope for name_scope.

  Returns:
    3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
  """
  was_single_image = images.shape.ndims == 3
  with ops.name_scope(scope, 'preprocess', [images, height, width]):
    if not images.dtype.is_floating:
      images = math_ops.to_float(images)
    # Map [0, 255] -> [-1, 1].
    scaled = (images - 128.0) / 128.0
    # resize_bilinear operates on batches; add a batch dim temporarily.
    batched = array_ops.expand_dims(scaled, axis=0) if was_single_image else scaled
    resized = image_ops.resize_bilinear(batched, [height, width])
    return array_ops.squeeze(resized, axis=0) if was_single_image else resized
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
raise ValueError('Input %s must be floating type.', tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
  """Load a serialized GraphDef proto from the file `filename`."""
  with gfile.FastGFile(filename, 'rb') as graph_file:
    serialized = graph_file.read()
  return graph_pb2.GraphDef.FromString(serialized)
def get_graph_def_from_resource(filename):
  """Load a GraphDef proto bundled as a package resource (.par file)."""
  serialized = resource_loader.load_resource(filename)
  return graph_pb2.GraphDef.FromString(serialized)
def get_graph_def_from_url_tarball(url, filename):
  """Download a gzipped tarball from `url` and parse member `filename` as a
  GraphDef proto.

  Progress is reported to stdout while downloading.
  """
  def _progress(count, block_size, total_size):
    percent = float(count * block_size) / float(total_size) * 100.0
    sys.stdout.write('\r>> Downloading %s %.1f%%' % (url, percent))
    sys.stdout.flush()
  # urlretrieve stores the download in a temporary file and returns its path.
  tar_filename, _ = urllib.request.urlretrieve(url, reporthook=_progress)
  with tarfile.open(tar_filename, 'r:gz') as tar:
    proto_str = tar.extractfile(filename).read()
  return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
  # Download and parse the frozen InceptionV1 graph used by `run_inception`.
  return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH)
def run_inception(images,
                  graph_def=None,
                  default_graph_def_fn=_default_graph_def_fn,
                  image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
                  input_tensor=INCEPTION_INPUT,
                  output_tensor=INCEPTION_OUTPUT):
  """Run images through a pretrained Inception classifier.

  Args:
    images: Input tensors. Must be [batch, height, width, channels]. Input shape
      and values must be in [-1, 1], which can be achieved using
      `preprocess_image`.
    graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
      call `default_graph_def_fn` to get GraphDef.
    default_graph_def_fn: A function that returns a GraphDef. Used if
      `graph_def` is `None`. By default, returns a pretrained InceptionV3 graph.
    image_size: Required image width and height. See unit tests for the default
      values.
    input_tensor: Name of input Tensor.
    output_tensor: Name of output Tensor. This function will compute activations
      at the specified layer. Examples include INCEPTION_V3_OUTPUT and
      INCEPTION_V3_FINAL_POOL which would result in this function computing
      the final logits or the penultimate pooling layer.

  Returns:
    Logits (or the requested activations, flattened to rank 2).

  Raises:
    ValueError: If images are not the correct size.
    ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
  """
  images = _validate_images(images, image_size)
  if graph_def is None:
    if default_graph_def_fn is None:
      raise ValueError('If `graph_def` is `None`, must provide '
                       '`default_graph_def_fn`.')
    graph_def = default_graph_def_fn()
  activations = run_image_classifier(images, graph_def, input_tensor,
                                     output_tensor)
  # BUG FIX: `array_ops.rank(activations)` returns a Tensor, so comparing it
  # with `!= 2` to a Python int never inspected the actual rank. Use the
  # statically-known rank instead.
  if activations.shape.ndims != 2:
    activations = layers.flatten(activations)
  return activations
def run_image_classifier(tensor, graph_def, input_tensor,
                         output_tensor, scope='RunClassifier'):
  """Runs a network from a frozen graph.

  Args:
    tensor: An Input tensor.
    graph_def: A GraphDef proto.
    input_tensor: Name of input tensor in graph def.
    output_tensor: Name of output tensor in graph def.
    scope: Name scope for classifier.

  Returns:
    Classifier output. Shape depends on the classifier used, but is often
    [batch, classes].
  """
  # Feed `tensor` in place of the graph's named input and pull out the
  # requested output node.
  outputs = importer.import_graph_def(
      graph_def,
      input_map={input_tensor: tensor},
      return_elements=[output_tensor],
      name=scope)
  return outputs[0]
def classifier_score(images, classifier_fn, num_batches=1):
  """Classifier score for evaluating a conditional generative model.

  This is based on the Inception Score, but for an arbitrary classifier.

  This technique is described in detail in https://arxiv.org/abs/1606.03498. In
  summary, this function calculates

  exp( E[ KL(p(y|x) || p(y)) ] )

  which captures how different the network's classification prediction is from
  the prior distribution over classes.

  Args:
    images: Images to calculate the classifier score for.
    classifier_fn: A function that takes images and produces logits based on a
      classifier.
    num_batches: Number of batches to split `generated_images` in to in order to
      efficiently run them through the classifier network.

  Returns:
    The classifier score. A floating-point scalar of the same dtype as the
    classifier's logits.
  """
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_batches)
  # Compute the classifier splits using the memory-efficient `map_fn`.
  logits = functional_ops.map_fn(
      fn=classifier_fn,
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  logits.shape.assert_has_rank(2)
  # Use maximum precision for best results.
  logits_dtype = logits.dtype
  if logits_dtype != dtypes.float64:
    logits = math_ops.cast(logits, dtypes.float64)
  p = nn_ops.softmax(logits)
  q = math_ops.reduce_mean(p, axis=0)
  kl = _kl_divergence(p, logits, q)
  kl.shape.assert_has_rank(1)
  log_score = math_ops.reduce_mean(kl)
  final_score = math_ops.exp(log_score)
  if logits_dtype != dtypes.float64:
    # BUG FIX: cast back to the caller's dtype. The original cast to float64
    # here, which was a no-op since the score was already float64.
    final_score = math_ops.cast(final_score, logits_dtype)
  return final_score
# Inception score: `classifier_score` evaluated on the frozen InceptionV1
# logits.
inception_score = functools.partial(
    classifier_score,
    classifier_fn=functools.partial(
        run_inception, output_tensor=INCEPTION_OUTPUT))
def trace_sqrt_product(sigma, sigma_v):
  """Find the trace of the positive sqrt of product of covariance matrices.

  `_symmetric_matrix_square_root` only handles symmetric matrices, and the
  product sigma * sigma_v is generally not symmetric even though both factors
  are. Writing A = sqrt(sigma) (symmetric), the identities

    (i)   eigenvalues(M1 M2) = eigenvalues(M2 M1)
    (ii)  eigenvalues(sqrt(M)) = sqrt(eigenvalues(M))
    (iii) trace(M) = sum(eigenvalues(M))

  combine to give

    trace(sqrt(sigma sigma_v)) = trace(sqrt(A sigma_v A))

  and A sigma_v A *is* symmetric, so the symmetric square root applies.

  Args:
    sigma: a square, symmetric, real, positive semi-definite covariance matrix
    sigma_v: same as sigma

  Returns:
    The trace of the positive square root of sigma*sigma_v
  """
  # "A" in the derivation above.
  a = _symmetric_matrix_square_root(sigma)
  # A sigma_v A -- symmetric, so its symmetric square root is well defined.
  a_sigmav_a = math_ops.matmul(a, math_ops.matmul(sigma_v, a))
  return math_ops.trace(_symmetric_matrix_square_root(a_sigmav_a))
def frechet_classifier_distance(real_images,
                                generated_images,
                                classifier_fn,
                                num_batches=1):
  """Classifier distance for evaluating a generative model.

  This is based on the Frechet Inception distance, but for an arbitrary
  classifier.

  This technique is described in detail in https://arxiv.org/abs/1706.08500.
  Given two Gaussian distribution with means m and m_w and covariance matrices
  C and C_w, this function calculates

  |m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))

  which captures how different the distributions of real images and generated
  images (or more accurately, their visual features) are. Note that unlike the
  Inception score, this is a true distance and utilizes information about real
  world images.

  Note that when computed using sample means and sample covariance matrices,
  Frechet distance is biased. It is more biased for small sample sizes. (e.g.
  even if the two distributions are the same, for a small sample size, the
  expected Frechet distance is large). It is important to use the same
  sample size to compute frechet classifier distance when comparing two
  generative models.

  Args:
    real_images: Real images to use to compute Frechet Inception distance.
    generated_images: Generated images to use to compute Frechet Inception
      distance.
    classifier_fn: A function that takes images and produces activations
      based on a classifier.
    num_batches: Number of batches to split images in to in order to
      efficiently run them through the classifier network.

  Returns:
    The Frechet Inception distance. A floating-point scalar.
  """
  real_images_list = array_ops.split(
      real_images, num_or_size_splits=num_batches)
  generated_images_list = array_ops.split(
      generated_images, num_or_size_splits=num_batches)
  # Stack real and generated batches together so a single map_fn pass
  # computes both sets of activations.
  imgs = array_ops.stack(real_images_list + generated_images_list)
  # Compute the activations using the memory-efficient `map_fn`.
  activations = functional_ops.map_fn(
      fn=classifier_fn,
      elems=imgs,
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  # Split the activations by the real and generated images.
  real_a, gen_a = array_ops.split(activations, [num_batches, num_batches], 0)
  # Ensure the activations have the right shapes ([examples, features]).
  real_a = array_ops.concat(array_ops.unstack(real_a), 0)
  gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
  real_a.shape.assert_has_rank(2)
  gen_a.shape.assert_has_rank(2)
  # Compute mean and covariance matrices of activations.
  m = math_ops.reduce_mean(real_a, 0)
  m_v = math_ops.reduce_mean(gen_a, 0)
  num_examples = math_ops.to_float(array_ops.shape(real_a)[0])
  # Unbiased sample covariance:
  # sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
  sigma = math_ops.matmul(
      real_a - m, real_a - m, transpose_a=True) / (num_examples - 1)
  sigma_v = math_ops.matmul(
      gen_a - m_v, gen_a - m_v, transpose_a=True) / (num_examples - 1)
  # Find the Tr(sqrt(sigma sigma_v)) component of FID
  sqrt_trace_component = trace_sqrt_product(sigma, sigma_v)
  # Compute the two components of FID.
  # First the covariance component.
  # Here, note that trace(A + B) = trace(A) + trace(B)
  trace = math_ops.trace(sigma + sigma_v) - 2.0 * sqrt_trace_component
  # Next the distance between means.
  mean = math_ops.square(linalg_ops.norm(m - m_v))  # This uses the L2 norm.
  fid = trace + mean
  return fid
# FID: `frechet_classifier_distance` evaluated on InceptionV1's final pooling
# layer activations.
frechet_inception_distance = functools.partial(
    frechet_classifier_distance,
    classifier_fn=functools.partial(
        run_inception, output_tensor=INCEPTION_FINAL_POOL))
| |
# Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from webob import exc
from neutron.common import constants
from neutron.common import exceptions
from neutron.i18n import _LW
from neutron.openstack.common import log as logging
# Module-level logger.
LOG = logging.getLogger(__name__)
def get_filters(request, attr_info, skips=None):
    """Extracts the filters from the request string.

    Returns a dict of lists for the filters:
    check=a&check=b&name=Bob&
    becomes:
    {'check': [u'a', u'b'], 'name': [u'Bob']}

    :param request: webob request whose GET parameters are parsed.
    :param attr_info: attribute map; a per-key 'convert_list_to' or
        'convert_to' entry is applied to the raw string values.
    :param skips: query keys to ignore entirely (default: none).
    """
    # NOTE: `skips` used to default to a mutable `[]`; a None default avoids
    # the shared-mutable-default pitfall while keeping the same semantics.
    skips = skips or []
    res = {}
    for key, values in request.GET.dict_of_lists().iteritems():
        if key in skips:
            continue
        # Drop empty values (e.g. 'name=' with no value).
        values = [v for v in values if v]
        key_attr_info = attr_info.get(key, {})
        if 'convert_list_to' in key_attr_info:
            values = key_attr_info['convert_list_to'](values)
        elif 'convert_to' in key_attr_info:
            convert_to = key_attr_info['convert_to']
            values = [convert_to(v) for v in values]
        if values:
            res[key] = values
    return res
def get_previous_link(request, items, id_key):
    """Build the 'previous' page URL for the current request.

    The marker is the id of the first item on the current page, and
    page_reverse is always set so the server pages backwards from it.
    """
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        params['marker'] = items[0][id_key]
    params['page_reverse'] = True
    return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_next_link(request, items, id_key):
    """Build the 'next' page URL for the current request.

    The marker is the id of the last item on the current page; any
    page_reverse flag is removed so the server pages forward from it.
    """
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        params['marker'] = items[-1][id_key]
    params.pop('page_reverse', None)
    return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_limit_and_marker(request):
    """Return limit, marker tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If limit == 0, pagination is not
                    needed and (None, None) is returned.
    """
    max_limit = _get_pagination_max_limit()
    limit = _get_limit_param(request, max_limit)
    if max_limit > 0:
        # Clamp to the configured maximum; a client limit of 0 means
        # "use the maximum".
        limit = min(max_limit, limit) or max_limit
    if not limit:
        return None, None
    marker = request.GET.get('marker', None)
    return limit, marker
def _get_pagination_max_limit():
    """Read pagination_max_limit from config; -1 means unlimited.

    Invalid values (non-integer or 0) are logged and treated as unlimited.
    """
    configured = cfg.CONF.pagination_max_limit
    if configured.lower() == constants.PAGINATION_INFINITE:
        return -1
    try:
        max_limit = int(configured)
        if max_limit == 0:
            raise ValueError()
        return max_limit
    except ValueError:
        LOG.warn(_LW("Invalid value for pagination_max_limit: %s. It "
                     "should be an integer greater to 0"),
                 cfg.CONF.pagination_max_limit)
    return -1
def _get_limit_param(request, max_limit):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET.get('limit', 0))
if limit >= 0:
return limit
except ValueError:
pass
msg = _("Limit must be an integer 0 or greater and not '%d'")
raise exceptions.BadRequest(resource='limit', msg=msg)
def list_args(request, arg):
    """Return the non-empty values of query parameter `arg` from request."""
    return [value for value in request.GET.getall(arg) if value]
def get_sorts(request, attr_info):
    """Extract sort_key and sort_dir from request.

    Return as: [(key1, ascending_bool1), (key2, ascending_bool2)]

    Raises HTTPBadRequest when keys and directions are mismatched in number,
    when a key is not a known attribute, or when a direction is invalid.
    """
    sort_keys = list_args(request, "sort_key")
    sort_dirs = list_args(request, "sort_dir")
    if len(sort_keys) != len(sort_dirs):
        msg = _("The number of sort_keys and sort_dirs must be same")
        raise exc.HTTPBadRequest(explanation=msg)
    absent_keys = [key for key in sort_keys if key not in attr_info]
    if absent_keys:
        msg = _("%s is invalid attribute for sort_keys") % absent_keys
        raise exc.HTTPBadRequest(explanation=msg)
    valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
    invalid_dirs = [d for d in sort_dirs if d not in valid_dirs]
    if invalid_dirs:
        msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
                 "valid value is '%(asc)s' and '%(desc)s'") %
               {'invalid_dirs': invalid_dirs,
                'asc': constants.SORT_DIRECTION_ASC,
                'desc': constants.SORT_DIRECTION_DESC})
        raise exc.HTTPBadRequest(explanation=msg)
    ascending = [d == constants.SORT_DIRECTION_ASC for d in sort_dirs]
    return zip(sort_keys, ascending)
def get_page_reverse(request):
    """Return True when the request asks for reverse-order paging."""
    return request.GET.get('page_reverse', 'False').lower() == "true"
def get_pagination_links(request, items, limit,
                         marker, page_reverse, key="id"):
    """Build the rel=next/previous link dicts for a paginated response.

    A 'next' link is emitted when the page is full or we paged backwards;
    a 'previous' link when the page is full or we paged forwards.
    """
    key = key or 'id'
    links = []
    if not limit:
        return links
    page_full = len(items) >= limit
    if page_full or page_reverse:
        links.append({"rel": "next",
                      "href": get_next_link(request, items, key)})
    if page_full or not page_reverse:
        links.append({"rel": "previous",
                      "href": get_previous_link(request, items, key)})
    return links
class PaginationHelper(object):
    """Base pagination strategy: a no-op that returns items untouched."""

    def __init__(self, request, primary_key='id'):
        self.request = request
        self.primary_key = primary_key

    def update_fields(self, original_fields, fields_to_add):
        """No extra fields are needed when pagination is off."""
        pass

    def update_args(self, args):
        """No plugin-call arguments are added."""
        pass

    def paginate(self, items):
        """Return `items` unchanged."""
        return items

    def get_links(self, items):
        """No pagination links are produced."""
        return {}
class PaginationEmulatedHelper(PaginationHelper):
    """Emulates pagination in memory for plugins without native support."""

    def __init__(self, request, primary_key='id'):
        super(PaginationEmulatedHelper, self).__init__(request, primary_key)
        # limit/marker/page_reverse are parsed once from the request.
        self.limit, self.marker = get_limit_and_marker(request)
        self.page_reverse = get_page_reverse(request)

    def update_fields(self, original_fields, fields_to_add):
        # Ensure the primary key is fetched so paginate() can locate the
        # marker item; record it so it can be stripped from the reply.
        if not original_fields:
            return
        if self.primary_key not in original_fields:
            original_fields.append(self.primary_key)
            fields_to_add.append(self.primary_key)

    def paginate(self, items):
        """Slice `items` to the page after (or before) the marker item."""
        if not self.limit:
            return items
        i = -1
        if self.marker:
            # Find the index of the marker item.
            for item in items:
                i = i + 1
                if item[self.primary_key] == self.marker:
                    break
        # NOTE(review): if the marker is not found, i ends at len(items) - 1,
        # and for page_reverse with i < limit the slice below can wrap to an
        # empty page — presumably markers are always valid ids here; verify
        # against callers.
        if self.page_reverse:
            return items[i - self.limit:i]
        return items[i + 1:i + self.limit + 1]

    def get_links(self, items):
        return get_pagination_links(
            self.request, items, self.limit, self.marker,
            self.page_reverse, self.primary_key)
class PaginationNativeHelper(PaginationEmulatedHelper):
    """Pagination helper for plugins that paginate natively."""

    def update_args(self, args):
        """Inject limit/marker/page_reverse (plus a primary-key sort) into
        the plugin call arguments."""
        existing_sort_keys = dict(args.get('sorts', [])).keys()
        if self.primary_key not in existing_sort_keys:
            args.setdefault('sorts', []).append((self.primary_key, True))
        args.update({'limit': self.limit, 'marker': self.marker,
                     'page_reverse': self.page_reverse})

    def paginate(self, items):
        """Items arrive already paginated by the plugin; return unchanged."""
        return items
class NoPaginationHelper(PaginationHelper):
    """Pagination disabled: inherits the no-op behaviour unchanged."""
    pass
class SortingHelper(object):
    """Base sorting strategy: a no-op that returns items untouched."""

    def __init__(self, request, attr_info):
        pass

    def update_args(self, args):
        """No plugin-call arguments are added."""
        pass

    def update_fields(self, original_fields, fields_to_add):
        """No extra fields are needed when sorting is off."""
        pass

    def sort(self, items):
        """Return `items` unchanged."""
        return items
class SortingEmulatedHelper(SortingHelper):
    """Sorts results in memory for plugins without native sort support."""

    def __init__(self, request, attr_info):
        super(SortingEmulatedHelper, self).__init__(request, attr_info)
        # [(key, ascending_bool), ...] validated against attr_info.
        self.sort_dict = get_sorts(request, attr_info)

    def update_fields(self, original_fields, fields_to_add):
        # Ensure every sort key is fetched; record added ones so they can be
        # stripped from the response later.
        if not original_fields:
            return
        for key in dict(self.sort_dict).keys():
            if key not in original_fields:
                original_fields.append(key)
                fields_to_add.append(key)

    def sort(self, items):
        """Sort `items` by each (key, ascending) pair, in priority order."""
        # NOTE: cmp() and sorted(cmp=...) exist only on Python 2.
        def cmp_func(obj1, obj2):
            for key, direction in self.sort_dict:
                ret = cmp(obj1[key], obj2[key])
                if ret:
                    # Flip the comparison for descending keys.
                    return ret * (1 if direction else -1)
            return 0
        return sorted(items, cmp=cmp_func)
class SortingNativeHelper(SortingHelper):
    """Sorting helper for plugins that sort natively."""

    def __init__(self, request, attr_info):
        # Consistency fix: call the base initializer like
        # SortingEmulatedHelper does (it is currently a no-op, but keeps the
        # hierarchy uniform if the base ever gains state).
        super(SortingNativeHelper, self).__init__(request, attr_info)
        # [(key, ascending_bool), ...] validated against attr_info.
        self.sort_dict = get_sorts(request, attr_info)

    def update_args(self, args):
        """Pass the validated sort spec through to the plugin call."""
        args['sorts'] = self.sort_dict
class NoSortingHelper(SortingHelper):
    """Sorting disabled: inherits the no-op behaviour unchanged."""
    pass
class NeutronController(object):
    """Base controller class for Neutron API."""
    # _resource_name will be redefined in sub concrete controller
    _resource_name = None

    def __init__(self, plugin):
        super(NeutronController, self).__init__()
        self._plugin = plugin

    def _prepare_request_body(self, body, params):
        """Verifies required parameters are in request body.

        Sets default value for missing optional parameters.
        Body argument must be the deserialized body.
        """
        if body is None:
            # Initialize empty resource for setting default value
            body = {self._resource_name: {}}
        try:
            data = body[self._resource_name]
        except KeyError:
            # raise if _resource_name is not in req body.
            raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
                                     self._resource_name)
        for param in params:
            param_name = param['param-name']
            param_value = data.get(param_name)
            # A required parameter that is absent is a client error.
            if param_value is None and param['required']:
                msg = (_("Failed to parse request. "
                         "Parameter '%s' not specified") % param_name)
                LOG.error(msg)
                raise exc.HTTPBadRequest(msg)
            data[param_name] = param_value or param.get('default-value')
        return body
| |
"""
The central part of the application.
"""
from Geometry import *
from GTKInterface import *
from GTKWindow import *
from GestureDispatch import *
from ActionManager import *
from Scheduler import *
from Label import *
from Transition import *
from Color import *
import gobject
import time
import warnings
# Enable GObject thread support before any timers or the GTK main loop start.
gobject.threads_init()
class Director(object):
"""
The central point of the application. It handles the main run loop, L{Scene} transitions, and propagating redraw events to the Scene's children. It is also the owner of the L{GestureDispatch}, L{ActionManager}, L{Scheduler}, and the L{GTKInterface}.
Normally only one Director should exist per application.
"""
def __init__(self, windowSize=None):
"""
Initialization method. If windowSize is not defined, the default is C{Size(800,600)}.
@param windowSize: The size of the window.
@type windowSize: L{Size}
"""
if windowSize is None:
self._windowSize = Size(800, 600)
else:
self._windowSize = windowSize
self._gtkInterface = None # set this up in setWindow()
self._gestureDispatch = GestureDispatch()
self._scheduler = Scheduler()
self._actionManager = ActionManager(self._scheduler)
self._isShowingFPS = False
self._runningScene = None
self._nextScene = None
self._scenesStack = []
self._oldFramerate = 1.0/60.0
self._framerate = 1.0/60.0
self._frames = 0
self._isPaused = False
self._dt = 0
self._isNextDeltaTimeZero = False
self._lastTimeStamp = 0
self._accumDt = 0
self._frames = 0
self._displayedFramerate = 0
self._isRecording = False
self._backgroundColor = BlackColor()
#{ Accessor methods.
    def isShowingFPS(self):
        """
        Whether or not the Director is displaying frames per second. Default is C{False}.

        @return: Whether or not the Director is displaying frames per second (FPS).
        @rtype: L{bool}
        """
        return self._isShowingFPS
    def setShowingFPS(self, isShowingFPS):
        """
        Sets whether or not the Director displays frames per second.

        @param isShowingFPS: Whether or not the Director displays frames per second.
        @type isShowingFPS: C{bool}
        """
        self._isShowingFPS = isShowingFPS

    # Property wrapper so callers can use `director.showingFPS` directly.
    showingFPS = property(isShowingFPS, setShowingFPS, doc="Whether or not to display the framerate.")
    def getSize(self):
        """
        Returns the size of the main application window (as reported by the GTKInterface).

        @return: Size of the main application window.
        @rtype: L{Size}
        """
        return self._gtkInterface.getSize()

    # Property wrapper for attribute-style access.
    size = property(getSize, doc="The size of the main application window.")
    def getGestureDispatch(self):
        """
        Returns the L{GestureDispatch} for the application, which sends out notifications of L{GestureEvent}C{s} to L{GestureListener}C{s}.

        @return: The dispatch.
        @rtype: L{GestureDispatch}
        """
        return self._gestureDispatch

    # Property wrapper for attribute-style access.
    gestureDispatch = property(getGestureDispatch, doc="The application's GestureDispatch.")
    def getActionManager(self):
        """
        Returns the L{ActionManager} for the application, which manages the L{Action}C{s}.

        @return: The manager.
        @rtype: L{ActionManager}
        """
        return self._actionManager

    # Property wrapper for attribute-style access.
    actionManager = property(getActionManager, doc="The application's ActionManager.")
    def getScheduler(self):
        """
        Returns the L{Scheduler} for the application, which manages the L{Timer}C{s}.

        @return: The scheduler.
        @rtype: L{Scheduler}
        """
        return self._scheduler

    # Property wrapper for attribute-style access.
    scheduler = property(getScheduler, doc="The application's Scheduler.")
    def getBackgroundColor(self):
        """
        Returns the background color of the application.

        @return: The background color.
        @rtype: L{Color}
        """
        return self._backgroundColor
def setBackgroundColor(self, color):
"""
Sets the color for the background of the application.
@param color: The color of the background.
@type color: L{Color}
"""
if self._gtkInterface is not None:
self._gtkInterface.setBackgroundColor(color)
self._backgroundColor = color
backgroundColor = property(getBackgroundColor, setBackgroundColor, doc="The application's background color.")
def setWindow(self, window=None):
"""
Sets the main window for the application. If window is C{None}, a L{GTKWindow} is generated automatically.
@param window: The main application window.
@type window: L{AbstractWindow} (or C{None})
"""
if self._gtkInterface == None:
self._gtkInterface = GTKInterface(self, window, self._windowSize) # if window is None, defaults to GTKWindow()
self._gtkInterface.setBackgroundColor(self._backgroundColor)
else:
warnings.warn("Window is already set.")
def getGTKLayout(self):
"""
Returns the main gtk.Layout to which cocosCairo draws. Use this method to perform PyGTK actions such as attaching C{gtk.Widgets} to the application. If the GTKInterface has not yet been initialized, then it will return C{None}.
@return: The main gtk.Layout.
@rtype: L{GTKLayout} (or C{None})
"""
if self._gtkInterface is not None:
return self._gtkInterface.getGTKLayout()
else:
return None
gtkLayout = property(getGTKLayout, doc="The application's main gtk.Layout.")
#}
#{ Scene methods.
    def getRunningScene(self):
        """
        Returns the L{Scene} which is currently running or C{None} if there is not a scene currently running.

        @return: The running Scene.
        @rtype: L{Scene} (or C{None})
        """
        return self._runningScene

    # Property wrapper for attribute-style access.
    runningScene = property(getRunningScene, doc="The currently-running scene.")
    def runWithScene(self, scene):
        """
        Starts the application with a L{Scene}. This starts the main run loop.

        @param scene: The opening scene.
        @type scene: L{Scene}
        """
        # Guard: this entry point is only for the very first scene.
        if self._runningScene:
            warnings.warn("Scene is already running. Use replaceScene or pushScene instead.")
            return
        # Lazily create the window if the caller never called setWindow().
        if self._gtkInterface is None:
            self.setWindow()
        self.pushScene(scene)
        self._startAnimation()
def replaceScene(self, scene):
"""
Replaces the currently running L{Scene} with a new Scene.
@param scene: The new Scene.
@type scene: L{Scene}
"""
scene.setRect(Rect(Point(0,0), self._gtkInterface.getSize()))
scene._setDirector(self)
index = len(self._scenesStack)-1
self._scenesStack[index] = scene
self._nextScene = scene
def pushScene(self, scene):
"""
Pushes a new L{Scene} onto the stack of Scenes.
@param scene: The new Scene.
@type scene: L{Scene}
"""
scene.setRect(Rect(Point(0,0), self._gtkInterface.getSize()))
scene._setDirector(self)
self._scenesStack.append(scene)
self._nextScene = scene
def popScene(self):
"""
Pops the most recently-pushed L{Scene} off the stack of Scenes.
"""
scene = self._scenesStack.pop()
count = len(self._scenesStack)
if count == 0:
self.end()
else:
self._nextScene = self._scenesStack[count-1]
#}
#{ Run methods.
    def end(self):
        """
        Ends all animations, unregisters all L{GestureDispatch} listeners, and clears out the stack of L{Scene}C{s}. This does not end the program by itself, however.
        """
        # Let the running scene tear down before everything is dropped.
        self._runningScene.onExit()
        self._runningScene.cleanup()
        self._scenesStack = []
        self._gestureDispatch.removeAllListeners()
        # Stops the main loop (see _mainLoop's return value).
        self._stopAnimation()
def _setNextScene(self):
"""
Private method which handles setting the next L{Scene}. This should not normally be called manually.
"""
isTransitionRunning = isinstance(self._runningScene, AbstractTransition)
isTransitionNext = isinstance(self._nextScene, AbstractTransition)
if isTransitionNext is not True and self._runningScene is not None:
self._runningScene.onExit()
self._runningScene = self._nextScene
self._nextScene = None
if isTransitionRunning is not True:
self._runningScene.onEnter()
self._runningScene.onEnterFromFinishedTransition()
    def getFramerate(self):
        """
        Returns the animation interval in seconds per frame. Default is C{1.0/60.0} (i.e. 60 FPS; the original docstring's claim of 30.0 FPS did not match the initializer).

        @return: Animation interval.
        @rtype: C{float}
        """
        return self._framerate
    def setFramerate(self, framerate):
        """
        Sets the animation interval, that is, the frames per second for the application. Note that this must be set before L{runWithScene} is called, otherwise this method will have no effect.

        @param framerate: The animation interval (seconds per frame).
        @type framerate: C{float}
        """
        self._framerate = framerate

    # Property wrapper for attribute-style access.
    framerate = property(getFramerate, setFramerate, doc="The application's framerate.")
def pause(self):
    """
    Pauses the application.  The previous framerate is remembered so
    that L{resume} can restore it.
    """
    if not self._isPaused:
        # Remember the current rate, then throttle the loop.
        self._oldFramerate = self._framerate
        self.setFramerate(0.25)
        self._isPaused = True
def resume(self):
    """
    Resumes the application, restoring the framerate saved by L{pause}
    and zeroing the frame delta so the first resumed frame does not see
    the whole paused duration as elapsed time.
    """
    if self._isPaused:
        self.setFramerate(self._oldFramerate)
        self._isPaused = False
        self._dt = 0
#}
#{ Private methods.
def _startAnimation(self):
    """
    Private method that calls L{_preMainLoop} to begin the main loop.
    """
    self._isRunning = True
    # Register the timeout callback first, then hand control to GTK.
    self._preMainLoop()
    self._gtkInterface.start()
def _preMainLoop(self):
    """
    Private method that registers L{_mainLoop} with the gobject timeout
    machinery so it is invoked repeatedly at the configured interval.
    """
    # gobject expects the interval in integer milliseconds.
    gobject.timeout_add(int(self._framerate * 1000), self._mainLoop)
def _mainLoop(self):
    """
    Private method which is called repeatedly to redraw the L{Node}C{s} and to update the L{Scheduler} with the time that has passed since the last loop.
    @return: C{True} to keep the gobject timeout alive, C{False} to cancel it.
    """
    # NOTE(review): the original indentation was lost; this nesting
    # (tick guarded by pause, scene-switch/redraw every pass) follows
    # the conventional director main loop -- confirm against upstream.
    self._calculateDeltaTime()
    if not self._isPaused:
        if not self._isRecording:
            self._scheduler.tick(self._dt)
        else:
            # While recording, advance by a fixed step so captured
            # frames play back at a constant rate.
            self._scheduler.tick(self._framerate)
    if self._nextScene is not None:
        self._setNextScene()
    self._gtkInterface.redraw() # This is not guaranteed to redraw within the same loop iteration as PyGTK accumulates redraw events before dispatching.
    if self._isShowingFPS is True:
        self._showFPS()
    # Returning False removes the gobject timeout and stops the loop.
    if self._isRunning:
        return True
    else:
        return False
def _calculateDeltaTime(self):
    """
    Private method which calculates how much time has elapsed since the last loop iteration. This method should generally not be called manually.
    """
    timestamp = time.time() # now
    if self._isNextDeltaTimeZero:
        # One-shot flag (set e.g. after a resume) to discard the gap.
        self._dt = 0
        self._isNextDeltaTimeZero = False
    #elif self._lastTimeStamp == 0.0:
    #    self._dt = 0
    else:
        self._dt = timestamp - self._lastTimeStamp
    self._lastTimeStamp = timestamp
def _showFPS(self):
    # Accumulate frame count and elapsed time; refresh the displayed
    # figure roughly every 0.1 s.
    self._frames += 1
    self._accumDt += self._dt
    if (self._accumDt > 0.1):
        self._displayedFramerate = self._frames/self._accumDt
        self._frames = 0
        self._accumDt = 0
        # NOTE(review): indentation was lost; the label update is
        # placed inside the refresh branch -- confirm against upstream.
        string = "%.1f" % self._displayedFramerate
        self._gtkInterface._layout.setFramerate(string)
        #self._fpsLabel.setText(string)
def _stopAnimation(self):
    """
    Private method which stops the main loop.
    """
    # _mainLoop returns False on its next pass once this flag is
    # cleared, which unregisters the gobject timeout.
    self._isRunning = False
    self.stopRecording()
#}
#{ Recording methods.
def takeScreenshot(self, imagePath):
    """
    Takes a screenshot of the application. Note that PyGTK widgets will not be rendered to the image.
    @param imagePath: The name of the file to be saved.
    @type imagePath: C{string}
    """
    # Delegates straight to the GTK layout widget.
    self._gtkInterface._layout.takeScreenshot(imagePath)
def startRecording(self, videoPath):
    """
    Begins saving a sequence of image stills to be rendered to video.
    @param videoPath: The location where the video (and temporary image files) will be saved.
    @type videoPath: C{string}
    """
    # While this flag is set, _mainLoop ticks the scheduler by a fixed
    # step (self._framerate) instead of wall-clock time.
    self._isRecording = True
    self._gtkInterface._layout.startRecording(videoPath)
def stopRecording(self):
    """
    Stops recording and, if FFmpeg is available, will automatically render the video. If there is already a movie file with the same name as the one given in L{startRecording}, that movie file will be deleted.
    """
    self._isRecording = False
    self._gtkInterface._layout.stopRecording()
#}
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
class SchemaConstructor(object):
    """Builds Cerberus-style validation schemas for CFDI documents.

    Each ``get_*_schema`` classmethod returns the schema dict for one XML
    node, keyed internally by CFDI version (only '3.2' is supported).

    NOTE(review): get_cfdi_version falls back to the class attribute
    ``cfdi_version``, which only exists after get_schema has been called
    with an explicit version; calling the other constructors with
    ``None`` before that raises AttributeError -- confirm intended use.
    """

    @classmethod
    def get_Ubicacion_schema(cls, cfdi_version):
        """Return the schema for a generic address (Ubicacion) node.

        A fresh dict is built on every call, so callers may mutate the
        result (see get_UbicacionFiscal_schema).
        """
        cfdi_version = cls.get_cfdi_version(cfdi_version)
        # All plain address fields share the same rule; only 'pais' is
        # mandatory for a generic address.
        optional_fields = (
            'calle', 'noExterior', 'noInterior', 'colonia', 'localidad',
            'referencia', 'municipio', 'estado', 'codigoPostal',
        )
        schema_3_2 = {f: {'type': 'string', 'minlength': 1}
                      for f in optional_fields}
        schema_3_2['pais'] = {
            'type': 'string',
            'minlength': 1,
            'required': True,
        }
        _versions = {'3.2': schema_3_2}
        return cls.get_schema_by_version(_versions, cfdi_version)

    @classmethod
    def get_UbicacionFiscal_schema(cls, cfdi_version):
        """Return the schema for a fiscal address (UbicacionFiscal) node.

        Same as the generic address, but several fields become required.
        """
        cfdi_version = cls.get_cfdi_version(cfdi_version)
        _versions = {}
        version_3_2 = cls.get_Ubicacion_schema(cfdi_version)
        for field in ('calle', 'municipio', 'estado', 'pais', 'codigoPostal'):
            version_3_2[field]['required'] = True
        _versions['3.2'] = version_3_2
        return cls.get_schema_by_version(_versions, cfdi_version)

    @classmethod
    def get_InformacionAduanera_schema(cls, cfdi_version):
        """Return the schema for a customs-information node."""
        cfdi_version = cls.get_cfdi_version(cfdi_version)
        _versions = {
            '3.2': {
                'numero': {'type': 'string', 'minlength': 1,
                           'required': True},
                'fecha': {'type': 'iso8601_date', 'required': True},
                'aduana': {'type': 'string', 'minlength': 1},
            }
        }
        return cls.get_schema_by_version(_versions, cfdi_version)

    @classmethod
    def get_Comprobante_schema(cls, cfdi_version=None):
        """Return the schema for the top-level Comprobante node.

        Embeds the address, fiscal-address and customs sub-schemas.
        """
        cfdi_version = cls.get_cfdi_version(cfdi_version)
        _versions = {
            '3.2': {
                'sello': {'type': 'string', 'default': ''},
                'certificado': {'type': 'string', 'default': ''},
                'noCertificado': {'type': 'string', 'default': ''},
                'version': {'type': 'string', 'default': '3.2',
                            'allowed': ['3.2', ]},
                'serie': {'type': 'alphanumeric', 'minlength': 1,
                          'maxlength': 25, 'coerce': str},
                'folio': {'type': 'numeric'},
                'fecha': {'type': 'iso8601_datetime', 'required': True},
                'formaDePago': {'type': 'string', 'required': True},
                'condicionesDePago': {'type': 'string', 'minlength': 1},
                'subTotal': {'type': 'importe', 'coerce': float,
                             'required': True},
                'descuento': {'type': 'importe', 'coerce': float},
                'motivoDescuento': {'type': 'string', 'minlength': 1},
                # BUG FIX: 'TipoCambio' and 'Moneda' appeared twice in the
                # original dict literal with identical definitions (the
                # second silently overrode the first); one copy is kept.
                'TipoCambio': {'type': 'string'},
                'Moneda': {'type': 'string'},
                'total': {'type': 'importe', 'required': True,
                          'coerce': float},
                'tipoDeComprobante': {
                    'type': 'string',
                    'allowed': ['ingreso', 'egreso', 'traslado'],
                    'required': True,
                },
                'metodoDePago': {'type': 'string', 'minlength': 1,
                                 'required': True},
                'LugarExpedicion': {'type': 'string', 'minlength': 1,
                                    'required': True},
                'NumCtaPago': {'type': 'numeric', 'minlength': 4},
                'FolioFiscalOrig': {'type': 'string'},
                'SerieFolioFiscalOrig': {'type': 'string'},
                'FechaFolioFiscalOrig': {'type': 'iso8601_datetime'},
                'MontoFolioFiscalOrig': {'type': 'importe',
                                         'coerce': float},
                'Emisor': {
                    'type': 'dict', 'required': True,
                    'schema': {
                        'DomicilioFiscal': {
                            'type': 'dict',
                            'schema': cls.get_UbicacionFiscal_schema(
                                cfdi_version),
                        },
                        'ExpedidoEn': {
                            'type': 'dict',
                            'required': True,
                            'schema': cls.get_Ubicacion_schema(cfdi_version),
                        },
                        'RegimenFiscal': {
                            'type': 'list',
                            'minlength': 1,
                            'required': True,
                            'coerce': 'regimen',
                            'schema': {
                                'type': 'dict',
                                'schema': {
                                    '_tag': {
                                        'type': 'string',
                                        'allowed': ['RegimenFiscal', ],
                                        'default': 'RegimenFiscal',
                                    },
                                    'Regimen': {
                                        'type': 'string',
                                        'minlength': 1,
                                        'required': True,
                                    },
                                },
                            },
                        },
                        'rfc': {'type': 'rfc', 'required': True},
                        'nombre': {'type': 'string', 'minlength': 1,
                                   'required': True},
                    },
                },
                'Receptor': {
                    'type': 'dict', 'required': True,
                    'schema': {
                        'Domicilio': {
                            'type': 'dict',
                            'schema': cls.get_Ubicacion_schema(cfdi_version),
                        },
                        'rfc': {'type': 'rfc', 'required': True},
                        'nombre': {'type': 'string', 'minlength': 1,
                                   'required': True},
                    },
                },
                'Conceptos': {
                    'type': 'list',
                    'required': True,
                    'minlength': 1,
                    'schema': {
                        'type': 'dict',
                        'schema': {
                            '_tag': {
                                'type': 'string',
                                'allowed': ['Concepto', ],
                                'default': 'Concepto',
                            },
                            'cantidad': {'type': 'number', 'required': True,
                                         'coerce': float},
                            'unidad': {'type': 'string', 'minlength': 1,
                                       'required': True},
                            'noIdentificacion': {'type': 'string',
                                                 'minlength': 1},
                            'descripcion': {'type': 'string', 'minlength': 1,
                                            'required': True},
                            'valorUnitario': {'type': 'importe',
                                              'required': True,
                                              'coerce': float},
                            'importe': {'type': 'importe', 'required': True,
                                        'coerce': float},
                            'InformacionAduanera': {
                                'type': 'dict',
                                'schema': cls.get_InformacionAduanera_schema(
                                    cfdi_version),
                            },
                            'CuentaPredial': {
                                'type': 'dict',
                                'schema': {
                                    'numero': {
                                        'type': 'string',
                                        'minlength': 1,
                                        'required': True,
                                        'coerce': str,
                                    },
                                },
                            },
                            'ComplementoConcepto': {'type': 'dict'},
                            # TODO: 'Parte': {}
                        },
                    },
                },
                'Impuestos': {
                    'type': 'dict',
                    'required': True,
                    'default': {},
                    'schema': {
                        'totalImpuestosRetenidos': {'type': 'importe',
                                                    'coerce': float},
                        'totalImpuestosTrasladados': {'type': 'importe',
                                                      'coerce': float},
                        'Retenciones': {
                            'type': 'list',
                            'minlength': 1,
                            'schema': {
                                'type': 'dict',
                                'schema': {
                                    '_tag': {
                                        'type': 'string',
                                        'allowed': ['Retencion', ],
                                        'default': 'Retencion',
                                    },
                                    'impuesto': {
                                        'type': 'string',
                                        'allowed': ['IVA', 'ISR'],
                                        'required': True,
                                    },
                                    'importe': {
                                        'type': 'importe',
                                        'required': True,
                                        'coerce': float,
                                    },
                                },
                            },
                        },
                        'Traslados': {
                            'type': 'list',
                            'minlength': 1,
                            'schema': {
                                'type': 'dict',
                                'schema': {
                                    '_tag': {
                                        'type': 'string',
                                        'allowed': ['Traslado', ],
                                        'default': 'Traslado',
                                    },
                                    'impuesto': {
                                        'type': 'string',
                                        'allowed': ['IVA', 'IEPS'],
                                        'required': True,
                                    },
                                    'importe': {
                                        'type': 'importe',
                                        'required': True,
                                        'coerce': float,
                                    },
                                    'tasa': {
                                        'type': 'importe',
                                        'required': True,
                                        'coerce': float,
                                    },
                                },
                            },
                        },
                    },
                },
                'Complemento': {'type': 'dict'},
            }
        }
        return cls.get_schema_by_version(_versions, cfdi_version)

    @classmethod
    def get_schema(cls, cfdi_version):
        """Return the full document schema and remember the version.

        Side effect: stores ``cfdi_version`` on the class so later calls
        may omit it (see get_cfdi_version).
        """
        cls.cfdi_version = cfdi_version
        _versions = {
            '3.2': {
                'Comprobante': {
                    'type': 'dict', 'required': True,
                    'schema': cls.get_Comprobante_schema(cls.cfdi_version),
                },
            }
        }
        return cls.get_schema_by_version(_versions, cls.cfdi_version)

    @classmethod
    def get_cfdi_version(cls, cfdi_version=None):
        """Return the given version, or the class-level default if None."""
        if cfdi_version is None:
            return cls.cfdi_version
        return cfdi_version

    @staticmethod
    def get_schema_by_version(versions, cfdi_version):
        """Look up a schema by version; log and re-raise on unknown ones."""
        try:
            return versions[cfdi_version]
        except KeyError:
            log.exception("CFDI version \"{}\" not supported.".format(
                cfdi_version))
            raise
| |
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
# handle py34
import builtins
except ImportError:
# and py27
import __builtin__ as builtins
import base64
import fixtures
import mock
import re
import six
import testtools
import troveclient.client
from troveclient import exceptions
import troveclient.shell
from troveclient.tests import fakes
from troveclient.tests import utils
import troveclient.v1.modules
import troveclient.v1.shell
class ShellFixture(fixtures.Fixture):
    """Fixture that builds a fresh OpenStackTroveShell for each test and
    clears the fake client's recorded call stack on teardown."""

    def setUp(self):
        super(ShellFixture, self).setUp()
        self.shell = troveclient.shell.OpenStackTroveShell()

    def tearDown(self):
        # 'cs' only exists once the shell has actually run a command.
        if hasattr(self.shell, 'cs'):
            self.shell.cs.clear_callstack()
        super(ShellFixture, self).tearDown()
class ShellTest(utils.TestCase):
# Fake credentials exported to the environment so the shell's argument
# parsing and auth plumbing succeed without a real keystone.
FAKE_ENV = {
    'OS_USERNAME': 'username',
    'OS_PASSWORD': 'password',
    'OS_PROJECT_ID': 'project_id',
    'OS_AUTH_URL': 'http://no.where/v2.0',
}

def setUp(self, *args):
    """Run before each test."""
    super(ShellTest, self).setUp()
    for var in self.FAKE_ENV:
        self.useFixture(fixtures.EnvironmentVariable(var,
                                                     self.FAKE_ENV[var]))
    self.shell = self.useFixture(ShellFixture()).shell
# Run a CLI command against the fake client, capturing stdout.  The
# instance/cluster lookup is patched to resolve to an instance id.
@mock.patch('sys.stdout', new_callable=six.StringIO)
@mock.patch('troveclient.client.get_version_map',
            return_value=fakes.get_version_map())
@mock.patch('troveclient.v1.shell._find_instance_or_cluster',
            return_value=('1234', 'instance'))
def run_command(self, cmd, mock_find_instance_or_cluster,
                mock_get_version_map, mock_stdout):
    if isinstance(cmd, list):
        self.shell.main(cmd)
    else:
        self.shell.main(cmd.split())
    return mock_stdout.getvalue()

# Same as run_command, but the lookup resolves to a cluster id.
@mock.patch('sys.stdout', new_callable=six.StringIO)
@mock.patch('troveclient.client.get_version_map',
            return_value=fakes.get_version_map())
@mock.patch('troveclient.v1.shell._find_instance_or_cluster',
            return_value=('cls-1234', 'cluster'))
def run_command_clusters(self, cmd, mock_find_instance_or_cluster,
                         mock_get_version_map, mock_stdout):
    if isinstance(cmd, list):
        self.shell.main(cmd)
    else:
        self.shell.main(cmd.split())
    return mock_stdout.getvalue()
# Thin delegations to the fake client's call-stack assertions:
# assert_called checks the most recent HTTP call, assert_called_anytime
# checks any recorded call.
def assert_called(self, method, url, body=None, **kwargs):
    return self.shell.cs.assert_called(method, url, body, **kwargs)

def assert_called_anytime(self, method, url, body=None):
    return self.shell.cs.assert_called_anytime(method, url, body)
def test__strip_option(self):
    """Exercise troveclient.v1.shell._strip_option over a table of cases.

    Each datum is: opt_name, opt_string, _strip_option kwargs,
    expected_value, expected_opt_string, exception_msg (a regex matched
    against the raised error, or None when no error is expected).
    """
    data = [
        ["volume", "volume=10",
         {}, "10", "", None],
        ["volume", ",volume=10,,type=mine,",
         {}, "10", "type=mine", None],
        ["volume", "type=mine",
         {}, "", "type=mine", "Missing option 'volume'.*"],
        ["volume", "type=mine",
         {'is_required': False}, None, "type=mine", None],
        ["volume", "volume=1, volume=2",
         {}, "", "", "Option 'volume' found more than once.*"],
        ["volume", "volume=1, volume=2",
         {'allow_multiple': True}, ['1', '2'], "", None],
        ["volume", "volume=1, volume=2,, volume=4, volume=6",
         {'allow_multiple': True}, ['1', '2', '4', '6'], "", None],
        ["module", ",flavor=10,,nic='net-id=net',module=test, module=test",
         {'allow_multiple': True}, ['test'],
         "flavor=10,,nic='net-id=net'", None],
        ["nic", ",flavor=10,,nic=net-id=net, module=test",
         {'quotes_required': True}, "", "",
         "Invalid 'nic' option. The value must be quoted.*"],
        ["nic", ",flavor=10,,nic='net-id=net', module=test",
         {'quotes_required': True}, "net-id=net",
         "flavor=10,, module=test", None],
        ["nic",
         ",nic='port-id=port',flavor=10,,nic='net-id=net', module=test",
         {'quotes_required': True, 'allow_multiple': True},
         ["net-id=net", "port-id=port"],
         "flavor=10,, module=test", None],
    ]
    # enumerate replaces the original manual 'count' counter.
    for count, datum in enumerate(data, 1):
        (opt_name, opts_str, kwargs, expected_value,
         expected_opt_string, exception_msg) = datum
        msg = "Error (test data line %s): " % count
        try:
            value, opt_string = troveclient.v1.shell._strip_option(
                opts_str, opt_name, **kwargs)
            if exception_msg:
                # Originally assertEqual(True, False, ...); self.fail is
                # the idiomatic way to force a failure (and raises the
                # same AssertionError, so the except flow is unchanged).
                self.fail("%sException not thrown, expecting %s" %
                          (msg, exception_msg))
            if isinstance(expected_value, list):
                self.assertEqual(
                    set(value), set(expected_value),
                    "%sValue not correct" % msg)
            else:
                self.assertEqual(value, expected_value,
                                 "%sValue not correct" % msg)
            self.assertEqual(opt_string, expected_opt_string,
                             "%sOption string not correct" % msg)
        except Exception as ex:
            if exception_msg:
                # py2 exceptions expose .message; py3 ones do not.
                msg = ex.message if hasattr(ex, 'message') else str(ex)
                self.assertThat(msg,
                                testtools.matchers.MatchesRegex(
                                    exception_msg, re.DOTALL),
                                exception_msg, "%sWrong ex" % msg)
            else:
                raise
# Smoke tests for the basic instance commands: each runs a CLI command
# against the fake client and asserts the HTTP call it produced.
def test_instance_list(self):
    self.run_command('list')
    self.assert_called('GET', '/instances')

def test_instance_show(self):
    self.run_command('show 1234')
    self.assert_called('GET', '/instances/1234')

def test_instance_delete(self):
    self.run_command('delete 1234')
    self.assert_called('DELETE', '/instances/1234')

def test_resize_instance(self):
    self.run_command('resize-instance 1234 1')
    self.assert_called('POST', '/instances/1234/action')

def test_resize_volume(self):
    self.run_command('resize-volume 1234 3')
    self.assert_called('POST', '/instances/1234/action')

def test_restart(self):
    self.run_command('restart 1234')
    self.assert_called('POST', '/instances/1234/action')

def test_detach_replica(self):
    self.run_command('detach-replica 1234')
    self.assert_called('PATCH', '/instances/1234')

def test_promote_to_replica_source(self):
    self.run_command('promote-to-replica-source 1234')
    self.assert_called('POST', '/instances/1234/action')

def test_eject_replica_source(self):
    self.run_command('eject-replica-source 1234')
    self.assert_called('POST', '/instances/1234/action')
# Flavor and volume-type command tests.
def test_flavor_list(self):
    self.run_command('flavor-list')
    self.assert_called('GET', '/flavors')

def test_flavor_list_with_datastore(self):
    cmd = ('flavor-list --datastore_type mysql '
           '--datastore_version_id some-version-id')
    self.run_command(cmd)
    self.assert_called(
        'GET', '/datastores/mysql/versions/some-version-id/flavors')

def test_flavor_list_error(self):
    cmd = 'flavor-list --datastore_type mysql'
    # Raw string: '\(' is an invalid escape sequence in a plain string
    # literal (also fixes the 'exepcted' typo in the local name).
    expected_error_msg = (r'Missing argument\(s\): '
                          r'datastore_type, datastore_version_id')
    self.assertRaisesRegexp(
        exceptions.MissingArgs, expected_error_msg, self.run_command,
        cmd)

def test_flavor_show(self):
    self.run_command('flavor-show 1')
    self.assert_called('GET', '/flavors/1')

def test_flavor_show_by_name(self):
    self.run_command('flavor-show m1.tiny')  # defined in fakes.py
    self.assert_called('GET', '/flavors/m1.tiny')

def test_flavor_show_uuid(self):
    self.run_command('flavor-show m1.uuid')
    self.assert_called('GET', '/flavors/m1.uuid')

def test_volume_type_list(self):
    self.run_command('volume-type-list')
    self.assert_called('GET', '/volume-types')

def test_volume_type_list_with_datastore(self):
    cmd = ('volume-type-list --datastore_type mysql '
           '--datastore_version_id some-version-id')
    self.run_command(cmd)
    self.assert_called(
        'GET', '/datastores/mysql/versions/some-version-id/volume-types')

def test_volume_type_list_error(self):
    cmd = 'volume-type-list --datastore_type mysql'
    # Raw string for the regex, as above.
    expected_error_msg = (r'Missing argument\(s\): '
                          r'datastore_type, datastore_version_id')
    self.assertRaisesRegexp(
        exceptions.MissingArgs, expected_error_msg, self.run_command,
        cmd)

def test_volume_type_show(self):
    self.run_command('volume-type-show 1')
    self.assert_called('GET', '/volume-types/1')
# Basic cluster command tests.
def test_cluster_list(self):
    self.run_command('cluster-list')
    self.assert_called('GET', '/clusters')

def test_cluster_show(self):
    self.run_command('cluster-show cls-1234')
    self.assert_called('GET', '/clusters/cls-1234')

def test_cluster_instances(self):
    self.run_command('cluster-instances cls-1234')
    self.assert_called('GET', '/clusters/cls-1234')

def test_cluster_delete(self):
    self.run_command('cluster-delete cls-1234')
    self.assert_called('DELETE', '/clusters/cls-1234')
# Instance create ('boot') tests: verify the POST /instances body for
# the various volume/replica/locality argument combinations.
def test_boot(self):
    self.run_command('create test-member-1 1 --size 1 --volume_type lvm')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': 'lvm'},
            'flavorRef': 1,
            'name': 'test-member-1'
        }})

def test_boot_with_modules(self):
    self.run_command('create test-member-1 1 --size 1 --volume_type lvm '
                     '--module 4321 --module 8765')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': 'lvm'},
            'flavorRef': 1,
            'name': 'test-member-1',
            'modules': [{'id': '4321'}, {'id': '8765'}]
        }})

def test_boot_by_flavor_name(self):
    # The flavor name resolves to id 1 via fakes.py.
    self.run_command(
        'create test-member-1 m1.tiny --size 1 --volume_type lvm')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': 'lvm'},
            'flavorRef': 1,
            'name': 'test-member-1'
        }})

def test_boot_repl_set(self):
    self.run_command('create repl-1 1 --size 1 --locality=anti-affinity '
                     '--replica_count=4')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': None},
            'flavorRef': 1,
            'name': 'repl-1',
            'replica_count': 4,
            'locality': 'anti-affinity'
        }})

def test_boot_replica(self):
    # 'master_1' resolves to the fake master's id 'myid'.
    self.run_command('create slave-1 1 --size 1 --replica_of=master_1')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': None},
            'flavorRef': 1,
            'name': 'slave-1',
            'replica_of': 'myid',
            'replica_count': 1
        }})

def test_boot_replica_count(self):
    self.run_command('create slave-1 1 --size 1 --replica_of=master_1 '
                     '--replica_count=3')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': None},
            'flavorRef': 1,
            'name': 'slave-1',
            'replica_of': 'myid',
            'replica_count': 3
        }})

def test_boot_locality(self):
    self.run_command('create master-1 1 --size 1 --locality=affinity')
    self.assert_called_anytime(
        'POST', '/instances',
        {'instance': {
            'volume': {'size': 1, 'type': None},
            'flavorRef': 1,
            'name': 'master-1',
            'locality': 'affinity'
        }})

def test_boot_locality_error(self):
    # Locality and replica_of are mutually exclusive.
    cmd = ('create slave-1 1 --size 1 --locality=affinity '
           '--replica_of=master_1')
    self.assertRaisesRegexp(
        exceptions.ValidationError,
        'Cannot specify locality when adding replicas to existing '
        'master.',
        self.run_command, cmd)

def test_boot_nic_error(self):
    # net-id and port-id cannot both be given for one NIC.
    cmd = ('create test-member-1 1 --size 1 --volume_type lvm '
           '--nic net-id=some-id,port-id=some-id')
    self.assertRaisesRegexp(
        exceptions.ValidationError,
        'Invalid NIC argument: nic=\'net-id=some-id,port-id=some-id\'',
        self.run_command, cmd)
# Cluster create/grow/shrink tests, including --instance option parsing.
def test_cluster_create(self):
    cmd = ('cluster-create test-clstr vertica 7.1 '
           '--instance flavor=2,volume=2 '
           '--instance flavor=2,volume=1 '
           '--instance flavor=2,volume=1,volume_type=my-type-1')
    self.run_command(cmd)
    self.assert_called_anytime(
        'POST', '/clusters',
        {'cluster': {
            'instances': [
                {
                    'volume': {'size': '2'},
                    'flavorRef': '2'
                },
                {
                    'volume': {'size': '1'},
                    'flavorRef': '2'
                },
                {
                    'volume': {'size': '1', 'type': 'my-type-1'},
                    'flavorRef': '2'
                }],
            'datastore': {'version': '7.1', 'type': 'vertica'},
            'name': 'test-clstr'}})

def test_cluster_create_by_flavor_name(self):
    cmd = ('cluster-create test-clstr vertica 7.1 '
           '--instance flavor=m1.small,volume=2 '
           '--instance flavor=m1.small,volume=1')
    self.run_command(cmd)
    self.assert_called_anytime(
        'POST', '/clusters',
        {'cluster': {
            'instances': [
                {
                    'volume': {'size': '2'},
                    'flavorRef': '2'
                },
                {
                    'volume': {'size': '1'},
                    'flavorRef': '2'
                }],
            'datastore': {'version': '7.1', 'type': 'vertica'},
            'name': 'test-clstr'}})

def test_cluster_create_error(self):
    cmd = ('cluster-create test-clstr vertica 7.1 --instance volume=2 '
           '--instance flavor=2,volume=1')
    self.assertRaisesRegexp(
        exceptions.MissingArgs, "Missing option 'flavor'",
        self.run_command, cmd)

def test_cluster_grow(self):
    cmd = ('cluster-grow cls-1234 '
           '--instance flavor=2,volume=2 '
           '--instance flavor=2,volume=1')
    self.run_command(cmd)
    self.assert_called('POST', '/clusters/cls-1234')

def test_cluster_shrink(self):
    cmd = ('cluster-shrink cls-1234 1234')
    self.run_command(cmd)
    self.assert_called('POST', '/clusters/cls-1234')

def test_cluster_create_with_nic_az(self):
    # nic values must be quoted so the embedded '=' survives parsing.
    cmd = ('cluster-create test-clstr1 vertica 7.1 '
           '--instance flavor=2,volume=2,nic=\'net-id=some-id\','
           'availability_zone=2 '
           '--instance flavor=2,volume=2,nic=\'net-id=some-id\','
           'availability_zone=2')
    self.run_command(cmd)
    self.assert_called_anytime(
        'POST', '/clusters',
        {'cluster': {
            'instances': [
                {
                    'flavorRef': '2',
                    'volume': {'size': '2'},
                    'nics': [{'net-id': 'some-id'}],
                    'availability_zone': '2'
                },
                {
                    'flavorRef': '2',
                    'volume': {'size': '2'},
                    'nics': [{'net-id': 'some-id'}],
                    'availability_zone': '2'
                }],
            'datastore': {'version': '7.1', 'type': 'vertica'},
            'name': 'test-clstr1'}})

def test_cluster_create_with_nic_az_error(self):
    cmd = ('cluster-create test-clstr vertica 7.1 '
           '--instance flavor=2,volume=2,nic=net-id=some-id,'
           'port-id=some-port-id,availability_zone=2 '
           '--instance flavor=2,volume=1,nic=net-id=some-id,'
           'port-id=some-port-id,availability_zone=2')
    self.assertRaisesRegexp(
        exceptions.ValidationError, "Invalid 'nic' option. "
        "The value must be quoted.",
        self.run_command, cmd)

def test_cluster_create_with_nic_az_error_again(self):
    # v4-fixed-ip alone is not a valid NIC specification.
    cmd = ('cluster-create test-clstr vertica 7.1 '
           '--instance flavor=2,volume=2,nic=\'v4-fixed-ip=10.0.0.1\','
           'availability_zone=2 '
           '--instance flavor=2,volume=1,nic=\'v4-fixed-ip=10.0.0.1\','
           'availability_zone=2')
    self.assertRaisesRegexp(
        exceptions.ValidationError, 'Invalid NIC argument',
        self.run_command, cmd)
# Datastore command tests.
def test_datastore_list(self):
    self.run_command('datastore-list')
    self.assert_called('GET', '/datastores')

def test_datastore_show(self):
    self.run_command('datastore-show d-123')
    self.assert_called('GET', '/datastores/d-123')

def test_datastore_version_list(self):
    self.run_command('datastore-version-list d-123')
    self.assert_called('GET', '/datastores/d-123/versions')

def test_datastore_version_show(self):
    self.run_command('datastore-version-show v-56 --datastore d-123')
    self.assert_called('GET', '/datastores/d-123/versions/v-56')

def test_datastore_version_show_error(self):
    # Looking a version up by name without --datastore is ambiguous.
    expected_error_msg = ('The datastore name or id is required to '
                          'retrieve a datastore version by name.')
    self.assertRaisesRegexp(exceptions.NoUniqueMatch, expected_error_msg,
                            self.run_command,
                            'datastore-version-show v-56')
# Configuration-group command tests.
def test_configuration_list(self):
    self.run_command('configuration-list')
    self.assert_called('GET', '/configurations')

def test_configuration_show(self):
    self.run_command('configuration-show c-123')
    self.assert_called('GET', '/configurations/c-123')

def test_configuration_create(self):
    # 'some-thing' is not valid JSON, so parsing must fail.
    cmd = "configuration-create c-123 some-thing"
    self.assertRaises(ValueError, self.run_command, cmd)

def test_configuration_update(self):
    cmd = "configuration-update c-123 some-thing"
    self.assertRaises(ValueError, self.run_command, cmd)

def test_configuration_patch(self):
    cmd = "configuration-patch c-123 some-thing"
    self.assertRaises(ValueError, self.run_command, cmd)

def test_configuration_parameter_list(self):
    cmd = 'configuration-parameter-list v-156 --datastore d-123'
    self.run_command(cmd)
    self.assert_called('GET',
                       '/datastores/d-123/versions/v-156/parameters')

def test_configuration_parameter_list_error(self):
    expected_error_msg = ('The datastore name or id is required to '
                          'retrieve the parameters for the configuration '
                          'group by name')
    self.assertRaisesRegexp(
        exceptions.NoUniqueMatch, expected_error_msg,
        self.run_command, 'configuration-parameter-list v-156')

def test_configuration_parameter_show(self):
    cmd = ('configuration-parameter-show v_56 '
           'max_connections --datastore d_123')
    self.run_command(cmd)
    self.assert_called(
        'GET',
        '/datastores/d_123/versions/v_56/parameters/max_connections')

def test_configuration_instances(self):
    cmd = 'configuration-instances c-123'
    self.run_command(cmd)
    self.assert_called('GET', '/configurations/c-123/instances')

def test_configuration_delete(self):
    self.run_command('configuration-delete c-123')
    self.assert_called('DELETE', '/configurations/c-123')

def test_configuration_default(self):
    self.run_command('configuration-default 1234')
    self.assert_called('GET', '/instances/1234/configuration')

def test_configuration_attach(self):
    self.run_command('configuration-attach 1234 c-123')
    self.assert_called('PUT', '/instances/1234')

def test_configuration_detach(self):
    self.run_command('configuration-detach 1234')
    self.assert_called('PUT', '/instances/1234')
# Upgrade and per-instance metadata command tests.
def test_upgrade(self):
    self.run_command('upgrade 1234 c-123')
    self.assert_called('PATCH', '/instances/1234')

def test_metadata_edit(self):
    self.run_command('metadata-edit 1234 key-123 value-123')
    self.assert_called('PATCH', '/instances/1234/metadata/key-123')

def test_metadata_update(self):
    self.run_command('metadata-update 1234 key-123 key-456 value-123')
    self.assert_called('PUT', '/instances/1234/metadata/key-123')

def test_metadata_delete(self):
    self.run_command('metadata-delete 1234 key-123')
    self.assert_called('DELETE', '/instances/1234/metadata/key-123')

def test_metadata_create(self):
    self.run_command('metadata-create 1234 key123 value123')
    self.assert_called_anytime(
        'POST', '/instances/1234/metadata/key123',
        {'metadata': {'value': 'value123'}})

def test_metadata_list(self):
    self.run_command('metadata-list 1234')
    self.assert_called('GET', '/instances/1234/metadata')

def test_metadata_show(self):
    self.run_command('metadata-show 1234 key123')
    self.assert_called('GET', '/instances/1234/metadata/key123')
# Module command tests (plus limit-list).
def test_module_list(self):
    self.run_command('module-list')
    self.assert_called('GET', '/modules')

def test_module_list_datastore(self):
    self.run_command('module-list --datastore all')
    self.assert_called('GET', '/modules?datastore=all')

def test_module_show(self):
    self.run_command('module-show 4321')
    self.assert_called('GET', '/modules/4321')

def test_module_create(self):
    # Patch open() and b64encode so no real file is read; the CLI sends
    # the (decoded) contents in the POST body.
    with mock.patch.object(builtins, 'open'):
        return_value = b'mycontents'
        expected_contents = str(return_value.decode('utf-8'))
        mock_encode = mock.Mock(return_value=return_value)
        with mock.patch.object(base64, 'b64encode', mock_encode):
            self.run_command('module-create mod1 type filename')
            self.assert_called_anytime(
                'POST', '/modules',
                {'module': {'contents': expected_contents,
                            'all_tenants': 0,
                            'module_type': 'type', 'visible': 1,
                            'auto_apply': 0, 'live_update': 0,
                            'name': 'mod1'}})

def test_module_update(self):
    # __repr__ is patched so the module lookup resolves id '4321'.
    with mock.patch.object(troveclient.v1.modules.Module, '__repr__',
                           mock.Mock(return_value='4321')):
        self.run_command('module-update 4321 --name mod3')
        self.assert_called_anytime(
            'PUT', '/modules/4321',
            {'module': {'name': 'mod3'}})

def test_module_delete(self):
    with mock.patch.object(troveclient.v1.modules.Module, '__repr__',
                           mock.Mock(return_value='4321')):
        self.run_command('module-delete 4321')
        self.assert_called_anytime('DELETE', '/modules/4321')

def test_module_list_instance(self):
    self.run_command('module-list-instance 1234')
    self.assert_called_anytime('GET', '/instances/1234/modules')

def test_module_instances(self):
    with mock.patch.object(troveclient.v1.modules.Module, '__repr__',
                           mock.Mock(return_value='4321')):
        self.run_command('module-instances 4321')
        self.assert_called_anytime('GET', '/modules/4321/instances')

def test_module_instances_clustered(self):
    with mock.patch.object(troveclient.v1.modules.Module, '__repr__',
                           mock.Mock(return_value='4321')):
        self.run_command('module-instances 4321 --include_clustered')
        self.assert_called_anytime(
            'GET', '/modules/4321/instances?include_clustered=True')

def test_cluster_modules(self):
    self.run_command('cluster-modules cls-1234')
    self.assert_called_anytime('GET', '/clusters/cls-1234')

def test_module_apply(self):
    self.run_command('module-apply 1234 4321 8765')
    self.assert_called_anytime('POST', '/instances/1234/modules',
                               {'modules':
                                   [{'id': '4321'}, {'id': '8765'}]})

def test_module_remove(self):
    self.run_command('module-remove 1234 4321')
    self.assert_called_anytime('DELETE', '/instances/1234/modules/4321')

def test_module_query(self):
    self.run_command('module-query 1234')
    self.assert_called('GET', '/instances/1234/modules?from_guest=True')

def test_module_retrieve(self):
    with mock.patch.object(troveclient.v1.modules.Module, '__getattr__',
                           mock.Mock(return_value='4321')):
        self.run_command('module-retrieve 1234')
        self.assert_called(
            'GET',
            '/instances/1234/modules?'
            'include_contents=True&from_guest=True')

def test_limit_list(self):
    self.run_command('limit-list')
    self.assert_called('GET', '/limits')
# Backup command tests.
def test_backup_list(self):
    self.run_command('backup-list')
    self.assert_called('GET', '/backups')

def test_backup_show(self):
    self.run_command('backup-show bk-1234')
    self.assert_called('GET', '/backups/bk-1234')

def test_backup_list_instance(self):
    self.run_command('backup-list-instance 1234')
    self.assert_called('GET', '/instances/1234/backups')

def test_backup_delete(self):
    self.run_command('backup-delete bk-1234')
    self.assert_called('DELETE', '/backups/bk-1234')

def test_backup_create(self):
    self.run_command('backup-create 1234 bkp_1')
    self.assert_called_anytime(
        'POST', '/backups',
        {'backup': {
            'instance': '1234',
            'name': 'bkp_1'
        }})

def test_backup_copy(self):
    self.run_command('backup-copy new_bkp bk-1234')
    self.assert_called_anytime(
        'POST', '/backups',
        {'backup': {
            'name': 'new_bkp',
            'backup': {'region': None, 'id': 'bk-1234'}
        }})
# Database command tests.
def test_database_list(self):
    self.run_command('database-list 1234')
    self.assert_called('GET', '/instances/1234/databases')

def test_database_delete(self):
    self.run_command('database-delete 1234 db_1')
    self.assert_called('DELETE', '/instances/1234/databases/db_1')

def test_database_create(self):
    cmd = ('database-create 1234 db_1 --character_set utf8 '
           '--collate utf8_general_ci')
    self.run_command(cmd)
    self.assert_called_anytime(
        'POST', '/instances/1234/databases',
        {'databases': [{'character_set': 'utf8',
                        'name': 'db_1',
                        'collate': 'utf8_general_ci'}]})
def test_user_list(self):
    self.run_command('user-list 1234')
    self.assert_called('GET', '/instances/1234/users')

def test_user_show(self):
    self.run_command('user-show 1234 jacob')
    self.assert_called('GET', '/instances/1234/users/jacob')

def test_user_delete(self):
    self.run_command('user-delete 1234 jacob')
    self.assert_called('DELETE', '/instances/1234/users/jacob')

def test_user_create(self):
    # The users API takes a list; a new user starts with no databases.
    self.run_command('user-create 1234 jacob password')
    self.assert_called_anytime(
        'POST', '/instances/1234/users',
        {'users': [{
            'password': 'password',
            'name': 'jacob',
            'databases': []}]})

def test_user_show_access(self):
    self.run_command('user-show-access 1234 jacob')
    self.assert_called('GET', '/instances/1234/users/jacob/databases')

# All three attribute updates (host / name / password) go through the
# same PUT endpoint; only the request body differs.
def test_user_update_host(self):
    cmd = 'user-update-attributes 1234 jacob --new_host 10.0.0.1'
    self.run_command(cmd)
    self.assert_called('PUT', '/instances/1234/users/jacob')

def test_user_update_name(self):
    self.run_command('user-update-attributes 1234 jacob --new_name sam')
    self.assert_called('PUT', '/instances/1234/users/jacob')

def test_user_update_password(self):
    cmd = 'user-update-attributes 1234 jacob --new_password new_pwd'
    self.run_command(cmd)
    self.assert_called('PUT', '/instances/1234/users/jacob')

def test_user_grant_access(self):
    # Granting access PUTs to the user's databases collection.
    self.run_command('user-grant-access 1234 jacob db1 db2')
    self.assert_called('PUT', '/instances/1234/users/jacob/databases')

def test_user_revoke_access(self):
    self.run_command('user-revoke-access 1234 jacob db1')
    self.assert_called('DELETE',
                       '/instances/1234/users/jacob/databases/db1')
# Root enable/disable/show is supported for both standalone instances
# and clusters; cluster commands run through run_command_clusters.
def test_root_enable_instance(self):
    self.run_command('root-enable 1234')
    self.assert_called_anytime('POST', '/instances/1234/root')

def test_root_enable_cluster(self):
    self.run_command_clusters('root-enable cls-1234')
    self.assert_called_anytime('POST', '/clusters/cls-1234/root')

def test_root_disable_instance(self):
    self.run_command('root-disable 1234')
    self.assert_called_anytime('DELETE', '/instances/1234/root')

def test_root_show_instance(self):
    self.run_command('root-show 1234')
    self.assert_called('GET', '/instances/1234/root')

def test_root_show_cluster(self):
    self.run_command_clusters('root-show cls-1234')
    self.assert_called('GET', '/clusters/cls-1234/root')
def test_secgroup_list(self):
    self.run_command('secgroup-list')
    self.assert_called('GET', '/security-groups')

def test_secgroup_show(self):
    self.run_command('secgroup-show 2')
    self.assert_called('GET', '/security-groups/2')

def test_secgroup_list_rules(self):
    # Rules are embedded in the group resource, so this is the same GET
    # as secgroup-show; the client formats only the rules.
    self.run_command('secgroup-list-rules 2')
    self.assert_called('GET', '/security-groups/2')

def test_secgroup_delete_rule(self):
    self.run_command('secgroup-delete-rule 2')
    self.assert_called('DELETE', '/security-group-rules/2')

def test_secgroup_add_rule(self):
    # Adding a rule POSTs the CIDR plus the owning group id.
    self.run_command('secgroup-add-rule 2 15.0.0.0/24')
    self.assert_called_anytime(
        'POST', '/security-group-rules',
        {'security_group_rule': {
            'cidr': '15.0.0.0/24',
            'group_id': '2',
        }})
@mock.patch('sys.stdout', new_callable=six.StringIO)
@mock.patch('troveclient.client.get_version_map',
            return_value=fakes.get_version_map())
@mock.patch('troveclient.v1.shell._find_instance',
            side_effect=exceptions.CommandError)
@mock.patch('troveclient.v1.shell._find_cluster',
            return_value='cls-1234')
def test_find_instance_or_cluster_find_cluster(self, mock_find_cluster,
                                               mock_find_instance,
                                               mock_get_version_map,
                                               mock_stdout):
    # When the id does not resolve to an instance (_find_instance raises
    # CommandError) the shell falls back to cluster lookup.
    cmd = 'root-show cls-1234'
    self.shell.main(cmd.split())
    self.assert_called('GET', '/clusters/cls-1234/root')

@mock.patch('sys.stdout', new_callable=six.StringIO)
@mock.patch('troveclient.client.get_version_map',
            return_value=fakes.get_version_map())
@mock.patch('troveclient.v1.shell._find_instance',
            return_value='1234')
def test_find_instance_or_cluster(self, mock_find_instance,
                                  mock_get_version_map, mock_stdout):
    # Happy path: the id resolves to an instance, no cluster fallback.
    cmd = 'root-show 1234'
    self.shell.main(cmd.split())
    self.assert_called('GET', '/instances/1234/root')
| |
import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import (HttpResponse, HttpResponseRedirect,
Http404, HttpResponsePermanentRedirect)
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic.list_detail import object_list, object_detail
from django.utils.datastructures import SortedDict
from core.views import serve_docs
from projects.models import Project
from projects.utils import highest_version
from taggit.models import Tag
def project_index(request, username=None, tag=None):
    """
    List live projects, optionally filtered by owner or tag.

    When a filter applies, 'person' (User) or 'tag' (Tag) is added to the
    template context; otherwise those context keys are None.
    """
    queryset = Project.objects.live()

    user = None
    if username:
        user = get_object_or_404(User, username=username)
        queryset = queryset.filter(user=user)

    tag_obj = None
    if tag:
        tag_obj = get_object_or_404(Tag, slug=tag)
        queryset = queryset.filter(tags__name__in=[tag_obj.slug])

    return object_list(
        request,
        queryset=queryset,
        extra_context={'person': user, 'tag': tag_obj},
        page=int(request.GET.get('page', 1)),
        template_object_name='project',
    )
def slug_detail(request, project_slug, filename):
    """
    Serve a doc page for a project, defaulting to the 'latest' version.

    If the first path component of ``filename`` names a valid version of
    the project, that version is used and the component is stripped from
    the served filename.

    Fixes two defects in the previous implementation:
    * ``serve_docs`` was called with ``version_slug=version``, which raised
      NameError whenever ``filename`` contained no '/' (``version`` was
      never bound); it now passes the computed ``version_slug``.
    * the version candidate was taken from ``split_filename[1]`` instead of
      the leading component ``split_filename[0]``.
    """
    version_slug = 'latest'
    if not filename:
        filename = "index.html"
    split_filename = filename.split('/')
    if len(split_filename) > 1:
        # Leading path component may actually be a version slug.
        version = split_filename[0]
        proj = get_object_or_404(Project, slug=project_slug)
        valid_version = proj.versions.filter(slug=version).count()
        if valid_version:
            version_slug = version
            filename = '/'.join(split_filename[1:])
    return serve_docs(request=request, project_slug=project_slug,
                      version_slug=version_slug, filename=filename)
def project_detail(request, project_slug):
    """Render the detail page for a single project."""
    project = get_object_or_404(Project, slug=project_slug)
    context = {'project': project}
    return render_to_response('projects/project_detail.html',
                              context,
                              context_instance=RequestContext(request))
def project_downloads(request, project_slug):
    """
    Show downloadable artifacts (pdf/htmlzip/epub/manpage) per active
    version of a project; versions with no artifacts are omitted.
    """
    project = get_object_or_404(Project, slug=project_slug)
    versions = project.ordered_active_versions()
    version_data = SortedDict()
    for version in versions:
        slug = version.slug
        downloads = {}
        if project.has_pdf(slug):
            downloads['pdf_url'] = project.get_pdf_url(slug)
        if project.has_htmlzip(slug):
            downloads['htmlzip_url'] = project.get_htmlzip_url(slug)
        if project.has_epub(slug):
            downloads['epub_url'] = project.get_epub_url(slug)
        if project.has_manpage(slug):
            downloads['manpage_url'] = project.get_manpage_url(slug)
        # Only list versions that actually have something to download.
        if downloads:
            version_data[slug] = downloads
    return render_to_response(
        'projects/project_downloads.html',
        {
            'project': project,
            'version_data': version_data,
            'versions': versions,
        },
        context_instance=RequestContext(request),
    )
def legacy_project_detail(request, username, project_slug):
    """Permanently redirect old per-user project URLs to the canonical
    project detail page (``username`` is ignored)."""
    url = reverse(project_detail, kwargs={'project_slug': project_slug})
    return HttpResponsePermanentRedirect(url)
def tag_index(request):
    """
    List of all tags by most common
    """
    return object_list(
        request,
        queryset=Project.tags.most_common(),
        page=int(request.GET.get('page', 1)),
        template_object_name='tag',
        template_name='projects/tag_list.html',
    )
def search(request):
    """
    our ghetto site search. see roadmap.
    """
    try:
        term = request.GET['q']
    except KeyError:
        # No query term supplied.
        raise Http404
    queryset = Project.objects.live(name__icontains=term)
    # A single hit goes straight to the project page.
    if queryset.count() == 1:
        return HttpResponseRedirect(queryset[0].get_absolute_url())
    return object_list(
        request,
        queryset=queryset,
        template_object_name='term',
        extra_context={'term': term},
        template_name='projects/search.html',
    )
def search_autocomplete(request):
    """
    Return a JSON array of up to 20 live project names matching ?term=.
    """
    try:
        term = request.GET['term']
    except KeyError:
        raise Http404
    names = Project.objects.live(
        name__icontains=term)[:20].values_list('name', flat=True)
    return HttpResponse(simplejson.dumps(list(names)),
                        mimetype='text/javascript')
def subdomain_handler(request, lang_slug=None, version_slug=None, filename=''):
    """
    This provides the fall-back routing for subdomain requests.
    This was made primarily to redirect old subdomain's to their version'd brothers.
    """
    # The project slug is resolved from the subdomain by middleware and
    # attached to the request (request.slug) — assumed, confirm in middleware.
    if not filename:
        filename = "index.html"
    project = get_object_or_404(Project, slug=request.slug)
    if version_slug is None:
        #Handle / on subdomain.
        # Bare subdomain root: redirect to the project's default version.
        default_version = project.get_default_version()
        url = reverse(serve_docs, kwargs={
            'version_slug': default_version,
            'lang_slug': 'en',
            'filename': filename
        })
        return HttpResponseRedirect(url)
    if version_slug and lang_slug is None:
        #Handle /version/ on subdomain.
        aliases = project.aliases.filter(from_slug=version_slug)
        #Handle Aliases.
        if aliases.count():
            if aliases[0].largest:
                # 'largest' alias: resolve to the highest active version
                # whose slug contains the requested slug.
                highest_ver = highest_version(project.versions.filter(slug__contains=version_slug, active=True))
                version_slug = highest_ver[0].slug
            else:
                version_slug = aliases[0].to_slug
            url = reverse(serve_docs, kwargs={
                'version_slug': version_slug,
                'lang_slug': 'en',
                'filename': filename
            })
        else:
            # No alias: try the slug as-is; an unroutable slug is a 404.
            try:
                url = reverse(serve_docs, kwargs={
                    'version_slug': version_slug,
                    'lang_slug': 'en',
                    'filename': filename
                })
            except NoReverseMatch:
                raise Http404
        return HttpResponseRedirect(url)
    # Both language and version supplied: serve the docs directly.
    return serve_docs(request=request,
                      project_slug=project.slug,
                      lang_slug=lang_slug,
                      version_slug=version_slug,
                      filename=filename)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 eNovance , Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for image utils."""
import contextlib
import mox
import tempfile
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import processutils
from cinder import test
from cinder import utils
class FakeImageService:
    """Minimal in-memory stand-in for an image service used by these tests."""

    def __init__(self):
        # Maps image_id -> raw image data returned by download().
        self._imagedata = {}

    def download(self, context, image_id, data):
        """Write the stored data for image_id (default '') into *data*."""
        self.show(context, image_id)
        payload = self._imagedata.get(image_id, '')
        data.write(payload)

    def show(self, context, image_id):
        """Return fixed metadata: a 2 GiB bare qcow2 image."""
        two_gib = 2 * 1024 ** 3
        return {'size': two_gib,
                'disk_format': 'qcow2',
                'container_format': 'bare'}

    def update(self, context, image_id, metadata, path):
        """No-op; present only to satisfy the image-service interface."""
        pass
class TestUtils(test.TestCase):
    """Tests for the qemu-img helpers in cinder.image.image_utils.

    Fix: test_qemu_img_info_alt used the deprecated ``assertEquals`` alias
    while its sibling test used ``assertEqual``; the class now uses
    ``assertEqual`` consistently.
    """
    TEST_IMAGE_ID = 321
    TEST_DEV_PATH = "/dev/ether/fake_dev"

    def setUp(self):
        super(TestUtils, self).setUp()
        self._mox = mox.Mox()
        self._image_service = FakeImageService()
        self.addCleanup(self._mox.UnsetStubs)

    def test_resize_image(self):
        # resize_image runs qemu-img resize without root.
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')
        TEST_IMG_SOURCE = 'boobar.img'
        TEST_IMG_SIZE_IN_GB = 1
        utils.execute('qemu-img', 'resize', TEST_IMG_SOURCE,
                      '%sG' % TEST_IMG_SIZE_IN_GB, run_as_root=False)
        mox.ReplayAll()
        image_utils.resize_image(TEST_IMG_SOURCE, TEST_IMG_SIZE_IN_GB)
        mox.VerifyAll()

    def test_convert_image(self):
        # convert_image runs qemu-img convert as root.
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')
        TEST_OUT_FORMAT = 'vmdk'
        TEST_SOURCE = 'img/qemu.img'
        TEST_DEST = '/img/vmware.vmdk'
        utils.execute('qemu-img', 'convert', '-O', TEST_OUT_FORMAT,
                      TEST_SOURCE, TEST_DEST, run_as_root=True)
        mox.ReplayAll()
        image_utils.convert_image(TEST_SOURCE, TEST_DEST, TEST_OUT_FORMAT)
        mox.VerifyAll()

    def test_qemu_img_info(self):
        # Parse canned 'qemu-img info' output (underscore-style keys).
        TEST_PATH = "img/qemu.qcow2"
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file_format: qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        TEST_STR = "image: qemu.qcow2\n"\
                   "file_format: qcow2\n"\
                   "virtual_size: 52428800\n"\
                   "disk_size: 200704\n"\
                   "cluster_size: 65536\n"\
                   "backing_file: qemu.qcow2\n"\
                   "snapshots: [{'date': '2011-10-04', "\
                   "'vm_clock': '19:04:00 32:06:34.974', "\
                   "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]"
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            TEST_PATH, run_as_root=True).AndReturn(
                (TEST_RETURN, 'ignored')
            )
        mox.ReplayAll()
        inf = image_utils.qemu_img_info(TEST_PATH)
        self.assertEqual(inf.image, 'qemu.qcow2')
        self.assertEqual(inf.backing_file, 'qemu.qcow2')
        self.assertEqual(inf.file_format, 'qcow2')
        self.assertEqual(inf.virtual_size, 52428800)
        self.assertEqual(inf.cluster_size, 65536)
        self.assertEqual(inf.disk_size, 200704)
        self.assertEqual(inf.snapshots[0]['id'], '1')
        self.assertEqual(inf.snapshots[0]['tag'], 'snap1')
        self.assertEqual(inf.snapshots[0]['vm_size'], '1.7G')
        self.assertEqual(inf.snapshots[0]['date'], '2011-10-04')
        self.assertEqual(inf.snapshots[0]['vm_clock'], '19:04:00 32:06:34.974')
        self.assertEqual(str(inf), TEST_STR)

    def test_qemu_img_info_alt(self):
        """Test a slightly different variation of qemu-img output.

        (Based on Fedora 19's qemu-img 1.4.2, which uses spaces instead of
        underscores in its field names.)
        """
        TEST_PATH = "img/qemu.qcow2"
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file format: qcow2\n"\
                      "virtual size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        TEST_STR = "image: qemu.qcow2\n"\
                   "file_format: qcow2\n"\
                   "virtual_size: 52428800\n"\
                   "disk_size: 200704\n"\
                   "cluster_size: 65536\n"\
                   "backing_file: qemu.qcow2\n"\
                   "snapshots: [{'date': '2011-10-04', "\
                   "'vm_clock': '19:04:00 32:06:34.974', "\
                   "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]"
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')
        cmd = ['env', 'LC_ALL=C', 'LANG=C',
               'qemu-img', 'info', TEST_PATH]
        utils.execute(*cmd, run_as_root=True).AndReturn(
            (TEST_RETURN, 'ignored'))
        mox.ReplayAll()
        inf = image_utils.qemu_img_info(TEST_PATH)
        self.assertEqual(inf.image, 'qemu.qcow2')
        self.assertEqual(inf.backing_file, 'qemu.qcow2')
        self.assertEqual(inf.file_format, 'qcow2')
        self.assertEqual(inf.virtual_size, 52428800)
        self.assertEqual(inf.cluster_size, 65536)
        self.assertEqual(inf.disk_size, 200704)
        self.assertEqual(inf.snapshots[0]['id'], '1')
        self.assertEqual(inf.snapshots[0]['tag'], 'snap1')
        self.assertEqual(inf.snapshots[0]['vm_size'], '1.7G')
        self.assertEqual(inf.snapshots[0]['date'], '2011-10-04')
        self.assertEqual(inf.snapshots[0]['vm_clock'],
                         '19:04:00 32:06:34.974')
        self.assertEqual(str(inf), TEST_STR)

    def _test_fetch_to_raw(self, has_qemu=True, src_inf=None, dest_inf=None):
        # Record the expected call sequence for fetch_to_raw:
        # temp file creation, qemu-img probe, fetch, optional source info,
        # optional convert + destination info. Replays but does not verify;
        # callers invoke fetch_to_raw and VerifyAll themselves.
        mox = self._mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(utils, 'execute')
        mox.StubOutWithMock(image_utils, 'fetch')
        TEST_INFO = ("image: qemu.qcow2\n"
                     "file format: raw\n"
                     "virtual size: 0 (0 bytes)\n"
                     "disk size: 0")
        image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH)
        test_qemu_img = utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True)
        if has_qemu:
            test_qemu_img.AndReturn((TEST_INFO, 'ignored'))
            image_utils.fetch(context, self._image_service,
                              self.TEST_IMAGE_ID, self.TEST_DEV_PATH,
                              None, None)
        else:
            test_qemu_img.AndRaise(processutils.ProcessExecutionError())
        if has_qemu and src_inf:
            utils.execute(
                'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                    (src_inf, 'ignored')
                )
        if has_qemu and dest_inf:
            utils.execute(
                'qemu-img', 'convert', '-O', 'raw',
                self.TEST_DEV_PATH, self.TEST_DEV_PATH, run_as_root=True)
            utils.execute(
                'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                    (dest_inf, 'ignored')
                )
        self._mox.ReplayAll()

    def test_fetch_to_raw(self):
        SRC_INFO = ("image: qemu.qcow2\n"
                    "file_format: qcow2 \n"
                    "virtual_size: 50M (52428800 bytes)\n"
                    "cluster_size: 65536\n"
                    "disk_size: 196K (200704 bytes)")
        DST_INFO = ("image: qemu.raw\n"
                    "file_format: raw\n"
                    "virtual_size: 50M (52428800 bytes)\n"
                    "cluster_size: 65536\n"
                    "disk_size: 196K (200704 bytes)\n")
        self._test_fetch_to_raw(src_inf=SRC_INFO, dest_inf=DST_INFO)
        image_utils.fetch_to_raw(context, self._image_service,
                                 self.TEST_IMAGE_ID, self.TEST_DEV_PATH)
        self._mox.VerifyAll()

    def test_fetch_to_raw_no_qemu_img(self):
        # Without qemu-img installed the fetch must be rejected.
        self._test_fetch_to_raw(has_qemu=False)
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, self._image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)
        self._mox.VerifyAll()

    def test_fetch_to_raw_on_error_parsing_failed(self):
        # Missing file_format in qemu-img output -> unacceptable image.
        SRC_INFO_NO_FORMAT = ("image: qemu.qcow2\n"
                              "virtual_size: 50M (52428800 bytes)\n"
                              "cluster_size: 65536\n"
                              "disk_size: 196K (200704 bytes)")
        self._test_fetch_to_raw(src_inf=SRC_INFO_NO_FORMAT)
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, self._image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)
        self._mox.VerifyAll()

    def test_fetch_to_raw_on_error_backing_file(self):
        # Images with a backing file are rejected.
        SRC_INFO_BACKING_FILE = ("image: qemu.qcow2\n"
                                 "backing_file: qemu.qcow2\n"
                                 "file_format: qcow2 \n"
                                 "virtual_size: 50M (52428800 bytes)\n"
                                 "cluster_size: 65536\n"
                                 "disk_size: 196K (200704 bytes)")
        self._test_fetch_to_raw(src_inf=SRC_INFO_BACKING_FILE)
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, self._image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)
        self._mox.VerifyAll()

    def test_fetch_to_raw_on_error_not_convert_to_raw(self):
        # Conversion that still reports qcow2 afterwards must fail.
        IMG_INFO = ("image: qemu.qcow2\n"
                    "file_format: qcow2 \n"
                    "virtual_size: 50M (52428800 bytes)\n"
                    "cluster_size: 65536\n"
                    "disk_size: 196K (200704 bytes)")
        self._test_fetch_to_raw(src_inf=IMG_INFO, dest_inf=IMG_INFO)
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, self._image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)

    def test_fetch_to_raw_on_error_image_size(self):
        # Virtual size (2G) larger than the target volume (1G) must fail.
        TEST_VOLUME_SIZE = 1
        SRC_INFO = ("image: qemu.qcow2\n"
                    "file_format: qcow2 \n"
                    "virtual_size: 2G (2147483648 bytes)\n"
                    "cluster_size: 65536\n"
                    "disk_size: 196K (200704 bytes)")
        self._test_fetch_to_raw(src_inf=SRC_INFO)
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, self._image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH,
                          size=TEST_VOLUME_SIZE)

    def _test_fetch_verify_image(self, qemu_info, volume_size=1):
        # Common driver: fetch_verify_image must raise ImageUnacceptable
        # for the supplied (bad) qemu-img info output.
        fake_image_service = FakeImageService()
        mox = self._mox
        mox.StubOutWithMock(image_utils, 'fetch')
        mox.StubOutWithMock(utils, 'execute')
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (qemu_info, 'ignored')
            )
        self._mox.ReplayAll()
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH,
                          size=volume_size)

    def test_fetch_verify_image_with_backing_file(self):
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file_format: qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        self._test_fetch_verify_image(TEST_RETURN)

    def test_fetch_verify_image_without_file_format(self):
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        self._test_fetch_verify_image(TEST_RETURN)

    def test_fetch_verify_image_image_size(self):
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "file_format: qcow2\n"\
                      "virtual_size: 2G (2147483648 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        self._test_fetch_verify_image(TEST_RETURN)

    def test_upload_volume(self):
        # qcow2 target: volume is converted before upload.
        image_meta = {'id': 1, 'disk_format': 'qcow2'}
        TEST_RET = "image: qemu.qcow2\n"\
                   "file_format: qcow2 \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"
        m = self._mox
        m.StubOutWithMock(utils, 'execute')
        utils.execute('qemu-img', 'convert', '-O', 'qcow2',
                      mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            mox.IgnoreArg(), run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        m.ReplayAll()
        image_utils.upload_volume(context, FakeImageService(),
                                  image_meta, '/dev/loop1')
        m.VerifyAll()

    def test_upload_volume_with_raw_image(self):
        # raw target: no conversion expected (convert_image stubbed,
        # no expectation recorded).
        image_meta = {'id': 1, 'disk_format': 'raw'}
        mox = self._mox
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.ReplayAll()
        with tempfile.NamedTemporaryFile() as f:
            image_utils.upload_volume(context, FakeImageService(),
                                      image_meta, f.name)
        mox.VerifyAll()

    def test_upload_volume_on_error(self):
        # Conversion producing the wrong format (vhd != qcow2) must fail.
        image_meta = {'id': 1, 'disk_format': 'qcow2'}
        TEST_RET = "image: qemu.vhd\n"\
                   "file_format: vhd \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"
        m = self._mox
        m.StubOutWithMock(utils, 'execute')
        utils.execute('qemu-img', 'convert', '-O', 'qcow2',
                      mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            mox.IgnoreArg(), run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        m.ReplayAll()
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.upload_volume,
                          context, FakeImageService(),
                          image_meta, '/dev/loop1')
        m.VerifyAll()
class TestExtractTo(test.TestCase):
    def test_extract_to_calls_tar(self):
        """extract_targz should shell out to tar -xzf into the target dir."""
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(utils, 'execute') if False else None
        utils.execute(
            'tar', '-xzf', 'archive.tgz', '-C', 'targetpath').AndReturn(
                ('ignored', 'ignored')
            )
        self.mox.ReplayAll()
        image_utils.extract_targz('archive.tgz', 'targetpath')
        self.mox.VerifyAll()
class TestSetVhdParent(test.TestCase):
    def test_vhd_util_call(self):
        """set_vhd_parent should call vhd-util modify with child/parent."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'modify', '-n', 'child', '-p', 'parent').AndReturn(
                ('ignored', 'ignored')
            )
        self.mox.ReplayAll()
        image_utils.set_vhd_parent('child', 'parent')
        self.mox.VerifyAll()
class TestFixVhdChain(test.TestCase):
    """fix_vhd_chain should reparent each VHD onto its successor; chains of
    length < 2 need no reparenting at all."""

    def test_empty_chain(self):
        self.mox.StubOutWithMock(image_utils, 'set_vhd_parent')
        self.mox.ReplayAll()
        image_utils.fix_vhd_chain([])

    def test_single_vhd_file_chain(self):
        self.mox.StubOutWithMock(image_utils, 'set_vhd_parent')
        self.mox.ReplayAll()
        image_utils.fix_vhd_chain(['0.vhd'])

    def test_chain_with_two_elements(self):
        self.mox.StubOutWithMock(image_utils, 'set_vhd_parent')
        image_utils.set_vhd_parent('0.vhd', '1.vhd')
        self.mox.ReplayAll()
        image_utils.fix_vhd_chain(['0.vhd', '1.vhd'])
class TestGetSize(test.TestCase):
    def test_vhd_util_call(self):
        """get_vhd_size should parse vhd-util query output as an int."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'query', '-n', 'vhdfile', '-v').AndReturn(
                ('1024', 'ignored')
            )
        self.mox.ReplayAll()
        size = image_utils.get_vhd_size('vhdfile')
        self.mox.VerifyAll()
        self.assertEqual(1024, size)
class TestResize(test.TestCase):
    def test_vhd_util_call(self):
        """resize_vhd should call vhd-util resize with size and journal."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'resize', '-n', 'vhdfile', '-s', '1024',
            '-j', 'journal').AndReturn(('ignored', 'ignored'))
        self.mox.ReplayAll()
        image_utils.resize_vhd('vhdfile', 1024, 'journal')
        self.mox.VerifyAll()
class TestCoalesce(test.TestCase):
    def test_vhd_util_call(self):
        """coalesce_vhd should call vhd-util coalesce on the file."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'coalesce', '-n', 'vhdfile'
        ).AndReturn(('ignored', 'ignored'))
        self.mox.ReplayAll()
        image_utils.coalesce_vhd('vhdfile')
        self.mox.VerifyAll()
@contextlib.contextmanager
def fake_context(return_value):
    # Test helper: a context manager that simply yields return_value,
    # used to stand in for temporary_dir()/temporary_file().
    yield return_value
class TestTemporaryFile(test.TestCase):
    """temporary_file() must unlink its file on both normal exit and error."""

    def _expect_create_and_unlink(self):
        # Shared expectation: one create, one unlink of the same path.
        self.mox.StubOutWithMock(image_utils, 'create_temporary_file')
        self.mox.StubOutWithMock(image_utils.os, 'unlink')
        image_utils.create_temporary_file().AndReturn('somefile')
        image_utils.os.unlink('somefile')
        self.mox.ReplayAll()

    def test_file_unlinked(self):
        self._expect_create_and_unlink()
        with image_utils.temporary_file():
            pass

    def test_file_unlinked_on_error(self):
        self._expect_create_and_unlink()

        def raise_inside():
            with image_utils.temporary_file():
                raise test.TestingException()

        self.assertRaises(test.TestingException, raise_inside)
class TestCoalesceChain(test.TestCase):
    """coalesce_chain should return the tail VHD, coalescing each link."""

    def test_single_vhd(self):
        # Nothing to do for a single-element chain.
        self.mox.StubOutWithMock(image_utils, 'get_vhd_size')
        self.mox.StubOutWithMock(image_utils, 'resize_vhd')
        self.mox.StubOutWithMock(image_utils, 'coalesce_vhd')
        self.mox.ReplayAll()
        tail = image_utils.coalesce_chain(['0.vhd'])
        self.mox.VerifyAll()
        self.assertEqual('0.vhd', tail)

    def test_chain_of_two_vhds(self):
        self.mox.StubOutWithMock(image_utils, 'get_vhd_size')
        self.mox.StubOutWithMock(image_utils, 'temporary_dir')
        self.mox.StubOutWithMock(image_utils, 'resize_vhd')
        self.mox.StubOutWithMock(image_utils, 'coalesce_vhd')
        self.mox.StubOutWithMock(image_utils, 'temporary_file')
        # The parent is resized to the child's size, then coalesced into it.
        image_utils.get_vhd_size('0.vhd').AndReturn(1024)
        image_utils.temporary_dir().AndReturn(fake_context('tdir'))
        image_utils.resize_vhd('1.vhd', 1024, 'tdir/vhd-util-resize-journal')
        image_utils.coalesce_vhd('0.vhd')
        self.mox.ReplayAll()
        tail = image_utils.coalesce_chain(['0.vhd', '1.vhd'])
        self.mox.VerifyAll()
        self.assertEqual('1.vhd', tail)
class TestDiscoverChain(test.TestCase):
    def test_discovery_calls(self):
        """discover_vhd_chain probes N.vhd files until one is missing."""
        self.mox.StubOutWithMock(image_utils, 'file_exist')
        image_utils.file_exist('some/path/0.vhd').AndReturn(True)
        image_utils.file_exist('some/path/1.vhd').AndReturn(True)
        image_utils.file_exist('some/path/2.vhd').AndReturn(False)
        self.mox.ReplayAll()
        chain = image_utils.discover_vhd_chain('some/path')
        self.mox.VerifyAll()
        self.assertEqual(
            ['some/path/0.vhd', 'some/path/1.vhd'], chain)
class TestXenServerImageToCoalescedVhd(test.TestCase):
    def test_calls(self):
        """The image tarball is extracted, its VHD chain discovered, fixed
        and coalesced, and the result renamed over the original image."""
        self.mox.StubOutWithMock(image_utils, 'temporary_dir')
        self.mox.StubOutWithMock(image_utils, 'extract_targz')
        self.mox.StubOutWithMock(image_utils, 'discover_vhd_chain')
        self.mox.StubOutWithMock(image_utils, 'fix_vhd_chain')
        self.mox.StubOutWithMock(image_utils, 'coalesce_chain')
        self.mox.StubOutWithMock(image_utils.os, 'unlink')
        self.mox.StubOutWithMock(image_utils, 'rename_file')
        image_utils.temporary_dir().AndReturn(fake_context('somedir'))
        image_utils.extract_targz('image', 'somedir')
        image_utils.discover_vhd_chain('somedir').AndReturn(
            ['somedir/0.vhd', 'somedir/1.vhd'])
        image_utils.fix_vhd_chain(['somedir/0.vhd', 'somedir/1.vhd'])
        image_utils.coalesce_chain(
            ['somedir/0.vhd', 'somedir/1.vhd']).AndReturn('somedir/1.vhd')
        image_utils.os.unlink('image')
        image_utils.rename_file('somedir/1.vhd', 'image')
        self.mox.ReplayAll()
        image_utils.replace_xenserver_image_with_coalesced_vhd('image')
        self.mox.VerifyAll()
| |
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import periodictable as pt
from lmfit import Parameters
from lmfit import minimize
import ResoFit._utilities as fit_util
from ResoFit._gap_functions import y_gap_for_fitting
from ResoFit._gap_functions import y_gap_for_iso_fitting
from ResoFit._utilities import Layer
from ResoFit.experiment import Experiment
from ResoFit.simulation import Simulation
class FitResonance(object):
def __init__(self, spectra_file, data_file,
             calibrated_offset_us, calibrated_source_to_detector_m,
             folder, norm_factor=1, baseline=False,
             norm_to_file=None, slice_start=None, slice_end=None,
             energy_min=1e-5, energy_max=1000, energy_step=0.01,
             database='ENDF_VII'):
    """Load the experiment, apply slicing/normalization, and interpolate
    the measured attenuation onto the fitting energy grid.

    :param spectra_file: spectra file name (passed to Experiment)
    :param data_file: data file name (passed to Experiment)
    :param calibrated_offset_us: calibrated time offset in microseconds
    :param calibrated_source_to_detector_m: calibrated flight path in meters
    :param folder: folder containing the input files
    :param norm_factor: normalization factor applied when norm_to_file is given
    :param baseline: whether a baseline is removed during x/y scaling
    :param norm_to_file: optional file to normalize the data against
    :param slice_start: optional first index kept from the raw data
    :param slice_end: optional last index kept from the raw data
    :param energy_min: lower bound of the fitting energy grid (eV)
    :param energy_max: upper bound of the fitting energy grid (eV)
    :param energy_step: step of the fitting energy grid (eV)
    :param database: cross-section database name (default 'ENDF_VII')
    """
    self.experiment = Experiment(spectra_file=spectra_file, data_file=data_file, folder=folder)
    self.energy_min = energy_min
    self.energy_max = energy_max
    self.energy_step = energy_step
    self.database = database
    self.calibrated_offset_us = calibrated_offset_us
    self.calibrated_source_to_detector_m = calibrated_source_to_detector_m
    self.raw_layer = None
    # Slice first so normalization/interpolation see the trimmed data.
    self.experiment.slice(start=slice_start, end=slice_end)
    self.baseline = baseline
    if norm_to_file is not None:
        self.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
    # Interpolated experimental curve on the (energy, attenuation) grid
    # used by all subsequent fits.
    self.exp_x_interp, self.exp_y_interp = self.experiment.xy_scaled(
        energy_min=self.energy_min,
        energy_max=self.energy_max,
        energy_step=self.energy_step,
        x_type='energy', y_type='attenuation',
        offset_us=self.calibrated_offset_us,
        source_to_detector_m=self.calibrated_source_to_detector_m,
        baseline=self.baseline
    )
    # Results populated by fit() / fit_iso(); all None/empty until then.
    self.fit_result = None
    self.fitted_density_gcm3 = None
    self.fitted_thickness_mm = None
    self.fitted_residual = None
    self.fitted_gap = None
    self.fitted_fjac = None
    self.fitted_layer = None
    self.fitted_simulation = None
    self.layer_list = None
    self.raw_layer = None
    self.fitted_iso_result = None
    self.fitted_iso_residual = None
    self.params_for_fit = None
    self.params_for_iso_fit = None
    self.isotope_stack = {}
    self.sample_vary = None
    self.df = None
    # self.peak_map_full = None
    # self.peak_map_indexed = None
def fit(self, raw_layer: fit_util.Layer, vary='density', each_step=False):
    """Fit layer density or thickness against the measured attenuation.

    :param raw_layer: starting sample description (thickness/density per layer)
    :param vary: which quantity to fit: 'density' (default), 'thickness',
        or 'none' (everything fixed)
    :param each_step: forwarded to the residual function (verbose stepping)
    :return: the lmfit MinimizerResult
    """
    if vary not in ['density', 'thickness', 'none']:
        raise ValueError("'vary=' can only be one of ['density', 'thickness', 'none']")
    # Default vary is: 'density'
    self.sample_vary = vary
    thickness_vary_tag = False
    density_vary_tag = True
    if vary == 'thickness':
        thickness_vary_tag = True
        density_vary_tag = False
    if vary == 'none':
        density_vary_tag = False
    self.raw_layer = raw_layer
    '''Load params'''
    print(raw_layer)
    self.layer_list = list(raw_layer.info.keys())
    self.params_for_fit = Parameters()
    for _each_layer in self.layer_list:
        # Fill a missing density from the periodictable element default.
        # NOTE(review): 'is np.NaN' is an identity check — it only matches
        # the exact np.NaN object, not any float('nan'); confirm how the
        # Layer stores missing densities.
        if self.raw_layer.info[_each_layer]['density']['value'] is np.NaN:
            self.raw_layer.info[_each_layer]['density']['value'] = pt.elements.isotope(_each_layer).density
        self.params_for_fit.add('thickness_mm_' + _each_layer,
                                value=self.raw_layer.info[_each_layer]['thickness']['value'],
                                vary=thickness_vary_tag,
                                min=0)
        self.params_for_fit.add('density_gcm3_' + _each_layer,
                                value=self.raw_layer.info[_each_layer]['density']['value'],
                                vary=density_vary_tag,
                                min=0)
    # Print before
    print("+----------------- Fitting ({}) -----------------+\nParams before:".format(vary))
    self.params_for_fit.pretty_print()
    # Fitting
    self.fit_result = minimize(y_gap_for_fitting, self.params_for_fit, method='leastsq',
                               args=(self.exp_x_interp, self.exp_y_interp, self.layer_list,
                                     self.energy_min, self.energy_max, self.energy_step,
                                     self.database, each_step))
    # Print after
    print("\nParams after:")
    self.fit_result.__dict__['params'].pretty_print()
    # Print chi^2
    self.fitted_residual = self.fit_result.__dict__['residual']
    print("Fitting chi^2 : {}\n".format(sum(self.fitted_residual ** 2)))
    '''Export fitted params as Layer()'''
    # Save the fitted 'density' or 'thickness' in Layer()
    self.fitted_layer = Layer()
    for _each_layer in self.layer_list:
        self.fitted_layer.add_layer(layer=_each_layer,
                                    thickness_mm=self.fit_result.__dict__['params'].valuesdict()[
                                        'thickness_mm_' + _each_layer],
                                    density_gcm3=self.fit_result.__dict__['params'].valuesdict()[
                                        'density_gcm3_' + _each_layer])
    # self.fitted_fjac = self.fit_result.__dict__['fjac']
    # print(self.fit_result.__dict__['fjac'][0])
    '''Create fitted simulation'''
    # Rebuild a Simulation from the fitted layers for later comparison.
    self.fitted_simulation = Simulation(energy_min=self.energy_min,
                                        energy_max=self.energy_max,
                                        energy_step=self.energy_step,
                                        database=self.database)
    for each_layer in self.layer_list:
        self.fitted_simulation.add_layer(layer=each_layer,
                                         thickness_mm=self.fitted_layer.info[each_layer]['thickness'][
                                             'value'],
                                         density_gcm3=self.fitted_layer.info[each_layer]['density']['value'])
    return self.fit_result
    def fit_iso(self, layer, each_step=False):
        """Fit the isotopic ratios of one layer against the interpolated
        experimental data (layer thickness and density stay fixed).

        The last isotope's ratio is constrained to ``1 - sum(of the others)``
        so the ratios always sum to 1.

        :param layer: name of the layer whose isotopic ratios are fitted
        :type layer: str
        :param each_step: if True, passed through to y_gap_for_iso_fitting to
            report each fitting step
        :type each_step: bool
        :return: None; the lmfit result is stored in self.fitted_iso_result
        :rtype: None
        """
        self.params_for_iso_fit = Parameters()
        # Current isotope list and ratios for this layer, taken from the
        # already-fitted simulation.
        self.isotope_stack[layer] = {'list': self.fitted_simulation.o_reso.stack[layer][layer]['isotopes']['list'],
                                     'ratios': self.fitted_simulation.o_reso.stack[layer][layer]['isotopes'][
                                         'isotopic_ratio']}
        _formatted_isotope_list = []
        _params_name_list = []
        # Form list of param name
        # lmfit parameter names cannot contain '-', so e.g. 'Ag-107' is
        # flipped into '107Ag'.
        for _isotope_index in range(len(self.isotope_stack[layer]['list'])):
            _split = self.isotope_stack[layer]['list'][_isotope_index].split('-')
            _flip = _split[::-1]
            _formatted_isotope_name = ''.join(_flip)
            # _formatted_isotope_name = self.isotope_stack[layer]['list'][_isotope_index].replace('-', '_')
            _formatted_isotope_list.append(_formatted_isotope_name)
            _params_name_list = _formatted_isotope_list
        # Form Parameters() for fitting
        # Each isotopic ratio is bounded to the physical range [0, 1].
        for _name_index in range(len(_params_name_list)):
            self.params_for_iso_fit.add(_params_name_list[_name_index],
                                        value=self.isotope_stack[layer]['ratios'][_name_index],
                                        min=0,
                                        max=1)
        # Constrain sum of isotope ratios to be 1
        # _params_name_list_temp = _params_name_list[:]
        # _constraint = '+'.join(_params_name_list_temp)
        # self.params_for_iso_fit.add('sum', expr=_constraint)
        # The last parameter is tied to '1 - (all of the others)' via an lmfit
        # constraint expression, so it is not varied independently.
        _constraint_param = _params_name_list[-1]
        _params_name_list_temp = _params_name_list[:]
        _params_name_list_temp.remove(_constraint_param)
        _constraint = '-'.join(_params_name_list_temp)
        _constraint = '1-' + _constraint
        self.params_for_iso_fit[_constraint_param].set(expr=_constraint)
        # Print params before
        print("+----------------- Fitting (isotopic at.%) -----------------+\nParams before:")
        self.params_for_iso_fit.pretty_print()
        # Fitting
        self.fitted_iso_result = minimize(y_gap_for_iso_fitting, self.params_for_iso_fit, method='leastsq',
                                          args=(self.exp_x_interp, self.exp_y_interp, layer, _formatted_isotope_list,
                                                self.fitted_simulation, each_step))
        # Print params after
        print("\nParams after:")
        self.fitted_iso_result.__dict__['params'].pretty_print()
        # Print chi^2 (lmfit's chisqr attribute)
        self.fitted_iso_residual = self.fitted_iso_result.__dict__['residual']
        print("Fit iso chi^2 : {}\n".format(self.fitted_iso_result.__dict__['chisqr']))
        return
def molar_conc(self):
molar_conc_units = 'mol/cm3'
print("Molar-conc. ({})\tBefore_fit\tAfter_fit".format(molar_conc_units))
for _each_layer in self.layer_list:
molar_mass_value = self.fitted_simulation.o_reso.stack[_each_layer][_each_layer]['molar_mass']['value']
molar_mass_units = self.fitted_simulation.o_reso.stack[_each_layer][_each_layer]['molar_mass']['units']
# Adding molar_mass to fitted_layer info
self.fitted_layer.info[_each_layer]['molar_mass']['value'] = molar_mass_value
self.fitted_layer.info[_each_layer]['molar_mass']['units'] = molar_mass_units
# Adding molar_mass to raw_layer info
self.raw_layer.info[_each_layer]['molar_mass']['value'] = molar_mass_value
self.raw_layer.info[_each_layer]['molar_mass']['units'] = molar_mass_units
# Adding molar_concentration to fitted_layer info
molar_conc_value = self.fitted_layer.info[_each_layer]['density']['value'] / molar_mass_value
self.fitted_layer.info[_each_layer]['molar_conc']['value'] = molar_conc_value
self.fitted_layer.info[_each_layer]['molar_conc']['units'] = molar_conc_units
# Calculate starting molar_concentration and fitted_layer info
start_molar_conc_value = self.raw_layer.info[_each_layer]['density']['value'] / molar_mass_value
self.raw_layer.info[_each_layer]['molar_conc']['value'] = start_molar_conc_value
self.raw_layer.info[_each_layer]['molar_conc']['units'] = molar_conc_units
# molar_conc_output[_each_layer] = {'Before_fit': start_molar_conc_value,
# 'After_fit': molar_conc_value}
print("{}\t{}\t{}".format(_each_layer, start_molar_conc_value, molar_conc_value))
print('\n')
return self.fitted_layer.info
    def index_peak(self, thres, min_dist, map_thres=0.01, map_min_dist=20, rel_tol=5e-3, isotope=False):
        """Detect peaks in the experimental data and index them against the
        theoretical peak map of the fitted simulation.

        :param thres: peak-detection threshold for the experimental signal
        :param min_dist: minimum distance between detected experimental peaks
        :param map_thres: threshold used to build the theoretical peak map
        :param map_min_dist: minimum distance used to build the theoretical peak map
        :param rel_tol: relative tolerance when matching experimental peaks to the map
        :param isotope: currently ignored -- the `isotope=` forwarding below is
            commented out (TODO confirm intended behavior)
        :return: the indexed peak map (self.experiment.o_peak.peak_map_indexed)
        """
        # Only search for peaks once; reuse an earlier detection if present.
        if self.experiment.o_peak is None:
            self.experiment.find_peak(thres=thres, min_dist=min_dist)
            # Convert detected peak positions to energy (eV) using the
            # calibrated time-of-flight parameters.
            self.experiment._scale_peak_with_ev(energy_min=self.energy_min,
                                                energy_max=self.energy_max,
                                                offset_us=self.calibrated_offset_us,
                                                source_to_detector_m=self.calibrated_source_to_detector_m)
        assert self.experiment.o_peak.peak_df is not None
        assert self.experiment.o_peak.peak_df_scaled is not None
        # Theoretical peak positions from the fitted simulation.
        _peak_map = self.fitted_simulation.peak_map(thres=map_thres,
                                                    min_dist=map_min_dist,
                                                    impr_reso=True,
                                                    # isotope=isotope,
                                                    )
        self.experiment.o_peak.peak_map_full = _peak_map
        self.experiment.o_peak.index_peak(peak_map=_peak_map,
                                          rel_tol=rel_tol)
        return self.experiment.o_peak.peak_map_indexed
# def analyze_peak(self):
# pass
def plot(self, error=True, table=True, grid=True, before=False, interp=False, total=True,
all_elements=False, all_isotopes=False, items_to_plot=None,
peak_mark=True, peak_id='indexed',
y_type='transmission', x_type='energy', t_unit='us', logx=False, logy=False,
save_fig=False):
"""
:param error:
:type error:
:param table:
:type table:
:param grid:
:type grid:
:param before:
:type before:
:param interp:
:type interp:
:param total:
:type total:
:param all_elements:
:type all_elements:
:param all_isotopes:
:type all_isotopes:
:param items_to_plot:
:type items_to_plot:
:param peak_mark:
:type peak_mark:
:param peak_id:
:type peak_id:
:param y_type:
:type y_type:
:param x_type:
:type x_type:
:param t_unit:
:type t_unit:
:param logx:
:type logx:
:param logy:
:type logy:
:param save_fig:
:type save_fig:
:return:
:rtype:
"""
# Form signals from fitted_layer
if self.fitted_simulation is None:
self.fitted_simulation = Simulation(energy_min=self.energy_min,
energy_max=self.energy_max,
energy_step=self.energy_step)
for each_layer in self.layer_list:
self.fitted_simulation.add_layer(layer=each_layer,
thickness_mm=self.fitted_layer.info[each_layer]['thickness'][
'value'],
density_gcm3=self.fitted_layer.info[each_layer]['density'][
'value'])
if peak_id not in ['indexed', 'all']:
raise ValueError("'peak=' must be one of ['indexed', 'full'].")
simu_x = self.fitted_simulation.get_x(x_type='energy')
simu_y = self.fitted_simulation.get_y(y_type='attenuation')
# Get plot labels
simu_label = 'Fit'
simu_before_label = 'Fit_init'
exp_label = 'Exp'
exp_interp_label = 'Exp_interp'
sample_name = ' & '.join(self.layer_list)
if self.sample_vary is None:
raise ValueError("Vary type ['density'|'thickness'] is not set.")
fig_title = 'Fitting result of sample (' + sample_name + ')'
# Create pd.DataFrame
self.df = pd.DataFrame()
# Clear any left plt
plt.close()
# plot table + graph
if table is True:
ax1 = plt.subplot2grid(shape=(10, 10), loc=(0, 1), rowspan=8, colspan=8)
# plot graph only
else:
ax1 = plt.subplot(111)
# Plot after fitting
if total is True:
ax1.plot(simu_x, simu_y, 'b-', label=simu_label, linewidth=1)
# Save to df
_live_df_x_label = simu_label + '_eV'
_live_df_y_label = simu_label + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = simu_y
"""Plot options"""
# 1.
if before is True:
# Plot before fitting
# Form signals from raw_layer
simulation = Simulation(energy_min=self.energy_min,
energy_max=self.energy_max,
energy_step=self.energy_step)
for each_layer in self.layer_list:
simulation.add_layer(layer=each_layer,
thickness_mm=self.raw_layer.info[each_layer]['thickness']['value'],
density_gcm3=self.raw_layer.info[each_layer]['density']['value'])
simu_x = simulation.get_x(x_type='energy')
simu_y_before = simulation.get_y(y_type='attenuation')
ax1.plot(simu_x, simu_y_before,
'c-.', label=simu_before_label, linewidth=1)
# Save to df
_live_df_x_label = simu_before_label + '_eV'
_live_df_y_label = simu_before_label + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = simu_y_before
# 2.
if interp is True:
# Plot exp. data (interpolated)
x_interp, y_interp = self.experiment.xy_scaled(energy_max=self.energy_max, energy_min=self.energy_min,
energy_step=self.energy_step,
x_type='energy', y_type='attenuation',
baseline=self.baseline,
offset_us=self.calibrated_offset_us,
source_to_detector_m=self.calibrated_source_to_detector_m)
ax1.plot(x_interp, y_interp, 'r:', label=exp_interp_label, linewidth=1)
# Save to df
_live_df_x_label = exp_interp_label + '_eV'
_live_df_y_label = exp_interp_label + '_attenuation'
self.df[_live_df_x_label] = x_interp
self.df[_live_df_y_label] = y_interp
else:
# Plot exp. data (raw)
exp_x = self.experiment.get_x(x_type='energy', offset_us=self.calibrated_offset_us,
source_to_detector_m=self.calibrated_source_to_detector_m)
exp_y = self.experiment.get_y(y_type='attenuation', baseline=self.baseline)
ax1.plot(exp_x, exp_y,
linestyle='-', linewidth=1,
marker='o', markersize=2,
color='r', label=exp_label)
# Save to df
_df = pd.DataFrame()
_live_df_x_label = exp_label + '_eV'
_live_df_y_label = exp_label + '_attenuation'
_df[_live_df_x_label] = exp_x
_df[_live_df_y_label] = exp_y
# Concatenate since the length of raw and simu are not the same
self.df = pd.concat([self.df, _df], axis=1)
# 3.
if error is True:
# Plot fitting differences
error_label = 'Diff.'
_move_below_by = 0.2
moved_fitted_residual = self.fitted_residual - _move_below_by
ax1.plot(simu_x, moved_fitted_residual, 'g-', label=error_label, linewidth=1, alpha=1)
# Save to df
_live_df_x_label = error_label + '_eV'
_live_df_y_label = error_label + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = moved_fitted_residual
# 4.
if all_elements is True:
# show signal from each elements
_stack_signal = self.fitted_simulation.o_reso.stack_signal
_stack = self.fitted_simulation.o_reso.stack
y_axis_tag = 'attenuation'
for _layer in _stack.keys():
for _element in _stack[_layer]['elements']:
_y_axis = _stack_signal[_layer][_element][y_axis_tag]
ax1.plot(simu_x, _y_axis, label="{}".format(_element), linewidth=1, alpha=0.85)
# Save to df
_live_df_x_label = _element + '_eV'
_live_df_y_label = _element + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = _y_axis
# 4.
if all_isotopes is True:
# show signal from each isotopes
_stack_signal = self.fitted_simulation.o_reso.stack_signal
_stack = self.fitted_simulation.o_reso.stack
y_axis_tag = 'attenuation'
for _layer in _stack.keys():
for _element in _stack[_layer]['elements']:
for _isotope in _stack[_layer][_element]['isotopes']['list']:
_y_axis = _stack_signal[_layer][_element][_isotope][y_axis_tag]
ax1.plot(simu_x, _y_axis, label="{}".format(_isotope), linewidth=1, alpha=1)
# Save to df
_live_df_x_label = _isotope + '_eV'
_live_df_y_label = _isotope + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = _y_axis
# 5.
if items_to_plot is not None:
# plot specified from 'items_to_plot'
y_axis_tag = 'attenuation'
items = fit_util.Items(o_reso=self.fitted_simulation.o_reso, database=self.database)
shaped_items = items.shaped(items_list=items_to_plot)
_signal_dict = items.values(y_axis_type=y_axis_tag)
for _each_label in list(_signal_dict.keys()):
ax1.plot(simu_x, _signal_dict[_each_label], '--', label=_each_label, linewidth=1, alpha=1)
# Save to df
_live_df_x_label = _each_label + '_eV'
_live_df_y_label = _each_label + '_attenuation'
self.df[_live_df_x_label] = simu_x
self.df[_live_df_y_label] = _signal_dict[_each_label]
# plot peaks detected and indexed
if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
_peak_df_scaled = self.experiment.o_peak.peak_df_scaled
_peak_map_indexed = self.experiment.o_peak.peak_map_indexed
_peak_map_full = self.experiment.o_peak.peak_map_full
if peak_mark is True:
ax1.plot(_peak_df_scaled['x'],
_peak_df_scaled['y'],
'kx', label='_nolegend_')
if error is False:
ax1.set_ylim(ymin=-0.1)
for _ele_name in _peak_map_indexed.keys():
if peak_id is 'all':
ax1.plot(_peak_map_full[_ele_name]['ideal']['x'],
[-0.05] * len(_peak_map_full[_ele_name]['ideal']['x']),
'|', ms=10,
label=_ele_name)
elif peak_id is 'indexed':
ax1.plot(_peak_map_indexed[_ele_name]['exp']['x'],
[-0.05] * len(_peak_map_indexed[_ele_name]['exp']['x']),
'|', ms=8,
label=_ele_name)
if 'peak_span' in _peak_map_indexed[_ele_name].keys():
_data_point_x = _peak_map_indexed[_ele_name]['peak_span']['energy_ev']
_data_point_y = _peak_map_indexed[_ele_name]['peak_span']['y']
ax1.scatter(_data_point_x,
_data_point_y,
label='_nolegend_')
# Set plot limit and captions
fit_util.set_plt(ax=ax1, fig_title=fig_title, grid=grid,
x_type=x_type, y_type=y_type, t_unit=t_unit, logx=logx, logy=logy)
# Plot table
if table is True:
if self.fitted_iso_result is None:
columns = list(self.fit_result.__dict__['params'].valuesdict().keys())
else:
columns = self.fit_result.__dict__['var_names']
columns_to_show_dict = {}
for _each in columns:
_split = _each.split('_')
if _split[0] == 'thickness':
_name_to_show = r'$d_{\rm{' + _split[-1] + '}}$' + ' (mm)'
else:
_name_to_show = r'$\rho_{\rm{' + _split[-1] + '}}$' + ' (g/cm$^3$)'
columns_to_show_dict[_each] = _name_to_show
columns_to_show = list(columns_to_show_dict.values())
rows = ['Before', 'After']
_row_before = []
_row_after = []
for _each in columns:
_row_after.append(round(self.fit_result.__dict__['params'].valuesdict()[_each], 3))
_row_before.append(round(self.params_for_fit.valuesdict()[_each], 3))
if self.fitted_iso_result is not None:
_iso_columns = list(self.fitted_iso_result.__dict__['params'].valuesdict().keys())
columns = columns + _iso_columns
_iso_columns_to_show_dict = {}
for _each_iso in _iso_columns:
_num_str = re.findall('\d+', _each_iso)[0]
_name_str = _each_iso[0]
_sup_name = r"$^{" + _num_str + "}$" + _name_str
_iso_columns_to_show_dict[_each_iso] = _sup_name
_iso_columns_to_show = list(_iso_columns_to_show_dict.values())
columns_to_show = columns_to_show + _iso_columns_to_show
for _each in _iso_columns:
_row_after.append(round(self.fitted_iso_result.__dict__['params'].valuesdict()[_each], 3))
_row_before.append(round(self.params_for_iso_fit.valuesdict()[_each], 3))
table = ax1.table(rowLabels=rows, colLabels=columns_to_show, cellText=[_row_before, _row_after],
loc='upper right',
bbox=[0, -0.33, 1.0, 0.18])
table.auto_set_font_size(False)
table.set_fontsize(10)
plt.tight_layout()
if save_fig:
_sample_name = '_'.join(self.layer_list)
_filename = 'fitting_' + _sample_name + '.png'
plt.savefig(_filename, dpi=600, transparent=True)
plt.close()
else:
plt.show()
def export(self, filename=None):
if self.df is None:
raise ValueError("pd.DataFrame is empty, please run required step: FitResonance.plot()")
elif filename is None:
self.df.to_clipboard(excel=True)
else:
self.df.to_csv(filename)
| |
'''\
rendering HTML text with piddle
HTMLPiddler.py
jjk 02/02/00 001 first working version
jjk 02/03/00 002 some enhancements, renamed from piddleTaggedText.py
jjk 02/04/00 003 more enhancements, renamed from piddleHtmlWriter.py
Purpose:
The HTMLPiddler class accepts an HTML string and a few layout constraints,
and renders the HTML on a piddle canvas. This code is mainly a demonstration
and proof-of-concept, and is not really intended for real work.
Usage:
piddler = HTMLPiddler(html=aHtmlString,
start=(startX, startY),
xLimits=(leftMarginX, rightMarginX),
font=aPiddleFontForDefault,
color=aPiddleColorForDefault)
piddler.renderOn(aPiddleCanvas)
see also demo() functions at end of this file
Example:
Just run this module as a python script (must have piddle installed)
Limitations:
-only renders a subset of HTML (mostly text-related tags)
-content between unsupported tags may not be rendered at all
Implementation:
-derived largely on "demo07.py" from Jim Ahlstrom's WPY distribution (see credits)
-uses the parser-formatter-writer model from the python standard library
Credits:
PIDDLE
author: Joe Strout <joe@strout.net>, et al
download: http://www.strout.net/python/piddle/
disclaimers: see __copyrite_jim__ below
WPY
author: Jim Ahlstrom <jim@interet.com>
download: http://www.python.org/ftp/python/wpy/
disclaimers: see __copyrite_jim__ below
HTMLPiddler.py
author: Jeff Kunce <kuncej@mail.conservation.state.mo.us>
download: http://starship.python.net/crew/jjkunce/
disclaimers: * NO WARRANTIES * USE AT YOUR OWN RISK *
'''
import htmllib
import formatter
import string
from types import *
import piddle
TRACE = 0
class HTMLPiddler:
    """Render an HTML string on a piddle canvas within given x limits.

    jjk 02/01/00
    """

    def __init__(self, html='', start=(0, 0), xLimits=(0, 800), font=None, color=None):
        """Store the HTML and layout constraints; default to piddle's font."""
        self.html = html
        self.start = start
        self.xLimits = xLimits
        # Lazily fall back to the default piddle font only when none is given.
        self.font = font if font else piddle.Font()
        self.color = color

    def renderOn(self, aPiddleCanvas):
        """Parse self.html and draw it on aPiddleCanvas."""
        target = _HtmlPiddleWriter(self, aPiddleCanvas)
        parser = _HtmlParser(formatter.AbstractFormatter(target))
        parser.feed(self.html)
        parser.close()
class _HtmlParser(htmllib.HTMLParser):
    # Subclass that forwards anchor begin/end events to the formatter's
    # writer so hyperlink text can be drawn in a distinct color.
    def anchor_bgn(self, href, name, type):
        # Keep the base-class anchor bookkeeping, then notify the writer.
        htmllib.HTMLParser.anchor_bgn(self, href, name, type)
        self.formatter.writer.anchor_bgn(href, name, type)
    def anchor_end(self):
        htmllib.HTMLParser.anchor_end(self)
        self.formatter.writer.anchor_end()
class _HtmlPiddleWriter:
    """Writer for formatter.AbstractFormatter that draws text on a piddle canvas.

    Keeps the current pen position in (self.x, self.y), the pending line
    height in self.lineHeight, and the current piddle font/color.
    """
    # Point sizes for HTML heading tags; anything else uses DefaultFontSize.
    FontSizeDict = {"h1": 36, "h2": 24, "h3": 18, "h4": 12, "h5": 10, "h6": 8}
    DefaultFontSize = 12

    def __init__(self, aHTMLPiddler, aPiddleCanvas):
        """Capture layout constraints from the piddler; precompute font metrics."""
        self.piddler = aHTMLPiddler  # view = view
        self.pc = aPiddleCanvas
        self.anchor = None
        self.lineHeight = 0
        self.atbreak = 0
        self.color = self.piddler.color
        self.defaultFont = self.font = self.piddler.font
        s = "W" * 20
        x = self.pc.stringWidth(s, self.font)
        y = self.pc.fontHeight(self.font)
        # NOTE(review): the Python 2 original produced an int here; under
        # Python 3 this is float division. Piddle coordinates tolerate floats,
        # so the expression is left unchanged -- confirm before changing to //.
        x = (x + 19) / 20  # Largest character size
        self.fsizex = x
        self.fsizey = self.oldLineHeight = y
        self.indentSize = x * 3
        self.lmargin, self.rmargin = self.piddler.xLimits
        self.x, self.y = self.piddler.start
        self.indent = self.lmargin + (x / 3)

    def anchor_bgn(self, href, name, type):
        """Start of an <A> element: switch to 'link blue' while inside a href."""
        if href:
            self.oldcolor = self.color
            self.color = piddle.Color(0.0, 0.0, 200 / 255.0)
            self.anchor = (href, name, type)

    def anchor_end(self):
        """End of an <A> element: restore the pre-anchor color."""
        if self.anchor:
            self.color = self.oldcolor
            self.anchor = None

    # Start of methods required by the formatter
    def new_font(self, fontParams):
        """Select a new piddle font from formatter font params (size, i, b, tt)."""
        if TRACE:
            print('nf', fontParams)
        # fontParams is None, or the tuple (size, i, b, tt)
        if not fontParams:
            fontParams = (None, None, None, None)
        size = fontParams[0]
        try:
            points = self.FontSizeDict[size]
        except KeyError:
            points = self.DefaultFontSize
        if fontParams[3]:
            face = "courier"  # "modern"
        elif isinstance(size, str) and size[0] == "h":
            face = "helvetica"  # "swiss"
        else:
            face = "times"  # "roman"
        italic = fontParams[1]  # Italic indicator
        if italic is None:  # idiom fix: was `== None`
            italic = 0
        bold = fontParams[2]  # Bold indicator
        if bold is None:  # idiom fix: was `== None`
            bold = 0
        self.font = piddle.Font(points, bold, italic, face=face)
        x = self.pc.stringWidth('W' * 20, self.font)
        self.fsizex = (x + 19) / 20  # Largest character size
        self.fsizey = self.pc.fontHeight(self.font)

    def new_margin(self, margin, level):
        """Indent subsequent output by `level` indent units."""
        self.send_line_break()
        self.indent = self.x = self.lmargin + self.indentSize * level

    def new_spacing(self, spacing):
        # Unsupported: renders a diagnostic line instead of real spacing control.
        self.send_line_break()
        t = "new_spacing(%s)" % repr(spacing)
        self.OutputLine(t, 1)

    def new_styles(self, styles):
        # Unsupported: renders a diagnostic line instead of real style control.
        self.send_line_break()
        t = "new_styles(%s)" % repr(styles)
        self.OutputLine(t, 1)

    def send_label_data(self, data):
        """Draw a list-item label: a square bullet for '*', the text otherwise."""
        # (The original also computed an unused fontHeight in both branches;
        # those dead locals were removed.)
        if data == "*":
            w = self.pc.stringWidth(data, self.font) / 3
            x = self.indent - w
            y = self.y - w
            self.pc.drawRect(x, y, x - w, y - w)
        else:
            w = self.pc.stringWidth(data, self.font)
            x = self.indent - w - self.fsizex / 3
            if x < 0:
                x = 0
            self.pc.drawString(data, x, self.y, self.font, self.color)

    def send_paragraph(self, blankline):
        """End the paragraph and skip `blankline` blank lines."""
        self.send_line_break()
        self.y = self.y + self.oldLineHeight * blankline

    def send_line_break(self):
        """Move the pen to the start of the next line (if anything was drawn)."""
        if self.lineHeight:
            self.y = self.y + self.lineHeight
            self.oldLineHeight = self.lineHeight
            self.lineHeight = 0
        self.x = self.indent
        self.atbreak = 0
        if TRACE:
            input('lb')

    def send_hor_rule(self):
        """Draw an <HR> as a horizontal line across the usable width."""
        self.send_line_break()
        self.y = self.y + self.oldLineHeight
        border = self.fsizex
        self.pc.drawLine(border, self.y, self.rmargin - border, self.y, piddle.Color(0.0, 0.0,
                                                                                     200 / 255.0))
        self.y = self.y + self.oldLineHeight

    def send_literal_data(self, data):
        """Render preformatted (<PRE>) text: keep line breaks, expand tabs."""
        if not data:
            return
        # BUG FIX: was `data.split(data, '\n')`, a TypeError at runtime left
        # over from a botched Python 2 `string.split(data, '\n')` conversion.
        lines = data.split('\n')
        text = lines[0].replace('\t', ' ' * 8)
        for l in lines[1:]:
            self.OutputLine(text, 1)
            text = l.replace('\t', ' ' * 8)
        self.OutputLine(text, 0)
        self.atbreak = 0

    def send_flowing_data(self, data):
        """Render word-wrappable text, breaking lines at self.rmargin."""
        if not data:
            return
        atbreak = self.atbreak or data[0] in string.whitespace
        text = ""
        pixels = chars = 0
        for word in data.split():
            bword = " " + word  # blank + word
            length = len(bword)
            # The current line is "text" and its size is
            # "pixels" pixels plus "chars" characters.
            if not atbreak:
                text = word
                chars = chars + length - 1
            elif self.x + pixels + (chars + length) * self.fsizex < self.rmargin:
                # Word fits easily on current line (cheap worst-case estimate).
                text = text + bword
                chars = chars + length
            else:
                # Estimate says the line may be full: measure precisely.
                w = self.pc.stringWidth(text + bword, self.font)
                if TRACE:
                    print('sfd T:', text + bword)
                if TRACE:
                    print('sfd', self.x, w, self.x + w, self.rmargin)
                if self.x + w < self.rmargin:
                    # Word fits.
                    text = text + bword
                    pixels = w
                    chars = 0
                else:
                    # Word does not fit. Output current line.
                    self.OutputLine(text, 1)
                    text = word
                    chars = length - 1
                    pixels = 0
            atbreak = 1
        self.OutputLine(text, 0)
        self.atbreak = data[-1] in string.whitespace

    def OutputLine(self, text, linebreak=0):
        """Draw `text` at the pen position; advance x; optionally break the line."""
        if text:
            if TRACE:
                print('olt:', text)
            if TRACE:
                print('olf:', self.font.size, self.font.bold, self.font.italic, self.font.underline,
                      self.font.face)
            self.pc.drawString(text, self.x, self.y, self.font, self.color)
            # if self.anchor:
            #     o.anchor = self.anchor
            self.lineHeight = max(self.lineHeight, self.pc.fontHeight(self.font))
            self.x = self.x + self.pc.stringWidth(text, self.font)
        if linebreak:
            self.send_line_break()
__copyrite_jim__ = '''\
Copyright 1994, 1995 by James C. Ahlstrom, Stirling NJ, USA.
Comments and complaints to jim@interet.com
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation.
JAMES C. AHLSTROM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL HE BE LIABLE FOR ANY SPECIAL, INDIRECT
OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
DEMO_HTML = '''Here's some starting text - should be rendered in
default font and color, and should start at specified start coordinates.
<BR><BR>
<H2>HTMLPiddler.py</H2>
<H3>Rendering HTML with Piddle</H3>
<P>The <b>HTMLPiddler</b> class accepts an HTML string and a few layout constraints,
and renders the HTML on a piddle canvas
<P>An Example:
<PRE>
piddler = HTMLPiddler(html=aHtmlString,
start=(startX, startY),
xLimits=(leftMarginX, rightMarginX),
font=aPiddleFontForDefault,
color=aPiddleColorForDefault)
piddler.renderOn(aPiddleCanvas)
</PRE>
<P>Limitations
<UL>
<LI>only renders a subset of HTML (mostly text-related tags)
<LI>content between unsupported tags may not be rendered at all
</UL>
<P><b>HTMLPiddler</b> is derived largely on "demo07.py" from the
<A HREF="http://www.python.org/ftp/python/wpy/">WPY distribution</A>
by <A HREF="mailto:jim@interet.com">Jim Ahlstrom</A>
<P><i><b>HTMLPiddler.py</b> is mainly a demonstration/proof-of-concept, and
is not really intended for real work.</i>
<BR>
<BR><A HREF="mailto:kuncej@mail.conservation.state.mo.us">Jeff Kunce</A>
<BR><A HREF="http://starship.python.net/crew/jjkunce/">
http://starship.python.net/crew/jjkunce/</A>
'''
def demoPDF(html):
    """Render the demo HTML to 'HTMLPiddler.pdf' via the piddlePDF back-end."""
    import piddlePDF
    canvas = piddlePDF.PDFCanvas((750, 1000), 'HTMLPiddler.pdf')
    canvas.drawLine(100, 100, 250, 150, color=piddle.green)
    canvas.drawRect(100, 100, 650, 900, edgeColor=piddle.pink)
    HTMLPiddler(html, (250, 150), (100, 650)).renderOn(canvas)
    canvas.save()
def demoPIL(html):
    """Render the demo HTML to a TIFF image via the piddlePIL back-end."""
    print('be patient, this is a little slow...')
    import piddlePIL
    canvas = piddlePIL.PILCanvas((800, 600), 'HTMLPiddler')
    canvas.drawLine(0, 0, 100, 80, color=piddle.green)
    canvas.drawRect(50, 50, 750, 550, edgeColor=piddle.pink)
    HTMLPiddler(html, (100, 80), (50, 750)).renderOn(canvas)
    canvas.save(format='tif')
def demoTK(html):
    """Render the demo HTML in a Tk window via the piddleTK back-end."""
    import piddleTK
    canvas = piddleTK.TKCanvas((800, 600))
    canvas.drawLine(0, 0, 50, 50, color=piddle.green)
    canvas.drawRect(10, 10, 590, 790, edgeColor=piddle.pink)
    renderer = HTMLPiddler(html, (50, 50), (10, 790))
    # Flush primitives before rendering text, as in the original demo.
    canvas.flush()
    renderer.renderOn(canvas)
def demoWX(html):
    """Render the demo HTML in a wx window via the piddleWX back-end."""
    import piddleWX
    canvas = piddleWX.WXCanvas((800, 600))
    canvas.drawLine(0, 0, 50, 50, color=piddle.green)
    canvas.drawRect(10, 10, 590, 790, edgeColor=piddle.pink)
    renderer = HTMLPiddler(html, (50, 50), (10, 790))
    # Flush primitives before rendering text, as in the original demo.
    canvas.flush()
    renderer.renderOn(canvas)
def demo(html=DEMO_HTML):
    """Interactive menu over the demo back-ends; loops until 0 is entered."""
    # 3 and 4 remain selectable even though their menu lines are hidden.
    backends = {1: demoPDF, 2: demoPIL, 3: demoTK, 4: demoWX}
    while True:
        print('Demo of HTMLPiddler.py')
        print(' 1. piddlePDF')
        print(' 2. piddlePIL')
        #print(' 3. piddleTK')
        #print(' 4. piddleWX')
        print(' 0. EXIT')
        raw = input('Enter Selection Number: ')
        try:
            choice = int(raw.strip())
        except Exception:
            choice = -1
        if choice == 0:
            break
        handler = backends.get(choice)
        if handler is not None:
            handler(html)
if __name__ == '__main__':
    # Removed the unused `import pdb` debugging leftover.
    demo()
| |
from __future__ import unicode_literals
import datetime
import unittest
from decimal import Decimal
from django import forms, test
from django.core import checks, validators
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models, transaction
from django.db.models.fields import (
NOT_PROVIDED, AutoField, BigIntegerField, BinaryField, BooleanField,
CharField, CommaSeparatedIntegerField, DateField, DateTimeField,
DecimalField, EmailField, FilePathField, FloatField, GenericIPAddressField,
IntegerField, IPAddressField, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField,
)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Bar, BigD, BigIntegerModel, BigS, BooleanModel, DataModel, DateTimeModel,
Document, FksToBooleans, FkToChar, FloatModel, Foo, GenericIPAddress,
IntegerModel, NullBooleanModel, PositiveIntegerModel,
PositiveSmallIntegerModel, Post, PrimaryKeyCharModel, RenamedField,
SmallIntegerModel, VerboseNameField, Whiz, WhizIter, WhizIterEmpty,
)
class BasicFieldTests(test.TestCase):
    """Assorted regression tests for core model-field behavior
    (formfield kwargs, validation, repr, verbose names, type checking)."""
    def test_show_hidden_initial(self):
        """
        Regression test for #12913. Make sure fields with choices respect
        show_hidden_initial as a kwarg to models.Field.formfield()
        """
        choices = [(0, 0), (1, 1)]
        model_field = models.Field(choices=choices)
        form_field = model_field.formfield(show_hidden_initial=True)
        self.assertTrue(form_field.show_hidden_initial)
        form_field = model_field.formfield(show_hidden_initial=False)
        self.assertFalse(form_field.show_hidden_initial)
    def test_nullbooleanfield_blank(self):
        """
        Regression test for #13071: NullBooleanField should not throw
        a validation error when given a value of None.
        """
        nullboolean = NullBooleanModel(nbfield=None)
        try:
            nullboolean.full_clean()
        except ValidationError as e:
            self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
    def test_field_repr(self):
        """
        Regression test for #5931: __repr__ of a field also displays its name
        """
        f = Foo._meta.get_field('a')
        self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
        # An unbound field has no name, so only the class path is shown.
        f = models.fields.CharField()
        self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
    def test_field_name(self):
        """
        Regression test for #14695: explicitly defined field name overwritten
        by model's attribute name.
        """
        instance = RenamedField()
        self.assertTrue(hasattr(instance, 'get_fieldname_display'))
        self.assertFalse(hasattr(instance, 'get_modelname_display'))
    def test_field_verbose_name(self):
        # VerboseNameField declares field1..field24 with explicit verbose names.
        m = VerboseNameField
        for i in range(1, 25):
            self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
                             'verbose field%d' % i)
        self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
    def test_float_validates_object(self):
        # Assigning a non-numeric object to a FloatField must raise TypeError
        # on save, before and after the instance has been persisted.
        instance = FloatModel(size=2.5)
        # Try setting float field to unsaved object
        instance.size = instance
        with transaction.atomic():
            with self.assertRaises(TypeError):
                instance.save()
        # Set value to valid and save
        instance.size = 2.5
        instance.save()
        self.assertTrue(instance.id)
        # Set field to object on saved instance
        instance.size = instance
        with transaction.atomic():
            with self.assertRaises(TypeError):
                instance.save()
        # Try setting field to object on retrieved object
        obj = FloatModel.objects.get(pk=instance.id)
        obj.size = obj
        with self.assertRaises(TypeError):
            obj.save()
    def test_choices_form_class(self):
        """Can supply a custom choices form class. Regression for #20999."""
        choices = [('a', 'a')]
        field = models.CharField(choices=choices)
        klass = forms.TypedMultipleChoiceField
        self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
    def test_field_str(self):
        # force_str of a bound field yields "app_label.Model.field".
        from django.utils.encoding import force_str
        f = Foo._meta.get_field('a')
        self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
    """models.DecimalField: conversion, defaults, formatting, lookups and
    save round-trips."""

    def test_to_python(self):
        field = models.DecimalField(max_digits=4, decimal_places=2)
        self.assertEqual(field.to_python(3), Decimal("3"))
        self.assertEqual(field.to_python("3.14"), Decimal("3.14"))
        with self.assertRaises(ValidationError):
            field.to_python("abc")

    def test_default(self):
        field = models.DecimalField(default=Decimal("0.00"))
        self.assertEqual(field.get_default(), Decimal("0.00"))

    def test_format(self):
        field = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(field._format(field.to_python(2)), '2.0')
        self.assertEqual(field._format(field.to_python('2.6')), '2.6')
        self.assertEqual(field._format(None), None)

    def test_get_db_prep_lookup(self):
        field = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(field.get_db_prep_lookup('exact', None, connection=connection), [None])

    def test_filter_with_strings(self):
        """Decimal fields are filterable with string values (#8023)."""
        Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        self.assertEqual(list(Foo.objects.filter(d='1.23')), [])

    def test_save_without_float_conversion(self):
        """Saving must not route decimals through a corrupting float
        conversion (#5079)."""
        big = BigD(d="12.9")
        big.save()
        reloaded = BigD.objects.get(pk=big.pk)
        self.assertEqual(reloaded.d, Decimal("12.9"))

    def test_lookup_really_big_value(self):
        """Very large values are usable in filters even on older Pythons;
        not crashing is the win."""
        Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
    """ForeignKey defaults, empty-string values, and check-framework warnings."""

    def test_callable_default(self):
        """A lazy callable works as ForeignKey.default."""
        foo = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        bar = Bar.objects.create(b="bcd")
        self.assertEqual(bar.a, foo)

    @test.skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_empty_string_fk(self):
        """Empty-string foreign key values are not converted to None (#19299)."""
        target = PrimaryKeyCharModel.objects.create(string='')
        pointer = FkToChar.objects.create(out=target)
        pointer = FkToChar.objects.select_related('out').get(id=pointer.pk)
        self.assertEqual(pointer.out, target)

    def test_warning_when_unique_true_on_fk(self):
        class FKUniqueTrue(models.Model):
            fk_field = models.ForeignKey(Foo, unique=True)

        expected_warnings = [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=FKUniqueTrue.fk_field.field,
                id='fields.W342',
            )
        ]
        self.assertEqual(FKUniqueTrue().check(), expected_warnings)

    def test_related_name_converted_to_text(self):
        related_name = Bar._meta.get_field('a').rel.related_name
        self.assertIsInstance(related_name, six.text_type)
class DateTimeFieldTests(test.TestCase):
    """Microsecond handling in DateTimeField/TimeField and full save round-trips."""

    def test_datetimefield_to_python_usecs(self):
        """DateTimeField.to_python parses microseconds."""
        field = models.DateTimeField()
        self.assertEqual(field.to_python('2001-01-02 03:04:05.000006'),
                         datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
        self.assertEqual(field.to_python('2001-01-02 03:04:05.999999'),
                         datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))

    def test_timefield_to_python_usecs(self):
        """TimeField.to_python parses microseconds."""
        field = models.TimeField()
        self.assertEqual(field.to_python('01:02:03.000004'),
                         datetime.time(1, 2, 3, 4))
        self.assertEqual(field.to_python('01:02:03.999999'),
                         datetime.time(1, 2, 3, 999999))

    @test.skipUnlessDBFeature("supports_microsecond_precision")
    def test_datetimes_save_completely(self):
        d = datetime.date(2014, 3, 12)
        dt = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        t = datetime.time(21, 22, 23, 240000)
        DateTimeModel.objects.create(d=d, dt=dt, t=t)
        saved = DateTimeModel.objects.first()
        self.assertTrue(saved)
        self.assertEqual(saved.d, d)
        self.assertEqual(saved.dt, dt)
        self.assertEqual(saved.t, t)
class BooleanFieldTests(test.TestCase):
    def _test_get_db_prep_lookup(self, f):
        # Truthy/falsy inputs of several types must normalize to True/False,
        # and None must pass through unchanged.
        self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])

    def _test_to_python(self, f):
        # to_python must yield real bools, not ints.
        self.assertIs(f.to_python(1), True)
        self.assertIs(f.to_python(0), False)

    def test_booleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.BooleanField())

    def test_nullbooleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.NullBooleanField())

    def test_booleanfield_to_python(self):
        self._test_to_python(models.BooleanField())

    def test_nullbooleanfield_to_python(self):
        self._test_to_python(models.NullBooleanField())

    def test_charfield_textfield_max_length_passed_to_formfield(self):
        """
        Test that CharField and TextField pass their max_length attributes to
        form fields created using their .formfield() method (#22206).
        """
        cf1 = models.CharField()
        cf2 = models.CharField(max_length=1234)
        self.assertIsNone(cf1.formfield().max_length)
        self.assertEqual(1234, cf2.formfield().max_length)
        tf1 = models.TextField()
        tf2 = models.TextField(max_length=2345)
        self.assertIsNone(tf1.formfield().max_length)
        self.assertEqual(2345, tf2.formfield().max_length)

    def test_booleanfield_choices_blank(self):
        """
        Test that BooleanField with choices and defaults doesn't generate a
        formfield with the blank option (#9640, #10549).
        """
        choices = [(1, 'Si'), (2, 'No')]
        f = models.BooleanField(choices=choices, default=1, null=False)
        self.assertEqual(f.formfield().choices, choices)

    def test_return_type(self):
        # Values loaded from the database must come back as Python bools
        # for both BooleanField and NullBooleanField.
        b = BooleanModel()
        b.bfield = True
        b.save()
        b2 = BooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.bfield, bool)
        self.assertEqual(b2.bfield, True)
        b3 = BooleanModel()
        b3.bfield = False
        b3.save()
        b4 = BooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.bfield, bool)
        self.assertEqual(b4.bfield, False)
        b = NullBooleanModel()
        b.nbfield = True
        b.save()
        b2 = NullBooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.nbfield, bool)
        self.assertEqual(b2.nbfield, True)
        b3 = NullBooleanModel()
        b3.nbfield = False
        b3.save()
        b4 = NullBooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.nbfield, bool)
        self.assertEqual(b4.nbfield, False)
        # http://code.djangoproject.com/ticket/13293
        # Verify that when an extra clause exists, the boolean
        # conversions are applied with an offset
        b5 = BooleanModel.objects.all().extra(
            select={'string_col': 'string'})[0]
        self.assertNotIsInstance(b5.pk, bool)

    def test_select_related(self):
        """
        Test type of boolean fields when retrieved via select_related() (MySQL,
        #15040)
        """
        bmt = BooleanModel.objects.create(bfield=True)
        bmf = BooleanModel.objects.create(bfield=False)
        nbmt = NullBooleanModel.objects.create(nbfield=True)
        nbmf = NullBooleanModel.objects.create(nbfield=False)
        m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
        m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
        # Test select_related('fk_field_name')
        ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(ma.bf.bfield, bool)
        self.assertIsInstance(ma.nbf.nbfield, bool)
        # verify values
        self.assertEqual(ma.bf.bfield, True)
        self.assertEqual(ma.nbf.nbfield, True)
        # Test select_related()
        mb = FksToBooleans.objects.select_related().get(pk=m1.id)
        mc = FksToBooleans.objects.select_related().get(pk=m2.id)
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(mb.bf.bfield, bool)
        self.assertIsInstance(mb.nbf.nbfield, bool)
        self.assertIsInstance(mc.bf.bfield, bool)
        self.assertIsInstance(mc.nbf.nbfield, bool)
        # verify values
        self.assertEqual(mb.bf.bfield, True)
        self.assertEqual(mb.nbf.nbfield, True)
        self.assertEqual(mc.bf.bfield, False)
        self.assertEqual(mc.nbf.nbfield, False)

    def test_null_default(self):
        """
        Check that a BooleanField defaults to None -- which isn't
        a valid value (#15124).
        """
        # Patch the boolean field's default value. We give it a default
        # value when defining the model to satisfy the check tests
        # #20895.
        boolean_field = BooleanModel._meta.get_field('bfield')
        self.assertTrue(boolean_field.has_default())
        old_default = boolean_field.default
        try:
            boolean_field.default = NOT_PROVIDED
            # check patch was successful
            self.assertFalse(boolean_field.has_default())
            b = BooleanModel()
            self.assertIsNone(b.bfield)
            with transaction.atomic():
                with self.assertRaises(IntegrityError):
                    b.save()
        finally:
            # Restore the class-level default so other tests are unaffected.
            boolean_field.default = old_default
        nb = NullBooleanModel()
        self.assertIsNone(nb.nbfield)
        nb.save()  # no error
class ChoicesTests(test.TestCase):
    def test_choices_and_field_display(self):
        """
        Check that get_choices and get_flatchoices interact with
        get_FIELD_display to return the expected values (#7913).
        """
        self.assertEqual(Whiz(c=1).get_c_display(), 'First')  # A nested value
        self.assertEqual(Whiz(c=0).get_c_display(), 'Other')  # A top level value
        self.assertEqual(Whiz(c=9).get_c_display(), 9)  # Invalid value
        self.assertEqual(Whiz(c=None).get_c_display(), None)  # Blank value
        self.assertEqual(Whiz(c='').get_c_display(), '')  # Empty value

    def test_iterator_choices(self):
        """
        Check that get_choices works with Iterators (#23112).
        """
        self.assertEqual(WhizIter(c=1).c, 1)  # A nested value
        self.assertEqual(WhizIter(c=9).c, 9)  # Invalid value
        self.assertEqual(WhizIter(c=None).c, None)  # Blank value
        self.assertEqual(WhizIter(c='').c, '')  # Empty value

    def test_empty_iterator_choices(self):
        """
        Check that get_choices works with empty iterators (#23112).
        """
        self.assertEqual(WhizIterEmpty(c="a").c, "a")  # A nested value
        self.assertEqual(WhizIterEmpty(c="b").c, "b")  # Invalid value
        self.assertEqual(WhizIterEmpty(c=None).c, None)  # Blank value
        self.assertEqual(WhizIterEmpty(c='').c, '')  # Empty value

    def test_charfield_get_choices_with_blank_iterator(self):
        """
        Check that get_choices works with an empty Iterator
        """
        # Only the default blank choice should remain.
        f = models.CharField(choices=(x for x in []))
        self.assertEqual(f.get_choices(include_blank=True), [('', '---------')])
class SlugFieldTests(test.TestCase):
    def test_slugfield_max_length(self):
        """
        Make sure SlugField honors max_length (#9706)
        """
        long_slug = 'slug' * 50
        saved = BigS.objects.create(s=long_slug)
        fetched = BigS.objects.get(pk=saved.pk)
        # The 200-character slug must round-trip untruncated.
        self.assertEqual(fetched.s, long_slug)
class ValidationTest(test.TestCase):
    """
    Per-field validation behavior exercised directly through Field.clean().
    """
    def test_charfield_raises_error_on_empty_string(self):
        f = models.CharField()
        self.assertRaises(ValidationError, f.clean, "", None)

    def test_charfield_cleans_empty_string_when_blank_true(self):
        f = models.CharField(blank=True)
        self.assertEqual('', f.clean('', None))

    def test_integerfield_cleans_valid_string(self):
        f = models.IntegerField()
        self.assertEqual(2, f.clean('2', None))

    def test_integerfield_raises_error_on_invalid_input(self):
        # Fixed method-name typo: was ..._on_invalid_intput. Test methods
        # are discovered by the "test_" prefix, so the rename is safe.
        f = models.IntegerField()
        self.assertRaises(ValidationError, f.clean, "a", None)

    def test_charfield_with_choices_cleans_valid_choice(self):
        f = models.CharField(max_length=1,
            choices=[('a', 'A'), ('b', 'B')])
        self.assertEqual('a', f.clean('a', None))

    def test_charfield_with_choices_raises_error_on_invalid_choice(self):
        f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
        self.assertRaises(ValidationError, f.clean, "not a", None)

    def test_charfield_get_choices_with_blank_defined(self):
        # A user-supplied blank choice suppresses the default '---------'.
        f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
        self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])

    def test_charfield_get_choices_doesnt_evaluate_lazy_strings(self):
        # Regression test for #23098
        # Will raise ZeroDivisionError if lazy is evaluated
        lazy_func = lazy(lambda x: 0 / 0, int)
        f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
        self.assertEqual(f.get_choices(True)[0], ('', '---------'))

    def test_choices_validation_supports_named_groups(self):
        f = models.IntegerField(
            choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
        self.assertEqual(10, f.clean(10, None))

    def test_nullable_integerfield_raises_error_with_blank_false(self):
        # null=True alone is not enough: blank=False still rejects None
        # at the validation layer.
        f = models.IntegerField(null=True, blank=False)
        self.assertRaises(ValidationError, f.clean, None, None)

    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        f = models.IntegerField(null=True, blank=True)
        self.assertIsNone(f.clean(None, None))

    def test_integerfield_raises_error_on_empty_input(self):
        f = models.IntegerField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)
        self.assertRaises(ValidationError, f.clean, '', None)

    def test_integerfield_validates_zero_against_choices(self):
        # 0 is falsy but still a real value; it must fail because it is
        # not among the declared choices.
        f = models.IntegerField(choices=((1, 1),))
        self.assertRaises(ValidationError, f.clean, '0', None)

    def test_charfield_raises_error_on_empty_input(self):
        f = models.CharField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)

    def test_datefield_cleans_date(self):
        f = models.DateField()
        self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))

    def test_boolean_field_doesnt_accept_empty_input(self):
        f = models.BooleanField()
        self.assertRaises(ValidationError, f.clean, None, None)
class IntegerFieldTests(test.TestCase):
    # Base test battery; subclasses override `model` and `documented_range`
    # for the other integer field variants.
    model = IntegerModel
    documented_range = (-2147483648, 2147483647)

    def test_documented_range(self):
        """
        Ensure that values within the documented safe range pass validation,
        can be saved and retrieved without corruption.
        """
        min_value, max_value = self.documented_range
        instance = self.model(value=min_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__lte=min_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, min_value)
        instance = self.model(value=max_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__gte=max_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, max_value)

    def test_backend_range_validation(self):
        """
        Ensure that backend specific range are enforced at the model
        validation level. ref #12030.
        """
        field = self.model._meta.get_field('value')
        internal_type = field.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        # A backend may report an open-ended bound (None); only closed
        # bounds are checked.
        if min_value is not None:
            instance = self.model(value=min_value - 1)
            expected_message = validators.MinValueValidator.message % {
                'limit_value': min_value
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            instance.value = min_value
            instance.full_clean()
        if max_value is not None:
            instance = self.model(value=max_value + 1)
            expected_message = validators.MaxValueValidator.message % {
                'limit_value': max_value
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            instance.value = max_value
            instance.full_clean()

    def test_types(self):
        # The attribute must stay an int before save, after save, and
        # after a reload from the database.
        instance = self.model(value=0)
        self.assertIsInstance(instance.value, six.integer_types)
        instance.save()
        self.assertIsInstance(instance.value, six.integer_types)
        instance = self.model.objects.get()
        self.assertIsInstance(instance.value, six.integer_types)

    def test_coercing(self):
        # String input must be coerced to int on both create and lookup.
        self.model.objects.create(value='10')
        instance = self.model.objects.get(value='10')
        self.assertEqual(instance.value, 10)
class SmallIntegerFieldTests(IntegerFieldTests):
    # Same battery as IntegerFieldTests, against SmallIntegerField.
    model = SmallIntegerModel
    documented_range = (-32768, 32767)
class BigIntegerFieldTests(IntegerFieldTests):
    # Same battery as IntegerFieldTests, against BigIntegerField.
    model = BigIntegerModel
    documented_range = (-9223372036854775808, 9223372036854775807)
class PositiveSmallIntegerFieldTests(IntegerFieldTests):
    # Same battery as IntegerFieldTests, against PositiveSmallIntegerField.
    model = PositiveSmallIntegerModel
    documented_range = (0, 32767)
class PositiveIntegerFieldTests(IntegerFieldTests):
    # Same battery as IntegerFieldTests, against PositiveIntegerField.
    model = PositiveIntegerModel
    documented_range = (0, 2147483647)
class TypeCoercionTests(test.TestCase):
    """
    Test that database lookups can accept the wrong types and convert
    them with no error: especially on Postgres 8.3+ which does not do
    automatic casting at the DB level. See #10015.
    """
    def test_lookup_integer_in_charfield(self):
        matches = Post.objects.filter(title=9)
        self.assertEqual(matches.count(), 0)

    def test_lookup_integer_in_textfield(self):
        matches = Post.objects.filter(body=24)
        self.assertEqual(matches.count(), 0)
class FileFieldTests(unittest.TestCase):
    def _doc_with_field(self):
        # Shared fixture: a Document with a known file value, returned
        # together with its FileField for save_form_data() tests.
        d = Document(myfile='something.txt')
        self.assertEqual(d.myfile, 'something.txt')
        return d, d._meta.get_field('myfile')

    def test_clearable(self):
        """
        Test that FileField.save_form_data will clear its instance attribute
        value if passed False.
        """
        d, field = self._doc_with_field()
        field.save_form_data(d, False)
        self.assertEqual(d.myfile, '')

    def test_unchanged(self):
        """
        Test that FileField.save_form_data considers None to mean "no change"
        rather than "clear".
        """
        d, field = self._doc_with_field()
        field.save_form_data(d, None)
        self.assertEqual(d.myfile, 'something.txt')

    def test_changed(self):
        """
        Test that FileField.save_form_data, if passed a truthy value, updates
        its instance attribute.
        """
        d, field = self._doc_with_field()
        field.save_form_data(d, 'else.txt')
        self.assertEqual(d.myfile, 'else.txt')

    def test_delete_when_file_unset(self):
        """
        Calling delete on an unset FileField should not call the file deletion
        process, but fail silently (#20660).
        """
        d = Document()
        try:
            d.myfile.delete()
        except OSError:
            self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
    binary_data = b'\x00\x46\xFE'

    def test_set_and_retrieve(self):
        """Round-trip bytes and memoryview payloads through save/reload."""
        for payload in (self.binary_data, six.memoryview(self.binary_data)):
            dm = DataModel(data=payload)
            dm.save()
            dm = DataModel.objects.get(pk=dm.pk)
            self.assertEqual(bytes(dm.data), bytes(payload))
            # Saving an existing row exercises the UPDATE path too.
            dm.save()
            dm = DataModel.objects.get(pk=dm.pk)
            self.assertEqual(bytes(dm.data), bytes(payload))
            # The field default must also survive the round trip.
            self.assertEqual(bytes(dm.short_data), b'\x08')

    def test_max_length(self):
        # An over-long payload must fail model validation.
        overlong = DataModel(short_data=self.binary_data * 4)
        self.assertRaises(ValidationError, overlong.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
    def test_genericipaddressfield_formfield_protocol(self):
        """
        Test that GenericIPAddressField with a specified protocol does not
        generate a formfield with no specified protocol. See #20740.
        """
        ipv4_only = models.GenericIPAddressField(protocol='IPv4').formfield()
        self.assertRaises(ValidationError, ipv4_only.clean, '::1')
        ipv6_only = models.GenericIPAddressField(protocol='IPv6').formfield()
        self.assertRaises(ValidationError, ipv6_only.clean, '127.0.0.1')

    def test_null_value(self):
        """
        Null values should be resolved to None in Python (#24078).
        """
        GenericIPAddress.objects.create()
        stored = GenericIPAddress.objects.get()
        self.assertIsNone(stored.ip)

    def test_save_load(self):
        created = GenericIPAddress.objects.create(ip='::1')
        fetched = GenericIPAddress.objects.get()
        self.assertEqual(fetched.ip, created.ip)
class PromiseTest(test.TestCase):
    """
    get_prep_value() on every field type must resolve a lazy (promise)
    argument into a concrete value of the expected type.
    """

    def _assert_prep_type(self, field, raw_value, declared_type, expected_type):
        # Wrap raw_value in a lazy promise and verify the type that
        # field.get_prep_value() resolves it to.
        promise = lazy(lambda: raw_value, declared_type)()
        self.assertIsInstance(field.get_prep_value(promise), expected_type)

    def test_AutoField(self):
        self._assert_prep_type(AutoField(primary_key=True), 1, int, int)

    @unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
    def test_BigIntegerField(self):
        self._assert_prep_type(
            BigIntegerField(), long(9999999999999999999), long, long)

    def test_BinaryField(self):
        self._assert_prep_type(BinaryField(), b'', bytes, bytes)

    def test_BooleanField(self):
        self._assert_prep_type(BooleanField(), True, bool, bool)

    def test_CharField(self):
        self._assert_prep_type(CharField(), '', six.text_type, six.text_type)
        # Non-string input must be coerced to text as well.
        self._assert_prep_type(CharField(), 0, int, six.text_type)

    def test_CommaSeparatedIntegerField(self):
        self._assert_prep_type(
            CommaSeparatedIntegerField(), '1,2', six.text_type, six.text_type)
        self._assert_prep_type(
            CommaSeparatedIntegerField(), 0, int, six.text_type)

    def test_DateField(self):
        self._assert_prep_type(
            DateField(), datetime.date.today(), datetime.date, datetime.date)

    def test_DateTimeField(self):
        self._assert_prep_type(
            DateTimeField(), datetime.datetime.now(), datetime.datetime,
            datetime.datetime)

    def test_DecimalField(self):
        self._assert_prep_type(DecimalField(), Decimal('1.2'), Decimal, Decimal)

    def test_EmailField(self):
        self._assert_prep_type(
            EmailField(), 'mailbox@domain.com', six.text_type, six.text_type)

    def test_FileField(self):
        self._assert_prep_type(
            FileField(), 'filename.ext', six.text_type, six.text_type)
        self._assert_prep_type(FileField(), 0, int, six.text_type)

    def test_FilePathField(self):
        self._assert_prep_type(
            FilePathField(), 'tests.py', six.text_type, six.text_type)
        self._assert_prep_type(FilePathField(), 0, int, six.text_type)

    def test_FloatField(self):
        self._assert_prep_type(FloatField(), 1.2, float, float)

    def test_ImageField(self):
        self._assert_prep_type(
            ImageField(), 'filename.ext', six.text_type, six.text_type)

    def test_IntegerField(self):
        self._assert_prep_type(IntegerField(), 1, int, int)

    def test_IPAddressField(self):
        self._assert_prep_type(
            IPAddressField(), '127.0.0.1', six.text_type, six.text_type)
        self._assert_prep_type(IPAddressField(), 0, int, six.text_type)

    def test_IPAddressField_deprecated(self):
        # Using the deprecated field must emit system-check warning
        # fields.W900.
        class IPAddressModel(models.Model):
            ip = IPAddressField()

        model = IPAddressModel()
        self.assertEqual(
            model.check(),
            [checks.Warning(
                'IPAddressField has been deprecated. Support for it '
                '(except in historical migrations) will be removed in Django 1.9.',
                hint='Use GenericIPAddressField instead.',
                obj=IPAddressModel._meta.get_field('ip'),
                id='fields.W900',
            )],
        )

    def test_GenericIPAddressField(self):
        self._assert_prep_type(
            GenericIPAddressField(), '127.0.0.1', six.text_type, six.text_type)
        self._assert_prep_type(GenericIPAddressField(), 0, int, six.text_type)

    def test_NullBooleanField(self):
        self._assert_prep_type(NullBooleanField(), True, bool, bool)

    def test_PositiveIntegerField(self):
        self._assert_prep_type(PositiveIntegerField(), 1, int, int)

    def test_PositiveSmallIntegerField(self):
        self._assert_prep_type(PositiveSmallIntegerField(), 1, int, int)

    def test_SlugField(self):
        self._assert_prep_type(SlugField(), 'slug', six.text_type, six.text_type)
        self._assert_prep_type(SlugField(), 0, int, six.text_type)

    def test_SmallIntegerField(self):
        self._assert_prep_type(SmallIntegerField(), 1, int, int)

    def test_TextField(self):
        self._assert_prep_type(TextField(), 'Abc', six.text_type, six.text_type)
        self._assert_prep_type(TextField(), 0, int, six.text_type)

    def test_TimeField(self):
        self._assert_prep_type(
            TimeField(), datetime.datetime.now().time(), datetime.time,
            datetime.time)

    def test_URLField(self):
        self._assert_prep_type(
            URLField(), 'http://domain.com', six.text_type, six.text_type)
class CustomFieldTests(unittest.TestCase):
    def test_14786(self):
        """
        Regression test for #14786 -- Test that field values are not prepared
        twice in get_db_prep_lookup().
        """
        class NoopField(models.TextField):
            # Counts get_prep_value() invocations without changing behavior.
            def __init__(self, *args, **kwargs):
                self.prep_value_count = 0
                super(NoopField, self).__init__(*args, **kwargs)

            def get_prep_value(self, value):
                self.prep_value_count += 1
                return super(NoopField, self).get_prep_value(value)

        field = NoopField()
        field.get_db_prep_lookup(
            'exact', 'TEST', connection=connection, prepared=False
        )
        # Exactly one preparation pass, not two.
        self.assertEqual(field.prep_value_count, 1)
| |
import requests
import urllib
import shlex
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from polymorphic.models import PolymorphicModel
from mc2.controllers.base import exceptions, namers
from mc2.controllers.base.builders import Builder
from mc2.controllers.base.managers import (
ControllerInfrastructureManager, ControllerRabbitMQManager)
class Controller(PolymorphicModel):
    """
    Base model for an application deployed via Mesos/Marathon, with
    optional Postgres (via seed-xylem) and RabbitMQ vhost provisioning.
    """
    # state
    marathon_cpus = models.FloatField(
        default=settings.MESOS_DEFAULT_CPU_SHARE)
    marathon_mem = models.FloatField(
        default=settings.MESOS_DEFAULT_MEMORY_ALLOCATION)
    marathon_instances = models.IntegerField(
        default=settings.MESOS_DEFAULT_INSTANCES)
    marathon_cmd = models.TextField(default='', blank=True, null=True)
    marathon_args = models.TextField(default='', blank=True, null=True)
    name = models.TextField(
        help_text='A descriptive name to uniquely identify a controller')
    description = models.TextField(
        help_text='A description to provide more info about a controller',
        blank=True, null=True, default='')
    slug = models.SlugField(
        max_length=255,
        db_index=True,
        help_text='Unique name for use in marathon id',
    )
    state = models.CharField(max_length=50, default='initial')
    # create postgres databases through mission control
    postgres_db_needed = models.BooleanField(default=False)
    postgres_db_name = models.TextField(default='', blank=True, null=True)
    postgres_db_host = models.TextField(default='', blank=True, null=True)
    postgres_db_username = models.TextField(default='', blank=True, null=True)
    postgres_db_password = models.TextField(default='', blank=True, null=True)
    # create postgres databases through mission control
    rabbitmq_vhost_needed = models.BooleanField(default=False)
    rabbitmq_vhost_name = models.TextField(default='', blank=True, null=True)
    rabbitmq_vhost_host = models.TextField(default='', blank=True, null=True)
    rabbitmq_vhost_username = models.TextField(
        default='', blank=True, null=True)
    rabbitmq_vhost_password = models.TextField(
        default='', blank=True, null=True)
    # Ownership and auth fields
    owner = models.ForeignKey('auth.User', on_delete=models.PROTECT)
    team_id = models.IntegerField(blank=True, null=True)
    organization = models.ForeignKey(
        'organizations.Organization', blank=True, null=True,
        on_delete=models.PROTECT)
    created_at = models.DateTimeField(
        _('Created Date & Time'),
        db_index=True,
        auto_now_add=True,
        help_text=_(
            'Date and time on which this item was created. This is'
            'automatically set on creation')
    )
    modified_at = models.DateTimeField(
        _('Modified Date & Time'),
        db_index=True,
        editable=False,
        auto_now=True,
        help_text=_(
            'Date and time on which this item was last modified. This'
            'is automatically set each time the item is saved.')
    )
    webhook_token = models.UUIDField(null=True)

    class Meta:
        ordering = ('name', )

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        # Per-instance managers for infrastructure and RabbitMQ operations.
        self.infra_manager = ControllerInfrastructureManager(self)
        self.rabbitmq_manager = ControllerRabbitMQManager(self)

    def save(self, *args, **kwargs):
        # Auto-generate a unique slug on first save; it doubles as the
        # Marathon app id (see app_id).
        if not self.slug:
            self.slug = namers.do_me_a_unique_slug(self.__class__, 'slug')
        super(Controller, self).save(*args, **kwargs)

    def get_state_display(self):
        # Delegates to the builder's workflow rather than the raw
        # `state` field.
        return self.get_builder().workflow.get_state()

    def to_dict(self):
        """Return a plain-dict summary of this controller."""
        return {
            'id': self.id,
            'name': self.name,
            'app_id': self.app_id,
            'state': self.state,
            'state_display': self.get_state_display(),
            'marathon_cmd': self.marathon_cmd,
            'marathon_args': self.marathon_args,
        }

    @property
    def app_id(self):
        """
        The app id to use for marathon
        """
        return self.slug

    def get_builder(self):
        return Builder(self)

    def get_or_create_postgres_db(self):
        """
        Request a Postgres database from the seed-xylem API and store the
        returned credentials on this controller.

        Raises:
            exceptions.XylemApiException: on a non-200 response or an
                empty/invalid result payload.
        """
        # Marathon app ids use '-', Postgres database names use '_'.
        resp = requests.post(
            '%s/queues/postgres/wait/create_database'
            % settings.SEED_XYLEM_API_HOST, json={
                'name': self.app_id.replace('-', '_')})
        if resp.status_code != 200:
            raise exceptions.XylemApiException(
                'Create Postgres DB app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('result', {}).get('Err')))
        result = resp.json().get('result')
        if not result:
            raise exceptions.XylemApiException('Invalid response from api.')
        # The API has used both 'username'/'user' and 'host'/'hostname'
        # key spellings; accept either.
        db_username = result.get('username') or result.get('user')
        db_host = result.get('host') or result.get('hostname')
        self.postgres_db_name = result.get('name')
        self.postgres_db_username = db_username
        self.postgres_db_password = result.get('password')
        self.postgres_db_host = db_host
        self.save()

    def get_default_app_labels(self):
        # Base Marathon labels; custom labels may be merged on top in
        # get_marathon_app_data().
        return {
            "name": self.name,
            "org": self.organization.slug if self.organization else '',
        }

    def get_marathon_app_data(self):
        """
        Override this method to specify the app definition sent to marathon
        """
        data = {
            "id": self.app_id,
            "cpus": self.marathon_cpus,
            "mem": self.marathon_mem,
            "instances": self.marathon_instances,
        }
        # Marathon accepts either "args" or "cmd"; args take precedence.
        if self.marathon_args:
            data.update({"args": shlex.split(self.marathon_args)})
        elif self.marathon_cmd:
            data.update({"cmd": self.marathon_cmd})
        envs = {}
        if self.env_variables.exists():
            envs = dict([
                (env.key, env.value)
                for env in self.env_variables.all()])
        if self.postgres_db_needed:
            # Provision (or fetch) the DB and expose it via DATABASE_URL.
            self.get_or_create_postgres_db()
            envs.update({
                'DATABASE_URL': 'postgres://%(username)s:'
                                '%(password)s@%(host)s/%(name)s' % {
                                    'username': self.postgres_db_username,
                                    'password': self.postgres_db_password,
                                    'host': self.postgres_db_host,
                                    'name': self.postgres_db_name,
                                }})
        else:
            # Clear any previously stored credentials.
            self.postgres_db_username = None
            self.postgres_db_password = None
            self.postgres_db_host = None
            self.postgres_db_name = None
            self.save()
        if self.rabbitmq_vhost_needed and self.rabbitmq_vhost_name:
            self.rabbitmq_manager.create_rabbitmq_vhost()
            envs.update({
                'BROKER_URL':
                    'amqp://%(username)s:%(password)s@%(host)s/%(name)s' %
                    {
                        'username': self.rabbitmq_vhost_username,
                        'password': self.rabbitmq_vhost_password,
                        'host': self.rabbitmq_vhost_host,
                        'name': urllib.quote(self.rabbitmq_vhost_name),
                    }
            })
        # TODO: seed-xylem currently doesn't support deleting of databases
        # Once support is added, we should delete this database here.
        if envs:
            data.update({'env': envs})
        service_labels = self.get_default_app_labels()
        # Update custom labels
        if self.label_variables.exists():
            for label in self.label_variables.all():
                service_labels[label.name] = label.value
        data.update({'labels': service_labels})
        return data

    def create_marathon_app(self):
        """POST the app definition to Marathon; raise on non-201."""
        post_data = self.get_marathon_app_data()
        resp = requests.post(
            '%s/v2/apps' % settings.MESOS_MARATHON_HOST,
            json=post_data)
        if resp.status_code != 201:
            raise exceptions.MarathonApiException(
                'Create Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def update_marathon_app(self):
        """PUT the app definition to Marathon; raise unless 200/201."""
        post_data = self.get_marathon_app_data()
        # The id goes in the URL, not the payload, for updates.
        app_id = post_data.pop('id')
        resp = requests.put(
            '%(host)s/v2/apps/%(id)s' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': app_id
            },
            json=post_data)
        if resp.status_code not in [200, 201]:
            raise exceptions.MarathonApiException(
                'Update Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def marathon_restart_app(self):
        """Ask Marathon to restart this app; raise on non-200."""
        resp = requests.post(
            '%(host)s/v2/apps/%(id)s/restart' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': self.app_id
            },
            json={})
        if resp.status_code != 200:
            raise exceptions.MarathonApiException(
                'Restart Marathon app failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def marathon_destroy_app(self):
        """Delete this app from Marathon; raise on non-200."""
        resp = requests.delete(
            '%(host)s/v2/apps/%(id)s' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': self.app_id
            },
            json={})
        if resp.status_code != 200:
            raise exceptions.MarathonApiException(
                'Marathon app deletion failed with response: %s - %s' %
                (resp.status_code, resp.json().get('message')))

    def exists_on_marathon(self):
        # True iff Marathon knows about this app id.
        resp = requests.get(
            '%(host)s/v2/apps/%(id)s' % {
                'host': settings.MESOS_MARATHON_HOST,
                'id': self.app_id
            },
            json={})
        return resp.status_code == 200

    def destroy(self):
        """
        TODO: destoy running marathon instance
        """
        pass
class EnvVariable(models.Model):
    # A key/value environment variable attached to a controller; consumed
    # by Controller.get_marathon_app_data() to build the app's env block.
    controller = models.ForeignKey(Controller, related_name='env_variables')
    key = models.TextField(blank=True, null=False)
    value = models.TextField(blank=True, null=True)
class MarathonLabel(models.Model):
    # A custom Marathon label attached to a controller; merged over the
    # default labels in Controller.get_marathon_app_data().
    controller = models.ForeignKey(Controller, related_name='label_variables')
    name = models.TextField(blank=True, null=False)
    value = models.TextField(blank=True, null=True)
class AdditionalLink(models.Model):
    # An extra named link associated with a controller.
    controller = models.ForeignKey(Controller, related_name='additional_link')
    name = models.TextField(blank=True, null=False)
    link = models.TextField(blank=True, null=True)
| |
import json
import responses
from conftest import Mock
from upcloud_api import IPAddress, Server, Storage, login_user_block
class TestCreateServer:
def test_storage_prepare_post_body(self, manager):
    """Storage.to_dict() applies the default 'maxiops' tier."""
    os_disk = Storage(os='01000000-0000-4000-8000-000030200200', size=10)
    os_body = os_disk.to_dict()
    assert os_body['tier'] == 'maxiops'
    assert os_body['size'] == 10

    plain_disk = Storage(size=100)
    plain_body = plain_disk.to_dict()
    assert plain_body['tier'] == 'maxiops'
    assert plain_body['size'] == 100
def test_storage_prepare_post_body_optional_attributes(self, manager):
    """Optional attributes such as `address` pass through to_dict()."""
    disk = Storage(size=100, address='virtio:0')
    body = disk.to_dict()
    assert body['tier'] == 'maxiops'
    assert body['size'] == 100
    assert body['address'] == 'virtio:0'
def test_server_init(self, manager):
    """Server() keeps its constructor args and derives title from hostname."""
    server = Server(
        core_number=2,
        memory_amount=1024,
        hostname='my.example.com',
        zone='us-chi1',
        storage_devices=[
            Storage(os='01000000-0000-4000-8000-000030200200', size=10),
            Storage(size=100, title='storage disk 1'),
        ],
    )
    assert server.title == 'my.example.com'
    assert server.core_number == 2
    assert server.memory_amount == 1024
    assert server.hostname == server.title
    assert server.zone == 'us-chi1'
def test_server_prepare_post_body(self):
server = Server(
core_number=2,
memory_amount=1024,
hostname='my.example.com',
zone='us-chi1',
storage_devices=[
Storage(os='01000000-0000-4000-8000-000030200200', size=10),
Storage(),
],
)
body = server.prepare_post_body()
s1 = body['server']['storage_devices']['storage_device'][0]
assert s1['title'] == 'my.example.com OS disk'
assert s1['tier'] == 'maxiops'
assert s1['size'] == 10
assert s1['storage'] == '01000000-0000-4000-8000-000030200200'
assert s1['action'] == 'clone'
s2 = body['server']['storage_devices']['storage_device'][1]
assert s2['title'] == 'my.example.com storage disk 1'
assert s2['tier'] == 'maxiops'
assert s2['action'] == 'create'
assert s2['size'] == 10
assert body['server']['title'] == 'my.example.com'
assert body['server']['core_number'] == 2
assert body['server']['memory_amount'] == 1024
assert body['server']['hostname'] == server.title
assert body['server']['zone'] == 'us-chi1'
def test_server_prepare_post_body_optional_attributes(self):
server1 = Server(
core_number=2,
memory_amount=1024,
hostname='my.example.com',
zone='us-chi1',
storage_devices=[Storage(os='01000000-0000-4000-8000-000030200200', size=10)],
vnc_password='my-passwd',
password_delivery='email',
login_user=login_user_block('upclouduser', ['this-is-a-SSH-key']),
avoid_host='12345678',
user_data='https://my.script.com/some_script.py',
ip_addresses=[
IPAddress(family='IPv4', access='public'),
IPAddress(family='IPv6', access='public'),
],
)
server2_dict = {
'core_number': 2,
'memory_amount': 1024,
'hostname': 'my.example.com',
'zone': 'us-chi1',
'storage_devices': [{'os': '01000000-0000-4000-8000-000030200200', 'size': 10}],
'vnc_password': 'my-passwd',
'password_delivery': 'email',
'login_user': login_user_block('upclouduser', ['this-is-a-SSH-key']),
'avoid_host': '12345678',
'user_data': 'https://my.script.com/some_script.py',
'ip_addresses': [
{'family': 'IPv4', 'access': 'public'},
{'family': 'IPv6', 'access': 'public'},
],
}
server2 = Server._create_server_obj(server2_dict, cloud_manager=self)
body1 = server1.prepare_post_body()
body2 = server2.prepare_post_body()
for body in [body1, body2]:
assert body['server']['title'] == 'my.example.com'
assert body['server']['core_number'] == 2
assert body['server']['memory_amount'] == 1024
assert body['server']['hostname'] == server1.title
assert body['server']['zone'] == 'us-chi1'
assert body['server']['vnc_password'] == 'my-passwd'
assert body['server']['password_delivery'] == 'email'
assert body['server']['login_user'] == {
'username': 'upclouduser',
'create_password': 'no',
'ssh_keys': {'ssh_key': ['this-is-a-SSH-key']},
}
assert body['server']['avoid_host'] == '12345678'
assert body['server']['user_data'] == 'https://my.script.com/some_script.py'
assert body['server']['ip_addresses'] == {
'ip_address': [
{'family': 'IPv4', 'access': 'public'},
{'family': 'IPv6', 'access': 'public'},
]
}
@responses.activate
def test_create_server(self, manager):
responses.add(
responses.POST,
Mock.base_url + '/server',
body=Mock.read_from_file('server_create.json'),
status=202,
content_type='application/json',
)
server1 = Server(
core_number=2,
memory_amount=1024,
hostname='my.example.com',
zone='us-chi1',
storage_devices=[
Storage(os='01000000-0000-4000-8000-000030200200', size=10),
Storage(size=100, title='storage disk 1'),
],
)
manager.create_server(server1)
# assert correct values in response
assert type(server1).__name__ == 'Server'
assert server1.core_number == '2'
assert server1.memory_amount == '1024'
# assert ips and storages have correct types
assert type(server1.storage_devices[0]).__name__ == 'Storage'
assert type(server1.ip_addresses[0]).__name__ == 'IPAddress'
# assert new data was populated
assert server1.video_model == 'cirrus'
assert server1.vnc == 'off'
assert server1.vnc_password == 'aabbccdd'
@responses.activate
def test_create_server_with_dict(self, manager):
responses.add(
responses.POST,
Mock.base_url + '/server',
body=Mock.read_from_file('server_create.json'),
status=202,
content_type='application/json',
)
server1 = {
'core_number': 2,
'memory_amount': 1024,
'hostname': 'my.example.com',
'zone': 'us-chi1',
'storage_devices': [
{'os': '01000000-0000-4000-8000-000030200200', 'size': 10},
{'size': 100, 'title': 'storage disk 1'},
],
}
server1 = manager.create_server(server1)
# assert correct values in response
assert type(server1).__name__ == 'Server'
assert server1.core_number == '2'
assert server1.memory_amount == '1024'
# assert ips and storages have correct types
assert type(server1.storage_devices[0]).__name__ == 'Storage'
assert type(server1.ip_addresses[0]).__name__ == 'IPAddress'
# assert new data was populated
assert server1.video_model == 'cirrus'
assert server1.vnc == 'off'
assert server1.vnc_password == 'aabbccdd'
@responses.activate
def test_create_server_from_template(self, manager):
UUID = '01215a5a-c330-4565-81ca-0e0e22eac672'
def _from_template_callback(request):
request_body = json.loads(request.body)
storage = request_body['server']['storage_devices']['storage_device'][0]
# https://www.upcloud.com/api/8-servers/#creating-from-a-template
assert storage['action'] == 'clone'
assert storage['storage'] == UUID
return (201, {}, Mock.read_from_file('server_create.json'))
responses.add_callback(
responses.POST,
Mock.base_url + '/server',
content_type='application/json',
callback=_from_template_callback,
)
manager.create_server(
Server(
core_number=2,
memory_amount=1024,
hostname='my.example.com',
zone='us-chi1',
storage_devices=[
Storage(storage=UUID, size=10),
],
)
)
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Classes for handling build and repackaging of clients.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import logging
import os
import re
import shutil
import struct
import subprocess
import tempfile
import zipfile
from future.builtins import str
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import itervalues
# pylint: disable=g-import-not-at-top,unused-import
# This is a workaround so we don't need to maintain the whole PyInstaller
# codebase as a full-fledged dependency.
try:
# pytype: disable=import-error
from PyInstaller import __main__ as PyInstallerMain
# pytype: enable=import-error
except ImportError:
# We ignore this failure since most people running the code don't build their
# own clients and printing an error message causes confusion. Those building
# their own clients will need PyInstaller installed.
pass
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from fleetspeak.src.client.daemonservice.proto.fleetspeak_daemonservice import config_pb2 as fs_config_pb2
from fleetspeak.src.common.proto.fleetspeak import system_pb2 as fs_system_pb2
from grr_response_core import config
from grr_response_core import version
from grr_response_core.config import contexts
from grr_response_core.lib import config_validator_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
# Pull in local config validators.
from grr_response_core.lib.local import plugins
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.util.compat import yaml
# pylint: enable=g-import-not-at-top,unused-import
class BuildError(Exception):
  """Raised when building or repackaging a client fails."""
class BuilderBase(object):
  """A base class for builder / repacker that provides utility functions."""

  def __init__(self, context=None):
    # Always evaluate config options with the client-build context in front.
    self.context = [contexts.CLIENT_BUILD_CONTEXT] + (
        context or config.CONFIG.context[:])

  def GenerateDirectory(self,
                        input_dir=None,
                        output_dir=None,
                        replacements=None):
    """Recursively renders every file under input_dir into output_dir.

    Each source path is mirrored into output_dir, the (old, new) pairs in
    replacements are applied to the destination path, and the file content
    is rendered via GenerateFile.
    """
    source_root = utils.NormalizePath(input_dir)
    dest_root = utils.NormalizePath(output_dir)
    substitutions = replacements or []
    for root, _, filenames in os.walk(source_root):
      for name in filenames:
        source_path = utils.JoinPath(root, name)
        dest_path = source_path.replace(source_root, dest_root)
        for old, new in substitutions:
          dest_path = dest_path.replace(old, new)
        utils.EnsureDirExists(os.path.dirname(dest_path))
        self.GenerateFile(source_path, dest_path)

  def GenerateFile(self, input_filename=None, output_filename=None):
    """Generates a file from a template, interpolating config values."""
    # With no explicit template, derive it by appending ".in"; conversely a
    # ".in" suffix on the output name is stripped.
    if input_filename is None:
      input_filename = output_filename + ".in"
    if output_filename.endswith(".in"):
      output_filename = output_filename[:-3]
    logging.debug("Generating file %s from %s", output_filename, input_filename)
    with io.open(input_filename, "r") as template_fd:
      template = template_fd.read()
    rendered = config.CONFIG.InterpolateValue(template, context=self.context)
    with io.open(output_filename, "w") as out_fd:
      out_fd.write(rendered)
class ClientBuilder(BuilderBase):
  """A client builder is responsible for building the binary template.

  This is an abstract client builder class, used by the OS specific
  implementations. Note that client builders typically run on the target
  operating system.
  """

  # Keys that must appear in the build.yaml written next to the template.
  # Treat this as read-only class state: WriteBuildYaml works on a copy.
  REQUIRED_BUILD_YAML_KEYS = set([
      "Client.build_environment", "Client.build_time", "Template.build_type",
      "Template.build_context", "Template.version_major",
      "Template.version_minor", "Template.version_revision",
      "Template.version_release", "Template.arch"
  ])

  def __init__(self, context=None):
    super(ClientBuilder, self).__init__(context=context)
    self.build_dir = ""

  def MakeBuildDirectory(self):
    """Prepares the build directory."""
    # Create the build directory and let pyinstaller loose on it.
    self.build_dir = config.CONFIG.Get(
        "PyInstaller.build_dir", context=self.context)
    self.work_path = config.CONFIG.Get(
        "PyInstaller.workpath_dir", context=self.context)
    self.CleanDirectory(self.build_dir)
    self.CleanDirectory(self.work_path)

  def CleanDirectory(self, directory):
    """Removes directory (ignoring a missing one) and recreates it empty."""
    logging.info("Clearing directory %s", directory)
    try:
      shutil.rmtree(directory)
    except OSError:
      pass
    utils.EnsureDirExists(directory)

  def BuildWithPyInstaller(self):
    """Use pyinstaller to build a client package."""
    self.CleanDirectory(
        config.CONFIG.Get("PyInstaller.distpath", context=self.context))
    logging.info("Copying pyinstaller support files")
    self.spec_file = os.path.join(self.build_dir, "grr.spec")
    with open(self.spec_file, "wb") as fd:
      fd.write(config.CONFIG.Get("PyInstaller.spec", context=self.context))
    with open(os.path.join(self.build_dir, "version.txt"), "wb") as fd:
      fd.write(config.CONFIG.Get("PyInstaller.version", context=self.context))
    shutil.copy(
        src=config.CONFIG.Get("PyInstaller.icon_path", context=self.context),
        dst=os.path.join(self.build_dir, u"grr.ico"))
    # We expect the onedir output at this location.
    self.output_dir = os.path.join(
        config.CONFIG.Get("PyInstaller.distpath", context=self.context),
        "grr-client")
    args = [
        "--distpath",
        config.CONFIG.Get("PyInstaller.distpath", context=self.context),
        "--workpath",
        config.CONFIG.Get("PyInstaller.workpath_dir", context=self.context),
        self.spec_file,
    ]
    logging.info("Running pyinstaller: %s", args)
    PyInstallerMain.run(pyi_args=args)
    # Clear out some crud that pyinstaller includes.
    for path in ["tcl", "tk", "pytz"]:
      dir_path = os.path.join(self.output_dir, path)
      try:
        shutil.rmtree(dir_path)
      except OSError:
        logging.error("Unable to remove directory: %s", dir_path)
      try:
        os.mkdir(dir_path)
      except OSError:
        logging.error("Unable to create directory: %s", dir_path)
      file_path = os.path.join(dir_path, path)
      try:
        # Create an empty file so the directories get put in the installers.
        with open(file_path, "wb"):
          pass
      except IOError:
        logging.error("Unable to create file: %s", file_path)
    version_ini = version.VersionPath()
    shutil.copy(version_ini, os.path.join(self.output_dir, "version.ini"))
    with open(os.path.join(self.output_dir, "build.yaml"), "wb") as fd:
      self.WriteBuildYaml(fd)

  def WriteBuildYaml(self, fd, build_timestamp=True):
    """Write build spec to fd.

    Args:
      fd: Binary file-like object the YAML build spec is written to.
      build_timestamp: If True, include a Client.build_time entry.

    Raises:
      RuntimeError: if the generated keys do not match the required key set.
    """
    output = {
        "Client.build_environment":
            rdf_client.Uname.FromCurrentSystem().signature(),
        "Template.build_type":
            config.CONFIG.Get("ClientBuilder.build_type", context=self.context),
        "Template.version_major":
            config.CONFIG.Get("Source.version_major", context=self.context),
        "Template.version_minor":
            config.CONFIG.Get("Source.version_minor", context=self.context),
        "Template.version_revision":
            config.CONFIG.Get("Source.version_revision", context=self.context),
        "Template.version_release":
            config.CONFIG.Get("Source.version_release", context=self.context),
        "Template.arch":
            config.CONFIG.Get("Client.arch", context=self.context)
    }
    # Validate against a *copy* of the required key set. The previous
    # implementation called .remove() on the class attribute itself,
    # permanently mutating shared state so that a later call with
    # build_timestamp=True (or from another instance) raised spuriously.
    required_keys = set(self.REQUIRED_BUILD_YAML_KEYS)
    if build_timestamp:
      output["Client.build_time"] = rdfvalue.RDFDatetime.Now()
    else:
      required_keys.discard("Client.build_time")
    for key, value in iteritems(output):
      output[key] = str(value)
    output["Template.build_context"] = self.context
    output_keys = set(iterkeys(output))
    if output_keys != required_keys:
      raise RuntimeError("Bad build.yaml: expected %s, got %s" %
                         (required_keys, output_keys))
    fd.write(yaml.Dump(output).encode("utf-8"))

  def CopyMissingModules(self):
    """Copy any additional DLLs that cant be found."""

  def MakeExecutableTemplate(self, output_file=None):
    """Create the executable template.

    The client is built in two phases. First an executable template is created
    with the client binaries contained inside a zip file. Then the installation
    package is created by appending the SFX extractor to this template and
    writing a config file into the zip file. This technique allows the
    client build to be carried out once on the supported platform (e.g.
    windows with MSVS), but the deployable installer can be build on any
    platform which supports python. Subclasses for each OS do the actual
    work, we just make sure the output directory is set up correctly here.

    Args:
      output_file: string filename where we will write the template.
    """
    self.template_file = output_file or config.CONFIG.Get(
        "ClientBuilder.template_path", context=self.context)
    utils.EnsureDirExists(os.path.dirname(self.template_file))

  def MakeZip(self, input_dir, output_file):
    """Creates a ZIP archive of the files in the input directory.

    Args:
      input_dir: the name of the input directory.
      output_file: the name of the output ZIP archive without extension.
    """
    logging.info("Generating zip template file at %s", output_file)
    basename, _ = os.path.splitext(output_file)
    # TODO(user):pytype: incorrect make_archive() definition in typeshed.
    # pytype: disable=wrong-arg-types
    shutil.make_archive(
        basename, "zip", base_dir=".", root_dir=input_dir, verbose=True)
    # pytype: enable=wrong-arg-types
# pytype: enable=wrong-arg-types
class ClientRepacker(BuilderBase):
  """Takes the binary template and producing an installer.

  Note that this should be runnable on all operating systems.
  """

  # Config sections whose options are copied into the deployable config.
  CONFIG_SECTIONS = [
      "CA", "Client", "ClientRepacker", "Logging", "Config", "Nanny", "Osquery",
      "Installer", "Template"
  ]

  # Config options that should never make it to a deployable binary.
  SKIP_OPTION_LIST = ["Client.private_key"]

  def __init__(self, context=None, signer=None):
    super(ClientRepacker, self).__init__(context=context)
    self.signer = signer
    self.signed_template = False

  def GetClientConfig(self, context, validate=True, deploy_timestamp=True):
    """Generates the client config file for inclusion in deployable binaries.

    Args:
      context: Config context list used to resolve option values.
      validate: If True, run ValidateEndConfig on the generated config.
      deploy_timestamp: If True, record Client.deploy_time in the config.

    Returns:
      The generated client configuration as a string.
    """
    with utils.TempDirectory() as tmp_dir:
      # Make sure we write the file in yaml format.
      filename = os.path.join(
          tmp_dir,
          config.CONFIG.Get("ClientBuilder.config_filename", context=context))
      new_config = config.CONFIG.MakeNewConfig()
      new_config.Initialize(reset=True, data="")
      new_config.SetWriteBack(filename)
      # Only copy certain sections to the client. We enumerate all
      # defined options and then resolve those from the config in the
      # client's context. The result is the raw option as if the
      # client read our config file.
      client_context = context[:]
      while contexts.CLIENT_BUILD_CONTEXT in client_context:
        client_context.remove(contexts.CLIENT_BUILD_CONTEXT)
      for descriptor in sorted(config.CONFIG.type_infos, key=lambda x: x.name):
        if descriptor.name in self.SKIP_OPTION_LIST:
          continue
        if descriptor.section in self.CONFIG_SECTIONS:
          value = config.CONFIG.GetRaw(
              descriptor.name, context=client_context, default=None)
          if value is not None:
            logging.debug("Copying config option to client: %s",
                          descriptor.name)
            new_config.SetRaw(descriptor.name, value)
      if deploy_timestamp:
        deploy_time_string = str(rdfvalue.RDFDatetime.Now())
        new_config.Set("Client.deploy_time", deploy_time_string)
      new_config.Write()
      if validate:
        self.ValidateEndConfig(new_config)
      private_validator = config.CONFIG.Get(
          "ClientBuilder.private_config_validator_class", context=context)
      if private_validator:
        try:
          validator = config_validator_base.PrivateConfigValidator.classes[
              private_validator]()
        except KeyError:
          logging.error(
              "Couldn't find config validator class %s, "
              "you probably need to copy it into lib/local", private_validator)
          raise
        validator.ValidateEndConfig(new_config, self.context)
      # Use a context manager so the handle is closed; the previous version
      # leaked the file object returned by io.open().
      with io.open(filename, "r") as config_fd:
        return config_fd.read()

  def ValidateEndConfig(self, config_obj, errors_fatal=True):
    """Given a generated client config, attempt to check for common errors.

    Args:
      config_obj: The generated client config to check.
      errors_fatal: If True, raise on any error instead of returning them.

    Returns:
      The list of error strings (when errors_fatal is False).

    Raises:
      RuntimeError: if errors were found and errors_fatal is True.
    """
    errors = []
    if not config.CONFIG["Client.fleetspeak_enabled"]:
      location = config_obj.Get("Client.server_urls", context=self.context)
      if not location:
        errors.append("Empty Client.server_urls")
      for url in location:
        if not url.startswith("http"):
          errors.append("Bad Client.server_urls specified %s" % url)
    key_data = config_obj.GetRaw(
        "Client.executable_signing_public_key",
        default=None,
        context=self.context)
    if key_data is None:
      errors.append("Missing Client.executable_signing_public_key.")
    elif not key_data.startswith("-----BEGIN PUBLIC"):
      errors.append(
          "Invalid Client.executable_signing_public_key: %s" % key_data)
    else:
      # Parsing validates the key material; the result is discarded.
      rsa_key = rdf_crypto.RSAPublicKey()
      rsa_key.ParseFromHumanReadable(key_data)
    if not config.CONFIG["Client.fleetspeak_enabled"]:
      certificate = config_obj.GetRaw(
          "CA.certificate", default=None, context=self.context)
      if certificate is None or not certificate.startswith("-----BEGIN CERTIF"):
        errors.append("CA certificate missing from config.")
    for bad_opt in ["Client.private_key"]:
      if config_obj.Get(bad_opt, context=self.context, default=""):
        errors.append("Client cert in conf, this should be empty at deployment"
                      " %s" % bad_opt)
    if errors_fatal and errors:
      for error in errors:
        logging.error("Build Config Error: %s", error)
      raise RuntimeError("Bad configuration generated. Terminating.")
    else:
      return errors

  def MakeDeployableBinary(self, template_path, output_path):
    """Use the template to create a customized installer."""
class WindowsClientRepacker(ClientRepacker):
  """Repackages windows installers."""

  def ValidateEndConfig(self, config_obj, errors_fatal=True):
    """Windows specific config validations.

    Args:
      config_obj: The generated client config to check.
      errors_fatal: If True, raise on any error instead of returning them.

    Returns:
      The list of error strings (when errors_fatal is False).

    Raises:
      RuntimeError: if errors were found and errors_fatal is True.
    """
    errors = super(WindowsClientRepacker, self).ValidateEndConfig(
        config_obj, errors_fatal=errors_fatal)
    install_dir = config_obj["Client.install_path"]
    # Temp roots must be Windows-style paths under the install directory.
    for path in config_obj["Client.tempdir_roots"]:
      if path.startswith("/"):
        errors.append(
            "Client.tempdir_root %s starts with /, probably has Unix path." %
            path)
      if not path.startswith(install_dir):
        errors.append(
            "Client.tempdir_root %s is not inside the install_dir %s, this is "
            "a security risk" % ((path, install_dir)))
    if config_obj.Get("Logging.path").startswith("/"):
      errors.append("Logging.path starts with /, probably has Unix path. %s" %
                    config_obj["Logging.path"])
    if "Windows\\" in config_obj.GetRaw("Logging.path"):
      errors.append("Windows in Logging.path, you probably want "
                    "%(WINDIR|env) instead")
    if not config_obj["Client.binary_name"].endswith(".exe"):
      errors.append("Missing .exe extension on binary_name %s" %
                    config_obj["Client.binary_name"])
    if not config_obj["Nanny.binary"].endswith(".exe"):
      errors.append("Missing .exe extension on nanny_binary")
    if errors_fatal and errors:
      for error in errors:
        logging.error("Build Config Error: %s", error)
      raise RuntimeError("Bad configuration generated. Terminating.")
    else:
      return errors

  def MakeDeployableBinary(self, template_path, output_path):
    """Repackage the template zip with the installer."""
    context = self.context + ["Client Context"]
    zip_data = io.BytesIO()
    output_zip = zipfile.ZipFile(
        zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
    # Open by filename so ZipFile owns the handle; the previous version
    # passed an anonymous open() object that was never closed.
    z_template = zipfile.ZipFile(template_path)
    # Track which files we've copied already.
    completed_files = [
        "grr-client.exe", "GRRservice.exe", "dbg_grr-client.exe",
        "dbg_GRRservice.exe"
    ]
    # Change the name of the main binary to the configured name.
    client_bin_name = config.CONFIG.Get("Client.binary_name", context=context)
    console_build = config.CONFIG.Get("ClientBuilder.console", context=context)
    if console_build:
      client_filename = "dbg_grr-client.exe"
      service_filename = "dbg_GRRservice.exe"
    else:
      client_filename = "grr-client.exe"
      service_filename = "GRRservice.exe"
    bin_name = z_template.getinfo(client_filename)
    output_zip.writestr(client_bin_name, z_template.read(bin_name))
    CopyFileInZip(z_template, "grr-client.exe.manifest", output_zip,
                  "%s.manifest" % client_bin_name)
    completed_files.append("grr-client.exe.manifest")
    # Change the name of the service binary to the configured name.
    service_template = z_template.getinfo(service_filename)
    service_bin_name = config.CONFIG.Get(
        "Nanny.service_binary_name", context=context)
    output_zip.writestr(service_bin_name, z_template.read(service_template))
    if config.CONFIG["Client.fleetspeak_enabled"]:
      self._GenerateFleetspeakServiceConfig(output_zip)
    if self.signed_template:
      # If the template libs were already signed we can skip signing
      CreateNewZipWithSignedLibs(
          z_template, output_zip, ignore_files=completed_files)
    else:
      CreateNewZipWithSignedLibs(
          z_template,
          output_zip,
          ignore_files=completed_files,
          signer=self.signer)
    output_zip.close()
    # Release the template file handle now that all members were copied.
    z_template.close()
    return self.MakeSelfExtractingZip(zip_data.getvalue(), output_path)

  def _GenerateFleetspeakServiceConfig(self, zip_file):
    """Interpolates (if needed), validates and adds the Fleetspeak config."""
    orig_fs_config_path = config.CONFIG.Get(
        "ClientBuilder.fleetspeak_config_path", context=self.context)
    final_fs_config_fname = config.CONFIG.Get(
        "Client.fleetspeak_unsigned_config_fname", context=self.context)
    if orig_fs_config_path.endswith(".in"):
      logging.info("Interpolating %s", orig_fs_config_path)
      logging.warning("Backslashes will be naively re-escaped after "
                      "interpolation. If this is not desired, use a Fleetspeak "
                      "config file without the '.in' extension.")
      with utils.TempDirectory() as temp_dir:
        temp_fs_config_path = os.path.join(temp_dir, final_fs_config_fname)
        with io.open(orig_fs_config_path, "r") as source:
          with io.open(temp_fs_config_path, "w") as dest:
            interpolated = config.CONFIG.InterpolateValue(
                source.read(), context=self.context)
            # Re-escape backslashes destroyed by the interpolation.
            dest.write(re.sub(r"\\", r"\\\\", interpolated))
        self._ValidateFleetspeakServiceConfig(temp_fs_config_path)
        zip_file.write(temp_fs_config_path, final_fs_config_fname)
    else:
      self._ValidateFleetspeakServiceConfig(orig_fs_config_path)
      zip_file.write(orig_fs_config_path, final_fs_config_fname)

  def _ValidateFleetspeakServiceConfig(self, config_path):
    """Validates a Fleetspeak service config.

    Checks that the given file is a valid TextFormat representation of
    a Fleetspeak service config proto.

    Args:
      config_path: Path to the config file.

    Raises:
      BuildError: If the config is not valid.
    """
    with open(config_path, "rb") as f:
      pool = descriptor_pool.DescriptorPool()
      pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)
      parsed_config = text_format.Parse(
          f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)
      if parsed_config.factory != "Daemon":
        raise BuildError(
            "Fleetspeak config does not have the expected factory type.")
      daemon_cfg = fs_config_pb2.Config()
      parsed_config.config.Unpack(daemon_cfg)
      if not daemon_cfg.argv:
        raise BuildError(
            "Fleetspeak daemon service config does not specify command line "
            "args.")

  def MakeSelfExtractingZip(self, payload_data, output_path):
    """Repack the installer into the payload.

    Args:
      payload_data: data payload for zip file
      output_path: filename for the zip output

    Raises:
      RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.

    Returns:
      output_path: filename string of zip output file
    """
    context = self.context + ["Client Context"]
    src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
    zip_data = io.BytesIO()
    output_zip = zipfile.ZipFile(
        zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
    config_file_name = config.CONFIG.Get(
        "ClientBuilder.config_filename", context=context)
    # Copy the rest of the files from the package to the new zip.
    for template_file in src_zip.namelist():
      if template_file != config_file_name:
        # Avoid writing the config file twice if we're repacking a binary that
        # has already been run through deployment. We write it in the next step,
        # so no need to copy over from the original here.
        CopyFileInZip(src_zip, template_file, output_zip)
    client_config_content = self.GetClientConfig(context)
    output_zip.writestr(
        config_file_name,
        client_config_content.encode("utf-8"),  # pytype: disable=attribute-error
        compress_type=zipfile.ZIP_STORED)
    # The zip file comment is used by the self extractor to run the installation
    # script. Comment has to be `bytes` object because `zipfile` module is not
    # smart enough to properly handle `unicode` objects.
    output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
        "ClientBuilder.autorun_command_line", context=context).encode("utf-8")
    output_zip.close()
    utils.EnsureDirExists(os.path.dirname(output_path))
    with open(output_path, "wb") as fd:
      # First write the installer stub
      stub_data = io.BytesIO()
      unzipsfx_stub = config.CONFIG.Get(
          "ClientBuilder.unzipsfx_stub", context=context)
      # Read via a context manager; the previous version leaked the handle.
      with open(unzipsfx_stub, "rb") as stub_fd:
        stub_raw = stub_fd.read()
      # Check stub has been compiled with the requireAdministrator manifest.
      if b"level=\"requireAdministrator" not in stub_raw:
        # Fixed missing space: the implicit concatenation previously produced
        # "Not compiled with therequireAdministrator".
        raise RuntimeError("Bad unzip binary in use. Not compiled with the "
                           "requireAdministrator manifest option.")
      stub_data.write(stub_raw)
      # If in verbose mode, modify the unzip bins PE header to run in console
      # mode for easier debugging.
      SetPeSubsystem(
          stub_data,
          console=config.CONFIG.Get("ClientBuilder.console", context=context))
      # Now patch up the .rsrc section to contain the payload.
      end_of_file = zip_data.tell() + stub_data.tell()
      # This is the IMAGE_SECTION_HEADER.Name which is also the start of
      # IMAGE_SECTION_HEADER.
      offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
      # IMAGE_SECTION_HEADER.PointerToRawData is a 32 bit int.
      stub_data.seek(offset_to_rsrc + 20)
      start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
      # Adjust IMAGE_SECTION_HEADER.SizeOfRawData to span from the old start to
      # the end of file.
      stub_data.seek(offset_to_rsrc + 16)
      stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))
      # Concatenate stub and zip file.
      out_data = io.BytesIO()
      out_data.write(stub_data.getvalue())
      out_data.write(zip_data.getvalue())
      # Then write the actual output file.
      fd.write(out_data.getvalue())
    if self.signer:
      self.signer.SignFile(output_path)
    logging.info("Deployable binary generated at %s", output_path)
    return output_path
class DarwinClientRepacker(ClientRepacker):
  """Repackage OSX clients."""

  def MakeDeployableBinary(self, template_path, output_path):
    """Copies the OSX template and appends the client config to its zip.

    Args:
      template_path: Path to the built client template.
      output_path: Destination path for the deployable package.

    Returns:
      The output_path of the generated package.
    """
    repack_context = self.context + ["Client Context"]
    utils.EnsureDirExists(os.path.dirname(output_path))
    config_blob = self.GetClientConfig(repack_context)
    # The deployable binary is simply the template with a config.yaml
    # member appended to its zip archive.
    shutil.copyfile(template_path, output_path)
    package = zipfile.ZipFile(output_path, mode="a")
    package.writestr(zipfile.ZipInfo(filename="config.yaml"), config_blob)
    package.close()
    return output_path
class LinuxClientRepacker(ClientRepacker):
  """Repackage Linux templates."""

  # TODO(user):pytype: incorrect shutil.move() definition in typeshed.
  # pytype: disable=wrong-arg-types
  def GenerateDPKGFiles(self, template_path):
    """Generates the files needed by dpkg-buildpackage.

    Args:
      template_path: Directory containing the unpacked template (expects a
        dist/debian tree inside it).
    """
    # Rename the generated binaries to the correct name.
    template_binary_dir = os.path.join(template_path, "dist/debian/grr-client")
    package_name = config.CONFIG.Get(
        "ClientBuilder.package_name", context=self.context)
    target_binary_dir = os.path.join(
        template_path, "dist/debian/%s%s" %
        (package_name,
         config.CONFIG.Get("ClientBuilder.target_dir", context=self.context)))
    if package_name == "grr-client":
      # Need to rename the template path or the move will fail.
      shutil.move(template_binary_dir, "%s-template" % template_binary_dir)
      template_binary_dir = "%s-template" % template_binary_dir
    utils.EnsureDirExists(os.path.dirname(target_binary_dir))
    shutil.move(template_binary_dir, target_binary_dir)
    shutil.move(
        os.path.join(target_binary_dir, "grr-client"),
        os.path.join(
            target_binary_dir,
            config.CONFIG.Get("Client.binary_name", context=self.context)))
    # Render the debian.in templates, substituting the package name.
    deb_in_dir = os.path.join(template_path, "dist/debian/debian.in/")
    self.GenerateDirectory(deb_in_dir, os.path.join(
        template_path, "dist/debian"), [("grr-client", package_name)])
    # Generate directories for the /usr/sbin link.
    utils.EnsureDirExists(
        os.path.join(template_path, "dist/debian/%s/usr/sbin" % package_name))
    if config.CONFIG.Get("Client.fleetspeak_enabled", context=self.context):
      # Fleetspeak-managed clients get no nanny/init scripts; generate the
      # Fleetspeak config, clean up and return early.
      self._GenerateFleetspeakConfig(template_path)
      shutil.rmtree(deb_in_dir)
      shutil.rmtree(os.path.join(template_path, "dist", "fleetspeak"))
      return
    # Generate the nanny template. This only exists from client version 3.1.2.5
    # onwards.
    if config.CONFIG["Template.version_numeric"] >= 3125:
      self.GenerateFile(
          os.path.join(target_binary_dir, "nanny.sh.in"),
          os.path.join(target_binary_dir, "nanny.sh"))
    # Generate the upstart template.
    self.GenerateFile(
        os.path.join(template_path, "dist/debian/upstart.in/grr-client.conf"),
        os.path.join(template_path, "dist/debian/%s.upstart" % package_name))
    # Generate the initd template. The init will not run if it detects upstart
    # is present.
    self.GenerateFile(
        os.path.join(template_path, "dist/debian/initd.in/grr-client"),
        os.path.join(template_path, "dist/debian/%s.init" % package_name))
    # Generate the systemd unit file.
    self.GenerateFile(
        os.path.join(template_path,
                     "dist/debian/systemd.in/grr-client.service"),
        os.path.join(template_path, "dist/debian/%s.service" % package_name))
    # Clean up the template dirs.
    shutil.rmtree(deb_in_dir)
    shutil.rmtree(os.path.join(template_path, "dist/debian/upstart.in"))
    shutil.rmtree(os.path.join(template_path, "dist/debian/initd.in"))
    shutil.rmtree(os.path.join(template_path, "dist/debian/systemd.in"))

  # pytype: enable=wrong-arg-types

  def _GenerateFleetspeakConfig(self, build_dir):
    """Generates a Fleetspeak config for GRR in the debian build dir."""
    source_config = os.path.join(
        build_dir, "dist", "fleetspeak",
        os.path.basename(
            config.CONFIG.Get(
                "ClientBuilder.fleetspeak_config_path", context=self.context)))
    fleetspeak_service_dir = config.CONFIG.Get(
        "ClientBuilder.fleetspeak_service_dir", context=self.context)
    package_name = config.CONFIG.Get(
        "ClientBuilder.package_name", context=self.context)
    # [1:] strips the leading "/" so os.path.join treats the service dir as
    # relative to the package build dir.
    dest_config_dir = os.path.join(build_dir, "dist", "debian", package_name,
                                   fleetspeak_service_dir[1:])
    utils.EnsureDirExists(dest_config_dir)
    dest_config_path = os.path.join(
        dest_config_dir,
        config.CONFIG.Get(
            "Client.fleetspeak_unsigned_config_fname", context=self.context))
    self.GenerateFile(
        input_filename=source_config, output_filename=dest_config_path)

  def MakeDeployableBinary(self, template_path, output_path):
    """This will add the config to the client template and create a .deb.

    Args:
      template_path: Path to the template zip to repack.
      output_path: Destination path for the .deb (and .changes) output.

    Returns:
      output_path on success; None if dpkg-buildpackage is not installed.
    """
    buildpackage_binary = "/usr/bin/dpkg-buildpackage"
    if not os.path.exists(buildpackage_binary):
      logging.error("dpkg-buildpackage not found, unable to repack client.")
      return
    with utils.TempDirectory() as tmp_dir:
      template_dir = os.path.join(tmp_dir, "dist")
      utils.EnsureDirExists(template_dir)
      # Unpack the template zip into the temporary dist directory.
      zf = zipfile.ZipFile(template_path)
      for name in zf.namelist():
        dirname = os.path.dirname(name)
        utils.EnsureDirExists(os.path.join(template_dir, dirname))
        with open(os.path.join(template_dir, name), "wb") as fd:
          fd.write(zf.read(name))
      # Generate the dpkg files.
      self.GenerateDPKGFiles(tmp_dir)
      # Create a client config.
      client_context = ["Client Context"] + self.context
      client_config_content = self.GetClientConfig(client_context)
      # We need to strip leading /'s or .join will ignore everything that comes
      # before it.
      target_dir = config.CONFIG.Get(
          "ClientBuilder.target_dir", context=self.context).lstrip("/")
      agent_dir = os.path.join(
          template_dir, "debian",
          config.CONFIG.Get("ClientBuilder.package_name", context=self.context),
          target_dir)
      # NOTE(review): file is opened in binary mode but GetClientConfig()
      # appears to return text — confirm this write is str/bytes-safe under
      # Python 3.
      with open(
          os.path.join(
              agent_dir,
              config.CONFIG.Get(
                  "ClientBuilder.config_filename", context=self.context)),
          "wb") as fd:
        fd.write(client_config_content)
      # Set the daemon to executable.
      os.chmod(
          os.path.join(
              agent_dir,
              config.CONFIG.Get("Client.binary_name", context=self.context)),
          0o755)
      arch = config.CONFIG.Get("Template.arch", context=self.context)
      try:
        old_working_dir = os.getcwd()
      except OSError:
        # cwd may have been deleted out from under us; fall back somewhere safe.
        old_working_dir = os.environ.get("HOME", "/tmp")
      try:
        os.chdir(template_dir)
        command = [buildpackage_binary, "-uc", "-d", "-b", "-a%s" % arch]
        try:
          subprocess.check_output(command, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
          # Signing failures are tolerated (unsigned packages still work);
          # anything else is fatal. NOTE(review): e.output is bytes on
          # Python 3 — confirm the substring test behaves as intended there.
          if "Failed to sign" not in e.output:
            logging.error("Error calling %s.", command)
            logging.error(e.output)
            raise
        filename_base = config.CONFIG.Get(
            "ClientBuilder.debian_package_base", context=self.context)
        output_base = config.CONFIG.Get(
            "ClientRepacker.output_basename", context=self.context)
      finally:
        try:
          os.chdir(old_working_dir)
        except OSError:
          pass
      utils.EnsureDirExists(os.path.dirname(output_path))
      # Move the dpkg outputs (.changes plus the configured extension) into
      # place under the requested output name.
      for extension in [
          ".changes",
          config.CONFIG.Get(
              "ClientBuilder.output_extension", context=self.context)
      ]:
        input_name = "%s%s" % (filename_base, extension)
        output_name = "%s%s" % (output_base, extension)
        # TODO(user):pytype: incorrect move() definition in typeshed.
        # pytype: disable=wrong-arg-types
        shutil.move(
            os.path.join(tmp_dir, input_name),
            os.path.join(os.path.dirname(output_path), output_name))
        # pytype: enable=wrong-arg-types
      logging.info("Created package %s", output_path)
      return output_path
class CentosClientRepacker(LinuxClientRepacker):
  """Repackages Linux RPM templates."""

  def Sign(self, rpm_filename):
    """Signs the given RPM if a signer was configured; otherwise a no-op."""
    if self.signer:
      return self.signer.AddSignatureToRPMs([rpm_filename])

  def MakeDeployableBinary(self, template_path, output_path):
    """This will add the config to the client template and create a .rpm.

    Args:
      template_path: Path to the zipped client template.
      output_path: Where the final .rpm should be written.

    Returns:
      output_path on success, or None when rpmbuild is missing.
    """

    rpmbuild_binary = "/usr/bin/rpmbuild"
    if not os.path.exists(rpmbuild_binary):
      logging.error("rpmbuild not found, unable to repack client.")
      return

    with utils.TempDirectory() as tmp_dir:
      template_dir = os.path.join(tmp_dir, "dist")
      utils.EnsureDirExists(template_dir)

      # Unpack the template zip into the temp build directory.
      zf = zipfile.ZipFile(template_path)
      for name in zf.namelist():
        dirname = os.path.dirname(name)
        utils.EnsureDirExists(os.path.join(template_dir, dirname))
        with open(os.path.join(template_dir, name), "wb") as fd:
          fd.write(zf.read(name))

      # Set up a RPM building environment.
      rpm_root_dir = os.path.join(tmp_dir, "rpmbuild")

      rpm_build_dir = os.path.join(rpm_root_dir, "BUILD")
      utils.EnsureDirExists(rpm_build_dir)

      rpm_buildroot_dir = os.path.join(rpm_root_dir, "BUILDROOT")
      utils.EnsureDirExists(rpm_buildroot_dir)

      rpm_rpms_dir = os.path.join(rpm_root_dir, "RPMS")
      utils.EnsureDirExists(rpm_rpms_dir)

      rpm_specs_dir = os.path.join(rpm_root_dir, "SPECS")
      utils.EnsureDirExists(rpm_specs_dir)

      template_binary_dir = os.path.join(tmp_dir, "dist/rpmbuild/grr-client")

      target_binary_dir = "%s%s" % (
          rpm_build_dir,
          config.CONFIG.Get("ClientBuilder.target_dir", context=self.context))

      utils.EnsureDirExists(os.path.dirname(target_binary_dir))
      # Remove any stale target dir before moving the template in place.
      try:
        shutil.rmtree(target_binary_dir)
      except OSError:
        pass
      # TODO(user):pytype: incorrect move() definition in typeshed.
      # pytype: disable=wrong-arg-types
      shutil.move(template_binary_dir, target_binary_dir)
      # pytype: enable=wrong-arg-types

      client_name = config.CONFIG.Get("Client.name", context=self.context)
      client_binary_name = config.CONFIG.Get(
          "Client.binary_name", context=self.context)
      # Rename the shipped binary if the deployment uses a custom name.
      if client_binary_name != "grr-client":
        # TODO(user):pytype: incorrect move() definition in typeshed.
        # pytype: disable=wrong-arg-types
        shutil.move(
            os.path.join(target_binary_dir, "grr-client"),
            os.path.join(target_binary_dir, client_binary_name))
        # pytype: enable=wrong-arg-types

      if config.CONFIG.Get("Client.fleetspeak_enabled", context=self.context):
        self._GenerateFleetspeakConfig(template_dir, rpm_build_dir)
        if not config.CONFIG.Get(
            "Client.fleetspeak_service_name", context=self.context):
          # The Fleetspeak service name is required when generating the RPM
          # spec file.
          raise BuildError("Client.fleetspeak_service_name is not set.")
      else:
        self._GenerateInitConfigs(template_dir, rpm_build_dir)

      # Generate spec
      spec_filename = os.path.join(rpm_specs_dir, "%s.spec" % client_name)
      self.GenerateFile(
          os.path.join(tmp_dir, "dist/rpmbuild/grr.spec.in"), spec_filename)

      # Generate prelinking blacklist file
      prelink_target_filename = os.path.join(rpm_build_dir,
                                             "etc/prelink.conf.d",
                                             "%s.conf" % client_name)

      utils.EnsureDirExists(os.path.dirname(prelink_target_filename))
      self.GenerateFile(
          os.path.join(tmp_dir, "dist/rpmbuild/prelink_blacklist.conf.in"),
          prelink_target_filename)

      # Create a client config.
      client_context = ["Client Context"] + self.context
      client_config_content = self.GetClientConfig(client_context)

      with open(
          os.path.join(
              target_binary_dir,
              config.CONFIG.Get(
                  "ClientBuilder.config_filename", context=self.context)),
          "wb") as fd:
        fd.write(client_config_content)

      # Set the daemon to executable.
      os.chmod(os.path.join(target_binary_dir, client_binary_name), 0o755)

      client_arch = config.CONFIG.Get("Template.arch", context=self.context)
      # rpmbuild expects "x86_64" rather than the debian-style "amd64".
      if client_arch == "amd64":
        client_arch = "x86_64"

      command = [
          rpmbuild_binary, "--define", "_topdir " + rpm_root_dir, "--target",
          client_arch, "--buildroot", rpm_buildroot_dir, "-bb", spec_filename
      ]
      try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
      except subprocess.CalledProcessError as e:
        logging.error("Error calling %s.", command)
        logging.error(e.output)
        raise

      client_version = config.CONFIG.Get(
          "Template.version_string", context=self.context)
      rpm_filename = os.path.join(
          rpm_rpms_dir, client_arch,
          "%s-%s-1.%s.rpm" % (client_name, client_version, client_arch))

      utils.EnsureDirExists(os.path.dirname(output_path))
      shutil.move(rpm_filename, output_path)

      logging.info("Created package %s", output_path)
      # Sign after moving so the final artifact carries the signature.
      self.Sign(output_path)
      return output_path

  def _GenerateFleetspeakConfig(self, template_dir, rpm_build_dir):
    """Generates a Fleetspeak config for GRR."""
    source_config = os.path.join(
        template_dir, "fleetspeak",
        os.path.basename(
            config.CONFIG.Get(
                "ClientBuilder.fleetspeak_config_path", context=self.context)))
    fleetspeak_service_dir = config.CONFIG.Get(
        "ClientBuilder.fleetspeak_service_dir", context=self.context)
    # Strip the leading "/" so os.path.join treats it as a relative subpath.
    dest_config_dir = os.path.join(rpm_build_dir, fleetspeak_service_dir[1:])
    utils.EnsureDirExists(dest_config_dir)
    dest_config_path = os.path.join(
        dest_config_dir,
        config.CONFIG.Get(
            "Client.fleetspeak_unsigned_config_fname", context=self.context))
    self.GenerateFile(
        input_filename=source_config, output_filename=dest_config_path)

  def _GenerateInitConfigs(self, template_dir, rpm_build_dir):
    """Generates init-system configs."""
    client_name = config.CONFIG.Get("Client.name", context=self.context)
    initd_target_filename = os.path.join(rpm_build_dir, "etc/init.d",
                                         client_name)

    # Generate init.d
    utils.EnsureDirExists(os.path.dirname(initd_target_filename))
    self.GenerateFile(
        os.path.join(template_dir, "rpmbuild/grr-client.initd.in"),
        initd_target_filename)

    # Generate systemd unit
    # Only client templates from version 3.1.2.5 onwards ship a systemd unit.
    if config.CONFIG["Template.version_numeric"] >= 3125:
      systemd_target_filename = os.path.join(rpm_build_dir,
                                             "usr/lib/systemd/system/",
                                             "%s.service" % client_name)

      utils.EnsureDirExists(os.path.dirname(systemd_target_filename))
      self.GenerateFile(
          os.path.join(template_dir, "rpmbuild/grr-client.service.in"),
          systemd_target_filename)
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None, signer=None):
  """Copies a single archive member from one ZipFile into another.

  Args:
    from_zip: Source ZipFile (opened for reading).
    from_name: Name of the member to copy.
    to_zip: Destination ZipFile (opened for writing).
    to_name: Optional name for the copy; defaults to from_name.
    signer: Optional signer; when given, the payload is signed in transit.
  """
  dest_name = from_name if to_name is None else to_name
  payload = from_zip.read(from_name)
  if signer:
    logging.debug("Signing %s", from_name)
    payload = signer.SignBuffer(payload)
  to_zip.writestr(dest_name, payload)
def CreateNewZipWithSignedLibs(z_in,
                               z_out,
                               ignore_files=None,
                               signer=None,
                               skip_signing_files=None):
  """Copies files from one zip to another, signing all qualifying files.

  Files whose extension is signable (.sys/.exe/.dll/.pyd) are extracted to
  temporary files, signed, and written back; everything else is copied as-is.

  Args:
    z_in: Source ZipFile.
    z_out: Destination ZipFile.
    ignore_files: Optional iterable of member names to drop entirely.
    signer: Optional signer exposing SignFiles (batch) or SignFile.
    skip_signing_files: Optional iterable of member names to copy unsigned.
  """
  ignore_files = ignore_files or []
  skip_signing_files = skip_signing_files or []
  extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"]
  to_sign = []
  for template_file in z_in.namelist():
    if template_file not in ignore_files:
      extension = os.path.splitext(template_file)[1].lower()
      if (signer and template_file not in skip_signing_files and
          extension in extensions_to_sign):
        to_sign.append(template_file)
      else:
        CopyFileInZip(z_in, template_file, z_out)

  # Extract signable members to temp files so the signer can operate on paths.
  temp_files = {}
  for filename in to_sign:
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as temp_fd:
      temp_fd.write(z_in.read(filename))
    temp_files[filename] = path

  try:
    try:
      signer.SignFiles(temp_files.values())
    except AttributeError:
      # Signers without batch support only expose SignFile.
      for f in temp_files.values():
        signer.SignFile(f)

    for filename, tempfile_path in temp_files.items():
      # Read the (now signed) payload back; close the handle promptly
      # instead of leaking it via open(...).read().
      with open(tempfile_path, "rb") as signed_fd:
        z_out.writestr(filename, signed_fd.read())
  finally:
    # mkstemp files are the caller's responsibility — clean them up.
    for tempfile_path in temp_files.values():
      try:
        os.remove(tempfile_path)
      except OSError:
        pass
def SetPeSubsystem(fd, console=True):
  """Patches the PE subsystem field of a Windows executable in place.

  Args:
    fd: Seekable, writable file-like object containing a PE image. The file
      position is restored before returning.
    console: If True, mark the binary as a console application (subsystem 3);
      otherwise as a GUI application (subsystem 2).
  """
  saved_pos = fd.tell()
  # _IMAGE_DOS_HEADER.e_lfanew holds the offset of the NT headers.
  fd.seek(0x3c)
  header_offset = struct.unpack("<I", fd.read(4))[0]
  # _IMAGE_NT_HEADERS.OptionalHeader.Subsystem ( 0x18 + 0x44)
  fd.seek(header_offset + 0x5c)
  fd.write(b"\x03" if console else b"\x02")
  fd.seek(saved_pos)
| |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of all environs related calls
"""
from deploy_board.webapp.helpers.deployclient import DeployClient
from deploy_board.settings import IS_PINTEREST
# Default page size used when listing environments.
DEFAULT_ENV_SIZE = 30
# Reserved stage name for builds.
BUILD_STAGE = 'BUILD'
# Value sets mirrored from the Teletraan backend enums; kept in sync manually.
DEPLOY_STAGE_VALUES = ['UNKNOWN', 'PRE_DOWNLOAD', 'DOWNLOADING', 'POST_DOWNLOAD', 'STAGING',
                       'PRE_RESTART', 'RESTARTING', 'POST_RESTART', 'SERVING_BUILD', 'STOPPING', 'STOPPED']
DEPLOY_PRIORITY_VALUES = ['LOWER', 'LOW', 'NORMAL', 'HIGH', 'HIGHER']
ACCEPTANCE_TYPE_VALUES = ['AUTO', 'MANUAL']
ACCEPTANCE_STATUS_VALUES = ['PENDING_DEPLOY', 'OUTSTANDING', 'PENDING_ACCEPT', 'ACCEPTED',
                            'REJECTED',
                            'TERMINATED']
AGENT_STATE_VALUES = ["NORMAL", "PAUSED_BY_SYSTEM", "PAUSED_BY_USER", "RESET", "DELETE",
                      "UNREACHABLE", "STOP"]
AGENT_STATUS_VALUES = ["SUCCEEDED", "UNKNOWN", "AGENT_FAILED", "RETRYABLE_AGENT_FAILED",
                       "SCRIPT_FAILED", "ABORTED_BY_SERVICE", "SCRIPT_TIMEOUT", "TOO_MANY_RETRY",
                       "RUNTIME_MISMATCH"]
PROMOTE_TYPE_VALUES = ['MANUAL', 'AUTO']
PROMOTE_FAILED_POLICY_VALUES = ['CONTINUE', 'DISABLE', 'ROLLBACK']
PROMOTE_DISABLE_POLICY_VALUES = ['MANUAL', 'AUTO']
OVERRIDE_POLICY_VALUES = ['OVERRIDE', 'WARN']
DEPLOY_CONSTRAINT_TYPES = ['GROUP_BY_GROUP', 'ALL_GROUPS_IN_PARALLEL']

# Shared client for all Teletraan deploy API calls in this module.
deployclient = DeployClient()

# Nimbus is only available inside Pinterest; the client is created lazily
# under this flag and the nimbus helpers below must not be called otherwise.
if IS_PINTEREST:
    from deploy_board.webapp.helpers.nimbusclient import NimbusClient
    nimbusclient = NimbusClient()
# Nimbus-related helpers
# NOTE(review): these rely on the module-level `nimbusclient`, which only
# exists when IS_PINTEREST is true.
def get_nimbus_identifier(name):
    """Looks up a Nimbus identifier by name."""
    return nimbusclient.get_one_identifier(name)

def create_nimbus_identifier(data):
    """Creates a Nimbus identifier from the given payload."""
    return nimbusclient.create_one_identifier(data)

def delete_nimbus_identifier(name):
    """Deletes the Nimbus identifier with the given name."""
    return nimbusclient.delete_one_identifier(name)

# Teletraan Deploy client helpers
def set_external_id_on_stage(request, env_name, stage_name, external_id):
    """Associates an external (e.g. Nimbus) id with an env stage."""
    return deployclient.post("/envs/{}/{}/external_id".format(env_name, stage_name), request.teletraan_user_id.token, data=external_id)
def get_all_env_names(request, name_filter=None, name_only=True, index=1, size=DEFAULT_ENV_SIZE):
    """Returns a page of environment names, optionally filtered by substring."""
    query = [('pageIndex', index), ('pageSize', size)]
    if name_filter:
        query += [('nameFilter', name_filter)]
    return deployclient.get("/envs/names", request.teletraan_user_id.token,
                            params=query)
def get_all_env_stages(request, env_name):
    """Fetches every stage of the named environment."""
    return deployclient.get("/envs", request.teletraan_user_id.token,
                            params=[("envName", env_name)])

def get_all_envs_by_group(request, group_name):
    """Fetches all env stages that deploy to the given host group."""
    params = [('groupName', group_name)]
    return deployclient.get("/envs/", request.teletraan_user_id.token, params=params)

def get(request, id):
    """Fetches a single env stage by id.

    Note: the parameter `id` shadows the builtin; kept for API compatibility.
    """
    return deployclient.get("/envs/%s" % id, request.teletraan_user_id.token)

def get_env_by_stage(request, env_name, stage_name):
    """Fetches one env stage by environment and stage name."""
    return deployclient.get("/envs/%s/%s" % (env_name, stage_name), request.teletraan_user_id.token)
def _capacity_params(capacity_type):
    """Builds the query params shared by all capacity endpoints.

    Returns [] when capacity_type is falsy, matching the previous behavior of
    each wrapper building the list inline.
    """
    return [("capacityType", capacity_type)] if capacity_type else []

def get_env_capacity(request, env_name, stage_name, capacity_type=None):
    """Fetches the capacity attached to an env stage."""
    return deployclient.get("/envs/%s/%s/capacity" % (env_name, stage_name),
                            request.teletraan_user_id.token,
                            params=_capacity_params(capacity_type))

def update_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
    """Replaces the capacity attached to an env stage."""
    return deployclient.put("/envs/%s/%s/capacity" % (env_name, stage_name),
                            request.teletraan_user_id.token,
                            params=_capacity_params(capacity_type), data=data)

def add_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
    """Adds capacity to an env stage."""
    return deployclient.post("/envs/%s/%s/capacity" % (env_name, stage_name),
                             request.teletraan_user_id.token,
                             params=_capacity_params(capacity_type), data=data)

def remove_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
    """Removes capacity from an env stage."""
    return deployclient.delete("/envs/%s/%s/capacity" % (env_name, stage_name),
                               request.teletraan_user_id.token,
                               params=_capacity_params(capacity_type), data=data)
def create_env(request, data):
    """Creates a new environment from the given payload."""
    return deployclient.post("/envs", request.teletraan_user_id.token, data=data)

def update_env_basic_config(request, env_name, stage_name, data):
    """Updates the basic config of an env stage."""
    return deployclient.put("/envs/%s/%s" % (env_name, stage_name), request.teletraan_user_id.token,
                            data=data)

def get_env_script_config(request, env_name, stage_name):
    """Fetches the script config of an env stage."""
    return deployclient.get("/envs/%s/%s/script_configs" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_script_config(request, env_name, stage_name, data):
    """Updates the script config of an env stage."""
    return deployclient.put("/envs/%s/%s/script_configs" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def get_env_agent_config(request, env_name, stage_name):
    """Fetches the agent config of an env stage."""
    return deployclient.get("/envs/%s/%s/agent_configs" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_agent_config(request, env_name, stage_name, data):
    """Updates the agent config of an env stage."""
    return deployclient.put("/envs/%s/%s/agent_configs" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def get_env_alarms_config(request, env_name, stage_name):
    """Fetches the alarms config of an env stage."""
    return deployclient.get("/envs/%s/%s/alarms" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_alarms_config(request, env_name, stage_name, data):
    """Updates the alarms config of an env stage."""
    return deployclient.put("/envs/%s/%s/alarms" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def get_env_metrics_config(request, env_name, stage_name):
    """Fetches the metrics config of an env stage."""
    return deployclient.get("/envs/%s/%s/metrics" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_metrics_config(request, env_name, stage_name, data):
    """Updates the metrics config of an env stage."""
    return deployclient.put("/envs/%s/%s/metrics" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def get_env_hooks_config(request, env_name, stage_name):
    """Fetches the web-hooks config of an env stage."""
    return deployclient.get("/envs/%s/%s/web_hooks" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_hooks_config(request, env_name, stage_name, data):
    """Updates the web-hooks config of an env stage."""
    return deployclient.put("/envs/%s/%s/web_hooks" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def get_env_promotes_config(request, env_name, stage_name):
    """Fetches the promotion config of an env stage."""
    return deployclient.get("/envs/%s/%s/promotes" % (env_name, stage_name),
                            request.teletraan_user_id.token)

def update_env_promotes_config(request, env_name, stage_name, data):
    """Updates the promotion config of an env stage."""
    return deployclient.put("/envs/%s/%s/promotes" % (env_name, stage_name),
                            request.teletraan_user_id.token, data=data)

def delete_env(request, env_name, stage_name):
    """Deletes an env stage."""
    return deployclient.delete("/envs/%s/%s" % (env_name, stage_name),
                               request.teletraan_user_id.token)

def get_config_history(request, env_name, stage_name, index, size):
    """Fetches one page of the config change history of an env stage."""
    params = [('pageIndex', index), ('pageSize', size)]
    return deployclient.get("/envs/%s/%s/history" % (env_name, stage_name),
                            request.teletraan_user_id.token, params=params)
def set_active_max_parallel(env):
    """Sets env['showNumber'] based on the stage's parallelism config.

    A positive 'maxParallelPct' means the stage is percentage-driven, so the
    absolute host number should be hidden (showNumber False); otherwise the
    absolute number is shown (showNumber True).
    """
    percentage = int(env['maxParallelPct'])
    env['showNumber'] = percentage <= 0
def enable_all_env_changes(request, description):
    """Re-enables deploy changes for every environment."""
    params = [("actionType", "ENABLE"), ("description", description)]
    return deployclient.post("/envs/actions", request.teletraan_user_id.token, params=params)

def disable_all_env_changes(request, description):
    """Disables deploy changes for every environment."""
    params = [("actionType", "DISABLE"), ("description", description)]
    return deployclient.post("/envs/actions", request.teletraan_user_id.token, params=params)

def enable_env_changes(request, env_name, stage_name, description):
    """Re-enables deploy changes for one env stage."""
    params = [("actionType", "ENABLE"), ("description", description)]
    return deployclient.post("/envs/%s/%s/actions" % (env_name, stage_name), request.teletraan_user_id.token,
                             params=params)

def disable_env_changes(request, env_name, stage_name, description):
    """Disables deploy changes for one env stage."""
    params = [("actionType", "DISABLE"), ("description", description)]
    return deployclient.post("/envs/%s/%s/actions" % (env_name, stage_name), request.teletraan_user_id.token,
                             params=params)

def pause_hosts(request, env_name, stage_name, host_ids):
    """Pauses deploys on the given hosts (state PAUSED_BY_USER)."""
    params = [("actionType", "PAUSED_BY_USER")]
    return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
                            params=params, data=host_ids)

def resume_hosts(request, env_name, stage_name, host_ids):
    """Resumes deploys on the given hosts (state NORMAL)."""
    params = [("actionType", "NORMAL")]
    return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
                            params=params, data=host_ids)

def reset_hosts(request, env_name, stage_name, host_ids):
    """Resets the deploy state of the given hosts (state RESET)."""
    params = [("actionType", "RESET")]
    return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
                            params=params, data=host_ids)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import os
import json
import pendulum
import time
import random
from dateutil import relativedelta
from sqlalchemy import event, exc, select
from sqlalchemy.types import Text, DateTime, TypeDecorator
from airflow.utils.log.logging_mixin import LoggingMixin
# Module-level logger obtained through Airflow's LoggingMixin.
log = LoggingMixin().log
# Canonical UTC timezone used to normalize all datetimes in this module.
utc = pendulum.timezone('UTC')
def setup_event_handlers(engine,
                         reconnect_timeout_seconds,
                         initial_backoff_seconds=0.2,
                         max_backoff_seconds=120):
    """Registers SQLAlchemy event handlers on the given engine.

    Installs: a pessimistic ping-on-checkout with retry/backoff, per-dialect
    connect hooks (sqlite foreign keys, mysql UTC session timezone), and a
    fork-safety check that invalidates connections inherited across processes.

    :param engine: SQLAlchemy engine to instrument.
    :param reconnect_timeout_seconds: Give up re-pinging after this long.
    :param initial_backoff_seconds: First retry delay; grows with jitter.
    :param max_backoff_seconds: Upper bound on any single retry delay.
    """
    @event.listens_for(engine, "engine_connect")
    def ping_connection(connection, branch):
        """
        Pessimistic SQLAlchemy disconnect handling. Ensures that each
        connection returned from the pool is properly connected to the database.

        http://docs.sqlalchemy.org/en/rel_1_1/core/pooling.html#disconnect-handling-pessimistic
        """
        if branch:
            # "branch" refers to a sub-connection of a connection,
            # we don't want to bother pinging on these.
            return

        start = time.time()
        backoff = initial_backoff_seconds

        # turn off "close with result". This flag is only used with
        # "connectionless" execution, otherwise will be False in any case
        save_should_close_with_result = connection.should_close_with_result

        while True:
            connection.should_close_with_result = False

            try:
                # Cheap round-trip to verify the connection is alive.
                connection.scalar(select([1]))
                # If we made it here then the connection appears to be healthy
                break
            except exc.DBAPIError as err:
                if time.time() - start >= reconnect_timeout_seconds:
                    log.error(
                        "Failed to re-establish DB connection within %s secs: %s",
                        reconnect_timeout_seconds,
                        err)
                    raise
                if err.connection_invalidated:
                    log.warning("DB connection invalidated. Reconnecting...")

                    # Use a truncated binary exponential backoff. Also includes
                    # a jitter to prevent the thundering herd problem of
                    # simultaneous client reconnects
                    backoff += backoff * random.random()
                    time.sleep(min(backoff, max_backoff_seconds))

                    # run the same SELECT again - the connection will re-validate
                    # itself and establish a new connection. The disconnect detection
                    # here also causes the whole connection pool to be invalidated
                    # so that all stale connections are discarded.
                    continue
                else:
                    log.error(
                        "Unknown database connection error. Not retrying: %s",
                        err)
                    raise
            finally:
                # restore "close with result"
                connection.should_close_with_result = save_should_close_with_result

    @event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        # Remember the pid that created the connection, for the fork check
        # in the checkout handler below.
        connection_record.info['pid'] = os.getpid()

    if engine.dialect.name == "sqlite":
        @event.listens_for(engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            # sqlite does not enforce foreign keys unless asked per-connection.
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON")
            cursor.close()

    # this ensures sanity in mysql when storing datetimes (not required for postgres)
    if engine.dialect.name == "mysql":
        @event.listens_for(engine, "connect")
        def set_mysql_timezone(dbapi_connection, connection_record):
            cursor = dbapi_connection.cursor()
            cursor.execute("SET time_zone = '+00:00'")
            cursor.close()

    @event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        # Connections must not be shared across os.fork(); invalidate any
        # connection checked out in a different pid than it was created in.
        pid = os.getpid()
        if connection_record.info['pid'] != pid:
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                "Connection record belongs to pid {}, "
                "attempting to check out in pid {}".format(connection_record.info['pid'], pid)
            )
class UtcDateTime(TypeDecorator):
    """
    Almost equivalent to :class:`~sqlalchemy.types.DateTime` with
    ``timezone=True`` option, but it differs from that by:

    - Never silently take naive :class:`~datetime.datetime`, instead it
      always raise :exc:`ValueError` unless time zone aware value.

    - :class:`~datetime.datetime` value's :attr:`~datetime.datetime.tzinfo`
      is always converted to UTC.

    - Unlike SQLAlchemy's built-in :class:`~sqlalchemy.types.DateTime`,
      it never return naive :class:`~datetime.datetime`, but time zone
      aware value, even with SQLite or MySQL.

    - Always returns DateTime in UTC
    """

    impl = DateTime(timezone=True)

    def process_bind_param(self, value, dialect):
        """Validate and normalize an aware datetime to UTC before storing."""
        if value is None:
            return None
        if not isinstance(value, datetime.datetime):
            raise TypeError('expected datetime.datetime, not ' +
                            repr(value))
        if value.tzinfo is None:
            raise ValueError('naive datetime is disallowed')
        return value.astimezone(utc)

    def process_result_value(self, value, dialect):
        """
        Processes DateTimes from the DB making sure it is always
        returning UTC. Not using timezone.convert_to_utc as that
        converts to configured TIMEZONE while the DB might be
        running with some other setting. We assume UTC datetimes
        in the database.
        """
        if value is None:
            return None
        if value.tzinfo is None:
            return value.replace(tzinfo=utc)
        return value.astimezone(utc)
class Interval(TypeDecorator):
    """Stores timedelta/relativedelta objects as JSON text.

    Plain JSON-serializable values pass through unchanged; interval objects
    are encoded as ``{"type": <class name>, "attrs": {...}}`` and rebuilt on
    the way out.
    """

    impl = Text

    attr_keys = {
        datetime.timedelta: ('days', 'seconds', 'microseconds'),
        relativedelta.relativedelta: (
            'years', 'months', 'days', 'leapdays', 'hours', 'minutes', 'seconds', 'microseconds',
            'year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond',
        ),
    }

    def process_bind_param(self, value, dialect):
        """Serialize an interval (or any JSON value) to its text form."""
        keys = self.attr_keys.get(type(value))
        if keys is None:
            return json.dumps(value)
        attrs = {key: getattr(value, key) for key in keys}
        return json.dumps({'type': type(value).__name__, 'attrs': attrs})

    def process_result_value(self, value, dialect):
        """Rebuild the original object from its stored JSON form."""
        if not value:
            return value
        data = json.loads(value)
        if not isinstance(data, dict):
            return data
        type_map = {cls.__name__: cls for cls in self.attr_keys}
        return type_map[data['type']](**data['attrs'])
| |
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
bellman_ford, construct_dist_matrix,
NegativeCycleError)
import scipy.sparse
import pytest
# 5-node directed test graph; 0 means "no edge".
directed_G = np.array([[0, 3, 3, 0, 0],
                       [0, 0, 0, 2, 4],
                       [0, 0, 0, 0, 0],
                       [1, 0, 0, 0, 0],
                       [2, 0, 0, 2, 0]], dtype=float)

# Symmetric counterpart used for undirected tests.
undirected_G = np.array([[0, 3, 3, 1, 2],
                         [3, 0, 0, 2, 4],
                         [3, 0, 0, 0, 0],
                         [1, 2, 0, 0, 2],
                         [2, 4, 0, 2, 0]], dtype=float)

# Same connectivity as directed_G but all edge weights equal to 1.
unweighted_G = (directed_G > 0).astype(float)

# Expected all-pairs shortest-path distances for directed_G.
directed_SP = [[0, 3, 3, 5, 7],
               [3, 0, 6, 2, 4],
               [np.inf, np.inf, 0, np.inf, np.inf],
               [1, 4, 4, 0, 8],
               [2, 5, 5, 2, 0]]

# Sparse graph containing an explicit zero-weight edge and two components.
directed_sparse_zero_G = scipy.sparse.csr_matrix(([0, 1, 2, 3, 1],
                                                 ([0, 1, 2, 3, 4],
                                                  [1, 2, 0, 4, 3])),
                                                 shape = (5, 5))

directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
                           [3, 0, 1, np.inf, np.inf],
                           [2, 2, 0, np.inf, np.inf],
                           [np.inf, np.inf, np.inf, 0, 3],
                           [np.inf, np.inf, np.inf, 1, 0]]

undirected_sparse_zero_G = scipy.sparse.csr_matrix(([0, 0, 1, 1, 2, 2, 1, 1],
                                                  ([0, 1, 1, 2, 2, 0, 3, 4],
                                                   [1, 0, 2, 1, 0, 2, 4, 3])),
                                                  shape = (5, 5))

undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
                             [0, 0, 1, np.inf, np.inf],
                             [1, 1, 0, np.inf, np.inf],
                             [np.inf, np.inf, np.inf, 0, 1],
                             [np.inf, np.inf, np.inf, 1, 0]]

# Expected predecessor matrices; -9999 marks "no predecessor" (csgraph's NULL).
directed_pred = np.array([[-9999, 0, 0, 1, 1],
                          [3, -9999, 0, 1, 1],
                          [-9999, -9999, -9999, -9999, -9999],
                          [3, 0, 0, -9999, 1],
                          [4, 0, 0, 4, -9999]], dtype=float)

undirected_SP = np.array([[0, 3, 3, 1, 2],
                          [3, 0, 6, 2, 4],
                          [3, 6, 0, 4, 5],
                          [1, 2, 4, 0, 2],
                          [2, 4, 5, 2, 0]], dtype=float)

# Expected distances when dijkstra's `limit` argument truncates the search.
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
                                  [np.inf, 0, np.inf, 2, np.inf],
                                  [np.inf, np.inf, 0, np.inf, np.inf],
                                  [1, 2, np.inf, 0, 2],
                                  [2, np.inf, np.inf, 2, 0]], dtype=float)

# With limit=0 nothing is reachable except each node itself.
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf

undirected_pred = np.array([[-9999, 0, 0, 0, 0],
                            [1, -9999, 0, 1, 1],
                            [2, 0, -9999, 0, 0],
                            [3, 3, 0, -9999, 3],
                            [4, 4, 0, 4, -9999]], dtype=float)

# All shortest_path method identifiers exercised by the tests below.
methods = ['auto', 'FW', 'D', 'BF', 'J']
def test_dijkstra_limit():
    """dijkstra's `limit` argument must truncate paths longer than the limit."""
    cases = [(0, undirected_SP_limit_0),
             (2, undirected_SP_limit_2),
             (np.inf, undirected_SP)]
    for limit, expected in cases:
        SP = dijkstra(undirected_G, directed=False, limit=limit)
        assert_array_almost_equal(SP, expected)
def test_directed():
    """All methods agree with the precomputed directed distances."""
    def check(method):
        SP = shortest_path(directed_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)

    for method in methods:
        check(method)

def test_undirected():
    """directed=False symmetrizes a directed input; both inputs agree."""
    def check(method, directed_in):
        if directed_in:
            SP1 = shortest_path(directed_G, method=method, directed=False,
                                overwrite=False)
            assert_array_almost_equal(SP1, undirected_SP)
        else:
            SP2 = shortest_path(undirected_G, method=method, directed=True,
                                overwrite=False)
            assert_array_almost_equal(SP2, undirected_SP)

    for method in methods:
        for directed_in in (True, False):
            check(method, directed_in)

def test_directed_sparse_zero():
    # test directed sparse graph with zero-weight edge and two connected components
    def check(method):
        SP = shortest_path(directed_sparse_zero_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_sparse_zero_SP)

    for method in methods:
        check(method)

def test_undirected_sparse_zero():
    """Zero-weight edges survive symmetrization of sparse inputs."""
    def check(method, directed_in):
        if directed_in:
            SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False,
                                overwrite=False)
            assert_array_almost_equal(SP1, undirected_sparse_zero_SP)
        else:
            SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True,
                                overwrite=False)
            assert_array_almost_equal(SP2, undirected_sparse_zero_SP)

    for method in methods:
        for directed_in in (True, False):
            check(method, directed_in)
@pytest.mark.parametrize('directed, SP_ans',
                         ((True, directed_SP),
                          (False, undirected_SP)))
@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
def test_dijkstra_indices_min_only(directed, SP_ans, indices):
    """min_only=True returns, per node, the distance/source of the nearest seed."""
    SP_ans = np.array(SP_ans)
    indices = np.array(indices, dtype=np.int64)
    # Reference answer: for each target node pick the closest seed index.
    min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
    min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype)
    for k in range(SP_ans.shape[0]):
        min_d_ans[k] = SP_ans[min_ind_ans[k], k]
    # Unreachable nodes get the NULL source marker.
    min_ind_ans[np.isinf(min_d_ans)] = -9999

    SP, pred, sources = dijkstra(directed_G,
                                 directed=directed,
                                 indices=indices,
                                 min_only=True,
                                 return_predecessors=True)
    assert_array_almost_equal(SP, min_d_ans)
    assert_array_equal(min_ind_ans, sources)
    # Same distances expected without predecessor tracking.
    SP = dijkstra(directed_G,
                  directed=directed,
                  indices=indices,
                  min_only=True,
                  return_predecessors=False)
    assert_array_almost_equal(SP, min_d_ans)
@pytest.mark.parametrize('n', (10, 100, 1000))
def test_shortest_path_min_only_random(n):
    """On random graphs, each node's predecessor chain leads to its own source."""
    np.random.seed(1234)
    data = scipy.sparse.rand(n, n, density=0.5, format='lil',
                             random_state=42, dtype=np.float64)
    # Zero out the diagonal (no self-loops).
    data.setdiag(np.zeros(n, dtype=np.bool_))
    # choose some random vertices
    v = np.arange(n)
    np.random.shuffle(v)
    indices = v[:int(n*.1)]
    ds, pred, sources = dijkstra(data,
                                 directed=False,
                                 indices=indices,
                                 min_only=True,
                                 return_predecessors=True)
    for k in range(n):
        p = pred[k]
        s = sources[k]
        # Walk the predecessor chain back to a seed; every node along the
        # way must report the same source (-9999 terminates the chain).
        while(p != -9999):
            assert(sources[p] == s)
            p = pred[p]
def test_shortest_path_indices():
    """`indices` restricts sources and preserves the given index shape."""
    indices = np.arange(4)

    def check(func, indshape):
        # Output gains one trailing axis of length n_nodes.
        outshape = indshape + (5,)
        SP = func(directed_G, directed=False,
                  indices=indices.reshape(indshape))
        assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))

    for indshape in [(4,), (4, 1), (2, 2)]:
        for func in (dijkstra, bellman_ford, johnson, shortest_path):
            check(func, indshape)

    # Floyd-Warshall computes all pairs and must reject `indices`.
    assert_raises(ValueError, shortest_path, directed_G, method='FW',
                  indices=indices)
def test_predecessors():
    """return_predecessors=True yields the expected predecessor matrices."""
    SP_res = {True: directed_SP,
              False: undirected_SP}
    pred_res = {True: directed_pred,
                False: undirected_pred}

    def check(method, directed):
        SP, pred = shortest_path(directed_G, method, directed=directed,
                                 overwrite=False,
                                 return_predecessors=True)
        assert_array_almost_equal(SP, SP_res[directed])
        assert_array_almost_equal(pred, pred_res[directed])

    for method in methods:
        for directed in (True, False):
            check(method, directed)

def test_construct_shortest_path():
    """Distances rebuilt from predecessors match the direct computation."""
    def check(method, directed):
        # NOTE(review): `method` is not forwarded to shortest_path here, so
        # every iteration exercises the default method — confirm intended.
        SP1, pred = shortest_path(directed_G,
                                  directed=directed,
                                  overwrite=False,
                                  return_predecessors=True)
        SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
        assert_array_almost_equal(SP1, SP2)

    for method in methods:
        for directed in (True, False):
            check(method, directed)
def test_unweighted_path():
    """unweighted=True on a weighted graph equals the 0/1-weight graph result."""
    def check(method, directed):
        SP1 = shortest_path(directed_G,
                            directed=directed,
                            overwrite=False,
                            unweighted=True)
        SP2 = shortest_path(unweighted_G,
                            directed=directed,
                            overwrite=False,
                            unweighted=False)
        assert_array_almost_equal(SP1, SP2)

    for method in methods:
        for directed in (True, False):
            check(method, directed)

def test_negative_cycles():
    # create a small graph with a negative cycle
    graph = np.ones([5, 5])
    # flat[::6] addresses the main diagonal of the 5x5 matrix.
    graph.flat[::6] = 0
    graph[1, 2] = -2

    def check(method, directed):
        # Only methods supporting negative weights are exercised below.
        assert_raises(NegativeCycleError, shortest_path, graph, method,
                      directed)

    for method in ['FW', 'J', 'BF']:
        for directed in (True, False):
            check(method, directed)
def test_masked_input():
    """Masked entries (here: zeros) are treated as absent edges.

    Bug fix: the masked array was previously built and then discarded, so the
    check ran on the plain dense graph and the masking path was never
    exercised. Pass the masked graph to shortest_path instead.
    """
    G_masked = np.ma.masked_equal(directed_G, 0)

    def check(method):
        SP = shortest_path(G_masked, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)

    for method in methods:
        check(method)
def test_overwrite():
    """``overwrite=False`` must leave the input matrix untouched."""
    original = np.array([[0, 3, 3, 1, 2],
                         [3, 0, 0, 2, 4],
                         [3, 0, 0, 0, 0],
                         [1, 2, 0, 0, 2],
                         [2, 4, 0, 2, 0]], dtype=float)
    working_copy = original.copy()
    shortest_path(working_copy, overwrite=False)
    assert_array_equal(working_copy, original)
@pytest.mark.parametrize('method', methods)
def test_buffer(method):
    """Smoke test: sparse matrices with read-only data buffers (e.g. those
    handed out by joblib workers) must not raise
    ``ValueError: buffer source array is read-only``."""
    mat = scipy.sparse.csr_matrix([[1.]])
    # make the underlying buffer read-only, as joblib would
    mat.data.setflags(write=False)
    shortest_path(mat, method=method)
def test_NaN_warnings():
    """A NaN edge weight must not trigger a RuntimeWarning."""
    with warnings.catch_warnings(record=True) as caught:
        shortest_path(np.array([[0, 1], [np.nan, 0]]))
    assert all(w.category is not RuntimeWarning for w in caught)
def test_sparse_matrices():
    """csr, csc and lil sparse inputs must all agree with the dense result."""
    dense = np.array([[0, 3, 0, 0, 0],
                      [0, 0, -1, 0, 0],
                      [0, 0, 0, 2, 0],
                      [0, 0, 0, 0, 4],
                      [0, 0, 0, 0, 0]], dtype=float)
    expected = shortest_path(dense)
    for converter in (scipy.sparse.csr_matrix,
                      scipy.sparse.csc_matrix,
                      scipy.sparse.lil_matrix):
        assert_array_almost_equal(expected, shortest_path(converter(dense)))
#!/usr/bin/env python
#coding: utf-8
# The MIT License
#
# Copyright (c) 2009-2011 the bpython authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import with_statement
import os
import sys
import re
import textwrap
from itertools import takewhile
from pygments.token import Token
from bpython.completion import inspection
from bpython.completion.completer import BPythonCompleter
from bpython.parser import ReplParser
from bpython.history import History
from bpython.util import getpreferredencoding, debug, TimeOutException
from bpython._py3compat import PythonLexer, PY3
# Python 3 removed ``basestring``; alias it to ``str`` so the
# isinstance checks further down work on both major versions.
if PY3:
    basestring = str
class MatchesIterator(object):
    """Cycle through a list of completion matches for the current word.

    ``index`` is -1 while no match is selected; :meth:`next` and
    :meth:`previous` wrap around the match list.  :meth:`wait` makes the
    following :meth:`next` call return the current match again instead of
    advancing.
    """

    def __init__(self, current_word='', matches=None):
        # the (incomplete) word the matches were computed for
        self.current_word = current_word
        self.matches = list(matches) if matches else []
        # -1 means "no match selected yet"
        self.index = -1
        self.is_wait = False

    def __nonzero__(self):
        # Python 2 truth value: True when a match is currently selected.
        return self.index != -1

    def __bool__(self):
        # Python 3 truth value: True when a match is currently selected.
        return self.index != -1

    def __iter__(self):
        return self

    def current(self):
        """Return the currently selected match.

        :raises ValueError: if no match has been selected yet.
        """
        if self.index == -1:
            raise ValueError('No current match.')
        return self.matches[self.index]

    def wait(self):
        """Make the next call to :meth:`next` repeat the current match."""
        self.is_wait = True

    def reset(self):
        """Deselect any match."""
        self.index = -1

    def next(self):
        """Advance to (and return) the next match, wrapping around."""
        if self.is_wait:
            self.is_wait = False
        else:
            self.index = (self.index + 1) % len(self.matches)
        return self.matches[self.index]

    # BUG FIX: __iter__ returns self, so on Python 3 the iterator protocol
    # requires __next__ as well; without this alias iteration raised
    # TypeError under PY3.
    __next__ = next

    def previous(self):
        """Step back to (and return) the previous match, wrapping around."""
        self.is_wait = False
        if self.index <= 0:
            self.index = len(self.matches)
        self.index -= 1
        return self.matches[self.index]

    def update(self, current_word='', matches=None):
        """Replace the matches when the word being completed has changed."""
        if not matches:
            matches = []
        if current_word != self.current_word:
            self.current_word = current_word
            self.matches = list(matches)
            self.index = -1
class Interaction(object):
    """Abstract user-interaction layer: confirmations, notifications and
    file prompts.  Front ends subclass this and implement the three hooks."""

    def __init__(self, config, statusbar=None):
        self.config = config
        # only bind the attribute when a statusbar was actually supplied
        if statusbar:
            self.statusbar = statusbar

    def confirm(self, s):
        """Ask the user to confirm *s*.  Must be overridden."""
        raise NotImplementedError

    def notify(self, s, n=10):
        """Display message *s* (for *n* time units).  Must be overridden."""
        raise NotImplementedError

    def file_prompt(self, s):
        """Prompt the user with *s* for a file name.  Must be overridden."""
        raise NotImplementedError
class Repl(object):
    """Implements the necessary guff for a Python-repl-alike interface

    The execution of the code entered and all that stuff was taken from the
    Python code module, I had to copy it instead of inheriting it, I can't
    remember why. The rest of the stuff is basically what makes it fancy.

    It reads what you type, passes it to a lexer and highlighter which
    returns a formatted string. This then gets passed to echo() which
    parses that string and prints to the curses screen in appropriate
    colours and/or bold attribute.

    The Repl class also keeps two stacks of lines that the user has typed in:
    One to be used for the undo feature. I am not happy with the way this
    works. The only way I have been able to think of is to keep the code
    that's been typed in in memory and re-evaluate it in its entirety for each
    "undo" operation. Obviously this means some operations could be extremely
    slow. I'm not even by any means certain that this truly represents a
    genuine "undo" implementation, but it does seem to be generally pretty
    effective.

    If anyone has any suggestions for how this could be improved, I'd be happy
    to hear them and implement it/accept a patch. I researched a bit into the
    idea of keeping the entire Python state in memory, but this really seems
    very difficult (I believe it may actually be impossible to work) and has
    its own problems too.

    The other stack is for keeping a history for pressing the up/down keys
    to go back and forth between lines.

    XXX Subclasses should implement echo, current_line, current_word
    """

    def __init__(self, interp, config):
        """Initialise the repl.

        interp is a Python code.InteractiveInterpreter instance
        config is a populated bpython.config.Struct.
        """
        self.config = config
        self.buffer = []
        self.interp = interp
        # let the interpreter reset the input line on a syntax error
        self.interp.syntaxerror_callback = self.clear_current_line
        self.match = False
        self.s = ""
        self.cpos = 0
        self.s_hist = []
        # readline-style history of entered lines
        self.rl_history = History(allow_duplicates=self.config.hist_duplicates)
        self.stdin_history = History()
        self.stdout_history = History()
        self.evaluating = False
        self.completer = BPythonCompleter(self.interp.locals, config)
        self.parser = ReplParser(self)
        self.matches = []
        self.matches_iter = MatchesIterator()
        self.argspec = None
        self.list_win_visible = False
        self._C = {}
        self.interact = Interaction(self.config)
        self.ps1 = '>>> '
        self.ps2 = '... '
        # Necessary to fix mercurial.ui.ui expecting sys.stderr to have this
        # attribute
        self.closed = False
        # load persisted readline history, if any
        pythonhist = os.path.expanduser(self.config.hist_file)
        if os.path.exists(pythonhist):
            self.rl_history.load(pythonhist,
                                 getpreferredencoding() or "ascii")

    def register_command(self, name, function=None, without_completion=False):
        """Register *function* as a repl command called *name*.

        Usable directly or as a decorator (when *function* is omitted).
        When *name* is empty, it is derived from the function name with
        underscores replaced by dashes.
        """
        def inner(function, name=name):
            if not name:
                name = function.__name__.replace('_', '-')
            if self.interp.register_command(name, function) and not without_completion:
                # trailing space so completion inserts the command ready for args
                name += " "
                self.completer.register_command(name)
        if not function:
            # used as a decorator
            return inner
        else:
            return inner(function, name)

    @property
    def history(self):
        """The stdin history (lines the user has entered)."""
        return self.stdin_history

    @property
    def current_line(self):
        """The line currently being edited.  Must be provided by subclasses."""
        raise (NotImplementedError("current_line should be implemented in subclass"))

    def clear_current_line(self):
        """This is used as the exception callback for the Interpreter instance.
        It prevents autoindentation from occuring after a traceback."""
        raise (NotImplementedError("clear_current_line should be implemented in subclass"))

    def reevaluate(self):
        """Re-evaluate the whole session; used by undo.  Subclass hook."""
        raise (NotImplementedError("reevaluate should be implemented in subclass"))

    def tab(self):
        """Handle the tab key (completion).  Subclass hook."""
        raise (NotImplementedError("tab should be implemented in subclass"))

    def tokenize(self, s, newline=False):
        """Tokenize a line of code."""
        return self.parser.tokenize(s, newline)

    def startup(self):
        """
        Execute PYTHONSTARTUP file if it exits. Call this after front
        end-specific initialisation.
        """
        self.interp.startup()

    @property
    def stdout(self):
        """Everything written to stdout so far, as one string."""
        return str(self.stdout_history)

    @property
    def stdin(self):
        """Everything read from stdin so far, as one string."""
        return str(self.stdin_history)

    @property
    def current_string(self):
        """If the line ends in a string get it, otherwise return ''"""
        return self.parser.get_current_string()

    @property
    def current_word(self):
        """Return the current word, i.e. the (incomplete) word directly to the
        left of the cursor"""
        return self.parser.get_current_word()

    def get_current_sbracket(self):
        """Return the current square-bracket expression and attribute, as
        reported by the parser."""
        return self.parser.get_current_sbracket()

    @property
    def is_first_word(self):
        # True when the cursor is on the first word of the line
        return self.parser.is_first_word()

    @property
    def is_only_word(self):
        # True when the line contains a single word
        return self.parser.is_only_word()

    @property
    def is_assignment_statement(self):
        # True when the current line is an assignment
        return self.parser.is_assignment_statement()

    def get_object(self, name):
        """Resolve *name* in the interpreter's namespace."""
        return self.interp.get_object(name)

    def set_argspec(self):
        """Check if an unclosed parenthesis exists, then attempt to get the
        argspec() for it. On success, update self.argspec and return True,
        otherwise set self.argspec to None and return False"""
        if not self.config.arg_spec:
            self.argspec = None
        else:
            func, arg_number = self.parser.get_current_func()
            self.argspec = self.interp.get_argspec(self, func, arg_number)

    @property
    def current_object(self):
        """Return the object which is bound to the
        current name in the current input line. Return `None` if the
        source cannot be found."""
        obj = None
        line = self.current_line
        if inspection.is_eval_safe_name(line):
            obj = self.get_object(line)
        return obj

    def complete(self, tab=False):
        """Construct a full list of possible completions and construct and
        display them in a window. Also check if there's an available argspec
        (via the inspect module) and bang that on top of the completions too.
        The return value is whether the list_win is visible or not."""
        self.set_argspec()
        current_word = self.current_word
        current_string = self.current_string
        sb_expr, sb_attr = self.get_current_sbracket()
        line = self.current_line.lstrip()
        if sb_expr:
            # completing inside a subscription, e.g. d["ke<tab>
            self.completer.get_item_complete(sb_expr, sb_attr)
            self.matches = self.completer.matches
            self.matches_iter.update(sb_attr, self.matches)
            return bool(self.matches)
        elif not current_word:
            self.matches = []
            self.matches_iter.update()
            return bool(self.argspec)
        elif not (current_word or current_string):
            # NOTE(review): unreachable — the previous branch already
            # handles every falsy current_word.
            return bool(self.argspec)
        elif current_string:
            if tab:
                # Filename completion
                self.completer.file_complete(current_string)
                self.matches = self.completer.matches
                self.matches_iter.update(current_string, self.matches)
                return bool(self.matches)
            else:
                # Do not provide suggestions inside strings, as one cannot tab
                # them so they would be really confusing.
                self.matches = []
                self.matches_iter.update()
                return False
        elif (self.config.complete_magic_methods
              and self.buffer
              and self.buffer[0].startswith("class ")
              and line.startswith('def ')):
            # defining a method inside a class body: offer magic methods
            self.matches = [name for name in self.config.magic_methods
                            if name.startswith(current_word)]
            self.matches_iter.update(current_word, self.matches)
            return bool(self.matches)
        elif line.startswith('class ') or line.startswith('def '):
            # no sensible completion for a fresh class/function name
            self.matches = []
            self.matches_iter.update()
            return False
        elif line.startswith('from ') or line.startswith('import '):
            self.completer.import_complete(current_word, self.current_line)
            self.matches = self.completer.matches
            self.matches_iter.update(current_word, self.matches)
            return bool(self.matches)

        e = False
        try:
            if len(self.buffer) == 0 and self.is_first_word:
                self.completer.complete(current_word, with_command=True)
            else:
                self.completer.complete(current_word)
        except (AttributeError, re.error, TimeOutException):
            e = True
        except Exception as err:
            raise err
            # This sucks, but it's either that or list all the exceptions that could
            # possibly be raised here, so if anyone wants to do that, feel free to send me
            # a patch. XXX: Make sure you raise here if you're debugging the completion
            # stuff !
            # NOTE(review): the assignment below is unreachable after the
            # raise above; it only takes effect if the raise is removed.
            e = True
        else:
            matches = self.completer.matches
        if not e and self.argspec and isinstance(self.argspec, inspection.ArgSpec):
            # offer keyword-argument completions from the argspec
            matches.extend(name + '=' for name in self.argspec[1][0]
                           if isinstance(name, basestring) and name.startswith(current_word))
            if PY3:
                # keyword-only arguments live in a separate argspec slot on py3
                matches.extend(name + '=' for name in self.argspec[1][4]
                               if name.startswith(current_word))
        if e or not matches:
            self.matches = []
            self.matches_iter.update()
            if not self.argspec:
                return False
        else:
            # remove duplicates
            self.matches = sorted(set(matches))
        if len(self.matches) == 1 and not self.config.auto_display_list:
            # a single match: just complete it instead of showing the list
            self.list_win_visible = True
            self.tab()
            return False
        self.matches_iter.update(current_word, self.matches)
        return True

    def format_docstring(self, docstring, width, height):
        """Take a string and try to format it into a sane list of strings to be
        put into the suggestion box."""
        lines = docstring.split('\n')
        out = []
        i = 0
        for line in lines:
            i += 1
            if not line.strip():
                out.append('\n')
            for block in textwrap.wrap(line, width):
                out.append(' ' + block + '\n')
                if i >= height:
                    # stop once the box is full
                    return out
                i += 1
        # Drop the last newline
        out[-1] = out[-1].rstrip()
        return out

    def next_indentation(self):
        """Return the indentation of the next line based on the current
        input buffer."""
        if self.buffer:
            indentation = next_indentation(self.buffer[-1],
                                           self.config.tab_length)
            if indentation and self.config.dedent_after > 0:
                # dedent automatically after N consecutive empty lines
                line_is_empty = lambda line: not line.strip()
                empty_lines = takewhile(line_is_empty, reversed(self.buffer))
                if sum(1 for _ in empty_lines) >= self.config.dedent_after:
                    indentation -= 1
        else:
            indentation = 0
        return indentation

    def formatforfile(self, s):
        """Format the stdout buffer to something suitable for writing to disk,
        i.e. without >>> and ... at input lines and with "# OUT: " prepended to
        output lines."""
        def process():
            for line in s.split('\n'):
                if line.startswith(self.ps1):
                    yield line[len(self.ps1):]
                elif line.startswith(self.ps2):
                    yield line[len(self.ps2):]
                elif line.rstrip():
                    yield "# OUT: %s" % (line,)
        return "\n".join(process())

    def write2file(self):
        """Prompt for a filename and write the current contents of the stdout
        buffer to disk."""
        try:
            fn = self.interact.file_prompt('Save to file (Esc to cancel): ')
            if not fn:
                self.interact.notify("Save cancelled.")
                return
        except ValueError:
            # the front end signals a cancelled prompt with ValueError
            self.interact.notify("Save cancelled.")
            return
        if fn.startswith('~'):
            fn = os.path.expanduser(fn)
        if not fn.endswith('.py') and self.config.save_append_py:
            fn += '.py'
        mode = 'w'
        if os.path.exists(fn):
            # ask before clobbering an existing file
            mode = self.interact.file_prompt('%s already exists. Do you want '
                                             'to (c)ancel, (o)verwrite or '
                                             '(a)ppend? ' % (fn, ))
            if mode in ('o', 'overwrite'):
                mode = 'w'
            elif mode in ('a', 'append'):
                mode = 'a'
            else:
                self.interact.notify('Save cancelled.')
                return
        s = self.formatforfile(self.stdout)
        try:
            f = open(fn, mode)
            f.write(s)
            f.close()
        except IOError:
            self.interact.notify("Disk write error for file '%s'." % (fn, ))
        else:
            self.interact.notify('Saved to %s.' % (fn, ))

    def push(self, s, insert_into_history=True):
        """Push a line of code onto the buffer so it can process it all
        at once when a code block ends"""
        if s.lstrip(' ') and s.lstrip(' ')[0] == '!':
            # lines starting with '!' are discarded (shell-escape style)
            self.buffer = []
            return
        s = s.rstrip('\n')
        self.buffer.append(s)
        if insert_into_history:
            if self.config.hist_length:
                # merge with the on-disk history, then trim to hist_length
                histfilename = os.path.expanduser(self.config.hist_file)
                oldhistory = self.rl_history.entries
                self.rl_history.entries = []
                if os.path.exists(histfilename):
                    self.rl_history.load(histfilename, getpreferredencoding())
                self.rl_history.append(s)
                try:
                    self.rl_history.save(histfilename, getpreferredencoding(), self.config.hist_length)
                except EnvironmentError as e:
                    self.interact.notify("Error occured while writing to file %s (%s) " % (histfilename, e.strerror))
                self.rl_history.entries = oldhistory
                self.rl_history.append(s)
            else:
                self.rl_history.append(s)
        if len(self.buffer) == 1:
            # a single line may be a registered repl command rather than code
            line = self.buffer[0]
            if self.interp.is_commandline(line) and not self.is_assignment_statement:
                result = self.interp.runcommand(line)
                self.buffer = []
                return result
        more = self.interp.runsource('\n'.join(self.buffer))
        if not more:
            # the block was complete and has been executed
            self.buffer = []
        return more

    def undo(self, n=1):
        """Go back in the undo history n steps and call reeavluate()
        Note that in the program this is called "Rewind" because I
        want it to be clear that this is by no means a true undo
        implementation, it is merely a convenience bonus."""
        if not self.history:
            return None
        if len(self.history) < n:
            n = len(self.history)
        # preserve the readline history across the re-evaluation
        entries = list(self.rl_history.entries)
        self.history.entries = self.history[:-n]
        self.reevaluate()
        self.rl_history.entries = entries

    def flush(self):
        """Olivier Grisel brought it to my attention that the logging
        module tries to call this method, since it makes assumptions
        about stdout that may not necessarily be true. The docs for
        sys.stdout say:

        "stdout and stderr needn't be built-in file objects: any
        object is acceptable as long as it has a write() method
        that takes a string argument."

        So I consider this to be a bug in logging, and this is a hack
        to fix it, unfortunately. I'm sure it's not the only module
        to do it."""

    def close(self):
        """See the flush() method docstring."""
def next_indentation(line, tab_length):
    """Return the indentation level (in tab units) expected for the line
    that follows *line*."""
    expanded = line.expandtabs(tab_length)
    leading = len(expanded) - len(expanded.lstrip(' '))
    level = leading // tab_length
    stripped = expanded.strip()
    if stripped.endswith(':'):
        # a block opener indents the next line one level deeper
        level += 1
    elif level >= 1 and stripped.startswith(('return', 'pass', 'raise', 'yield')):
        # these statements terminate a block, so dedent one level
        level -= 1
    return level
def extract_exit_value(args):
    """Given the arguments passed to `SystemExit`, return the value that
    should be passed to `sys.exit`: ``None`` for no arguments, the sole
    argument when there is exactly one, otherwise the whole tuple.
    """
    if not args:
        return None
    return args[0] if len(args) == 1 else args
"""Tets the functionality of the :mod:`jukeboxcore.reftrack` module"""
import pytest
import mock
from django.contrib.contenttypes.models import ContentType
from jukeboxcore.reftrack import Reftrack, RefobjInterface, ReftypeInterface, ReftrackRoot
from jukeboxcore import djadapter
from jukeboxcore.filesys import TaskFileInfo
from jukeboxcore.gui.treemodel import TreeItem, TreeModel, ListItemData
from jukeboxcore.gui.filesysitemdata import TaskFileInfoItemData
from jukeboxcore.errors import ReftrackIntegrityError
class Reference(object):
    """Dummy stand-in for a scene reference.

    It only tracks whether it is loaded and which refobjs it contains.
    Loading and unloading add/remove that content from
    ``Refobj.instances``.
    """

    def __init__(self, loaded=True, content=None):
        """Initialize a new reference with the given status.

        :param loaded: True if loaded, False if unloaded
        :type loaded: :class:`bool`
        :param content: a list of refobjs
        :type content: list | None
        :raises: None
        """
        self.content = [] if content is None else content
        # adopt any content that is not yet owned by another reference
        for child in self.content:
            if child.referencedby is None:
                child.referencedby = self
        self.loaded = loaded
        if not loaded:
            self.unload()

    def load(self, ):
        """Set loaded to True and put the content back into
        ``Refobj.instances``.

        :returns: None
        :rtype: None
        :raises: None
        """
        for child in self.content:
            Refobj.instances.append(child)
        self.loaded = True

    def unload(self, ):
        """Set loaded to False and remove the content from
        ``Refobj.instances``.

        :returns: None
        :rtype: None
        :raises: None
        """
        for child in self.content:
            Refobj.instances.remove(child)
        self.loaded = False
class Refobj(object):
    """A refobj for testing with a refobject interface.

    :class:`RefobjInterface` is abstract, so tests need a concrete object.
    A refobj stores type, parent, a reference, the taskfile and the
    reference that holds it.  Every instance is recorded in
    ``Refobj.instances`` to emulate "the scene".
    """

    # all refobjs currently present in the fake scene
    instances = []

    def __init__(self, typ, parent, reference, taskfile, referencedby, identifier=-1):
        """Initialize a new refobj.

        :param typ: the type of the entity
        :type typ: str
        :param parent: the parent refobj
        :type parent: :class:`Refobj` | None
        :param reference: the reference object
        :type reference: :class:`Reference`
        :param taskfile: the taskfile that is loaded
        :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile`
        :param referencedby: the reference that holds this refobj
        :type referencedby: :class:`Reference` | None
        :param identifier: an identifier for the gui
        :type identifier: int
        :rtype: None
        :raises: None
        """
        Refobj.instances.append(self)
        self.typ = typ
        self.deleted = False
        self.children = []
        self.parent = parent
        if parent:
            parent.children.append(self)
        self.reference = reference
        self.taskfile = taskfile
        self.referencedby = referencedby
        self.identifier = identifier

    def get_status(self):
        """Return the status: loaded, unloaded or imported.

        :returns: one of ``Reftrack.LOADED``, ``Reftrack.UNLOADED`` or
                  ``Reftrack.IMPORTED``
        :rtype: str
        :raises: None
        """
        if not self.reference:
            return Reftrack.IMPORTED
        if self.reference.loaded:
            return Reftrack.LOADED
        return Reftrack.UNLOADED
class DummyRefobjInterface(RefobjInterface):
    """A implementation for the refobjinterface for testing

    uses :class:`Refobj` as refobjects
    """

    def __init__(self, current):
        """
        :param current: the current shot or element that is open
        :type current: Shot or Asset
        :raises: None
        """
        super(DummyRefobjInterface, self).__init__()
        # element returned by get_current_element()
        self.current = current

    def exists(self, refobj):
        """Check if the given refobj is still in the scene
        or if it has been deleted/dissapeared

        :param refobj: a reference object to query
        :type refobj: refobj
        :returns: True, if it still exists
        :rtype: :class:`bool`
        :raises: None
        """
        return not refobj.deleted

    def get_parent(self, refobj):
        """Return the parent

        :param refobj: the refobj to query
        :type refobj: :class:`Refobj`
        :returns: the parent
        :rtype: :class:`Refobj`
        :raises: None
        """
        return refobj.parent

    def set_parent(self, refobj, parent):
        """Set the parent

        :param refobj: the refobj to edit
        :type refobj: :class:`Refobj`
        :param parent: the new parent
        :type parent: :class:`Refobj`
        :returns: None
        :rtype: None
        :raises: None
        """
        # keep the children lists on both parents in sync
        if refobj.parent:
            refobj.parent.children.remove(refobj)
        if parent:
            parent.children.append(refobj)
        refobj.parent = parent

    def get_children(self, refobj):
        """Return the children of the refobj

        :param refobj: the refobj to query
        :type refobj: :class:`Refobj`
        :returns: the children
        :rtype: list of refobjects
        :raises: None
        """
        return refobj.children

    def get_typ(self, refobj):
        """Return the entity type of the given refobject

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: the entity type
        :rtype: str
        :raises: None
        """
        return refobj.typ

    def set_typ(self, refobj, typ):
        """Set the type of the given refobj

        :param refobj: the refobj to query
        :type refobj: refobj
        :param typ: the entity type
        :type typ: str
        :returns: None
        :rtype: None
        :raises: None
        """
        refobj.typ = typ

    def create_refobj(self, ):
        """Create and return a new refobj

        :returns: the new refobj
        :rtype: refobj
        :raises: None
        """
        # NOTE(review): referencedby is passed as False here, while the rest
        # of the file uses None for "not referenced" — presumably equivalent
        # for these tests since only truthiness is checked; confirm.
        return Refobj(None, None, None, None, False,)

    def referenced_by(self, refobj):
        """Return the reference that holds the given refobj.

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: The reference
        :rtype: :class:`Reference` | None
        :raises: None
        """
        return refobj.referencedby

    def delete_refobj(self, refobj):
        """Delete the given refobj

        :param refobj: the refobj to delete
        :type refobj: refobj
        :returns: None
        :rtype: None
        :raises: None
        """
        if refobj.parent:
            refobj.parent.children.remove(refobj)
        Refobj.instances.remove(refobj)

    def get_all_refobjs(self, ):
        """Return all refobjs in the scene that are not referenced

        :returns: all refobjs in the scene
        :rtype: list
        :raises: None
        """
        return Refobj.instances

    def get_current_element(self, ):
        """Return the currenty open Shot or Asset

        :returns: the currently open element
        :rtype: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot` | None
        :raises: None
        """
        return self.current

    def set_reference(self, refobj, reference):
        """Set the reference of the given refobj to reference

        :param refobj: the refobj to update
        :type refobj: refobj
        :param reference: the value for the refobj
        :returns: None
        :rtype: None
        :raises: None
        """
        refobj.reference = reference

    def get_reference(self, refobj):
        """Return the reference that the refobj represents or None if it is imported.

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: the reference object in the scene | None
        :raises: None
        """
        return refobj.reference

    def get_status(self, refobj):
        """Return the status of the given refobj

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: the status of the given refobj
        :rtype: str
        :raises: None
        """
        return refobj.get_status()

    def get_taskfile(self, refobj):
        """Return the taskfile that is loaded and represented by the refobj

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: The taskfile that is loaded in the scene
        :rtype: :class:`jukeboxcore.djadapter.TaskFile`
        :raises: None
        """
        return refobj.taskfile

    def get_id(self, refobj):
        """Return the id of the reftrack

        An id is a integer number that will be unique between
        all reftracks of the same parent, element and type, that have a
        refobject

        :param refobj: the refobj to query
        :type refobj: refobj
        :returns: the identifier
        :rtype: int
        :raises: None
        """
        return refobj.identifier

    def set_id(self, refobj, identifier):
        """Set the identifier on the given refobj

        :param refobj: the refobj to edit
        :type refobj: refobj
        :param identifier: the refobj id. Used to identify refobjects of the same parent, element and type in the UI
        :type identifier: int
        :returns: None
        :rtype: None
        :raises: None
        """
        refobj.identifier = identifier
class AssetReftypeInterface(ReftypeInterface):
    """Implementation for the reftype interface for testing

    for the type asset
    """

    def __init__(self, refobjinter):
        """Initialize a new ReftypeInterface

        :param refobjinter: the refobject interface
        :type refobjinter: :class:`RefobjInterface`
        :raises: None
        """
        super(AssetReftypeInterface, self).__init__(refobjinter)

    def reference(self, refobj, taskfileinfo):
        """Reference the given taskfileinfo into the scene and return the created reference object

        :param refobj: the refobj
        :type refobj: :class:`Refobj`
        :param taskfileinfo: The taskfileinfo that holds the information for what to reference
        :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
        :returns: the reference that was created and should set on the appropriate refobj
        :raises: NotImplementedError
        """
        refobj.taskfile = djadapter.taskfiles.get(task=taskfileinfo.task,
                                                  version=taskfileinfo.version,
                                                  releasetype=taskfileinfo.releasetype,
                                                  descriptor=taskfileinfo.descriptor,
                                                  typ=taskfileinfo.typ)
        # also create a new refobj. This acts like the content of the reference
        # contains another refobject
        ref = Reference()
        refobj1 = Refobj("Asset", None, None, refobj.taskfile, ref)
        ref.content.append(refobj1)
        return ref

    def load(self, refobj, reference):
        """Load the given reference

        :param refobj: the refobj
        :type refobj: :class:`Refobj`
        :param reference: the reference object. E.g. in Maya a reference node
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        reference.load()

    def unload(self, refobj, reference):
        """Unload the given reference

        :param refobj: the refobj
        :type refobj: :class:`Refobj`
        :param reference: the reference object. E.g. in Maya a reference node
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        reference.unload()

    def replace(self, refobj, reference, taskfileinfo):
        """Replace the given reference with the given taskfileinfo

        :param refobj: the refobj
        :type refobj: :class:`Refobj`
        :param reference: the reference object. E.g. in Maya a reference node
        :param taskfileinfo: the taskfileinfo that will replace the old entity
        :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        refobj.taskfile = djadapter.taskfiles.get(task=taskfileinfo.task,
                                                  version=taskfileinfo.version,
                                                  releasetype=taskfileinfo.releasetype,
                                                  descriptor=taskfileinfo.descriptor,
                                                  typ=taskfileinfo.typ)
        # swap the old content for a fresh refobj of the new taskfile
        for r in refobj.reference.content:
            self.get_refobjinter().delete(r)
        robj1 = Refobj("Asset", None, None, refobj.taskfile, refobj.reference)
        refobj.reference.content.append(robj1)

    def delete(self, refobj):
        """Delete the content of the given refobj

        :param refobj: the refobj that represents the content that should be deleted
        :type refobj: refobj
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        refobj.deleted = True
        if refobj.reference:
            # a refobj must never be part of its own reference's content
            assert refobj not in refobj.reference.content
            for r in refobj.reference.content:
                self.get_refobjinter().delete(r)

    def import_reference(self, refobj, reference):
        """Import the given reference

        :param refobj: the refobj
        :type refobj: :class:`Refobj`
        :param reference: the reference object. E.g. in Maya a reference node
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        # the content now lives directly in the scene, unowned by a reference
        for r in reference.content:
            r.referencedby = None
        reference.content = []
        refobj.reference = None

    def import_taskfile(self, refobj, taskfileinfo):
        """Import the given taskfileinfo and update the refobj

        :param refobj: the refobject
        :type refobj: refobject
        :param taskfileinfo: the taskfileinfo to reference
        :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        refobj.taskfile = djadapter.taskfiles.get(task=taskfileinfo.task,
                                                  version=taskfileinfo.version,
                                                  releasetype=taskfileinfo.releasetype,
                                                  descriptor=taskfileinfo.descriptor,
                                                  typ=taskfileinfo.typ)
        # creating the refobj is enough: the constructor registers it in
        # Refobj.instances, emulating imported scene content
        Refobj("Asset", None, None, refobj.taskfile, None)

    def is_replaceable(self, refobj):
        """Return whether the given reference of the refobject is replaceable or
        if it should just get deleted and loaded again.

        :param refobj: the refobject to query
        :type refobj: refobj
        :returns: True, if replaceable
        :rtype: bool
        :raises: NotImplementedError
        """
        return True

    def fetch_option_taskfileinfos(self, element):
        """Fetch the options for possible files to load, replace etc for the given element.

        :param element: The element for which the options should be fetched.
        :type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
        :returns: The options
        :rtype: list of :class:`TaskFileInfo`
        :raises: NotImplementedError
        """
        # only released maya main scenes of the given element are offered
        tfs = djadapter.taskfiles.filter(task__content_type=ContentType.objects.get_for_model(element),
                                         task__object_id=element.pk,
                                         typ=djadapter.FILETYPES['mayamainscene'],
                                         releasetype=djadapter.RELEASETYPES['release'])
        l = []
        for tf in tfs:
            tfi = TaskFileInfo(task=tf.task, version=tf.version, releasetype=tf.releasetype, descriptor=tf.descriptor, typ=tf.typ)
            l.append(tfi)
        return l

    def create_options_model(self, taskfileinfos):
        """Create a new treemodel that has the taskfileinfos as internal_data of the leaves.

        :returns: the option model with :class:`TaskFileInfo` as internal_data of the leaves.
        :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
        :raises: NotImplementedError
        """
        rootdata = ListItemData(["Asset/Shot", "Task", "Descriptor", "Version", "Releasetype"])
        rootitem = TreeItem(rootdata)
        for tfi in taskfileinfos:
            tfidata = TaskFileInfoItemData(tfi)
            # the item registers itself with its parent on construction
            TreeItem(tfidata, parent=rootitem)
        return TreeModel(rootitem)

    def get_suggestions(self, reftrack):
        """Return a list with possible children for this reftrack

        :param reftrack: the reftrack which needs suggestions
        :type reftrack: :class:`Reftrack`
        :returns: list of suggestions, tuples of type and element.
        :rtype: list
        :raises: NotImplementedError
        """
        element = reftrack.get_element()
        # suggest all assets of the element plus the element itself
        elements = list(element.assets.all())
        elements.append(element)
        typ = reftrack.get_typ()
        return [(typ, e) for e in elements]

    def get_scene_suggestions(self, current):
        """Return a list with elements for reftracks for the current scene with this type.

        :param reftrack: the reftrack which needs suggestions
        :type reftrack: :class:`Reftrack`
        :returns: list of suggestions, tuples of type and element.
        :rtype: list
        :raises: None
        """
        return [current]
# Make the dummy asset interface available under the 'Asset' type name.
RefobjInterface.register_type('Asset', AssetReftypeInterface)
@pytest.fixture(scope='function', autouse=True)
def refobjclass(request):
    """Reset ``Refobj.instances`` after every test so the fake scene does
    not leak between tests."""
    def fin():
        Refobj.instances = []
    request.addfinalizer(fin)
@pytest.fixture(scope='function')
def reftrackroot():
    """Return a fresh :class:`ReftrackRoot` for each test."""
    return ReftrackRoot()
@pytest.fixture(scope='function')
def refobjinter(djprj):
    """Return a :class:`DummyRefobjInterface` with the project's first shot
    as the currently open element."""
    current = djprj.shots[0]
    return DummyRefobjInterface(current)
def test_wrap(djprj, reftrackroot, refobjinter):
    # Build refobjs with an arbitrary parent hierarchy, then check that
    # Reftrack.wrap recreates the same hierarchy on the reftrack side.
    l = []
    for tf in djprj.assettaskfiles[:6]:
        refobj = Refobj('Asset', None, None, tf, False)
        l.append(refobj)
    l[0].parent = l[1]
    l[2].parent = l[1]
    l[3].parent = l[2]
    l[1].parent = l[4]
    tracks = Reftrack.wrap(reftrackroot, refobjinter, l)
    assert tracks[0].get_parent() is tracks[1]
    assert tracks[1].get_parent() is tracks[4]
    assert tracks[2].get_parent() is tracks[1]
    assert tracks[3].get_parent() is tracks[2]
    assert tracks[4].get_parent() is None
    for t in tracks:
        assert t.get_typ() == 'Asset'
        # every wrapped refobj is registered with the root
        assert t is reftrackroot.get_reftrack(t.get_refobj())
        assert t.status() == Reftrack.IMPORTED
        # assert if suggestions have been created
        suggestions = t.get_suggestions()
        for c in t._children:
            for i, (typ, element) in enumerate(suggestions):
                if c.get_typ() == typ and c.get_element() == element:
                    del suggestions[i]
                    break
        assert suggestions == [],\
            "Not all suggestions were created after wrapping. Suggestions missing %s" % suggestions
def test_wrap_scene(djprj, reftrackroot, refobjinter):
    """Wrapping the scene picks up all refobjs plus one track for the scene itself."""
    tf = djprj.assettaskfiles[0]
    for _ in range(3):
        Refobj('Asset', None, None, tf, False)
    tracks = Reftrack.wrap_scene(reftrackroot, refobjinter)
    # 3 refobjs plus one track without a refobj for the current scene element
    assert len(tracks) == 4
    for track in tracks:
        if track.get_refobj() is None:
            assert track.get_element() == djprj.shots[0]
        else:
            assert track.get_element() == tf.task.element
def test_reftrackinit_raise_error(djprj, reftrackroot, refobjinter):
    """Invalid argument combinations to Reftrack raise TypeError/ValueError."""
    # typ alone is not enough
    with pytest.raises(TypeError):
        Reftrack(reftrackroot, refobjinter, typ='Asset')
    # element alone is not enough
    with pytest.raises(TypeError):
        Reftrack(reftrackroot, refobjinter, element=djprj.assets[0])
    # refobj together with typ/element is ambiguous
    with pytest.raises(TypeError):
        robj = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
        Reftrack(reftrackroot, refobjinter, refobj=robj, typ='Asset', element=djprj.assets[0])
    # unsupported type for the element
    with pytest.raises(ValueError):
        Reftrack(reftrackroot, refobjinter, typ='Shader', element=djprj.assets[0])
def test_create_empty(djprj, reftrackroot, refobjinter):
    """Creating reftracks without refobjs registers them and links parent/child."""
    parent = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0])
    child = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[1], parent=parent)
    for track in (parent, child):
        assert track in reftrackroot._reftracks
        assert track.status() is None
        assert track.get_typ() == 'Asset'
    assert child.get_treeitem().parent() is parent.get_treeitem()
    assert child.get_parent() is parent
    assert not parent.alien()
    assert child.alien()
def test_alien(djprj, reftrackroot):
    """A track is alien unless its element matches the current scene or it has a parent."""
    refobjinter = DummyRefobjInterface(djprj.assets[0])
    r1 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0])
    r2 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[1])
    r3 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[1], parent=r2)
    assert [r1.alien(), r2.alien(), r3.alien()] == [False, True, False]
    # without a current element everything without a parent is alien
    refobjinter = DummyRefobjInterface(None)
    r4 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0])
    r5 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[1])
    r6 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[1], parent=r5)
    assert [r4.alien(), r5.alien(), r6.alien()] == [True, True, False]
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_delete(mock_suggestions, djprj, reftrackroot):
    # suppress automatic suggestions so only the explicit hierarchy exists
    mock_suggestions.return_value = []
    current = djprj.assets[0]
    refobjinter = DummyRefobjInterface(current)
    ref0 = Reference()
    ref1 = Reference()
    ref2 = Reference()
    ref4 = Reference()
    robj0 = Refobj('Asset', None, ref0, djprj.assettaskfiles[-1], None)
    robj1 = Refobj('Asset', robj0, ref1, djprj.assettaskfiles[0], ref0)
    robj2 = Refobj('Asset', robj1, ref2, djprj.assettaskfiles[0], ref1)
    robj3 = Refobj('Asset', robj2, None, djprj.assettaskfiles[0], None)
    robj4 = Refobj('Asset', robj0, ref4, djprj.assettaskfiles[0], None)
    robj5 = Refobj('Asset', robj4, None, djprj.assettaskfiles[0], ref0)
    robj6 = Refobj('Asset', robj4, None, djprj.assettaskfiles[0], None)
    tracks = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2, robj3, robj4, robj5, robj6])
    # children are reported breadth-first over the hierarchy
    assert tracks[0].get_all_children() == [tracks[1], tracks[4], tracks[2], tracks[5], tracks[6], tracks[3]]
    assert tracks[6].get_all_children() == []
    assert tracks[4].get_all_children() == [tracks[5], tracks[6]]
    assert tracks[2].get_children_to_delete() == [tracks[3]]
    assert tracks[0].get_children_to_delete() == [tracks[4], tracks[6], tracks[3]]
    assert tracks[4].get_children_to_delete() == [tracks[5], tracks[6]]
    tracks[2].delete()
    # the deleted track itself stays registered but loses refobj and children
    assert tracks[2]._children == []
    assert tracks[2].get_refobj() is None
    assert tracks[2] in reftrackroot._reftracks
    # its child is unregistered completely
    assert tracks[3].get_refobj() is None
    assert tracks[3] not in reftrackroot._reftracks
    assert tracks[3].get_parent() is None
    assert robj3.deleted
    assert tracks[0].alien()
    tracks[0].delete()
    assert robj4.deleted
    assert robj0.deleted
    for i, t in enumerate(tracks):
        assert t not in reftrackroot._reftracks
def test_duplicate(djprj, reftrackroot, refobjinter):
    """Duplicates share root, interface, typ, element and parent but have no status."""
    ref = Reference()
    robj0 = Refobj('Asset', None, ref, djprj.assettaskfiles[0], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], None)
    tracks = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1])
    empty = Reftrack(root=reftrackroot,
                     refobjinter=refobjinter,
                     typ='Asset',
                     element=djprj.assettaskfiles[1],
                     parent=tracks[1])
    tracks.append(empty)
    for track in tracks:
        dup = track.duplicate()
        # identity of shared state is preserved on the duplicate
        for getter in ('get_root', 'get_refobjinter', 'get_typ',
                       'get_element', 'get_parent'):
            assert getattr(dup, getter)() is getattr(track, getter)()
        assert dup.status() is None
        assert dup.get_treeitem().parent() is track.get_treeitem().parent()
def test_throw_children_away(djprj, reftrackroot, refobjinter):
    robj0 = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], None)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], None)
    robj3 = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
    tracks = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2, robj3])
    tracks[0].throw_children_away()
    # children are detached from parent, tree model and root ...
    assert tracks[0]._children == []
    assert tracks[1].get_parent() is None
    assert tracks[1].get_treeitem().parent() is None
    assert tracks[1].get_treeitem()._model is None
    assert tracks[0].get_treeitem().child_count() == 0
    assert tracks[1] not in reftrackroot._reftracks
    assert tracks[2] not in reftrackroot._reftracks
    # unrelated root track is untouched
    assert tracks[3] in reftrackroot._reftracks
    # ... but their refobjs still exist in the scene
    assert refobjinter.exists(tracks[1].get_refobj())
    assert refobjinter.exists(tracks[2].get_refobj())
def test_fetch_new_children(djprj, reftrackroot, refobjinter):
    robj0 = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
    t0 = Reftrack.wrap(reftrackroot, refobjinter, [robj0])[0]
    robj1 = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
    t1 = Reftrack.wrap(reftrackroot, refobjinter, [robj1])[0]
    t0.throw_children_away()
    # throwing children away leaves the root tracks themselves registered
    assert t0 in reftrackroot._reftracks
    assert t1 in reftrackroot._reftracks
    assert t0.get_refobj() in reftrackroot._parentsearchdict
    assert t1.get_refobj() in reftrackroot._parentsearchdict
    robj2 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], None)
    robj3 = Refobj('Asset', robj2, None, djprj.assettaskfiles[0], None)
    t2, t3 = Reftrack.wrap(reftrackroot, refobjinter, [robj2, robj3])
    t0.throw_children_away()
    # thrown-away children are unregistered but their refobjs stay in the scene
    for t in (t2, t3):
        assert t not in reftrackroot._reftracks
        assert t.get_refobj() not in reftrackroot._parentsearchdict
        assert refobjinter.exists(t.get_refobj())
    assert t2.get_parent() is None
    assert t2.get_treeitem().parent() is None
    # try wrapping them again
    Reftrack.wrap(reftrackroot, refobjinter, [robj2, robj3])
def test_create_refobject(djprj, reftrackroot, refobjinter):
    """create_refobject parents the new refobj under the parent track's refobj."""
    parentrobj = Refobj('Asset', None, None, djprj.assettaskfiles[0], None)
    parenttrack = Reftrack.wrap(reftrackroot, refobjinter, [parentrobj])[0]
    child = Reftrack(reftrackroot, refobjinter, typ='Asset',
                     element=djprj.assets[0], parent=parenttrack)
    created = child.create_refobject()
    assert created.parent is parentrobj
    assert created.typ == 'Asset'
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_reference(mock_suggestions, djprj, reftrackroot, refobjinter):
    # no automatic suggestions so the child counts below stay deterministic
    mock_suggestions.return_value = []
    t0 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0], parent=None)
    assert reftrackroot._reftracks == set([t0])
    t0.reference(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0]))
    assert t0 in reftrackroot._reftracks
    # referencing pulled in the nested refobj as exactly one child track
    assert len(t0._children) == 1
    t1 = t0._children[0]
    assert t1.get_parent() is t0
    robj0 = t0.get_refobj()
    robj1 = t1.get_refobj()
    assert robj0.taskfile == djprj.assettaskfiles[0]
    assert robj1.parent is robj0
    assert robj0.parent is None
    assert robj0.typ == 'Asset'
    assert robj0.get_status() == Reftrack.LOADED
    assert t0.status() == Reftrack.LOADED
    # the stored taskfileinfo matches the referenced taskfile
    tfi = t0.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
    # referencing below an existing track parents the new refobj correctly
    t2 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0], parent=t0)
    t2.reference(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0]))
    t3 = t2._children[0]
    assert len(t2._children) == 1
    assert t2.get_parent() is t0
    robj2 = t2.get_refobj()
    robj3 = t3.get_refobj()
    assert robj2.taskfile == djprj.assettaskfiles[0]
    assert robj2.parent is robj0
    assert robj3.parent is robj2
    assert robj2.typ == 'Asset'
    assert robj2.get_status() == Reftrack.LOADED
    assert t2.status() == Reftrack.LOADED
    tfi = t2.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_load(mock_suggestions, djprj, reftrackroot, refobjinter):
    mock_suggestions.return_value = []
    ref2 = Reference(True)
    robj2 = Refobj('Asset', None, ref2, djprj.assettaskfiles[0], None)
    robj0 = Refobj('Asset', robj2, None, djprj.assettaskfiles[0], ref2)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], ref2)
    ref2.content.append(robj0)
    ref2.content.append(robj1)
    ref2.unload()
    t2 = Reftrack.wrap(reftrackroot, refobjinter, [robj2])[0]
    assert t2.status() == Reftrack.UNLOADED
    # unloading removed the contained refobjs from the scene
    for r in [robj0, robj1]:
        assert r not in Refobj.instances
    assert t2._children == []
    t2.load()
    # loading restores the reference content as child reftracks
    t0 = t2._children[0]
    t1 = t0._children[0]
    assert t0.get_refobj() is robj0
    assert t1.get_refobj() is robj1
    assert t2.status() == Reftrack.LOADED
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_unload(mock_suggestions, djprj, reftrackroot, refobjinter):
    mock_suggestions.return_value = []
    ref0 = Reference(True)
    robj0 = Refobj('Asset', None, ref0, djprj.assettaskfiles[0], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], ref0)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], ref0)
    ref0.content.append(robj1)
    ref0.content.append(robj2)
    t0, t1, t2 = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2])
    assert t0._children
    t0.unload()
    # unloading empties the children and detaches them from the hierarchy
    assert t0.status() == Reftrack.UNLOADED
    assert robj0.get_status() == Reftrack.UNLOADED
    assert t0._children == []
    assert t1.get_parent() is None
    assert t1.get_treeitem().parent() is None
    # a child that is not part of the reference content blocks unloading
    ref3 = Reference(True)
    robj3 = Refobj('Asset', None, ref3, djprj.assettaskfiles[0], None)
    robj4 = Refobj('Asset', robj3, None, djprj.assettaskfiles[0], None)
    t3, t4 = Reftrack.wrap(reftrackroot, refobjinter, [robj3, robj4])
    with pytest.raises(ReftrackIntegrityError):
        t3.unload()
def test_import_reference(djprj, reftrackroot, refobjinter):
    """Importing a loaded reference switches the whole hierarchy to IMPORTED."""
    ref = Reference(True)
    robj0 = Refobj('Asset', None, ref, djprj.assettaskfiles[0], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], ref)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], ref)
    ref.content.append(robj1)
    ref.content.append(robj2)
    t0 = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2])[0]
    assert t0.status() == Reftrack.LOADED
    t0.import_reference()
    assert t0.status() == Reftrack.IMPORTED
    for robj in (robj0, robj1, robj2):
        assert robj.get_status() == Reftrack.IMPORTED
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_import_taskfile(mock_suggestions, djprj, reftrackroot, refobjinter):
    mock_suggestions.return_value = []
    t0 = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0])
    assert t0._children == []
    assert t0.get_refobj() is None
    t0.import_file(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0]))
    # importing created a refobj plus one nested child track
    assert len(t0._children) == 1
    t1 = t0._children[0]
    robj0 = t0.get_refobj()
    robj1 = t1.get_refobj()
    assert robj0.typ == 'Asset'
    assert robj0.parent is None
    assert robj0.children == [robj1]
    assert robj0.taskfile == djprj.assettaskfiles[0]
    assert robj1.parent is robj0
    assert t0.status() == Reftrack.IMPORTED
    # the stored taskfileinfo matches the imported taskfile
    tfi = t0.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[0])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
@mock.patch.object(AssetReftypeInterface, "is_replaceable")
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_replace_notreplaceable_reference(mock_suggestions, mock_replaceable, djprj, reftrackroot, refobjinter):
    """Replacing a loaded but non-replaceable reference recreates it as a reference.

    Fix: removed a stray Python-2-only ``print robj0, robj1, robj2`` debug
    statement which is a syntax error under Python 3 and only polluted output.
    """
    mock_suggestions.return_value = []
    mock_replaceable.return_value = False
    ref0 = Reference(True)
    robj0 = Refobj('Asset', None, ref0, djprj.assettaskfiles[-4], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], ref0)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], ref0)
    ref0.content.append(robj1)
    ref0.content.append(robj2)
    t0, t1, t2 = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2])
    assert not t0.uptodate()
    assert t0.get_refobjinter().is_replaceable(t0.get_refobj()) is False
    assert t0.alien()
    t0.replace(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2]))
    assert t0 in reftrackroot._reftracks
    # the original was a reference, so the replacement is LOADED again
    assert t0.status() == Reftrack.LOADED
    assert len(t0._children) == 1
    assert t0.get_refobj().taskfile == djprj.assettaskfiles[2]
    tfi = t0.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
    t4 = t0._children[0]
    assert t4.get_refobj().parent is t0.get_refobj()
@mock.patch.object(AssetReftypeInterface, "is_replaceable")
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_replace_notreplaceable_import(mock_suggestions, mock_replaceable, djprj, reftrackroot, refobjinter):
    mock_suggestions.return_value = []
    mock_replaceable.return_value = False
    robj0 = Refobj('Asset', None, None, djprj.assettaskfiles[-4], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], None)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], None)
    t0, t1, t2 = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2])
    assert not t0.uptodate()
    assert t0.get_refobjinter().is_replaceable(t0.get_refobj()) is False
    assert t0.alien()
    t0.replace(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2]))
    assert t0 in reftrackroot._reftracks
    # the original was imported, so the replacement is imported again
    assert t0.status() == Reftrack.IMPORTED
    assert len(t0._children) == 1
    assert t0.get_refobj().taskfile == djprj.assettaskfiles[2]
    tfi = t0.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
    t4 = t0._children[0]
    assert t4.get_refobj().parent is t0.get_refobj()
@mock.patch.object(AssetReftypeInterface, "get_suggestions")
def test_replace_replaceable(mock_suggestions, djprj, reftrackroot, refobjinter):
    mock_suggestions.return_value = []
    ref0 = Reference()
    robj0 = Refobj('Asset', None, ref0, djprj.assettaskfiles[-4], None)
    robj1 = Refobj('Asset', robj0, None, djprj.assettaskfiles[0], ref0)
    robj2 = Refobj('Asset', robj1, None, djprj.assettaskfiles[0], None)
    ref0.content.append(robj1)
    t0, t1, t2 = Reftrack.wrap(reftrackroot, refobjinter, [robj0, robj1, robj2])
    assert not t0.uptodate()
    t0.replace(TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2]))
    # replaceable references are swapped in place and stay loaded
    assert t0.get_refobjinter().is_replaceable(t0.get_refobj()) is True
    assert t0.alien()
    assert t0.status() == Reftrack.LOADED
    assert len(t0._children) == 1
    assert t0.get_refobj().taskfile == djprj.assettaskfiles[2]
    tfi = t0.get_taskfileinfo()
    reftfi = TaskFileInfo.create_from_taskfile(djprj.assettaskfiles[2])
    assert tfi.version == reftfi.version
    assert tfi.task == reftfi.task
    assert tfi.releasetype == reftfi.releasetype
    assert tfi.descriptor == reftfi.descriptor
    t4 = t0._children[0]
    assert t4.get_refobj().parent is t0.get_refobj()
def test_get_scene_suggestions(djprj, reftrackroot, refobjinter):
    """Scene suggestions exclude elements already tracked at root level."""
    parent = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0])
    Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.shots[0], parent=parent)
    # tracked only as a child, so the shot is still suggested
    sugs = reftrackroot.get_scene_suggestions(refobjinter)
    assert sugs == [('Asset', djprj.shots[0])]
    # once tracked at root level it is no longer suggested
    Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.shots[0])
    assert reftrackroot.get_scene_suggestions(refobjinter) == []
def test_restricted(djprj, reftrackroot, refobjinter):
    """Restrictions can be set and queried per action."""
    track = Reftrack(reftrackroot, refobjinter, typ='Asset', element=djprj.assets[0],)
    assert not track.is_restricted(track.reference)
    track.set_restricted(track.reference, True)
    assert track.is_restricted(track.reference)
| |
import os
import re
import cx_Oracle
import collections
import datetime
# Diagnostic codes (61xx range) that identify ear-related disabilities.
earDiagnosisCode = [6100,6200,6201,6202,6204,6205,6207,6209,6210,6211,6260]
#Primary query, Look for all decisions where a claim has been processed already. Much of the filtering is based on the engineering notebook, the only add on is the prev_evaltn_ind which is a poor flag mechanism in Ratings.
#Because of historical copies being represented across all rating profiles grouping is required to clean up the data.
#Organize them based first by participant id, then profile date, disability id, begin date, then code and percent.
# NOTE: the ORDER BY is load-bearing — the aggregation loop below relies on rows
# arriving grouped by participant and profile date, and on later begin dates
# overwriting earlier ones per disability id.
SQL="select rd.ptcpnt_vet_id, rd.prfil_dt, rd.begin_dt, rd.end_dt, rd.prmlgn_dt, rd.dgnstc_txt, rd.dsblty_id, rd.diagnosis_code, rd.hypntd_dgnstc_type_cd, rd.prcnt_nbr \
from COMBO_RATING_DECISION rd \
inner join V_EAR_AGGREGATE_CONTENTION ac on ac.vet_id = rd.ptcpnt_vet_id \
where rd.begin_dt IS NOT NULL and rd.begin_dt < rd.prmlgn_dt and (rd.end_dt is NULL or rd.end_dt >= rd.prmlgn_dt) and rd.system_type_cd = 'C' and rd.dsblty_decn_type_cd in ('SVCCONNCTED','1151GRANTED') and (rd.prev_evaltn_ind IS NULL OR rd.prev_evaltn_ind = 'N') \
group by rd.begin_dt, rd.end_dt, rd.prmlgn_dt, rd.dgnstc_txt, rd.dsblty_id, rd.diagnosis_code, rd.hypntd_dgnstc_type_cd, rd.prcnt_nbr, rd.ptcpnt_vet_id, rd.prfil_dt \
order by ptcpnt_vet_id,prfil_dt,dsblty_id,begin_dt,diagnosis_code,prcnt_nbr"
class DecisionPercentage:
    """Pair of a diagnosis code and its disability percentage for one decision."""

    def __init__(self, code, percentage):
        self.code = code
        self.percentage = percentage
class AggregateDecision:
    """One output row for a (veteran, rating profile date) aggregation.

    Attribute names mirror the columns of DEVELOPER.V_EAR_AGGREGATE_DECISION.
    The ``A####`` attributes count decisions carrying that ear diagnosis code;
    ``TXT_LOSS``/``TXT_TINITU`` count regex hits in the diagnosis text.

    Fix: removed an unused ``from pprint import pprint`` inside ``__str__``.
    """

    def __init__(self):
        self.VET_ID = None
        self.PROFILE_DATE = None
        self.PROMULGATION_DATE = None
        self.RECENT_EAR_DATE = None
        self.CDD = 0            # combined disability degree over all decisions
        self.EAR_CDD = None     # combined degree over ear diagnoses only; None if no ear codes
        self.A6100 = 0
        self.A6200 = 0
        self.A6201 = 0
        self.A6202 = 0
        self.A6204 = 0
        self.A6205 = 0
        self.A6207 = 0
        self.A6209 = 0
        self.A6210 = 0
        self.A6211 = 0
        self.A6260 = 0
        self.TXT_LOSS = 0
        self.TXT_TINITU = 0

    def __str__(self):
        # Render all current attribute values; handy for debugging.
        return str(vars(self))
class Decision:
    """One rating-decision row from the primary query, with light normalisation.

    Normalisation: missing diagnosis text becomes '', the literal diagnosis
    code 'Unknown' becomes -99, and a missing percent becomes 0 (the offending
    participant id is printed as a diagnostic).

    Fix: removed an unused ``from pprint import pprint`` inside ``__str__``.
    """

    def __init__(self, ptcpnt_vet_id, prfil_dt, begin_dt, end_dt, prmlgn_dt, dgnstc_txt, dsblty_id, diagnosis_code, hypntd_dgnstc_type_cd, prcnt_nbr):
        self.ptcpnt_vet_id = ptcpnt_vet_id
        self.prfil_dt = prfil_dt
        self.begin_dt = begin_dt
        self.end_dt = end_dt
        self.prmlgn_dt = prmlgn_dt
        if dgnstc_txt is None:
            self.dgnstc_txt = ''
        else:
            self.dgnstc_txt = dgnstc_txt
        self.dsblty_id = dsblty_id
        if diagnosis_code == 'Unknown':
            # sentinel so it never matches a real ear diagnosis code
            self.diagnosis_code = -99
        else:
            self.diagnosis_code = int(diagnosis_code)
        self.hypntd_dgnstc_type_cd = hypntd_dgnstc_type_cd
        if prcnt_nbr is None:
            print(ptcpnt_vet_id)
            self.prcnt_nbr = 0
        else:
            self.prcnt_nbr = int(prcnt_nbr)

    def __str__(self):
        # Render all current attribute values; handy for debugging.
        return str(vars(self))
# Start timestamp for a rough runtime measurement.
print(str(datetime.datetime.now()))
connection = cx_Oracle.connect('developer/D3vVV0Rd@127.0.0.1:1521/DEV.BCDSS')
cursor = connection.cursor()
cursor.execute(SQL)
# Pre-parse the insert once; it is executed once per aggregated rating profile.
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.V_EAR_AGGREGATE_DECISION (VET_ID, PROFILE_DATE, PROMULGATION_DATE, RECENT_EAR_DATE, CDD, EAR_CDD, A6100, A6200,A6201,A6202,A6204,A6205,A6207,A6209,A6210,A6211,A6260,TXT_LOSS,TXT_TINITU) \
VALUES (:VET_ID, :PROFILE_DATE, :PROMULGATION_DATE, :RECENT_EAR_DATE, :CDD, :EAR_CDD, \
:A6100, :A6200, :A6201, :A6202, :A6204, :A6205, :A6207, :A6209, :A6210, :A6211, :A6260, \
:TXT_LOSS, :TXT_TINITU)')
# Aggregation state; reset whenever the (participant, profile date) pair changes.
aggregateDecision = None
currRatingProfile = -1
currParticipant = -1
counter = 0
# Maps disability id -> most recent DecisionPercentage for the current profile.
# Fix: this was previously bound to the dead name `hasMultipleDisabilityCodes`,
# which nothing ever read; binding `multipleDisabilityCodes` here guarantees the
# name the loop references is always defined.
multipleDisabilityCodes = collections.Counter()
recentEarBeginDate = collections.Counter()
totalCDD = 1
totalEarCDD = 1
hasEarCDD = False
# Stream the ordered decisions and emit one aggregate row per rating profile.
for row in cursor:
    if counter == 1000: #Commit every 1000 records. Improvement would be to look into aggregate inserts
        connection.commit()
        counter=0
    decision = Decision(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]) #Map loose fields into a Contention object. (Contention is a convenience object)
    if currParticipant != decision.ptcpnt_vet_id or currRatingProfile != decision.prfil_dt: #Process insert statement and reset aggregation variables when profile date changes
        if currRatingProfile != -1: #Skip if first time through
            #Calculate the CDD values
            # NOTE(review): assumes Python 3 true division — under Python 2 the
            # int/int below would truncate to 0 and corrupt the CDD; confirm.
            for disabilityPercentage in multipleDisabilityCodes.values():
                totalCDD *= (1 - (disabilityPercentage.percentage / 100))
                if disabilityPercentage.code in earDiagnosisCode:
                    hasEarCDD = True
                    totalEarCDD *= (1 - (disabilityPercentage.percentage / 100))
            aggregateDecision.CDD = 100 * (1 - totalCDD)
            if hasEarCDD:
                aggregateDecision.EAR_CDD = 100 * (1 - totalEarCDD)
            else:
                aggregateDecision.EAR_CDD = None
            if recentEarBeginDate[currRatingProfile] == 0: #Oracle will use the number 0 to indicate it has not been set, empty values list does not appear to work
                aggregateDecision.RECENT_EAR_DATE = None
            else:
                aggregateDecision.RECENT_EAR_DATE = recentEarBeginDate[currRatingProfile]
            # Execute the prepared insert (statement None reuses the prepared one).
            writeCursor.execute(None, {'VET_ID' :aggregateDecision.VET_ID, 'PROFILE_DATE' :aggregateDecision.PROFILE_DATE, 'PROMULGATION_DATE' :aggregateDecision.PROMULGATION_DATE, 'RECENT_EAR_DATE' :aggregateDecision.RECENT_EAR_DATE, 'CDD' :aggregateDecision.CDD, 'EAR_CDD' :aggregateDecision.EAR_CDD,
                'A6100' :aggregateDecision.A6100, 'A6200' :aggregateDecision.A6200, 'A6201' :aggregateDecision.A6201, 'A6202' :aggregateDecision.A6202, 'A6204' :aggregateDecision.A6204, 'A6205' :aggregateDecision.A6205, 'A6207' :aggregateDecision.A6207, 'A6209' :aggregateDecision.A6209, 'A6210' :aggregateDecision.A6210, 'A6211' :aggregateDecision.A6211, 'A6260' :aggregateDecision.A6260,
                'TXT_LOSS' :aggregateDecision.TXT_LOSS, 'TXT_TINITU' :aggregateDecision.TXT_TINITU})
            counter += 1
        #Reset the counters
        totalCDD = 1
        totalEarCDD = 1
        hasEarCDD = False
        currRatingProfile = decision.prfil_dt
        currParticipant = decision.ptcpnt_vet_id
        multipleDisabilityCodes = collections.Counter()
        recentEarBeginDate = collections.Counter()
        #Capture all rating profile level items that do not change per contention
        aggregateDecision = AggregateDecision()
        aggregateDecision.VET_ID = decision.ptcpnt_vet_id
        aggregateDecision.PROFILE_DATE = currRatingProfile
        aggregateDecision.PROMULGATION_DATE = decision.prmlgn_dt
    #Since we are ordering by disability id then begin_dt we choose the most recent percent number
    multipleDisabilityCodes[decision.dsblty_id] = DecisionPercentage(decision.diagnosis_code, decision.prcnt_nbr)
    if decision.diagnosis_code in earDiagnosisCode: #Is the diagnosis an ear?
        if recentEarBeginDate[currRatingProfile] == 0 or decision.begin_dt > recentEarBeginDate[currRatingProfile]: #Is the date container empty, or is the ear decision date more recent?
            recentEarBeginDate[currRatingProfile] = decision.begin_dt #Set it
    #Use regex to look for a hit and then if it hits make it true. No need to track how many times, just true or false
    # NOTE(review): despite the comment above, these are counters (+= 1), not booleans.
    if re.search("Loss",decision.dgnstc_txt,re.IGNORECASE):
        aggregateDecision.TXT_LOSS += 1
    if re.search("Tinnitus",decision.dgnstc_txt,re.IGNORECASE):
        aggregateDecision.TXT_TINITU += 1
    #Simply test the codes and again true or false
    if decision.diagnosis_code == 6100:
        aggregateDecision.A6100 += 1
    if decision.diagnosis_code == 6200:
        aggregateDecision.A6200 += 1
    if decision.diagnosis_code == 6201:
        aggregateDecision.A6201 += 1
    if decision.diagnosis_code == 6202:
        aggregateDecision.A6202 += 1
    if decision.diagnosis_code == 6204:
        aggregateDecision.A6204 += 1
    if decision.diagnosis_code == 6205:
        aggregateDecision.A6205 += 1
    if decision.diagnosis_code == 6207:
        aggregateDecision.A6207 += 1
    if decision.diagnosis_code == 6209:
        aggregateDecision.A6209 += 1
    if decision.diagnosis_code == 6210:
        aggregateDecision.A6210 += 1
    if decision.diagnosis_code == 6211:
        aggregateDecision.A6211 += 1
    if decision.diagnosis_code == 6260:
        aggregateDecision.A6260 += 1
#A bit strange looking but due to Python's identation approach this occurs after the for loop in order to capture the last claim.
# The loop above only writes a row when the (participant, profile date) pair
# changes, so the final profile is still pending here.
# Fix: guard against an empty result set — previously this block crashed with a
# NameError/AttributeError when the query returned no rows, because
# aggregateDecision stayed None and multipleDisabilityCodes was never bound.
if aggregateDecision is not None:
    for disabilityPercentage in multipleDisabilityCodes.values():
        #Calculate the CDD values
        totalCDD *= (1 - (disabilityPercentage.percentage / 100))
        if disabilityPercentage.code in earDiagnosisCode:
            hasEarCDD = True
            totalEarCDD *= (1 - (disabilityPercentage.percentage / 100))
    aggregateDecision.CDD = 100 * (1 - totalCDD)
    if hasEarCDD:
        aggregateDecision.EAR_CDD = 100 * (1 - totalEarCDD)
    else:
        aggregateDecision.EAR_CDD = None
    if recentEarBeginDate[currRatingProfile] == 0:
        aggregateDecision.RECENT_EAR_DATE = None
    else:
        aggregateDecision.RECENT_EAR_DATE = recentEarBeginDate[currRatingProfile]
    writeCursor.execute(None, {'VET_ID' :aggregateDecision.VET_ID, 'PROFILE_DATE' :aggregateDecision.PROFILE_DATE, 'PROMULGATION_DATE' :aggregateDecision.PROMULGATION_DATE, 'RECENT_EAR_DATE' :aggregateDecision.RECENT_EAR_DATE, 'CDD' :aggregateDecision.CDD, 'EAR_CDD' :aggregateDecision.EAR_CDD,
        'A6100' :aggregateDecision.A6100, 'A6200' :aggregateDecision.A6200, 'A6201' :aggregateDecision.A6201, 'A6202' :aggregateDecision.A6202, 'A6204' :aggregateDecision.A6204, 'A6205' :aggregateDecision.A6205, 'A6207' :aggregateDecision.A6207, 'A6209' :aggregateDecision.A6209, 'A6210' :aggregateDecision.A6210, 'A6211' :aggregateDecision.A6211, 'A6260' :aggregateDecision.A6260,
        'TXT_LOSS' :aggregateDecision.TXT_LOSS, 'TXT_TINITU' :aggregateDecision.TXT_TINITU})
connection.commit()
print(str(datetime.datetime.now()))
writeCursor.close()
cursor.close()
connection.close()
| |
from __future__ import absolute_import, print_function, division
import itertools
import operator
from collections import OrderedDict
from petl.compat import next, string_types, reduce, text_type
from petl.errors import ArgumentError
from petl.util.base import Table, iterpeek, rowgroupby
from petl.transform.sorts import sort, mergesort
from petl.transform.basics import cut
from petl.transform.dedup import distinct
def rowreduce(table, key, reducer, header=None, presorted=False,
              buffersize=None, tempdir=None, cache=True):
    """
    Group rows under the given key then apply `reducer` to produce a single
    output row for each input group of rows. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           ['a', 3],
        ...           ['a', 7],
        ...           ['b', 2],
        ...           ['b', 1],
        ...           ['b', 9],
        ...           ['c', 4]]
        >>> def sumbar(key, rows):
        ...     return [key, sum(row[1] for row in rows)]
        ...
        >>> table2 = etl.rowreduce(table1, key='foo', reducer=sumbar,
        ...                        header=['foo', 'barsum'])
        >>> table2
        +-----+--------+
        | foo | barsum |
        +=====+========+
        | 'a' |     10 |
        +-----+--------+
        | 'b' |     12 |
        +-----+--------+
        | 'c' |      4 |
        +-----+--------+

    N.B., this is not strictly a "reduce" in the sense of the standard Python
    :func:`reduce` function, i.e., the `reducer` function is *not* applied
    recursively to values within a group, rather it is applied once to each row
    group as a whole.

    See also :func:`petl.transform.reductions.aggregate` and
    :func:`petl.transform.reductions.fold`.

    """

    view = RowReduceView(table, key, reducer, header=header,
                         presorted=presorted, buffersize=buffersize,
                         tempdir=tempdir, cache=cache)
    return view


Table.rowreduce = rowreduce
class RowReduceView(Table):
    """Lazy view applying a reducer to each group of rows sharing a key."""

    def __init__(self, source, key, reducer, header=None,
                 presorted=False, buffersize=None, tempdir=None, cache=True):
        # sort unless the caller guarantees the data are already ordered by key
        if presorted:
            self.source = source
        else:
            self.source = sort(source, key, buffersize=buffersize,
                               tempdir=tempdir, cache=cache)
        self.key = key
        self.header = header
        self.reducer = reducer

    def __iter__(self):
        return iterrowreduce(self.source, self.key, self.reducer, self.header)
def iterrowreduce(source, key, reducer, header):
    """Yield the output header, then one reduced row per key group."""
    if header is None:
        # no explicit output header given: reuse the source table's header
        header, source = iterpeek(source)
    yield tuple(header)
    for k, group in rowgroupby(source, key):
        yield tuple(reducer(k, group))
def aggregate(table, key, aggregation=None, value=None, presorted=False,
              buffersize=None, tempdir=None, cache=True):
    """Group rows under the given key then apply aggregation functions.
    E.g.::

        >>> import petl as etl
        >>>
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['a', 3, True],
        ...           ['a', 7, False],
        ...           ['b', 2, True],
        ...           ['b', 2, False],
        ...           ['b', 9, False],
        ...           ['c', 4, True]]
        >>> # aggregate whole rows
        ... table2 = etl.aggregate(table1, 'foo', len)
        >>> table2
        +-----+-------+
        | foo | value |
        +=====+=======+
        | 'a' |     2 |
        +-----+-------+
        | 'b' |     3 |
        +-----+-------+
        | 'c' |     1 |
        +-----+-------+

        >>> # aggregate single field
        ... table3 = etl.aggregate(table1, 'foo', sum, 'bar')
        >>> table3
        +-----+-------+
        | foo | value |
        +=====+=======+
        | 'a' |    10 |
        +-----+-------+
        | 'b' |    13 |
        +-----+-------+
        | 'c' |     4 |
        +-----+-------+

        >>> # alternative signature using keyword args
        ... table4 = etl.aggregate(table1, key=('foo', 'bar'),
        ...                        aggregation=list, value=('bar', 'baz'))
        >>> table4
        +-----+-----+-------------------------+
        | foo | bar | value                   |
        +=====+=====+=========================+
        | 'a' |   3 | [(3, True)]             |
        +-----+-----+-------------------------+
        | 'a' |   7 | [(7, False)]            |
        +-----+-----+-------------------------+
        | 'b' |   2 | [(2, True), (2, False)] |
        +-----+-----+-------------------------+
        | 'b' |   9 | [(9, False)]            |
        +-----+-----+-------------------------+
        | 'c' |   4 | [(4, True)]             |
        +-----+-----+-------------------------+

        >>> # aggregate multiple fields
        ... from collections import OrderedDict
        >>> import petl as etl
        >>>
        >>> aggregation = OrderedDict()
        >>> aggregation['count'] = len
        >>> aggregation['minbar'] = 'bar', min
        >>> aggregation['maxbar'] = 'bar', max
        >>> aggregation['sumbar'] = 'bar', sum
        >>> # default aggregation function is list
        ... aggregation['listbar'] = 'bar'
        >>> aggregation['listbarbaz'] = ('bar', 'baz'), list
        >>> aggregation['bars'] = 'bar', etl.strjoin(', ')
        >>> table5 = etl.aggregate(table1, 'foo', aggregation)

    If `presorted` is True, it is assumed that the data are already sorted by
    the given key, and the `buffersize`, `tempdir` and `cache` arguments are
    ignored. Otherwise, the data are sorted, see also the discussion of the
    `buffersize`, `tempdir` and `cache` arguments under the
    :func:`petl.transform.sorts.sort` function.

    """

    # dispatch on the shape of the aggregation argument
    if callable(aggregation):
        return SimpleAggregateView(table, key, aggregation=aggregation,
                                   value=value, presorted=presorted,
                                   buffersize=buffersize, tempdir=tempdir,
                                   cache=cache)
    if aggregation is None or isinstance(aggregation, (list, tuple, dict)):
        # ignore value arg
        return MultiAggregateView(table, key, aggregation=aggregation,
                                  presorted=presorted, buffersize=buffersize,
                                  tempdir=tempdir, cache=cache)
    raise ArgumentError('expected aggregation is callable, list, tuple, dict '
                        'or None')


Table.aggregate = aggregate
class SimpleAggregateView(Table):
    """Lazy table view applying a single aggregation function per key group."""

    def __init__(self, table, key, aggregation=list, value=None,
                 presorted=False, buffersize=None, tempdir=None, cache=True):
        # Sort unless the caller guarantees the data are already grouped by key.
        self.table = (table if presorted
                      else sort(table, key, buffersize=buffersize,
                                tempdir=tempdir, cache=cache))
        self.key = key
        self.aggregation = aggregation
        self.value = value

    def __iter__(self):
        return itersimpleaggregate(self.table, self.key,
                                   self.aggregation, self.value)
def itersimpleaggregate(table, key, aggregation, value):
    """Yield a header row, then one aggregated row per key group."""
    # len() cannot consume a generator of rows, so count manually instead.
    if aggregation == len:
        aggregation = lambda g: sum(1 for _ in g)
    # The output header shape follows the key shape; a compound key spreads
    # over several columns, anything else collapses to a single key column.
    if isinstance(key, (list, tuple)):
        yield tuple(key) + ('value',)
        for k, grp in rowgroupby(table, key, value):
            yield tuple(k) + (aggregation(grp),)
    else:
        yield ('key', 'value') if callable(key) else (key, 'value')
        for k, grp in rowgroupby(table, key, value):
            yield k, aggregation(grp)
class MultiAggregateView(Table):
    """Lazy table view applying several named aggregations per key group."""

    def __init__(self, source, key, aggregation=None, presorted=False,
                 buffersize=None, tempdir=None, cache=True):
        self.source = (source if presorted
                       else sort(source, key, buffersize=buffersize,
                                 tempdir=tempdir, cache=cache))
        self.key = key
        # Normalise the spec to an ordered mapping of output field -> spec.
        if aggregation is None:
            self.aggregation = OrderedDict()
        elif isinstance(aggregation, (list, tuple)):
            self.aggregation = OrderedDict()
            for spec in aggregation:
                self.aggregation[spec[0]] = spec[1:]
        elif isinstance(aggregation, dict):
            self.aggregation = aggregation
        else:
            raise ArgumentError(
                'expected aggregation is None, list, tuple or dict, found %r'
                % aggregation
            )

    def __iter__(self):
        return itermultiaggregate(self.source, self.key, self.aggregation)

    def __setitem__(self, key, value):
        # Support incremental construction: view['outfld'] = 'srcfld', func
        self.aggregation[key] = value
def itermultiaggregate(source, key, aggregation):
    """Yield a header row, then one output row per key group, with one
    column per entry of `aggregation` (output field -> spec mapping).

    Each spec is normalised to a (source field(s), function) pair:
      - a bare callable aggregates the whole rows of the group;
      - a field name (or a 1-tuple of one) defaults the function to list;
      - a (field(s), function) 2-tuple is used as-is.
    """
    aggregation = OrderedDict(aggregation.items()) # take a copy
    it = iter(source)
    hdr = next(it)
    # push back header to ensure we iterate only once
    it = itertools.chain([hdr], it)
    # normalise aggregators
    for outfld in aggregation:
        agg = aggregation[outfld]
        if callable(agg):
            aggregation[outfld] = None, agg
        elif isinstance(agg, string_types):
            aggregation[outfld] = agg, list # list is default
        elif len(agg) == 1 and isinstance(agg[0], string_types):
            aggregation[outfld] = agg[0], list # list is default
        elif len(agg) == 1 and callable(agg[0]):
            aggregation[outfld] = None, agg[0] # aggregate whole rows
        elif len(agg) == 2:
            pass # no need to normalise
        else:
            raise ArgumentError('invalid aggregation: %r, %r' % (outfld, agg))
    # determine output header
    if isinstance(key, (list, tuple)):
        outhdr = list(key)
    elif callable(key):
        outhdr = ['key']
    else:
        outhdr = [key]
    for outfld in aggregation:
        outhdr.append(outfld)
    yield tuple(outhdr)
    # generate data
    for k, rows in rowgroupby(it, key):
        rows = list(rows) # may need to iterate over these more than once
        # handle compound key
        if isinstance(key, (list, tuple)):
            outrow = list(k)
        else:
            outrow = [k]
        for outfld in aggregation:
            srcfld, aggfun = aggregation[outfld]
            if srcfld is None:
                # no source field: the function receives the whole rows
                aggval = aggfun(rows)
                outrow.append(aggval)
            elif isinstance(srcfld, (list, tuple)):
                # multiple source fields: feed tuples of their values
                idxs = [hdr.index(f) for f in srcfld]
                valgetter = operator.itemgetter(*idxs)
                vals = (valgetter(row) for row in rows)
                aggval = aggfun(vals)
                outrow.append(aggval)
            else:
                idx = hdr.index(srcfld)
                # try using generator comprehension
                vals = (row[idx] for row in rows)
                aggval = aggfun(vals)
                outrow.append(aggval)
        yield tuple(outrow)
def groupcountdistinctvalues(table, key, value):
    """Group by the `key` field then count the number of distinct values in the
    `value` field."""
    # project to the two fields of interest, dedupe, then count per group
    projected = cut(table, key, value)
    deduped = distinct(projected)
    return aggregate(deduped, key, len)
Table.groupcountdistinctvalues = groupcountdistinctvalues
def groupselectfirst(table, key, presorted=False, buffersize=None,
                     tempdir=None, cache=True):
    """Group by the `key` field then return the first row within each group.
    E.g.::
        >>> import petl as etl
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['A', 1, True],
        ...           ['C', 7, False],
        ...           ['B', 2, False],
        ...           ['C', 9, True]]
        >>> table2 = etl.groupselectfirst(table1, key='foo')
        >>> table2
        +-----+-----+-------+
        | foo | bar | baz   |
        +=====+=====+=======+
        | 'A' | 1   | True  |
        +-----+-----+-------+
        | 'B' | 2   | False |
        +-----+-----+-------+
        | 'C' | 7   | False |
        +-----+-----+-------+
    See also :func:`petl.transform.reductions.groupselectlast`,
    :func:`petl.transform.dedup.distinct`.
    """
    def _first(k, rows):
        # rows is a non-empty iterator over the group's rows
        return next(rows)
    return rowreduce(table, key, reducer=_first, presorted=presorted,
                     buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.groupselectfirst = groupselectfirst
def groupselectlast(table, key, presorted=False, buffersize=None,
                    tempdir=None, cache=True):
    """Group by the `key` field then return the last row within each group.
    E.g.::
        >>> import petl as etl
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['A', 1, True],
        ...           ['C', 7, False],
        ...           ['B', 2, False],
        ...           ['C', 9, True]]
        >>> table2 = etl.groupselectlast(table1, key='foo')
        >>> table2
        +-----+-----+-------+
        | foo | bar | baz   |
        +=====+=====+=======+
        | 'A' | 1   | True  |
        +-----+-----+-------+
        | 'B' | 2   | False |
        +-----+-----+-------+
        | 'C' | 9   | True  |
        +-----+-----+-------+
    See also :func:`petl.transform.reductions.groupselectfirst`,
    :func:`petl.transform.dedup.distinct`.
    .. versionadded:: 1.1.0
    """
    def _last(k, rows):
        # drain the iterator, keeping only the final row
        last = None
        for last in rows:
            pass
        return last
    return rowreduce(table, key, reducer=_last, presorted=presorted,
                     buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.groupselectlast = groupselectlast
def groupselectmin(table, key, value, presorted=False, buffersize=None,
                   tempdir=None, cache=True):
    """Group by the `key` field then return the row with the minimum of the
    `value` field within each group. N.B., will only return one row for each
    group, even if multiple rows have the same (minimum) value."""
    # ascending sort on `value` puts the minimum row first within each group
    ordered = sort(table, value, reverse=False)
    return groupselectfirst(ordered, key, presorted=presorted,
                            buffersize=buffersize, tempdir=tempdir,
                            cache=cache)
Table.groupselectmin = groupselectmin
def groupselectmax(table, key, value, presorted=False, buffersize=None,
                   tempdir=None, cache=True):
    """Group by the `key` field then return the row with the maximum of the
    `value` field within each group. N.B., will only return one row for each
    group, even if multiple rows have the same (maximum) value."""
    # descending sort on `value` puts the maximum row first within each group
    ordered = sort(table, value, reverse=True)
    return groupselectfirst(ordered, key, presorted=presorted,
                            buffersize=buffersize, tempdir=tempdir,
                            cache=cache)
Table.groupselectmax = groupselectmax
def mergeduplicates(table, key, missing=None, presorted=False, buffersize=None,
                    tempdir=None, cache=True):
    """
    Merge duplicate rows under the given key. E.g.::
        >>> import petl as etl
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['A', 1, 2.7],
        ...           ['B', 2, None],
        ...           ['D', 3, 9.4],
        ...           ['B', None, 7.8],
        ...           ['E', None, 42.],
        ...           ['D', 3, 12.3],
        ...           ['A', 2, None]]
        >>> table2 = etl.mergeduplicates(table1, 'foo')
        >>> table2
        +-----+------------------+-----------------------+
        | foo | bar              | baz                   |
        +=====+==================+=======================+
        | 'A' | Conflict({1, 2}) | 2.7                   |
        +-----+------------------+-----------------------+
        | 'B' | 2                | 7.8                   |
        +-----+------------------+-----------------------+
        | 'D' | 3                | Conflict({9.4, 12.3}) |
        +-----+------------------+-----------------------+
        | 'E' | None             | 42.0                  |
        +-----+------------------+-----------------------+
    Missing values are overridden by non-missing values. Conflicting values are
    reported as an instance of the Conflict class (sub-class of frozenset).
    If `presorted` is True, it is assumed that the data are already sorted by
    the given key, and the `buffersize`, `tempdir` and `cache` arguments are
    ignored. Otherwise, the data are sorted, see also the discussion of the
    `buffersize`, `tempdir` and `cache` arguments under the
    :func:`petl.transform.sorts.sort` function.
    See also :func:`petl.transform.dedup.conflicts`.
    """
    view = MergeDuplicatesView(table, key, missing=missing,
                               presorted=presorted, buffersize=buffersize,
                               tempdir=tempdir, cache=cache)
    return view
Table.mergeduplicates = mergeduplicates
class MergeDuplicatesView(Table):
    """Lazy table view collapsing duplicate rows under a key."""

    def __init__(self, table, key, missing=None, presorted=False,
                 buffersize=None, tempdir=None, cache=True):
        # Sort unless the caller guarantees the data are already grouped.
        self.table = (table if presorted
                      else sort(table, key, buffersize=buffersize,
                                tempdir=tempdir, cache=cache))
        self.key = key
        self.missing = missing

    def __iter__(self):
        return itermergeduplicates(self.table, self.key, self.missing)
def itermergeduplicates(table, key, missing):
    """Yield rows with duplicates under `key` merged: missing values are
    overridden by non-missing ones, and genuinely conflicting values are
    wrapped in a Conflict (a frozenset sub-class)."""
    it = iter(table)
    # peek at the header without consuming it from the iterator
    hdr, it = iterpeek(it)
    flds = list(map(text_type, hdr))
    # determine output fields: key column(s) first, then the value columns
    if isinstance(key, string_types):
        outhdr = [key]
        keyflds = {key}
    else:
        outhdr = list(key)
        keyflds = set(key)
    valflds = [f for f in flds if f not in keyflds]
    valfldidxs = [flds.index(f) for f in valflds]
    outhdr.extend(valflds)
    yield tuple(outhdr)
    # do the work
    for k, grp in rowgroupby(it, key):
        grp = list(grp)
        if isinstance(key, string_types):
            outrow = [k]
        else:
            outrow = list(k)
        # distinct non-missing values per value field; the len(row) > i
        # guard tolerates short (ragged) rows
        mergedvals = [set(row[i] for row in grp
                          if len(row) > i and row[i] != missing)
                      for i in valfldidxs]
        # one distinct value -> use it; none -> missing; several -> Conflict
        normedvals = [vals.pop() if len(vals) == 1
                      else missing if len(vals) == 0
                      else Conflict(vals)
                      for vals in mergedvals]
        outrow.extend(normedvals)
        yield tuple(outrow)
def merge(*tables, **kwargs):
    """
    Convenience function to combine multiple tables (via
    :func:`petl.transform.sorts.mergesort`) then combine duplicate rows by
    merging under the given key (via
    :func:`petl.transform.reductions.mergeduplicates`). E.g.::
        >>> import petl as etl
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           [1, 'A', True],
        ...           [2, 'B', None],
        ...           [4, 'C', True]]
        >>> table2 = [['bar', 'baz', 'quux'],
        ...           ['A', True, 42.0],
        ...           ['B', False, 79.3],
        ...           ['C', False, 12.4]]
        >>> table3 = etl.merge(table1, table2, key='bar')
        >>> table3
        +-----+-----+-------------------------+------+
        | bar | foo | baz                     | quux |
        +=====+=====+=========================+======+
        | 'A' | 1   | True                    | 42.0 |
        +-----+-----+-------------------------+------+
        | 'B' | 2   | False                   | 79.3 |
        +-----+-----+-------------------------+------+
        | 'C' | 4   | Conflict({False, True}) | 12.4 |
        +-----+-----+-------------------------+------+
    Keyword arguments are the same as for
    :func:`petl.transform.sorts.mergesort`, except `key` is required.
    """
    assert 'key' in kwargs, 'keyword argument "key" is required'
    # sort/interleave all inputs, then collapse duplicates in a single pass
    combined = mergesort(*tables, **kwargs)
    return mergeduplicates(combined, key=kwargs['key'], presorted=True)
Table.merge = merge
class Conflict(frozenset):
    """Set of conflicting values encountered when merging duplicate rows.

    Sub-class of :class:`frozenset`; the distinct conflicting values are
    the set's elements. The class only exists to give conflicting values a
    distinguishable type (and repr), so no behaviour is added here -- the
    previous ``__new__`` override merely delegated to
    ``frozenset.__new__`` and was redundant.
    """
def fold(table, key, f, value=None, presorted=False, buffersize=None,
         tempdir=None, cache=True):
    """
    Reduce rows recursively via the Python standard :func:`reduce` function.
    E.g.::
        >>> import petl as etl
        >>> table1 = [['id', 'count'],
        ...           [1, 3],
        ...           [1, 5],
        ...           [2, 4],
        ...           [2, 8]]
        >>> import operator
        >>> table2 = etl.fold(table1, 'id', operator.add, 'count',
        ...                   presorted=True)
        >>> table2
        +-----+-------+
        | key | value |
        +=====+=======+
        | 1   | 8     |
        +-----+-------+
        | 2   | 12    |
        +-----+-------+
    See also :func:`petl.transform.reductions.aggregate`,
    :func:`petl.transform.reductions.rowreduce`.
    """
    view = FoldView(table, key, f, value=value, presorted=presorted,
                    buffersize=buffersize, tempdir=tempdir, cache=cache)
    return view
Table.fold = fold
class FoldView(Table):
    """Lazy table view folding each key group with a binary function."""

    def __init__(self, table, key, f, value=None, presorted=False,
                 buffersize=None, tempdir=None, cache=True):
        # Sort unless the caller guarantees the data are already grouped.
        self.table = (table if presorted
                      else sort(table, key, buffersize=buffersize,
                                tempdir=tempdir, cache=cache))
        self.key = key
        self.f = f
        self.value = value

    def __iter__(self):
        return iterfold(self.table, self.key, self.f, self.value)
def iterfold(table, key, f, value):
    """Yield a two-column header, then each group key with its rows
    reduced by the binary function `f`."""
    yield ('key', 'value')
    for groupkey, grp in rowgroupby(table, key, value):
        yield groupkey, reduce(f, grp)
| |
import logging
import sys
import vstruct
from vstruct.primitives import *
g_logger = logging.getLogger("sdb")
class InvalidSDBFileError(Exception):
    """Raised when the input does not look like a valid SDB file
    (e.g. the header magic is not "sdbf")."""
class SDBHeader(vstruct.VStruct):
    """SDB file header: two unknown dwords followed by the 4-byte magic
    "sdbf". Field declaration order is the on-disk parse order."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.unknown0 = v_uint32()
        self.unknown1 = v_uint32()
        self.magic = v_str(size=4)
    def pcb_magic(self):
        # vstruct parse callback, fired right after `magic` is read --
        # fail fast on files that are not SDB.
        if self.magic != "sdbf":
            raise InvalidSDBFileError("invalid magic")
# Tag-type values. The type lives in the high byte of each 16-bit tag and
# selects how that tag's payload is encoded (see getItemClass below).
SDB_TAG_TYPES = v_enum()
SDB_TAG_TYPES.TAG_TYPE_NULL = 0x1000
SDB_TAG_TYPES.TAG_TYPE_WORD = 0x3000
SDB_TAG_TYPES.TAG_TYPE_DWORD = 0x4000
SDB_TAG_TYPES.TAG_TYPE_QWORD = 0x5000
SDB_TAG_TYPES.TAG_TYPE_STRINGREF = 0x6000
SDB_TAG_TYPES.TAG_TYPE_LIST = 0x7000
SDB_TAG_TYPES.TAG_TYPE_STRING = 0x8000
SDB_TAG_TYPES.TAG_TYPE_BINARY = 0x9000
# All known type values (keys of the enum's reverse map).
SDB_KNOWN_TAG_TYPES = SDB_TAG_TYPES._vs_reverseMap.keys()
# Known tags: each is a tag id OR'd with its payload type.
SDB_TAGS = v_enum()
# -- LIST tags (nested containers of further items) --
SDB_TAGS.TAG_DATABASE = (0x1 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_LIBRARY = (0x2 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_INEXCLUDE = (0x3 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_SHIM = (0x4 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_PATCH = (0x5 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_APP = (0x6 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_EXE = (0x7 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_MATCHING_FILE = (0x8 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_SHIM_REF = (0x9 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_PATCH_REF = (0xA | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_LAYER = (0xB | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_FILE = (0xC | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_APPHELP = (0xD | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_LINK = (0xE | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_DATA = (0xF | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_MSI_TRANSFORM = (0x10 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_MSI_TRANSFORM_REF = (0x11 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_MSI_PACKAGE = (0x12 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_FLAG = (0x13 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_MSI_CUSTOM_ACTION = (0x14 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_FLAG_REF = (0x15 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_ACTION = (0x16 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_LOOKUP = (0x17 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_STRINGTABLE = (0x801 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_INDEXES = (0x802 | SDB_TAG_TYPES.TAG_TYPE_LIST)
SDB_TAGS.TAG_INDEX = (0x803 | SDB_TAG_TYPES.TAG_TYPE_LIST)
# -- STRINGREF tags (4-byte offsets into the string table) --
SDB_TAGS.TAG_NAME = (0x1 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_DESCRIPTION = (0x2 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_MODULE = (0x3 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_API = (0x4 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_VENDOR = (0x5 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_APP_NAME = (0x6 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_COMMAND_LINE = (0x8 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_COMPANY_NAME = (0x9 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_DLLFILE = (0xA | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_WILDCARD_NAME = (0xB | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_PRODUCT_NAME = (0x10 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_PRODUCT_VERSION = (0x11 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_FILE_DESCRIPTION = (0x12 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_FILE_VERSION = (0x13 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_ORIGINAL_FILENAME = (0x14 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_INTERNAL_NAME = (0x15 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_LEGAL_COPYRIGHT = (0x16 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_16BIT_DESCRIPTION = (0x17 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_APPHELP_DETAILS = (0x18 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_LINK_URL = (0x19 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_LINK_TEXT = (0x1A | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_APPHELP_TITLE = (0x1B | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_APPHELP_CONTACT = (0x1C | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_SXS_MANIFEST = (0x1D | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_DATA_STRING = (0x1E | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_MSI_TRANSFORM_FILE = (0x1F | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_16BIT_MODULE_NAME = (0x20 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_LAYER_DISPLAYNAME = (0x21 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_COMPILER_VERSION = (0x22 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_ACTION_TYPE = (0x23 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
SDB_TAGS.TAG_EXPORT_NAME = (0x24 | SDB_TAG_TYPES.TAG_TYPE_STRINGREF)
# -- DWORD tags --
SDB_TAGS.TAG_SIZE = (0x1 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_OFFSET = (0x2 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_CHECKSUM = (0x3 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_SHIM_TAGID = (0x4 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PATCH_TAGID = (0x5 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_MODULE_TYPE = (0x6 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VERDATEHI = (0x7 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VERDATELO = (0x8 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VERFILEOS = (0x9 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VERFILETYPE = (0xA | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PE_CHECKSUM = (0xB | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PREVOSMAJORVER = (0xC | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PREVOSMINORVER = (0xD | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PREVOSPLATFORMID = (0xE | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PREVOSBUILDNO = (0xF | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PROBLEMSEVERITY = (0x10 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_LANGID = (0x11 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VER_LANGUAGE = (0x12 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_ENGINE = (0x14 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_HTMLHELPID = (0x15 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_INDEX_FLAGS = (0x16 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_FLAGS = (0x17 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_DATA_VALUETYPE = (0x18 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_DATA_DWORD = (0x19 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_LAYER_TAGID = (0x1A | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_MSI_TRANSFORM_TAGID = (0x1B | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_LINKER_VERSION = (0x1C | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_LINK_DATE = (0x1D | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_UPTO_LINK_DATE = (0x1E | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_OS_SERVICE_PACK = (0x1F | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_FLAG_TAGID = (0x20 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_RUNTIME_PLATFORM = (0x21 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_OS_SKU = (0x22 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_OS_PLATFORM = (0x23 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_APP_NAME_RC_ID = (0x24 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VENDOR_NAME_RC_ID = (0x25 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_SUMMARY_MSG_RC_ID = (0x26 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_VISTA_SKU = (0x27 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_DESCRIPTION_RC_ID = (0x28 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_PARAMETER1_RC_ID = (0x29 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
SDB_TAGS.TAG_TAGID = (0x801 | SDB_TAG_TYPES.TAG_TYPE_DWORD)
# -- inline STRING tag --
SDB_TAGS.TAG_STRINGTABLE_ITEM = (0x801 | SDB_TAG_TYPES.TAG_TYPE_STRING)
# -- NULL tags (presence-only flags, no payload) --
SDB_TAGS.TAG_INCLUDE = (0x1 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_GENERAL = (0x2 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_MATCH_LOGIC_NOT = (0x3 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_APPLY_ALL_SHIMS = (0x4 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_USE_SERVICE_PACK_FILES = (0x5 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_MITIGATION_OS = (0x6 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_BLOCK_UPGRADE = (0x7 | SDB_TAG_TYPES.TAG_TYPE_NULL)
SDB_TAGS.TAG_INCLUDEEXCLUDEDLL = (0x8 | SDB_TAG_TYPES.TAG_TYPE_NULL)
# -- QWORD tags --
SDB_TAGS.TAG_TIME = (0x1 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_BIN_FILE_VERSION = (0x2 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_BIN_PRODUCT_VERSION = (0x3 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_MODTIME = (0x4 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_MASK_KERNEL = (0x5 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_UPTO_BIN_PRODUCT_VERSION = (0x6 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_DATA_QWORD = (0x7 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_MASK_USER = (0x8 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAGS_NTVDM1 = (0x9 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAGS_NTVDM2 = (0xA | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAGS_NTVDM3 = (0xB | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_MASK_SHELL = (0xC | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_UPTO_BIN_FILE_VERSION = (0xD | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_MASK_FUSION = (0xE | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_PROCESSPARAM = (0xF | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_LUA = (0x10 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
SDB_TAGS.TAG_FLAG_INSTALL = (0x11 | SDB_TAG_TYPES.TAG_TYPE_QWORD)
# -- BINARY tags --
SDB_TAGS.TAG_PATCH_BITS = (0x2 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_FILE_BITS = (0x3 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_EXE_ID = (0x4 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_DATA_BITS = (0x5 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_MSI_PACKAGE_ID = (0x6 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_DATABASE_ID = (0x7 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_INDEX_BITS = (0x801 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_APP_ID = (0x11 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
SDB_TAGS.TAG_FIX_ID = (0x10 | SDB_TAG_TYPES.TAG_TYPE_BINARY)
# -- WORD tags --
SDB_TAGS.TAG_MATCH_MODE = (0x1 | SDB_TAG_TYPES.TAG_TYPE_WORD)
SDB_TAGS.TAG_TAG = (0x801 | SDB_TAG_TYPES.TAG_TYPE_WORD)
SDB_TAGS.TAG_INDEX_TAG = (0x802 | SDB_TAG_TYPES.TAG_TYPE_WORD)
SDB_TAGS.TAG_INDEX_KEY = (0x803 | SDB_TAG_TYPES.TAG_TYPE_WORD)
# Low bytes of all known tags (tag id without the type nibble); used by
# SDBItem.vsParse to distinguish real items from junk bytes.
SDB_KNOWN_TAGS = set([c & 0xFF for c in SDB_TAGS._vs_reverseMap.keys()])
class SDBItemHeader(vstruct.VStruct):
    """Two-byte item header: tag-id byte followed by value-type byte."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tagtype = v_uint8()
        self.valuetype = v_uint8()
    @property
    def tag(self):
        # Recombine the two bytes into the 16-bit tag value (cf. SDB_TAGS).
        return (self.valuetype << 8) | self.tagtype
    def __str__(self):
        return "SDBItemHeader(tagtype: 0x%x, valuetype: 0x%x)" % (self.tagtype, self.valuetype)
class SDBItemArray(vstruct.VArray):
    """Variable-length sequence of SDBItem structures packed into a byte
    span of a known total size."""
    def __init__(self, size=0):
        """
        size is number of bytes this array takes up
        """
        vstruct.VArray.__init__(self)
        self.size = size
    def vsSetLength(self, size):
        # Called by the owning structure once the byte length is known
        # (see SDBValueList.pcb_size).
        self.size = size
    def vsParse(self, bytez, offset=0):
        # we have to override vsParse since we have
        # variably sized items, and have to keep
        # parsing them until we overrun the prescribed
        # buffer
        start_offset = offset
        while offset - start_offset < self.size:
            i = SDBItem()
            offset = i.vsParse(bytez, offset=offset)
            self.vsAddElement(i)
        return offset
    def __len__(self):
        # NOTE(review): the +0x4 presumably accounts for the owning list's
        # 4-byte size field -- confirm against SDBValueList.__len__.
        return self.size + 0x4
class SDBValueList(vstruct.VStruct):
    """TAG_TYPE_LIST payload: a 4-byte byte count followed by that many
    bytes of nested child items."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.size = v_uint32()
        self.children = SDBItemArray()
    def pcb_size(self):
        # once `size` is parsed, bound the child array's parse window
        self["children"].vsSetLength(self.size)
    def __len__(self):
        # payload padded to 2-byte alignment, plus the 4-byte size field
        return self.size + (self.size % 2) + 0x4
class SDBValueString(vstruct.VStruct):
    """TAG_TYPE_STRING payload: a 4-byte byte count followed by a
    wide-character string of that many bytes."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.size = v_uint32()
        self.value = v_wstr(size=0)
    def pcb_size(self):
        # once `size` is parsed, size the string field accordingly
        self["value"].vsSetLength(self.size)
    def __len__(self):
        # string bytes plus the 4-byte size field (no alignment padding)
        return self.size + 0x4
class SDBValueBinary(vstruct.VStruct):
    """TAG_TYPE_BINARY payload: a 4-byte byte count followed by raw bytes."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.size = v_uint32()
        self.value = v_bytes(size=0)
    def pcb_size(self):
        # once `size` is parsed, size the raw-bytes field accordingly
        self["value"].vsSetLength(self.size)
    def __len__(self):
        # payload padded to 2-byte alignment, plus the 4-byte size field
        return self.size + (self.size % 2) + 0x4
class SDBValueNull(vstruct.VStruct):
    """TAG_TYPE_NULL payload: no bytes at all (presence-only tag)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
class SDBValueWord(vstruct.VStruct):
    """TAG_TYPE_WORD payload: a single 16-bit value."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.value = v_uint16()
class SDBValueDword(vstruct.VStruct):
    """TAG_TYPE_DWORD payload: a single 32-bit value."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.value = v_uint32()
class SDBValueQword(vstruct.VStruct):
    """TAG_TYPE_QWORD payload: a single 64-bit value."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.value = v_uint64()
class SDBValueStringRef(vstruct.VStruct):
    """TAG_TYPE_STRINGREF payload: a 32-bit reference into the string table."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.reference = v_uint32()
def getItemClass(header):
    """Return the payload class matching an item header's value type.

    Raises RuntimeError for a value type not listed in SDB_TAG_TYPES.
    """
    # Reconstruct the 16-bit type value from the header's high nibble.
    valtype = (header.valuetype & 0xF0) << 8
    dispatch = {
        SDB_TAG_TYPES.TAG_TYPE_LIST: SDBValueList,
        SDB_TAG_TYPES.TAG_TYPE_STRINGREF: SDBValueStringRef,
        SDB_TAG_TYPES.TAG_TYPE_DWORD: SDBValueDword,
        SDB_TAG_TYPES.TAG_TYPE_STRING: SDBValueString,
        SDB_TAG_TYPES.TAG_TYPE_NULL: SDBValueNull,
        SDB_TAG_TYPES.TAG_TYPE_QWORD: SDBValueQword,
        SDB_TAG_TYPES.TAG_TYPE_BINARY: SDBValueBinary,
        SDB_TAG_TYPES.TAG_TYPE_WORD: SDBValueWord,
    }
    try:
        return dispatch[valtype]
    except KeyError:
        raise RuntimeError("Unexpected itemtype: 0x%x" % header.valuetype)
class SDBItem(vstruct.VStruct):
    """A single tagged item: a 2-byte header followed by a typed payload.

    Parsing is implemented manually in vsParse so that junk bytes between
    items can be detected (by peeking at the tag/type bytes) and skipped
    one byte at a time.
    """
    def __init__(self):
        vstruct.VStruct.__init__(self)
        # this is what we *should* have
        # however, empirically, it seems there are occasionally
        # junk bytes in the file. we can only detect this by
        # peeking at two bytes (tag type, value type) and checking
        # our list of known tag combos, while possibly not consuming
        # those bytes.
        # so, we stuff the parsing logic in vsParse
        #
        # self.header = SDBItemHeader()
        # self.value = SDBValueNULL()
    def vsParse(self, bytez, offset=0):
        # peek at the tag-id and value-type bytes without consuming them
        b1 = bytez[offset]
        b2 = bytez[offset+1]
        if b1 not in SDB_KNOWN_TAGS and (b2 & 0xF0) << 8 not in SDB_KNOWN_TAG_TYPES:
            # unrecognised combination: treat one byte as junk and move on
            g_logger.warning("ignoring byte [offset=%s]: 0x%02x 0x%02x",
                             hex(offset), b1, b2)
            self.vsAddField("unknown", v_uint8())
        else:
            # placeholder payload; pcb_header swaps in the concrete class
            self.vsAddField("header", SDBItemHeader())
            self.vsAddField("value", SDBValueNull())
        # copied from vstruct
        for fname in self._vs_fields:
            fobj = self._vs_values.get(fname)
            offset = fobj.vsParse(bytez, offset=offset)
            self._vsFireCallbacks(fname)
        return offset
    def pcb_header(self):
        # fired after the header parses: pick the payload class by type
        c = getItemClass(self.header)
        self.vsSetField("value", c())
    def __str__(self):
        return "SDBItem(tag: 0x%x, type: %s)" % (self.header.tag,
                                                 self["value"].__class__.__name__)
class SDB(vstruct.VStruct):
    """Top-level SDB file layout: the header, then the indexes, database
    and string-table root items, in on-disk order."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.header = SDBHeader()
        self.indexes_root = SDBItem()
        self.database_root = SDBItem()
        self.strtab_root = SDBItem()
| |
#List of functions :
# colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1)
# text_normalizer(s)
# compute_bag_of_words(text)
# print_most_frequent(bow, vocab, gender, n=20)
# model_test(model,X_train,y_train,X_test,y_test, full_voc, displayResults = True, displayColors = False)
# predictors(df, feature, model, modelname, displayResults = True, displayColors = False)
# test_external_data(text, full_voc, model)
# combine_features(model_text, model_pic, model_color, data, voc_text, voc_pic, voc_color, acc_text, acc_pic, acc_color)
import pandas as pd
import numpy as np
from IPython.display import display
import re
#graph
from bokeh.plotting import output_notebook, figure, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimage
from matplotlib import pyplot as plt
# 3D visualization
import pylab
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from IPython.display import display
from sklearn import linear_model, metrics
from sklearn import naive_bayes
from sklearn import neural_network
#Definition of function for data exploration for the colors
#feature : 'sidebar_color', 'link_color'
# The colorGraphs function plots the most used colors by gender in 3 bar graphs
def colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1):
    """Plot the most used colors for `feature`, one bar chart per gender.

    Parameters:
        df: DataFrame with 'gender', 'gender:confidence' and `feature`
            columns, where `feature` holds hex color strings
            (e.g. 'sidebar_color', 'link_color').
        genderConfidence: minimum 'gender:confidence' required to keep a row.
        nbToRemove: number of most-common colors dropped from each chart
            (the defaults dominate and drown out the rest).

    The previous implementation repeated the same plotting code three
    times (female/male/brand); it is factored into _plotMostCommonColors.
    """
    dfCol = df.loc[:,['gender:confidence', 'gender', feature]]
    # Remove weird values (scientific-notation artifacts such as '9E+17')
    # and rows below the confidence threshold.
    dfColFiltered = dfCol[(dfCol['gender:confidence'] >= genderConfidence)&((dfCol[feature]).str.contains('E\+') != True)]
    # Same chart for each gender, in the original order.
    for gender, label, plural in (('female', 'Female', 'Females'),
                                  ('male', 'Male', 'Males'),
                                  ('brand', 'Brand', 'Brands')):
        values = dfColFiltered[dfColFiltered['gender'] == gender][feature]
        _plotMostCommonColors(list(values.values.flatten()), label, plural,
                              feature, nbToRemove)

def _plotMostCommonColors(colorList, label, plural, feature, nbToRemove):
    """Horizontal bar chart of the 30 most common colors in `colorList`,
    with the first `nbToRemove` (over-represented defaults) dropped."""
    nCommon = 30
    common = Counter(colorList).most_common(nCommon)
    del common[0:nbToRemove]
    colorNames = [x[0] for x in common]
    colorCounts = [x[1] for x in common]
    # Pad short hex codes out to a full '#rrggbb' so matplotlib accepts them.
    colorsHex = [('#' + x + '000000')[0:7] for x in colorNames]
    yPos = list(range(len(colorNames)))
    fig, ax = plt.subplots()
    bar_width = 0.5
    plt.barh(yPos, colorCounts, bar_width, label = label, color = colorsHex)
    plt.yticks(yPos, colorsHex)
    plt.xlabel('Color')
    plt.ylabel(feature)
    plt.title('Most used colors by ' + plural + ' for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
    plt.tight_layout()
    plt.show()
def text_normalizer(s):
    """Normalize text: coerce to str, lowercase, and strip punctuation
    that touches whitespace, collapsing runs of spaces.

    Coercion with str() means non-string values (NaN, None, numbers) are
    handled too.
    """
    text = str(s).lower()
    # a non-word char followed by whitespace (e.g. "word, ") -> one space
    text = re.sub('\W\s', ' ', text)
    # whitespace followed by a non-word char (e.g. " (word") -> one space
    text = re.sub('\s\W', ' ', text)
    # collapse any remaining runs of whitespace into single spaces
    text = re.sub('\s+', ' ', text)
    return text
# The compute_bag_of_words function returns a table with the # of occurence of a word in the text
# and a vocabulary of all the different words
def compute_bag_of_words(text):
vectorizer = CountVectorizer()
vectors = vectorizer.fit_transform(text)
vocabulary = vectorizer.get_feature_names()
return vectors, vocabulary
#Exploration of which words are most used by which gender
def print_most_frequent(bow, vocab, gender, n=20, feature = 'text'):
switcher = {
'all_text' : "text",
'pic_text' : "profile picture features",
}
featureText = switcher.get(feature, 'text')
color_idx = ['brand', 'female', 'male']
color_table = ['#4a913c', '#f5abb5', '#0084b4']
label_table = ['Most used words by brands for ' + featureText, 'Most used words by females for ' + featureText, 'Most used words by males for ' + featureText]
idx = np.argsort(bow.sum(axis=0))
idx_most_used = np.zeros(n)
occurence_number = np.zeros(n)
words_most_used = ["" for x in range(n)]
for i in range(0,n):
idx_most_used[i] = idx[0, -1-i]
words_most_used[i] = vocab[np.int64(idx_most_used[i])]
occurence_number[i] = (bow.sum(axis=0))[0,idx_most_used[i]]
#print(vocab[j])
fig, ax = plt.subplots()
bar_width = 0.5
word_number = np.arange(n)+1
rects1 = plt.barh(word_number,occurence_number, bar_width, label = label_table[color_idx.index(gender)], color = color_table[color_idx.index(gender)])
plt.yticks(word_number,words_most_used)
plt.ylabel('Most used words')
plt.xlabel('Number of occurences')
plt.title(label_table[color_idx.index(gender)])
plt.tight_layout()
plt.show()
# Definition of functions for data analysis and classification
# The model_test function is used to extract the best word predictors and
# anti-predictors for each gender. The model used must have a coef_ attribute
# representing the weight of each word
def _plot_weight_ranking(weights, order_key, full_voc, header, bar_label,
                         fallback_color, xlabel, title, display_colors):
    """Print `header` and bar-plot the 20 vocabulary entries of `weights`
    ranked by `order_key` (the last 20 positions of its ascending argsort).

    weights        -- 1-D coefficient row for one class
    order_key      -- array whose argsort selects the entries (pass `weights`
                      for predictors, `-weights` for anti-predictors)
    full_voc       -- vocabulary list mapping column index -> word
    display_colors -- if True, words are hex color fragments; pad/truncate
                      them to '#rrggbb' and use them as bar colors
    """
    print(header)
    order = np.argsort(order_key)
    top_weights = np.zeros(20)
    top_labels = ["" for _ in range(20)]
    for i in range(20):
        j = order[-1 - i]
        top_weights[i] = weights[j]
        top_labels[i] = full_voc[j]
    fig, ax = plt.subplots()
    bar_width = 0.5
    pred_number = np.arange(20) + 1
    if display_colors:
        # pad short fragments then keep exactly '#' + 6 hex digits
        hex_colors = ['#' + x + '000000' for x in top_labels]
        hex_colors = [x[0:7] for x in hex_colors]
        plt.barh(pred_number, top_weights, bar_width, label=bar_label, color=hex_colors)
        plt.yticks(pred_number, hex_colors)
    else:
        plt.barh(pred_number, top_weights, bar_width, label=bar_label, color=fallback_color)
        plt.yticks(pred_number, top_labels)
    plt.xlabel(xlabel)
    plt.ylabel('Weight')
    plt.title(title)
    plt.tight_layout()
    plt.show()

def model_test(model, X_train, y_train, X_test, y_test, full_voc,
               displayResults=True, displayColors=False, featureIntent='text'):
    """Fit `model`, report MSE and accuracy, and (optionally) plot the best
    20 predictors and anti-predictors for each gender class.

    The model must expose `coef_` (rows: 0=brand, 1=female, 2=male) for the
    predictor plots to be drawn. Returns (fitted model, accuracy).
    """
    switcher = {
        'all_text' : "text",
        'pic_text' : "profile picture features",
        'link_color' : "theme color",
    }
    featureText = switcher.get(featureIntent, '')
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # compute MSE
    mse = metrics.mean_squared_error(y_test, y_pred)
    print('mse: {:.4f}'.format(mse))
    # Prints the accuracy of the gender prediction
    acc = model.score(X_test, y_test)
    print('score: ', acc)
    # 'and' instead of '&': short-circuits and works for any truthy value
    if displayResults and hasattr(model, 'coef_'):
        # W contains the weight for each predictor, for each gender
        W = model.coef_
        # Male predictors / anti-predictors (row 2, color '#0084b4')
        _plot_weight_ranking(W[2, :], W[2, :], full_voc,
                             'Best 20 male predictors:',
                             'Male Predictors', '#0084b4', 'Predictor',
                             'Best 20 male predictors for ' + featureText,
                             displayColors)
        _plot_weight_ranking(W[2, :], -(W[2, :]), full_voc,
                             'Best 20 male anti-predictors for ' + featureText + ':',
                             'Male Anti-Predictors', '#0084b4', 'Anti-Predictor',
                             'Best 20 male anti-predictors for ' + featureText,
                             displayColors)
        # Female predictors / anti-predictors (row 1, color '#f5abb5')
        _plot_weight_ranking(W[1, :], W[1, :], full_voc,
                             'Best 20 female predictors for ' + featureText + ':',
                             'Female Predictors', '#f5abb5', 'Predictor',
                             'Best 20 Female predictors for ' + featureText,
                             displayColors)
        _plot_weight_ranking(W[1, :], -(W[1, :]), full_voc,
                             'Best 20 Female anti-predictors for ' + featureText + ':',
                             'Female Anti-Predictors', '#f5abb5', 'Anti-Predictor',
                             'Best 20 Female anti-predictors for ' + featureText,
                             displayColors)
        # Brand predictors / anti-predictors (row 0, color '#4a913c')
        _plot_weight_ranking(W[0, :], W[0, :], full_voc,
                             'Best 20 brand predictors for ' + featureText + ':',
                             'Brand Predictors', '#4a913c', 'Predictor',
                             'Best 20 Brand predictors for ' + featureText,
                             displayColors)
        _plot_weight_ranking(W[0, :], -(W[0, :]), full_voc,
                             'Best 20 Brand anti-predictors for ' + featureText + ':',
                             'Brand Anti-Predictors', '#4a913c', 'Anti-Predictor',
                             'Best 20 Brand anti-predictors for ' + featureText,
                             displayColors)
    return model, acc
def predictors(df, feature, model, modelname, displayResults=True, displayColors=False):
    """Run one gender-prediction experiment on column `feature` of `df`.

    Builds a bag-of-words over df[feature], splits 1/5 of the rows off as a
    random test set, fits/evaluates `model` via model_test, and returns
    (fitted model, vocabulary, accuracy).
    """
    print('Testing', modelname, 'model for gender prediction using', feature)
    full_bow, full_voc = compute_bag_of_words(df[feature])
    X = full_bow
    y = LabelEncoder().fit_transform(df['gender'])
    # Create Training and testing sets (20% held out for testing).
    test_size = X.shape[0] // 5
    print('Split: {} testing and {} training samples'.format(test_size, y.size - test_size))
    shuffled = np.random.permutation(y.size)
    test_rows, train_rows = shuffled[:test_size], shuffled[test_size:]
    X_test, y_test = X[test_rows], y[test_rows]
    X_train, y_train = X[train_rows], y[train_rows]
    print('model: ', modelname)
    model, acc = model_test(model, X_train, y_train, X_test, y_test, full_voc,
                            displayResults=displayResults,
                            displayColors=displayColors,
                            featureIntent=feature)
    return model, full_voc, acc
def test_external_data(text, full_voc, model, feature, display=True):
    """Classify `text` with a model trained on vocabulary `full_voc`.

    Returns (probabilities, predicted_class); probabilities is [] when the
    model does not support predict_proba.
    """
    gender_list = ['brand', 'female', 'male']
    new_bow = CountVectorizer(vocabulary=full_voc).fit_transform(text).toarray()
    predicted_class = model.predict(new_bow)
    # guard clause: models without probability estimates report class only
    if not hasattr(model, 'predict_proba'):
        if display:
            print('The predicted gender by using the', feature, 'is', gender_list[predicted_class[0]])
        return [], predicted_class
    proba = model.predict_proba(new_bow)
    if display:
        print('The predicted gender by using the', feature, 'is', gender_list[predicted_class[0]],'with probability',np.sort(proba[0])[2])
    return proba, predicted_class
def combine_features(model_text, model_pic, model_color, data, voc_text, voc_pic, voc_color, acc_text, acc_pic, acc_color, display=True):
    """Combine text, profile-picture and link-color classifiers per user.

    When all three models expose probabilities, the prediction is the class
    with the highest accuracy-weighted average probability; otherwise a
    majority vote is taken (profile picture breaks three-way ties). Prints
    the overall success rate and returns a 3x3 confusion matrix
    resultList[real_gender][predicted_gender].
    """
    gender_list = ['brand', 'female', 'male']
    success = 0
    resultList = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    for i in range(0, len(data)):
        proba_text, class_text = test_external_data(data['all_text'][i:i+1], voc_text, model_text, 'text', display=display)
        proba_pic, class_pic = test_external_data(data['pic_text'][i:i+1], voc_pic, model_pic, 'profile picture', display=display)
        proba_color, class_color = test_external_data(data['link_color'][i:i+1], voc_color, model_color, 'link color', display=display)
        # len() checks instead of '!= []': comparing a numpy array to a list
        # is deprecated and raises on recent numpy versions
        if len(proba_text) > 0 and len(proba_pic) > 0 and len(proba_color) > 0:
            weighted_proba = (proba_text*acc_text + proba_pic*acc_pic + proba_color*acc_color)/(acc_text + acc_pic + acc_color)
            proba = weighted_proba[0]
            pred_class = (np.argsort(proba))[2]
            if display:
                print('Overall, the predicted gender of user',data.iloc[i]['user_name'], 'is' ,gender_list[pred_class], 'with a confidence of',proba[pred_class])
        else:
            # majority vote over the three hard predictions
            result = np.zeros(3)
            result[class_text] = result[class_text] + 1
            result[class_pic] = result[class_pic] + 1
            result[class_color] = result[class_color] + 1
            if np.max(result) == 1:
                # three-way tie: trust the profile-picture classifier
                pred_class = class_pic
            else:
                pred_class = (np.argsort(result))[2]
            if display:
                print('Overall, the predicted gender of user',data.iloc[i]['user_name'], 'is' ,gender_list[pred_class])
        if gender_list[pred_class] == data.iloc[i]['gender']:
            success = success + 1
        originalGender = gender_list.index(data.iloc[i]['gender'])
        resultList[originalGender][pred_class] = resultList[originalGender][pred_class] + 1
    success_rate = success/len(data)
    print('The average success rate for this test data is', success_rate)
    return resultList
def display_resultList(resultList):
    """Draw a stacked horizontal bar chart of predicted vs. real gender.

    resultList[real][predicted] holds user counts; one bar per real gender,
    stacked by predicted gender (brand / female / male).
    """
    fig, ax = plt.subplots()
    ax.set_ylim([-0.5, 3.5])
    bar_width = 0.5
    rows = range(0, 3)
    brand_counts = [row[0] for row in resultList]
    female_counts = [row[1] for row in resultList]
    male_counts = [row[2] for row in resultList]
    plt.barh(rows, brand_counts, bar_width, color='#4a913c', label='brand')
    plt.barh(rows, female_counts, bar_width, color='#f5abb5',
             left=brand_counts, label='female')
    plt.barh(rows, male_counts, bar_width, color='#0084b4',
             left=[b + f for b, f in zip(brand_counts, female_counts)], label='male')
    plt.yticks(rows, ['brand', 'female', 'male'])
    plt.xlabel('Number of users')
    plt.ylabel('Gender')
    plt.title('Predicted gender vs. Real gender')
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.show()
def combine_features_without_pic(model_text, model_color, data, voc_text, voc_color, acc_text, acc_color, display=True):
    """Combine only the text and link-color classifiers per user.

    Works like combine_features but without the profile-picture model: with
    probabilities available, classes are ranked by accuracy-weighted average
    probability; otherwise a two-model vote is taken (the text classifier
    wins disagreements). Prints the success rate and returns a 3x3 confusion
    matrix resultList[real_gender][predicted_gender].
    """
    gender_list = ['brand', 'female', 'male']
    success = 0
    resultList = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    for i in range(0, len(data)):
        proba_text, class_text = test_external_data(data['all_text'][i:i+1], voc_text, model_text, 'text', display=display)
        proba_color, class_color = test_external_data(data['link_color'][i:i+1], voc_color, model_color, 'link color', display=display)
        # len() checks instead of '!= []': comparing a numpy array to a list
        # is deprecated and raises on recent numpy versions
        if len(proba_text) > 0 and len(proba_color) > 0:
            weighted_proba = (proba_text*acc_text + proba_color*acc_color)/(acc_text + acc_color)
            proba = weighted_proba[0]
            pred_class = (np.argsort(proba))[2]
            if display:
                print('Overall, the predicted gender of user',data.iloc[i]['user_name'], 'is' ,gender_list[pred_class], 'with a confidence of',proba[pred_class])
        else:
            # BUG FIX: was np.zeros(2), which raised IndexError whenever a
            # classifier predicted 'male' (class index 2); there are 3 classes
            result = np.zeros(3)
            result[class_text] = result[class_text] + 1
            result[class_color] = result[class_color] + 1
            if np.max(result) == 1:
                # the two classifiers disagree: trust the text classifier
                pred_class = class_text
            else:
                # both agree: pick the class holding the two votes
                pred_class = (np.argsort(result))[2]
            if display:
                print('Overall, the predicted gender of user',data.iloc[i]['user_name'], 'is' ,gender_list[pred_class])
        if gender_list[pred_class] == data.iloc[i]['gender']:
            success = success + 1
        originalGender = gender_list.index(data.iloc[i]['gender'])
        resultList[originalGender][pred_class] = resultList[originalGender][pred_class] + 1
    success_rate = success/len(data)
    print('The average success rate for this test data is', success_rate)
    return resultList
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add optional image fields to
    CultivosFinca and AnimalesFinca in the 'promotores' app.

    Generated migration — the `models` dict below is South's frozen ORM
    snapshot and should not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: add the two nullable ImageField columns."""
        # Adding field 'CultivosFinca.image_cultivos'
        db.add_column(u'promotores_cultivosfinca', 'image_cultivos',
                      self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
                      keep_default=False)
        # Adding field 'AnimalesFinca.image_animales'
        db.add_column(u'promotores_animalesfinca', 'image_animales',
                      self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the two image columns added by forwards()."""
        # Deleting field 'CultivosFinca.image_cultivos'
        db.delete_column(u'promotores_cultivosfinca', 'image_cultivos')
        # Deleting field 'AnimalesFinca.image_animales'
        db.delete_column(u'promotores_animalesfinca', 'image_animales')

    # Frozen ORM state at the time this migration was generated.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'lugar.departamento': {
            'Meta': {'object_name': 'Departamento'},
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.municipio': {
            'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.pais': {
            'Meta': {'object_name': 'Pais'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.animalesfinca': {
            'Meta': {'object_name': 'AnimalesFinca'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_animales': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.cultivosfinca': {
            'Meta': {'object_name': 'CultivosFinca'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_cultivos': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.diascampoprueba': {
            'Meta': {'object_name': 'DiasCampoPrueba'},
            'comentario': ('django.db.models.fields.TextField', [], {}),
            'fechas': ('django.db.models.fields.DateField', [], {}),
            'hombres': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mujeres': ('django.db.models.fields.IntegerField', [], {}),
            'prueba': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.PracticasProductivas']"})
        },
        u'promotores.escalapruebas': {
            'Meta': {'object_name': 'EscalaPruebas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.escuelacampo': {
            'Meta': {'object_name': 'EscuelaCampo'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.fotospromotor': {
            'Meta': {'object_name': 'FotosPromotor'},
            'foto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'promotor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.Promotor']"})
        },
        u'promotores.fotosprueba': {
            'Meta': {'object_name': 'FotosPrueba'},
            'foto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'practicas': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.PracticasProductivas']"})
        },
        u'promotores.mercadoacceso': {
            'Meta': {'object_name': 'MercadoAcceso'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.organizacioncampesina': {
            'Meta': {'object_name': 'OrganizacionCampesina'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.organizacioncivil': {
            'Meta': {'object_name': 'OrganizacionCivil'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.practicasproductivas': {
            'Meta': {'object_name': 'PracticasProductivas'},
            'agroecologico': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'anio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'aprobar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'calidad_producto': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'costo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'escala_prueba': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.EscalaPruebas']"}),
            'fecha_finalizacion': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'fecha_inicio': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'fecha_prueba': ('django.db.models.fields.DateField', [], {}),
            'historia': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre_prueba': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plagas': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'problema': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'prod_rendimiento': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'promotor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.Promotor']"}),
            'resultados': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'rubro_prueba': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.RubroPruebas']"}),
            'salud_planta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'tema_prueba': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.TemasPruebas']"}),
            'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'vida_suelo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        u'promotores.productoprocesado': {
            'Meta': {'object_name': 'ProductoProcesado'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.promotor': {
            'Meta': {'object_name': 'Promotor'},
            'activo': ('django.db.models.fields.IntegerField', [], {}),
            'animales_finca': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['promotores.AnimalesFinca']", 'symmetrical': 'False'}),
            'anuales': ('django.db.models.fields.FloatField', [], {}),
            'bosque': ('django.db.models.fields.FloatField', [], {}),
            'contacto': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'cultivos_finca': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['promotores.CultivosFinca']", 'symmetrical': 'False'}),
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'edad': ('django.db.models.fields.IntegerField', [], {}),
            'educacion': ('django.db.models.fields.IntegerField', [], {}),
            'escuela': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.EscuelaCampo']"}),
            'finca': ('django.db.models.fields.FloatField', [], {}),
            'forestal': ('django.db.models.fields.FloatField', [], {}),
            'frutales': ('django.db.models.fields.FloatField', [], {}),
            'gps': ('geoposition.fields.GeopositionField', [], {'max_length': '42', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identificador': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'lena': ('django.db.models.fields.FloatField', [], {}),
            'mercado_accede': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['promotores.MercadoAcceso']", 'symmetrical': 'False'}),
            'meses_lluvia': ('django.db.models.fields.IntegerField', [], {}),
            'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'organizacion_campesina': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.OrganizacionCampesina']"}),
            'organizacion_civil': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.OrganizacionCivil']"}),
            'patio': ('django.db.models.fields.FloatField', [], {}),
            'perennes': ('django.db.models.fields.FloatField', [], {}),
            'potrero': ('django.db.models.fields.FloatField', [], {}),
            'potrero_abierto': ('django.db.models.fields.FloatField', [], {}),
            'producto_procesado': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['promotores.ProductoProcesado']", 'symmetrical': 'False'}),
            'riego': ('django.db.models.fields.IntegerField', [], {}),
            'sexo': ('django.db.models.fields.IntegerField', [], {}),
            'tacotales': ('django.db.models.fields.FloatField', [], {}),
            'tipo_clima': ('django.db.models.fields.IntegerField', [], {}),
            'tipo_suelo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['promotores.TipoSuelo']"}),
            'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'zona': ('django.db.models.fields.IntegerField', [], {})
        },
        u'promotores.rubropruebas': {
            'Meta': {'object_name': 'RubroPruebas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.temaspruebas': {
            'Meta': {'object_name': 'TemasPruebas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'promotores.tiposuelo': {
            'Meta': {'object_name': 'TipoSuelo'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['promotores']
| |
import scapy.all as sca
import scapy_ex
import time
import thread
import threading
import signal
import sys
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
from matplotlib import rcParams
import struct
import channel_hop
from subprocess import Popen, PIPE
import os
import re
# Sink for subprocess stderr output we want to discard.
DN = open(os.devnull, 'w')

# Console colors (ANSI escape sequences)
W = '\033[0m'   # white (normal)
R = '\033[31m'  # red
G = '\033[32m'  # green
O = '\033[33m'  # orange
B = '\033[34m'  # blue
P = '\033[35m'  # purple
C = '\033[36m'  # cyan
GR = '\033[37m' # gray
T = '\033[93m'  # tan

# needed to gracefully exit all threads
stopEvent = threading.Event()

def signal_handler(signal, frame):
    """SIGINT handler: signal all worker threads to stop, then exit."""
    global stopEvent
    print("Ctrl+C captured, exiting program!")
    stopEvent.set()
    # give the sniffer thread a moment to notice the event before exiting
    time.sleep(1.0)
    sys.exit()

signal.signal(signal.SIGINT, signal_handler)
class ScapyRssi:
    """Sniffs 802.11 frames on a monitor-mode interface in a background
    thread, recording RSSI samples per transmitter address; plot() writes
    per-address RSSI histograms to 'hists.pdf'.
    """

    def __init__(self, interface):
        # Radiotap field specification
        # struct format codes per Radiotap field, in presence-bit order
        self.radiotap_formats = {"TSFT":"Q", "Flags":"B", "Rate":"B",
            "Channel":"HH", "FHSS":"BB", "dBm_AntSignal":"b", "dBm_AntNoise":"b",
            "Lock_Quality":"H", "TX_Attenuation":"H", "dB_TX_Attenuation":"H",
            "dBm_TX_Power":"b", "Antenna":"B", "dB_AntSignal":"B",
            "dB_AntNoise":"B", "b14":"H", "b15":"B", "b16":"B", "b17":"B", "b18":"B",
            "b19":"BBB", "b20":"LHBB", "b21":"HBBBBBH", "b22":"B", "b23":"B",
            "b24":"B", "b25":"B", "b26":"B", "b27":"B", "b28":"B", "b29":"B",
            "b30":"B", "Ext":"B"}
        # data
        self.data = {}  # transmitter address -> list of RSSI samples
        self.interface = interface
        self.dataMutex = thread.allocate_lock()  # guards self.data across threads
        self.time0 = time.time()  # start time, used for packet-rate reporting
        # background sniffer; stops when the module-level stopEvent is set
        thread.start_new_thread(self.sniff, (stopEvent,))

    def sniff(self, stopEvent):
        """Sniff in batches of 100 packets until stopEvent is set,
        appending each packet's RSSI under its source address."""
        while not stopEvent.is_set():
            t0 = time.time()
            packets = sca.sniff(iface=self.interface, count = 100)
            dt = time.time() - t0
            print "current rate " + "{0:.2f}".format(100/dt) + " packets/sec"
            for pkt in packets:
                addr, rssi = self.parsePacket(pkt)
                if addr is not None:
                    with self.dataMutex:
                        if addr in self.data.keys():
                            self.data[addr].append(rssi)
                        else:
                            self.data[addr] = [rssi]

    def parsePacket(self, pkt):
        """Return (source address, RSSI) for an 802.11 packet, or
        (None, None) when the packet has no Dot11 layer / no addr2."""
        if pkt.haslayer(sca.Dot11):
            if pkt.addr2 is not None:
                # check available Radiotap fields
                #field, val = pkt.getfield_and_val("present")
                #names = [field.names[i][0] for i in range(len(field.names)) if (1 << i) & val != 0]
                # check if we measured signal strength
                #if "dBm_AntSignal" in names:
                # decode radiotap header
                #fmt = "<"
                #rssipos = 0
                #channel_pos = 0
                #for name in names:
                # some fields consist of more than one value
                #if name == "dBm_AntSignal":
                # correct for little endian format sign
                #rssipos = len(fmt)-1
                #if name == "Channel":
                #channel_pos = len(fmt)-1
                #fmt = fmt + self.radiotap_formats[name]
                # unfortunately not all platforms work equally well and on my arm
                # platform notdecoded was padded with a ton of zeros without
                # indicating more fields in pkt.len and/or padding in pkt.pad
                #decoded = struct.unpack(fmt, pkt.notdecoded[:struct.calcsize(fmt)])
                #print len(fmt)
                #print decoded, channel_pos, decoded[len(fmt)-1]#decoded[len(fmt)-channel_pos]
                return pkt.addr2, pkt.dBm_AntSignal#decoded[rssipos]
        return None, None

    def plot(self, num):
        """Write RSSI histograms for the `num` most-seen addresses to
        'hists.pdf' (one subplot per address, busiest first)."""
        plt.clf()
        rcParams["font.family"] = "serif"
        rcParams["xtick.labelsize"] = 8
        rcParams["ytick.labelsize"] = 8
        rcParams["axes.labelsize"] = 8
        rcParams["axes.titlesize"] = 8
        data = {}
        time1 = time.time()
        # snapshot the shared dict under the lock so plotting can proceed
        # while the sniffer thread keeps appending
        with self.dataMutex:
            data = dict(self.data)
        # addresses sorted by sample count, descending
        nodes = [x[0] for x in sorted([(addr, len(data[addr])) for addr in data.keys()], key=lambda x:x[1], reverse=True)]
        nplots = min(len(nodes), num)
        for i in range(nplots):
            plt.subplot(nplots, 1, i+1)
            plt.title(str(nodes[i]) + ": "
                + str(len(data[nodes[i]])) + " packets @ " +
                "{0:.2f}".format(len(data[nodes[i]])/(time1-self.time0))
                + " packets/sec")
            plt.hist(data[nodes[i]], range=(-100, -20), bins=80)
            plt.gca().set_xlim((-100, -20))
        plt.gcf().set_size_inches((6, 4*nplots))
        plt.savefig("hists.pdf")
def get_mon_iface():
    """Return the name of a monitor-mode wireless interface.

    Reuses an existing monitor interface if iwconfig reports one;
    otherwise puts a wireless interface into monitor mode first.
    """
    global monitor_on
    monitors, interfaces = iwconfig()
    if len(monitors) > 0:
        monitor_on = True
        return monitors[0]
    else:
        # Start monitor mode on a wireless interface
        print '['+G+'*'+W+'] Finding the most powerful interface...'
        #interface = get_iface(interfaces)
        # NOTE(review): interface is hard-coded; the get_iface() strongest-
        # signal selection above is disabled — confirm this is intended
        interface = 'wlan1'
        monmode = start_mon_mode(interface)
        return monmode
def iwconfig():
    """Parse `iwconfig` output.

    Returns (monitors, interfaces): `monitors` is a list of interfaces
    already in monitor mode; `interfaces` maps other wireless interface
    names to 1 if currently associated (ESSID present) else 0.
    Exits the program if iwconfig cannot be executed.
    """
    monitors = []
    interfaces = {}
    try:
        proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
    except OSError:
        sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
    for line in proc.communicate()[0].split('\n'):
        if len(line) == 0: continue # Isn't an empty string
        if line[0] != ' ': # Doesn't start with space
            wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
            if not wired_search: # Isn't wired
                iface = line[:line.find(' ')] # is the interface
                if 'Mode:Monitor' in line:
                    monitors.append(iface)
                elif 'IEEE 802.11' in line:
                    if "ESSID:\"" in line:
                        interfaces[iface] = 1
                    else:
                        interfaces[iface] = 0
    return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
    """Put *interface* into monitor mode via ifconfig/iwconfig and return it."""
    print('['+G+'+'+W+'] Starting monitor mode off '+G+interface+W)
    try:
        for cmd in ('ifconfig %s down',
                    'iwconfig %s mode monitor',
                    'ifconfig %s up'):
            os.system(cmd % interface)
        return interface
    except Exception:
        sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
    """Return *mon_iface* from monitor mode back to managed mode."""
    for cmd in ('ifconfig %s down',
                'iwconfig %s mode managed',
                'ifconfig %s up'):
        os.system(cmd % mon_iface)
if __name__ == "__main__":
    # Start channel hopping
    # Acquire (or create) a monitor-mode interface for sniffing.
    args = get_mon_iface()
    # Hop channels in a background daemon thread so the sniffer sees
    # traffic on every channel.
    hop = threading.Thread(target=channel_hop.channel_hop, args=[args])
    hop.daemon = True
    hop.start()
    # Sniff RSSI for 30 seconds, then plot the 20 busiest nodes.
    sniffer = ScapyRssi(args)
    time.sleep(30)
    sniffer.plot(20)
    print "plotted"
| |
# -*- coding: utf-8 -*-
"""
This module provides support for closures and inner functions.
@autojit
def outer():
a = 10 # this is a cellvar
@jit('void()')
def inner():
print a # this is a freevar
inner()
a = 12
return inner
The 'inner' function closes over the outer scope. Each function with
cellvars packs them into a heap-allocated structure, the closure scope.
The closure scope is passed into 'inner' when called from within outer.
The execution of 'def' creates a NumbaFunction, which has itself as the
m_self attribute. So when 'inner' is invoked from Python, the numba
wrapper function gets called with NumbaFunction object and the args
tuple. The closure scope is then set in NumbaFunction.func_closure.
The closure scope is an extension type with the cellvars as attributes.
Closure scopes are chained together, since multiple inner scopes may need
to share a single outer scope. E.g.
def outer(a):
def inner(b):
def inner_inner():
print a, b
return inner_inner
return inner(1), inner(2)
We have three live closure scopes here:
scope_outer = { 'a': a } # call to 'outer'
scope_inner_1 = { 'scope_outer': scope_outer, 'b': 1 } # call to 'inner' with b=1
scope_inner_2 = { 'scope_outer': scope_outer, 'b': 2 } # call to 'inner' with b=2
Function 'inner_inner' defines no new scope, since it contains no cellvars.
But it does contain a freevar from scope_outer and scope_inner, so it gets
scope_inner passed as first argument. scope_inner has a reference to scope
outer, so all variables can be resolved.
These scopes are instances of a numba extension class.
"""
from __future__ import print_function, division, absolute_import
import ast
import ctypes
import logging
import numba.decorators
from numba import *
from numba import error
from numba import visitors
from numba import nodes
from numba import typesystem
from numba import typedefs
from numba import numbawrapper
from numba.exttypes import extension_types
from numba import utils
from numba.type_inference import module_type_inference
from numba.minivect import minitypes
from numba.symtab import Variable
from numba.exttypes import methodtable
from numba.exttypes import attributetable
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def is_closure_signature(func_signature):
    """Return True when *func_signature* takes a closure scope first.

    The previous version returned whatever value short-circuited out of
    the `and` chain (False, an empty args sequence, or the raw
    is_closure_scope attribute); callers only use it truthily, so
    normalizing to a real bool is backward compatible and less surprising.
    """
    if func_signature is None or not func_signature.args:
        return False
    return bool(func_signature.args[0].is_closure_scope)
#------------------------------------------------------------------------
# Closure Signature Validation (type inference of outer function)
#------------------------------------------------------------------------
# Handle closures during type inference. Mostly performs error checking
# for closure signatures.
def err_decorator(decorator):
    """Reject an unsupported decorator on a closure definition."""
    msg = ("Only @jit and @autojit and signature decorators "
           "are supported")
    raise error.NumbaError(decorator, msg)
def check_valid_argtype(argtype_node, argtype):
    """Raise NumbaError unless *argtype* is a minivect type instance."""
    if isinstance(argtype, minitypes.Type):
        return
    raise error.NumbaError(argtype_node, "Invalid type: %r" % (argtype,))
def assert_constant(visit_func, decorator, result_node):
    """Visit *result_node* and return its constant value.

    Raises NumbaError when the visited node is not a compile-time constant.
    """
    visited = visit_func(result_node)
    var = visited.variable
    if not var.is_constant:
        raise error.NumbaError(decorator, "Expected a constant")
    return var.constant_value
def parse_argtypes(visit_func, decorator, func_def, jit_args):
    """Extract and validate the 'argtypes' argument of a @jit decorator.

    Returns the validated list/tuple of argument types; raises
    NumbaError when it is missing, non-sequence, or contains an
    invalid type.
    """
    node = jit_args['argtypes']
    if node is None:
        raise error.NumbaError(func_def.args[0],
                               "Expected an argument type")
    types = assert_constant(visit_func, decorator, node)
    if not isinstance(types, (list, tuple)):
        raise error.NumbaError(node, 'Invalid argument for argtypes')
    for argtype in types:
        check_valid_argtype(node, argtype)
    return types
def parse_restype(visit_func, decorator, jit_args):
    """Extract the return type from a @jit decorator's 'restype' argument.

    Accepts either a type object or a full signature string such as
    "void(double)"; a string is parsed and converted into a function
    type by applying the return type to the parsed argument types.
    Raises NumbaError when no return type was given or it is invalid.
    """
    restype_node = jit_args['restype']
    if restype_node is not None:
        restype = assert_constant(visit_func, decorator, restype_node)
        # NOTE: 'unicode' makes this Python 2 specific.
        if isinstance(restype, (str, unicode)):
            # Full signature string: split into return and argument types.
            signature = utils.process_signature(restype)
            restype = signature.return_type
            argtypes = signature.args
            check_valid_argtype(restype_node, restype)
            for argtype in argtypes:
                check_valid_argtype(restype_node, argtype)
            # Rebuild the full function type from the validated pieces.
            restype = restype(*argtypes)
        else:
            check_valid_argtype(restype_node, restype)
    else:
        raise error.NumbaError(restype_node, "Return type expected")
    return restype
def handle_jit_decorator(visit_func, func_def, decorator):
    """Build a function signature from a @jit(...) decorator.

    Types must be given either as a full signature via 'restype'
    (e.g. @jit(void(double))) or as separate 'restype' + 'argtypes';
    a bare @jit on a closure is rejected.
    """
    jit_args = module_type_inference.parse_args(
            decorator, ['restype', 'argtypes', 'backend',
                        'target', 'nopython'])
    if decorator.args or decorator.keywords:
        restype = parse_restype(visit_func, decorator, jit_args)
        if restype is not None and restype.is_function:
            # restype was itself a complete signature.
            signature = restype
        else:
            argtypes = parse_argtypes(visit_func, decorator, func_def, jit_args)
            signature = minitypes.FunctionType(restype, argtypes,
                                               name=func_def.name)
    else: #elif func_def.args:
        raise error.NumbaError(decorator,
                               "The argument types and return type "
                               "need to be specified")
    #else:
    #    signature = minitypes.FunctionType(None, [])
    # TODO: Analyse closure at call or outer function return time to
    # TODO: infer return type
    # TODO: parse out nopython argument
    return signature
def check_signature_decorator(visit_func, decorator):
    """Return the function type from a plain signature decorator.

    E.g. a decorator like `void(double)`. Raises (via err_decorator)
    when the decorator is not a cast to a function type.
    """
    visited = visit_func(decorator)
    dec_type = visited.variable.type
    if dec_type.is_cast and dec_type.dst_type.is_function:
        return dec_type.dst_type
    # Anything else is not a supported decorator.
    err_decorator(decorator)
def process_decorators(env, visit_func, node):
    """Validate a closure's decorator list and return its signature.

    Exactly one @jit decorator (or a plain signature decorator) is
    required; @autojit closures are rejected. The decorator list is
    cleared afterwards so it is not applied a second time.
    """
    if not node.decorator_list:
        # An undecorated def may already be known from a previous pass.
        func_env = env.translation.get_env(node)
        if func_env:
            return func_env.func_signature
        raise error.NumbaError(
                node, "Closure must be decorated with 'jit' or 'autojit'")
    if len(node.decorator_list) > 1:
        raise error.NumbaError(
                    node, "Only one decorator may be specified for "
                          "closure (@jit/@autojit)")
    decorator, = node.decorator_list
    if isinstance(decorator, ast.Name):
        decorator_name = decorator.id
    elif (not isinstance(decorator, ast.Call) or not
              isinstance(decorator.func, ast.Name)):
        err_decorator(decorator)
    else:
        decorator_name = decorator.func.id
    if decorator_name not in ('jit', 'autojit'):
        # Not @jit/@autojit: maybe a plain signature like void().
        signature = check_signature_decorator(visit_func, decorator)
    else:
        if decorator_name == 'autojit':
            raise error.NumbaError(
                decorator, "Dynamic closures not yet supported, use @jit")
        signature = handle_jit_decorator(visit_func, node, decorator)
    # Consume the decorator so it is not processed again downstream.
    del node.decorator_list[:]
    # The signature must cover every closure parameter.
    if len(signature.args) != len(node.args.args):
        raise error.NumbaError(
            decorator,
            "Expected %d arguments type(s), got %d" % (
                            len(signature.args), len(node.args.args)))
    return signature
#------------------------------------------------------------------------
# Closure Type Inference
#------------------------------------------------------------------------
def outer_scope_field(scope_type):
    """Return the (name, type) struct field linking a scope to its parent.

    The parent-scope link is by construction always the first field of
    the scope's struct layout.
    """
    struct_fields = scope_type.attribute_table.to_struct().fields
    return struct_fields[0]
def lookup_scope_attribute(cur_scope, var_name, ctx=None):
    """
    Look up a variable in the closure scope

    Walks the chain of parent scopes (linked through the first struct
    field, see outer_scope_field) until *var_name* is found, returning
    an attribute access node on the owning scope. Raises InternalError
    when the variable exists in no scope - that indicates a compiler bug.
    """
    ctx = ctx or ast.Load()
    scope_type = cur_scope.type
    outer_scope_name, outer_scope_type = outer_scope_field(scope_type)
    if var_name in scope_type.attributedict:
        # Variable lives directly in this scope.
        return nodes.ExtTypeAttribute.from_known_attribute(
                value=cur_scope, attr=var_name, ctx=ctx, ext_type=scope_type)
    elif outer_scope_type.is_closure_scope:
        # Not here: recurse into the chained parent scope.
        scope = nodes.ExtTypeAttribute.from_known_attribute(
            value=cur_scope, attr=outer_scope_name, ctx=ctx, ext_type=scope_type)
        try:
            return lookup_scope_attribute(scope, var_name, ctx)
        except error.InternalError as e:
            # Re-raise with full scope type
            pass
    # This indicates a bug
    raise error.InternalError(
            scope_type, "Unable to look up attribute", var_name)
# Name of the hidden first parameter through which a closure receives
# its enclosing closure scope.
CLOSURE_SCOPE_ARG_NAME = '__numba_closure_scope'
class ClosureTransformer(visitors.NumbaTransformer):
    """Base transformer giving access to the function's outer closure scope."""

    @property
    def outer_scope(self):
        """An ast.Name loading the outer closure scope, or None when the
        current function is not itself a closure."""
        if CLOSURE_SCOPE_ARG_NAME not in self.symtab:
            return None
        var = self.symtab[CLOSURE_SCOPE_ARG_NAME]
        node = ast.Name(id=CLOSURE_SCOPE_ARG_NAME, ctx=ast.Load())
        node.variable = var
        node.type = var.type
        return node
class ClosureTypeInferer(ClosureTransformer):
    """
    Runs just after type inference after the outer variables types are
    resolved.

        1) run type inferencer on inner functions
        2) build scope extension types pre-order
        3) generate nodes to instantiate scope extension type at call time
    """

    def __init__(self, *args, **kwargs):
        super(ClosureTypeInferer, self).__init__(*args, **kwargs)
        # NOTE(review): requires 'warn' to be passed as a keyword argument;
        # a missing key raises KeyError here - confirm all callers pass it.
        self.warn = kwargs["warn"]

    def visit_FunctionDef(self, node):
        # Only the outermost function has no closure_scope; for it, first
        # type-infer all inner functions so cellvars/freevars are known.
        if node.closure_scope is None:
            # Process inner functions and determine cellvars and freevars
            # codes = [c for c in self.constants
            #              if isinstance(c, types.CodeType)]
            process_closures(self.env, node, self.symtab,
                             func_globals=self.func_globals,
                             closures=self.closures,
                             warn=self.warn)
        # cellvars are the variables we own
        cellvars = dict((name, var) for name, var in self.symtab.iteritems()
                                        if var.is_cellvar)
        node.cellvars = cellvars
        logger.debug("Cellvars in function %s: %s", node.name, cellvars)
        outer_scope = self.outer_scope
        if outer_scope:
            outer_scope_type = outer_scope.type
        else:
            outer_scope_type = None
        if not cellvars:
            # No cellvars, so use parent closure scope if this is a closure
            if outer_scope:
                self.update_closures(node, outer_scope_type, None)
            return self.visit_func_children(node)
        # Create closure scope extension type
        cellvar_fields = [(name, var.type)
                              for name, var in cellvars.iteritems()]
        fields = numba.struct(cellvar_fields).fields
        if outer_scope:
            # The parent scope is always the first field so attribute
            # lookups can chain upwards (see outer_scope_field).
            fields.insert(0, ('__numba_base_scope', outer_scope_type))
        class py_class(object):
            pass
        func_name = self.func_name
        py_class.__name__ = '%s_scope' % func_name
        scope_type = typesystem.ClosureScopeType(py_class, outer_scope_type)
        scope_type.unmangled_symtab = dict(fields)
        AttrTable = attributetable.AttributeTable
        scope_type.attribute_table = AttrTable.from_list(py_class=None,
                                                         attributes=fields)
        ext_type = extension_types.create_new_extension_type(
                type, func_name , (object,), {}, scope_type, None)
        # Instantiate closure scope
        logger.debug("Generate closure %s %s %s", node.name, scope_type,
                     outer_scope)
        # Prepend the scope instantiation to the function body.
        cellvar_scope = nodes.InstantiateClosureScope(
                    node, ext_type, scope_type, outer_scope)
        node.body.insert(0, cellvar_scope)
        self.update_closures(node, scope_type, ext_type)
        return self.visit_func_children(node)

    def update_closures(self, func_def, scope_type, ext_type):
        """
        Patch closures to get the closure scope as the first argument.
        """
        for closure in func_def.closures:
            # closure.scope_type = scope_type
            closure.func_def.scope_type = scope_type
            closure.ext_type = ext_type
            # patch function parameters
            param = ast.Name(id=CLOSURE_SCOPE_ARG_NAME, ctx=ast.Param())
            param.variable = Variable(scope_type, is_local=True)
            param.type = param.variable.type
            closure.symtab[CLOSURE_SCOPE_ARG_NAME] = param.variable
            closure.func_def.args.args.insert(0, param)
            closure.need_closure_scope = True
            # patch closure signature
            closure.type.add_scope_arg(scope_type)
            closure.func_env.func_signature = closure.type.signature
def get_locals(symtab):
    """Return only the local variables of *symtab* (name -> Variable).

    Uses dict.items() instead of the Python-2-only iteritems(): the
    result is identical on Python 2 and it also runs on Python 3.
    """
    return dict((name, var) for name, var in symtab.items()
                    if var.is_local)
def process_closures(env, outer_func_def, outer_symtab, **kwds):
    """
    Process closures recursively and for each variable in each function
    determine whether it is a freevar, a cellvar, a local or otherwise.
    """
    import numba.pipeline
    # Only locals of the outer function can be closed over.
    outer_symtab = get_locals(outer_symtab)
    # closure_scope is set on the FunctionDef by TypeInferer
    if outer_func_def.closure_scope is not None:
        # Chain scopes: variables of all enclosing functions stay visible.
        closure_scope = dict(outer_func_def.closure_scope, **outer_symtab)
    else:
        closure_scope = outer_symtab
    for closure in outer_func_def.closures:
        logger.debug("process closures: %s %s", outer_func_def.name,
                     closure.func_def.name)
        # Pass None so the pipeline uses the AST we already have rather
        # than re-deriving it from a Python function object.
        closure_py_func = None # closure.py_func
        func_env, _ = numba.pipeline.run_pipeline2(
            env,
            closure_py_func,
            closure.func_def,
            closure.type.signature,
            closure_scope=closure_scope,
            function_globals=env.translation.crnt.function_globals,
            pipeline_name='type_infer',
            is_closure=True,
            **kwds)
        closure.func_env = func_env
        closure.symtab = func_env.symtab
        # Recurse with the inner function pushed as the current env.
        env.translation.push_env(func_env)
        process_closures(env, closure.func_def, func_env.symtab, **kwds)
        env.translation.pop()
#------------------------------------------------------------------------
# Closure Lowering
#------------------------------------------------------------------------
class ClosureSpecializer(ClosureTransformer):
    """
    Lowering of closure instantiation and calling.

        - Instantiates the closure scope and makes the necessary assignments
        - Rewrites local variable accesses to accesses on the instantiated scope
        - Instantiate function with closure scope

    Also rewrite calls to closures.
    """

    def __init__(self, *args, **kwargs):
        super(ClosureSpecializer, self).__init__(*args, **kwargs)
        # A function without cellvars owns no scope of its own; inherit
        # the outer scope (which may be None for a non-closure).
        if not self.ast.cellvars:
            self.ast.cur_scope = self.outer_scope

    def _load_name(self, var_name, is_cellvar=False):
        # Build an ast.Name load for a local, cloning its Variable so the
        # cellvar flag can be overridden without touching the symtab entry.
        src = ast.Name(var_name, ast.Load())
        src.variable = Variable.from_variable(self.symtab[var_name])
        src.variable.is_cellvar = is_cellvar
        src.type = src.variable.type
        return src

    def visit_InstantiateClosureScope(self, node):
        """
        Instantiate a closure scope.

        After instantiation, assign the parent scope and all function
        arguments that belong in the scope to the scope.
        """
        ctor = nodes.objconst(node.ext_type.__new__)
        ext_type_arg = nodes.objconst(node.ext_type)
        create_scope = nodes.ObjectCallNode(
                    signature=node.scope_type(object_), func=ctor,
                    args=[ext_type_arg])
        create_scope = create_scope.cloneable
        scope = create_scope.clone
        stats = [create_scope]
        # Chain outer scope - if present - to current scope
        outer_scope = self.outer_scope
        if outer_scope:
            outer_scope_name, outer_scope_type = outer_scope_field(scope.type)
            dst = lookup_scope_attribute(scope, outer_scope_name,
                                         ctx=ast.Store())
            assmt = ast.Assign(targets=[dst], value=outer_scope)
            stats.append(assmt)
        # Assign function arguments that are cellvars
        for arg in self.ast.args.args:
            name = arg.id
            if name in node.scope_type.unmangled_symtab:
                dst = lookup_scope_attribute(scope, name, ast.Store())
                src = self._load_name(name)
                src.variable.assign_in_closure_scope = True
                assmt = ast.Assign(targets=[dst], value=src)
                stats.append(assmt)
        logger.debug("instantiating %s", scope.type)
        self.ast.cur_scope = scope
        return self.visit(nodes.ExpressionNode(stmts=stats, expr=scope))

    def get_qname(self, closure_node):
        # Qualified name: "<module>.<outer func>.__closure__.<inner func>".
        ns = '.'.join([self.module_name, self.func_name])
        closure_name = closure_node.name
        qname = "%s.__closure__.%s" % (ns, closure_name)
        return qname

    def visit_ClosureNode(self, node):
        """
        Compile the inner function.
        """
        # Compile closure, skip CFA and type inference
        node.func_env.qualified_name = self.get_qname(node)
        numba.pipeline.run_env(self.env, node.func_env,
                               pipeline_name='compile')
        translator = node.func_env.translator
        # translator.link()
        node.lfunc = translator.lfunc
        node.lfunc_pointer = translator.lfunc_pointer
        if node.need_numba_func:
            # Exposed to Python: wrap in a runtime NumbaFunction object.
            return self.create_numba_function(node, node.func_env)
        else:
            # Only called natively: just record its type in the symtab.
            func_name = node.func_def.name
            self.symtab[func_name] = Variable(name=func_name, type=node.type,
                                              is_local=True)
            # return nodes.LLVMValueRefNode(node.type, node.lfunc)
            # TODO: Remove assignment altogether!
            # return nodes.NoneNode()
            return nodes.ObjectInjectNode(None, type=object_)

    def create_numba_function(self, node, func_env):
        """Build a runtime NumbaFunction object wrapping the compiled closure."""
        from numba.codegen import llvmwrapper
        closure_scope = self.ast.cur_scope
        if closure_scope is None:
            # Not inside any scope: pass NULL for func_closure.
            closure_scope = nodes.NULL
            scope_type = void.pointer()
        else:
            assert node.func_def.args.args[0].variable.type
            scope_type = closure_scope.type
        self.env.translation.push_env(func_env)
        try:
            node.wrapper_func, node.wrapper_lfunc, methoddef = (
                    llvmwrapper.build_wrapper_function(self.env))
        finally:
            self.env.translation.pop()
        # Keep methoddef alive
        # assert methoddef in node.py_func.live_objects
        modname = self.module_name
        self.keep_alive(modname)
        # Create function signature with closure scope at runtime
        create_numbafunc_signature = node.type(
            void.pointer(),     # PyMethodDef *ml
            object_,            # PyObject *module
            void.pointer(),     # PyObject *code
            scope_type,         # PyObject *closure
            void.pointer(),     # void *native_func
            object_,            # PyObject *native_signature
            object_,            # PyObject *keep_alive
        )
        # Create function with closure scope at runtime
        create_numbafunc = nodes.ptrfromint(
                        numbawrapper.NumbaFunction_NewEx_pointer,
                        create_numbafunc_signature.pointer())
        # Raw C pointer to the (kept-alive) PyMethodDef.
        methoddef_p = ctypes.cast(ctypes.byref(methoddef),
                                  ctypes.c_void_p).value
        args = [
            nodes.const(methoddef_p, void.pointer()),
            nodes.const(modname, object_),
            nodes.NULL,
            closure_scope,
            nodes.const(node.lfunc_pointer, void.pointer()),
            nodes.const(node.type.signature, object_),
            nodes.NULL,         # nodes.const(node.py_func, object_),
        ]
        func_call = nodes.NativeFunctionCallNode(
            signature=create_numbafunc_signature,
            function_node=create_numbafunc,
            args=args)
        result = func_call
        #stats = [nodes.inject_print(nodes.const("calling...", c_string_type)),
        #         result]
        #result = ast.Suite(body=stats)
        result = self.visit(result)
        return result

    def visit_Name(self, node):
        "Resolve cellvars and freevars"
        is_param = isinstance(node.ctx, ast.Param)
        if not is_param and (node.variable.is_cellvar or
                             node.variable.is_freevar):
            # Rewrite the plain local access into a scope attribute access.
            logger.debug("Function %s, lookup %s in scope %s: %s",
                         self.ast.name, node.id, self.ast.cur_scope.type,
                         self.ast.cur_scope.type.attribute_table)
            attr = lookup_scope_attribute(self.ast.cur_scope,
                                          var_name=node.id, ctx=node.ctx)
            return self.visit(attr)
        else:
            return node

    def retrieve_closure_from_numbafunc(self, node):
        """
        Retrieve the closure scope from ((NumbaFunctionObject *)
        numba_func).func_closure
        """
        # TODO: use llvmwrapper.get_closure_scope()
        pointer = nodes.ptrfromobj(node.func)
        type = typedefs.NumbaFunctionObject.ref()
        closure_obj_struct = nodes.CoercionNode(pointer, type)
        cur_scope = nodes.StructAttribute(closure_obj_struct, 'func_closure',
                                          ctx=ast.Load(), type=type)
        return cur_scope

    def visit_ClosureCallNode(self, node):
        # Closures that take a scope need it prepended to the call args.
        if node.closure_type.closure.need_closure_scope:
            if (self.ast.cur_scope is None or
                    self.ast.cur_scope.type != node.closure_type):
                # Call to closure from outside outer function
                # TODO: optimize calling a closure from an inner function, e.g.
                #     def outer():
                #         a = 1
                #         def inner1(): print a
                #         def inner2(): inner1()
                cur_scope = self.retrieve_closure_from_numbafunc(node)
            else:
                # Call to closure from within outer function
                cur_scope = self.ast.cur_scope
            # node.args[0] = cur_scope
            node.args.insert(0, cur_scope)
        self.generic_visit(node)
        return node

    def visit_ClosureScopeLoadNode(self, node):
        # Inside a closure body this loads the received scope argument.
        return self.ast.cur_scope or nodes.NULL_obj
| |
import sys
import codecs
from .exceptions import InvalidCommand
from .plugin import PluginDict
from .task import Task
from .control import TaskControl
from .runner import Runner, MRunner, MThreadRunner
from .cmd_base import DoitCmdBase
from . import reporter
# verbosity
opt_verbosity = {
'name':'verbosity',
'short':'v',
'long':'verbosity',
'type':int,
'default': None,
'help': """0 capture (do not print) stdout/stderr from task.
1 capture stdout only.
2 do not capture anything (print everything immediately).
[default: 1]"""
}
# select output file
opt_outfile = {
'name': 'outfile',
'short':'o',
'long': 'output-file',
'type': str,
'default': sys.stdout,
'help':"write output into file [default: stdout]"
}
# always execute task
opt_always = {
'name': 'always',
'short': 'a',
'long': 'always-execute',
'type': bool,
'default': False,
'help': "always execute tasks even if up-to-date [default: %(default)s]",
}
# continue executing tasks even after a failure
opt_continue = {
'name': 'continue',
'short': 'c',
'long': 'continue',
'inverse': 'no-continue',
'type': bool,
'default': False,
'help': ("continue executing tasks even after a failure " +
"[default: %(default)s]"),
}
opt_single = {
'name': 'single',
'short': 's',
'long': 'single',
'type': bool,
'default': False,
'help': ("Execute only specified tasks ignoring their task_dep " +
"[default: %(default)s]"),
}
opt_num_process = {
'name': 'num_process',
'short': 'n',
'long': 'process',
'type': int,
'default': 0,
'help': "number of subprocesses [default: %(default)s]"
}
# reporter
opt_reporter = {
'name':'reporter',
'short':'r',
'long':'reporter',
'type':str,
'default': 'console',
'help': """Choose output reporter.\n[default: %(default)s]"""
}
opt_parallel_type = {
'name':'par_type',
'short':'P',
'long':'parallel-type',
'type':str,
'default': 'process',
'help': """Tasks can be executed in parallel in different ways:
'process': uses python multiprocessing module
'thread': uses threads
[default: %(default)s]
"""
}
# pdb post-mortem
opt_pdb = {
'name':'pdb',
'short':'',
'long':'pdb',
'type': bool,
'default': None,
'help':
"""get into PDB (python debugger) post-mortem in case of unhandled exception"""
}
# use ".*" as default regex for delayed tasks without explicitly specified regex
opt_auto_delayed_regex = {
'name': 'auto_delayed_regex',
'short': '',
'long': 'auto-delayed-regex',
'type': bool,
'default': False,
'help':
"""Uses the default regex ".*" for every delayed task loader for which no regex was explicitly defined"""
}
class Run(DoitCmdBase):
    """`doit run` command: execute the selected tasks."""
    doc_purpose = "run tasks"
    doc_usage = "[TASK/TARGET...]"
    doc_description = None
    execute_tasks = True
    cmd_options = (opt_always, opt_continue, opt_verbosity,
                   opt_reporter, opt_outfile, opt_num_process,
                   opt_parallel_type, opt_pdb, opt_single,
                   opt_auto_delayed_regex)
    def __init__(self, **kwargs):
        super(Run, self).__init__(**kwargs)
        self.reporters = self.get_reporters() # dict
    def get_reporters(self):
        """return dict of all available reporters

        Combines built-in reporters with REPORTER plugins.
        Also set CmdOption choices.
        """
        # built-in reporters
        reporters = {
            'console': reporter.ConsoleReporter,
            'executed-only': reporter.ExecutedOnlyReporter,
            'json': reporter.JsonReporter,
            'zero': reporter.ZeroReporter,
        }
        # plugins
        plugins = PluginDict()
        plugins.add_plugins(self.config, 'REPORTER')
        reporters.update(plugins.to_dict())
        # set choices for reporter cmdoption
        # sub-classes might not have this option
        if 'reporter' in self.cmdparser:
            choices = {k: v.desc for k,v in reporters.items()}
            self.cmdparser['reporter'].choices = choices
        return reporters
    def _execute(self, outfile,
                 verbosity=None, always=False, continue_=False,
                 reporter='console', num_process=0, par_type='process',
                 single=False, auto_delayed_regex=False):
        """Run the selected tasks and return the runner's exit status.

        @param reporter:
                (str) one of provided reporters or ...
                (class) user defined reporter class (can only be specified
                       from DOIT_CONFIG - never from command line)
                (reporter instance) - only used in unittests
        """
        # get tasks to be executed
        # self.control is saved on instance to be used by 'auto' command
        self.control = TaskControl(self.task_list,
                                   auto_delayed_regex=auto_delayed_regex)
        self.control.process(self.sel_tasks)
        if single:
            # Strip task_dep lists so only the selected tasks execute.
            for task_name in self.sel_tasks:
                task = self.control.tasks[task_name]
                if task.has_subtask:
                    # NOTE(review): this inner loop shadows the outer
                    # loop's 'task_name'; harmless here but easy to misread.
                    for task_name in task.task_dep:
                        sub_task = self.control.tasks[task_name]
                        sub_task.task_dep = []
                else:
                    task.task_dep = []
        # reporter
        if isinstance(reporter, str):
            reporter_cls = self.reporters[reporter]
        else:
            # user defined class
            reporter_cls = reporter
        # verbosity
        if verbosity is None:
            use_verbosity = Task.DEFAULT_VERBOSITY
        else:
            use_verbosity = verbosity
        show_out = use_verbosity < 2 # show on error report
        # outstream
        if isinstance(outfile, str):
            outstream = codecs.open(outfile, 'w', encoding='utf-8')
        else: # outfile is a file-like object (like StringIO or sys.stdout)
            outstream = outfile
        self.outstream = outstream
        # run
        try:
            # FIXME stderr will be shown twice in case of task error/failure
            if isinstance(reporter_cls, type):
                reporter_obj = reporter_cls(outstream, {'show_out': show_out,
                                                        'show_err': True})
            else: # also accepts reporter instances
                reporter_obj = reporter_cls
            run_args = [self.dep_manager, reporter_obj,
                        continue_, always, verbosity]
            if num_process == 0:
                RunnerClass = Runner
            else:
                if par_type == 'process':
                    RunnerClass = MRunner
                    if not MRunner.available():
                        # Fall back to threads when multiprocessing is unusable.
                        RunnerClass = MThreadRunner
                        sys.stderr.write(
                            "WARNING: multiprocessing module not available, " +
                            "running in parallel using threads.")
                elif par_type == 'thread':
                    RunnerClass = MThreadRunner
                else:
                    msg = "Invalid parallel type %s"
                    raise InvalidCommand(msg % par_type)
                run_args.append(num_process)
            runner = RunnerClass(*run_args)
            return runner.run_all(self.control.task_dispatcher())
        finally:
            # Only close streams this method itself opened.
            if isinstance(outfile, str):
                outstream.close()
| |
# Web UI for configuration of the CMSIS-DSP Build
#
# How to install
# pip install streamlit
#
# How to use
# streamlit run cmsisdspconfig.py
#
import streamlit as st
import textwrap
import re
st.set_page_config(page_title="CMSIS-DSP Configuration",layout="wide" )
# Options requiring a special management
NOTSTANDARD=["allTables","allInterpolations","allFFTs","Float16"]
# Helium (MVE) target flag: selects which bit-reversal tables are emitted.
HELIUM=False
config={}
# Used in UI
# Default state of every UI checkbox.
config["allTables"] = True
config["allFFTs"] = True
config["allInterpolations"] = True
config["MVEI"]=False
config["MVEF"]=False
config["NEON"]=False
config["HELIUM"]=False
config["HELIUMEXPERIMENTAL"]=False
config["Float16"]=True
config["HOST"]=False
config["COS_F32"]=False
config["COS_Q31"]=False
config["COS_Q15"]=False
config["SIN_F32"]=False
config["SIN_Q31"]=False
config["SIN_Q15"]=False
config["SIN_COS_F32"]=False
config["SIN_COS_Q31"]=False
config["SQRT_Q31"]=False
config["LMS_NORM_Q31"]=False
config["LMS_NORM_Q15"]=False
config["CMPLX_MAG_Q31"]=False
config["CMPLX_MAG_Q15"]=False
config["CMPLX_MAG_FAST_Q15"]=False
config["CFFT_RADIX2_Q15"]=False
config["CFFT_RADIX4_Q15"]=False
config["CFFT_RADIX2_Q31"]=False
config["CFFT_RADIX4_Q31"]=False
config["BASICMATH"]=True
config["COMPLEXMATH"]=True
config["CONTROLLER"]=True
config["FASTMATH"]=True
config["FILTERING"]=True
config["MATRIX"]=True
config["STATISTICS"]=True
config["SUPPORT"]=True
config["TRANSFORM"]=True
config["SVM"]=True
config["BAYES"]=True
config["DISTANCE"]=True
config["INTERPOLATION"]=True
config["QUATERNIONMATH"]=True
config["LOOPUNROLL"]=True
config["ROUNDING"]=False
config["MATRIXCHECK"]=False
config["AUTOVECTORIZE"] = False
# Used as options in command line
# in case the UI option is worded differently
realname={}
realname["COS_F32"]="ARM_COS_F32"
realname["COS_Q31"]="ARM_COS_Q31"
realname["COS_Q15"]="ARM_COS_Q15"
realname["SIN_F32"]="ARM_SIN_F32"
realname["SIN_Q31"]="ARM_SIN_Q31"
realname["SIN_Q15"]="ARM_SIN_Q15"
realname["SIN_COS_F32"]="ARM_SIN_COS_F32"
realname["SIN_COS_Q31"]="ARM_SIN_COS_Q31"
realname["SQRT_Q31"]="ARM_SQRT_Q31"
realname["LMS_NORM_Q31"]="ARM_LMS_NORM_Q31"
realname["LMS_NORM_Q15"]="ARM_LMS_NORM_Q15"
realname["CMPLX_MAG_Q31"]="ARM_CMPLX_MAG_Q31"
realname["CMPLX_MAG_Q15"]="ARM_CMPLX_MAG_Q15"
realname["CMPLX_MAG_FAST_Q15"]="ARM_CMPLX_MAG_FAST_Q15"
realname["CFFT_RADIX2_Q15"]="ARM_CFFT_RADIX2_Q15"
realname["CFFT_RADIX4_Q15"]="ARM_CFFT_RADIX4_Q15"
realname["CFFT_RADIX2_Q31"]="ARM_CFFT_RADIX2_Q31"
realname["CFFT_RADIX4_Q31"]="ARM_CFFT_RADIX4_Q31"
# Options that default to ON in the cmake build: for these only an
# explicit OFF needs to be generated (see computeCmakeOptions).
defaulton={}
defaulton["LOOPUNROLL"]=True
defaulton["BASICMATH"]=True
defaulton["COMPLEXMATH"]=True
defaulton["CONTROLLER"]=True
defaulton["FASTMATH"]=True
defaulton["FILTERING"]=True
defaulton["MATRIX"]=True
defaulton["STATISTICS"]=True
defaulton["SUPPORT"]=True
defaulton["TRANSFORM"]=True
defaulton["SVM"]=True
defaulton["BAYES"]=True
defaulton["DISTANCE"]=True
defaulton["INTERPOLATION"]=True
defaulton["QUATERNIONMATH"]=True
# Transform sizes and datatypes selectable in the UI.
CFFTSIZE=[16,32,64,128,256,512,1024,2048,4096]
CFFTDATATYPE=['F64','F32','F16','Q31','Q15']
RFFTFASTSIZE=[32,64,128,256,512,1024,2048,4096]
RFFTFASTDATATYPE=['F64','F32','F16']
RFFTSIZE=[32,64,128,256,512,1024,2048,4096,8192]
RFFTDATATYPE=['F32','Q31','Q15']
DCTSIZE=[128,512,2048,8192]
DCTDATATYPE=['F32','Q31','Q15']
def joinit(iterable, delimiter):
    """Yield the elements of *iterable* interspersed with *delimiter*.

    Yields nothing for an empty iterable. The previous version let the
    StopIteration from next() escape the generator, which raises
    RuntimeError on Python 3.7+ (PEP 479).
    """
    it = iter(iterable)
    try:
        yield next(it)
    except StopIteration:
        # Empty input: nothing to intersperse.
        return
    for x in it:
        yield delimiter
        yield x
def options(l):
    """Join a list of command-line option strings with single spaces.

    Equivalent to the former "".join(joinit(l, " ")), but also handles
    an empty list (returns "") instead of tripping over joinit's
    StopIteration/RuntimeError, and avoids the generator round-trip.
    """
    return " ".join(l)
def computeCmakeOptions(config):
    """Translate the UI *config* dict into a dict of cmake option values.

    Result keys are cmake option names (using *realname* aliases where
    defined) and values are booleans. Options that are ON by default in
    the build (*defaulton*) are emitted only when disabled; all others
    only when enabled. Table-selection options are emitted only when
    not all tables are requested.
    """
    global defaulton
    cmake={}
    if not config["allTables"]:
        # Selective table configuration requested.
        cmake["CONFIGTABLE"]=True
        if config["allInterpolations"]:
            cmake["ALLFAST"]=True
        if config["allFFTs"]:
            cmake["ALLFFT"]=True
        if config["Float16"]:
            cmake["FLOAT16"]=True
        else:
            cmake["DISABLEFLOAT16"]=True
    for c in config:
        if not (c in NOTSTANDARD):
            if c in defaulton:
                # Default-on option: only record an explicit OFF.
                if not config[c]:
                    if c in realname:
                        cmake[realname[c]]=False
                    else:
                        cmake[c]=False
            else:
                # Default-off option: only record an explicit ON.
                if config[c]:
                    if c in realname:
                        cmake[realname[c]]=True
                    else:
                        cmake[c]=True
    return cmake
def removeDuplicates(l):
    """Return *l* without duplicates, keeping first-occurrence order."""
    seen = set()
    unique = []
    for item in l:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def genCMakeOptions(config):
    """Turn *config* into deduplicated "-D<NAME>=ON/OFF" cmake arguments.

    Returns (flags, cmake) where cmake is the intermediate option dict
    produced by computeCmakeOptions.
    """
    cmake = computeCmakeOptions(config)
    flags = ["-D%s=%s" % (name, "ON" if enabled else "OFF")
             for name, enabled in cmake.items()]
    return(removeDuplicates(flags), cmake)
def test(cmake,s):
    """Is option *s* effectively enabled?

    An explicit entry in *cmake* wins; otherwise options listed in the
    global *defaulton* table count as enabled.
    """
    global defaulton
    if s in cmake:
        return cmake[s]
    return s in defaulton
def cfftCF32Config(cmake,size):
    """Compiler defines for an F32 complex FFT of *size*, if enabled."""
    if not test(cmake, "CFFT_F32_%d" % size):
        return []
    twiddle = "-DARM_TABLE_TWIDDLECOEF_F32_%d" % size
    # Helium uses fixed-point bit-reversal tables, other targets float ones.
    if HELIUM:
        bitrev = "-DARM_TABLE_BITREVIDX_FXT_%d" % size
    else:
        bitrev = "-DARM_TABLE_BITREVIDX_FLT_%d" % size
    return [twiddle, bitrev]
def cfftCF16Config(cmake,size):
    """Compiler defines for an F16 complex FFT of *size*, if enabled."""
    if not test(cmake, "CFFT_F16_%d" % size):
        return []
    return ["-DARM_TABLE_TWIDDLECOEF_F16_%d" % size,
            "-DARM_TABLE_BITREVIDX_FXT_%d" % size,
            "-DARM_TABLE_BITREVIDX_FLT_%d" % size]
def cfftCF64Config(cmake,size):
    """Compiler defines for an F64 complex FFT of *size*, if enabled."""
    if not test(cmake, "CFFT_F64_%d" % size):
        return []
    return ["-DARM_TABLE_TWIDDLECOEF_F64_%d" % size,
            "-DARM_TABLE_BITREVIDX_FLT64_%d" % size]
def cfftCFixedConfig(cmake,dt,size):
    """Compiler defines for a fixed-point (Q15/Q31) complex FFT, if enabled."""
    if not test(cmake, "CFFT_%s_%d" % (dt, size)):
        return []
    return ["-DARM_TABLE_TWIDDLECOEF_%s_%d" % (dt, size),
            "-DARM_TABLE_BITREVIDX_FXT_%d" % size]
def crfftFastCF64Config(cmake,size):
    """Compiler defines for an F64 real FFT (fast variant), if enabled.

    A fast RFFT of length N uses the complex FFT tables of length N/2
    plus its own real twiddle table. The original code appended
    TWIDDLECOEF_F64_<N/2> a second time at the end; the duplicate
    define was redundant and has been dropped.
    """
    result=[]
    s1 = size >> 1  # length of the underlying complex FFT
    if test(cmake,"RFFT_FAST_F64_%d" % size):
        result.append("-DARM_TABLE_TWIDDLECOEF_F64_%d" % s1)
        result.append("-DARM_TABLE_BITREVIDX_FLT64_%d" % s1)
        result.append("-DARM_TABLE_TWIDDLECOEF_RFFT_F64_%d" % size)
    return(result)
def crfftFastCF32Config(cmake,size):
    """Compiler defines for an F32 real FFT (fast variant), if enabled."""
    half = size >> 1  # length of the underlying complex FFT
    if not test(cmake, "RFFT_FAST_F32_%d" % size):
        return []
    return ["-DARM_TABLE_TWIDDLECOEF_F32_%d" % half,
            "-DARM_TABLE_BITREVIDX_FLT_%d" % half,
            "-DARM_TABLE_TWIDDLECOEF_RFFT_F32_%d" % size]
def crfftFastCF16Config(cmake,size):
    """Compiler defines for an F16 real FFT (fast variant), if enabled."""
    half = size >> 1  # length of the underlying complex FFT
    if not test(cmake, "RFFT_FAST_F16_%d" % size):
        return []
    return ["-DARM_TABLE_TWIDDLECOEF_F16_%d" % half,
            "-DARM_TABLE_BITREVIDX_FLT_%d" % half,
            "-DARM_TABLE_BITREVIDX_FXT_%d" % half,
            "-DARM_TABLE_TWIDDLECOEF_RFFT_F16_%d" % size]
# Deprecated RFFT used in DCT
def crfftF32Config(cmake, size):
    """Return the compile defines for the deprecated F32 RFFT (used by DCT4)."""
    result = []
    s1 = size >> 1
    # Fix: this used to probe "RFFT_FAST_F16_%d" (copy/paste from the F16
    # fast-RFFT helper); the deprecated F32 RFFT is keyed "RFFT_F32_%d",
    # matching the "RFFT_%s_%d" pattern used by crfftFixedConfig.
    if test(cmake, "RFFT_F32_%d" % size):
        result.append("-DARM_TABLE_REALCOEF_F32")
        # Fix: the two defines below were missing the "-D" prefix, so the
        # compiler would have treated them as unknown options instead of
        # defining the table macros.
        result.append("-DARM_TABLE_BITREV_%d" % s1)
        result.append("-DARM_TABLE_TWIDDLECOEF_F32_%d" % s1)
    return result
def crfftFixedConfig(cmake, dt, size):
    """Return the compile defines for a fixed-point RFFT of the given size.

    Pulls in the real coefficients plus the half-size CFFT tables.
    """
    half = size >> 1
    if not test(cmake, "RFFT_%s_%d" % (dt, size)):
        return []
    return [
        "-DARM_TABLE_REALCOEF_%s" % dt,
        "-DARM_TABLE_TWIDDLECOEF_%s_%d" % (dt, half),
        "-DARM_TABLE_BITREVIDX_FXT_%d" % half,
    ]
def dctConfig(cmake, dt, size):
    """Return the compile defines for a DCT4 of the given datatype and size."""
    result=[]
    if test(cmake,"DCT4_%s_%d" % (dt,size)):
        result =[]
        result.append("-DARM_TABLE_DCT4_%s_%d" % (dt,size))
        # NOTE(review): the F32 real coefficients and the 1024-entry bitrev
        # table are pulled in for every datatype, not just F32 -- presumably
        # because DCT4 is built on the deprecated F32 RFFT. Confirm against
        # the DCT4 implementation before changing.
        result.append("-DARM_TABLE_REALCOEF_F32")
        result.append("-DARM_TABLE_BITREV_1024" )
        result.append("-DARM_TABLE_TWIDDLECOEF_%s_4096" % dt)
    return(result)
# Convert cmake options to make flags
def interpretCmakeOptions(cmake):
    """Translate the cmake option dictionary into a list of -D/-I make flags.

    Order of appends is preserved; duplicates are stripped at the end.
    """
    r=[]
    if test(cmake,"CONFIGTABLE"):
        r.append("-DARM_DSP_CONFIG_TABLES")
        # In Make configuration we build all modules.
        # So the code for FFT and FAST maths may be included
        # so we allow the table to be included if they are needed.
        r.append("-DARM_FAST_ALLOW_TABLES")
        r.append("-DARM_FFT_ALLOW_TABLES")
        # Per-transform table selection (only meaningful with CONFIGTABLE on).
        for size in CFFTSIZE:
            r += cfftCF32Config(cmake,size)
            r += cfftCF16Config(cmake,size)
            r += cfftCF64Config(cmake,size)
            r += cfftCFixedConfig(cmake,"Q31",size)
            r += cfftCFixedConfig(cmake,"Q15",size)
        for size in RFFTFASTSIZE:
            r += crfftFastCF64Config(cmake,size)
            r += crfftFastCF32Config(cmake,size)
            r += crfftFastCF16Config(cmake,size)
        for size in RFFTSIZE:
            r += crfftFixedConfig(cmake,"F32",size)
            r += crfftFixedConfig(cmake,"Q31",size)
            r += crfftFixedConfig(cmake,"Q15",size)
        for size in DCTSIZE:
            r += dctConfig(cmake,"F32",size)
            r += dctConfig(cmake,"Q31",size)
            r += dctConfig(cmake,"Q15",size)
        if test(cmake,"ALLFAST"):
            r.append("-DARM_ALL_FAST_TABLES")
        if test(cmake,"ALLFFT"):
            r.append("-DARM_ALL_FFT_TABLES")
    # General code-generation options.
    if test(cmake,"LOOPUNROLL"):
        r.append("-DARM_MATH_LOOPUNROLL")
    if test(cmake,"ROUNDING"):
        r.append("-DARM_MATH_ROUNDING")
    if test(cmake,"MATRIXCHECK"):
        r.append("-DARM_MATH_MATRIX_CHECK")
    if test(cmake,"AUTOVECTORIZE"):
        r.append("-DARM_MATH_AUTOVECTORIZE")
    if test(cmake,"DISABLEFLOAT16"):
        r.append("-DDISABLEFLOAT16")
    if test(cmake,"NEON"):
        r.append("-DARM_MATH_NEON")
        r.append("-DARM_MATH_NEON_EXPERIMENTAL")
    if test(cmake,"HOST"):
        r.append("-D__GNUC_PYTHON__")
    # Interpolation / fast-math table selection. Note cosine and sine share
    # the same sine tables.
    if test(cmake,"ARM_COS_F32"):
        r.append("-DARM_TABLE_SIN_F32")
    if test(cmake,"ARM_COS_Q31"):
        r.append("-DARM_TABLE_SIN_Q31")
    if test(cmake,"ARM_COS_Q15"):
        r.append("-DARM_TABLE_SIN_Q15")
    if test(cmake,"ARM_SIN_F32"):
        r.append("-DARM_TABLE_SIN_F32")
    if test(cmake,"ARM_SIN_Q31"):
        r.append("-DARM_TABLE_SIN_Q31")
    if test(cmake,"ARM_SIN_Q15"):
        r.append("-DARM_TABLE_SIN_Q15")
    if test(cmake,"ARM_SIN_COS_F32"):
        r.append("-DARM_TABLE_SIN_F32")
    if test(cmake,"ARM_SIN_COS_Q31"):
        r.append("-DARM_TABLE_SIN_Q31")
    if test(cmake,"ARM_SQRT_Q31"):
        r.append("-DARM_TABLE_SQRT_Q31")
    if test(cmake,"ARM_LMS_NORM_Q31"):
        r.append("-DARM_TABLE_RECIP_Q31")
    if test(cmake,"ARM_LMS_NORM_Q15"):
        r.append("-DARM_TABLE_RECIP_Q15")
    if test(cmake,"ARM_CMPLX_MAG_Q31"):
        r.append("-DARM_TABLE_FAST_SQRT_Q31_MVE")
    if test(cmake,"ARM_CMPLX_MAG_Q15"):
        r.append("-DARM_TABLE_FAST_SQRT_Q31_MVE")
    if test(cmake,"ARM_CMPLX_MAG_FAST_Q15"):
        r.append("-DARM_TABLE_FAST_SQRT_Q15_MVE")
    # Vector-extension flags and the extra include paths they require.
    if test(cmake,"MVEI"):
        r.append("-DARM_MATH_MVEI")
    if test(cmake,"MVEF"):
        r.append("-DARM_MATH_MVEF")
    if test(cmake,"HELIUMEXPERIMENTAL"):
        r.append("-DARM_MATH_HELIUM_EXPERIMENTAL")
    if test(cmake,"HELIUM") or test(cmake,"MVEF") or test(cmake,"MVEI"):
        r.append("-IPrivateInclude")
    if test(cmake,"NEON") or test(cmake,"NEONEXPERIMENTAL"):
        r.append("-IComputeLibrary/Include")
    # Deprecated radix-2/4 CFFTs use the fixed 4096/1024 tables.
    if test(cmake,"ARM_CFFT_RADIX2_Q15") or test(cmake,"ARM_CFFT_RADIX4_Q15"):
        r.append("-DARM_TABLE_TWIDDLECOEF_Q15_4096")
        r.append("-DARM_TABLE_BITREV_1024")
    if test(cmake,"ARM_CFFT_RADIX2_Q31") or test(cmake,"ARM_CFFT_RADIX4_Q31"):
        r.append("-DARM_TABLE_TWIDDLECOEF_Q31_4096")
        r.append("-DARM_TABLE_BITREV_1024")
    return (removeDuplicates(r))
def genMakeOptions(config):
    """Turn the UI config into (make flags, cmake option dict)."""
    cmake_opts = computeCmakeOptions(config)
    return (interpretCmakeOptions(cmake_opts), cmake_opts)
def check(config, s, name=None, comment=None):
    """Render a sidebar checkbox for option ``s`` and store/return its state.

    ``name`` overrides the displayed label; ``comment`` adds a text line above.
    """
    if comment is not None:
        st.sidebar.text(comment)
    label = s if name is None else name
    config[s] = st.sidebar.checkbox(label, value=config[s])
    return config[s]
def genconfig(config, transform, sizes, datatypes):
    """Initialise every <transform>_<dt>_<size> flag to False.

    Also records each key in the module-level ``realname`` map.
    """
    global realname
    for sz in sizes:
        for dt in datatypes:
            key = "%s_%s_%s" % (transform, dt, sz)
            config[key] = False
            realname[key] = key
def hasDCTF32(config):
    """Return True if any F32 DCT4 size is selected in the config."""
    # List comprehension (not a generator) so every key is looked up,
    # exactly as the original full loop did.
    flags = [config["DCT4_F32_%s" % sz] for sz in DCTSIZE]
    return any(flags)
def multiselect(config, name, options):
    """Render a sidebar multiselect and mirror the selection into ``config``.

    Options already True in ``config`` are pre-selected; afterwards every
    option is set True iff it was chosen.
    """
    preselected = [opt for opt in options if config[opt]]
    chosen = st.sidebar.multiselect(name, options, default=preselected)
    for opt in options:
        config[opt] = opt in chosen
def genui(config, transform, sizes, datatypes):
    """Render the size/datatype selection UI for one transform family."""
    # RFFT F32 is deprecated and only needed to support the F32 DCT4,
    # so hide it unless an F32 DCT4 is selected.
    keep_f32 = hasDCTF32(config) if transform == "RFFT" else True
    for size in st.sidebar.multiselect("Sizes", sizes):
        opts = [
            "%s_%s_%s" % (transform, dt, size)
            for dt in datatypes
            if dt != "F32" or keep_f32
        ]
        multiselect(config, "Nb = %d" % size, opts)
def configMake(config):
    """Render the sidebar UI that selects which constant tables are built in."""
    st.sidebar.header('Table Configuration')
    st.sidebar.info("Several options to include only the tables needed in an app and minimize code size.")
    # Coarse-to-fine: all tables -> all FFT tables -> per-transform selection.
    if not check(config,"allTables","All tables included"):
        if not check(config,"allFFTs","All FFT tables included"):
            st.sidebar.markdown("#### CFFT")
            genui(config,"CFFT",CFFTSIZE,CFFTDATATYPE)
            st.sidebar.info("Following transforms are using the CFFT. You need to enable the needed CFFTs above.")
            st.sidebar.markdown("#### RFFT FAST")
            genui(config,"RFFT_FAST",RFFTFASTSIZE,RFFTFASTDATATYPE)
            st.sidebar.markdown("#### DCT4")
            genui(config,"DCT4",DCTSIZE,DCTDATATYPE)
            st.sidebar.markdown("#### RFFT")
            genui(config,"RFFT",RFFTSIZE,RFFTDATATYPE)
            st.sidebar.markdown("#### Radix2 and Radix4 CFFT")
            st.sidebar.info("Those functions are deprecated")
            multiselect(config,"Radix",["CFFT_RADIX2_Q15","CFFT_RADIX4_Q15","CFFT_RADIX2_Q31","CFFT_RADIX4_Q31"])
        if not check(config,"allInterpolations",'All interpolation tables included'):
            selected=st.sidebar.multiselect("Functions",["Cosine","Sine","SineCosine","Normalized LMS"])
            for s in selected:
                if s == "Cosine":
                    multiselect(config,"Cosine",["COS_F32","COS_Q31","COS_Q15"])
                if s == "Sine":
                    multiselect(config,"Sine",["SIN_F32","SIN_Q31","SIN_Q15"])
                if s == "SineCosine":
                    multiselect(config,"SineCosine",["SIN_COS_F32","SIN_COS_Q31"])
                if s == "Normalized LMS":
                    multiselect(config,"Normalized LMS",["LMS_NORM_Q31","LMS_NORM_Q15"])
        # Complex-magnitude fast tables only exist for MVEI targets.
        if config["MVEI"]:
            st.sidebar.markdown("#### Complex Magnitude")
            multiselect(config,"Complex Magnitude",["CMPLX_MAG_Q31","CMPLX_MAG_Q15","CMPLX_MAG_FAST_Q15"])
def configCMake(config):
    """Cmake-specific configuration: choose source folders, then the shared table UI."""
    multiselect(config,"Folders",["BASICMATH",
                                  "COMPLEXMATH",
                                  "CONTROLLER",
                                  "FASTMATH",
                                  "FILTERING",
                                  "MATRIX",
                                  "STATISTICS",
                                  "SUPPORT",
                                  "TRANSFORM",
                                  "SVM",
                                  "BAYES",
                                  "DISTANCE",
                                  "INTERPOLATION","QUATERNIONMATH"])
    configMake(config)
# Initialise every per-transform selection flag to "not selected".
genconfig(config,"CFFT",CFFTSIZE,CFFTDATATYPE)
genconfig(config,"RFFT_FAST",RFFTFASTSIZE,RFFTFASTDATATYPE)
genconfig(config,"RFFT",RFFTSIZE,RFFTDATATYPE)
genconfig(config,"DCT4",DCTSIZE,DCTDATATYPE)
st.title('CMSIS-DSP Configuration')
st.warning("It is a work in progress. Only a small subset of the combinations has been tested.")
st.sidebar.header('Feature Configuration')
st.sidebar.info("To build on host. All features will be enabled.")
forHost=check(config,"HOST")
if not forHost:
    st.sidebar.info("Enable or disable float16 support")
    check(config,"Float16")
st.sidebar.info("Some configurations for the CMSIS-DSP code.")
check(config,"LOOPUNROLL")
st.sidebar.text("Decrease performances when selected:")
check(config,"ROUNDING")
check(config,"MATRIXCHECK")
st.sidebar.header('Vector extensions')
st.sidebar.info("Enable vector code. It is not automatic for Neon. Use of Helium will enable new options to select some interpolation tables.")
archi=st.sidebar.selectbox("Vector",('None','Helium','Neon'))
if archi == 'Neon':
    config["NEON"]=True
if archi == 'Helium':
    multiselect(config,"MVE configuration",["MVEI","MVEF"])
    # HELIUM is read by the cfft*Config helpers to choose the bit-reversal
    # table layout. NOTE(review): presumably initialised to False earlier in
    # the file -- confirm, otherwise the helpers raise NameError for non-Helium.
    HELIUM=True
    st.sidebar.info("When checked some experimental versions will be enabled and may be less performant than scalar version depending on the architecture.")
    check(config,"HELIUMEXPERIMENTAL")
if archi != 'None':
    st.sidebar.info("When autovectorization is on, pure C code will be compiled. The version with C intrinsics won't be compiled.")
    check(config,"AUTOVECTORIZE")
st.sidebar.header('Build Method')
st.sidebar.info("With cmake, some folders can be removed from the build.")
selected=st.sidebar.selectbox('Select', ("Make","Cmake"),index=1)
if selected == "Make":
    if not forHost:
        configMake(config)
    result,cmake=genMakeOptions(config)
else:
    if not forHost:
        configCMake(config)
    # NOTE(review): genCMakeOptions is defined elsewhere in this file.
    result,cmake=genCMakeOptions(config)
st.header('Build options for %s command line' % selected)
if selected == "Make":
    if test(cmake,"FLOAT16"):
        st.info("Float16 is selected. You may need to pass compiler specific options for the compiler to recognize the float16 type.")
# Render the flags in the requested output format.
mode=st.selectbox("Mode",["txt","MDK","sh","bat"])
if mode=="txt":
    st.code(textwrap.fill(options(result)))
if mode=="MDK":
    # MDK wants bare preprocessor symbols: strip -D prefixes and -I paths.
    opts=options(result)
    includes=""
    maybeincludes=re.findall(r'\-I([^\s]+)',opts)
    # Managed in MDK pack file
    #if maybeincludes:
    #    includes = maybeincludes
    #    st.text("Following include directories must be added")
    #    st.code(includes)
    opts=re.sub(r'\-D','',opts)
    opts=re.sub(r'\-I[^\s]+','',opts)
    st.text("MDK Preprocessor Symbols ")
    st.code(opts)
if mode=="sh":
    # One flag per line with shell continuation backslashes.
    lines=options(result).split()
    txt=""
    for l in lines:
        txt += " %s \\\n" % l
    txt += "\n"
    st.code(txt)
if mode=="bat":
    # Same, with Windows batch continuation carets.
    lines=options(result).split()
    txt=""
    for l in lines:
        txt += " %s ^\n" % l
    txt += "\n"
    st.code(txt)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import boto3
import mock
from airflow.exceptions import AirflowTaskTimeout
from airflow.providers.amazon.aws.hooks.datasync import AWSDataSyncHook
def no_datasync(x):
    """Identity function used as a no-op stand-in for moto's ``mock_datasync`` decorator."""
    return x
# Fall back to the no-op decorator when moto's datasync mock is missing or
# too old; the skipIf guards below then skip the mocked tests.
try:
    from moto import mock_datasync
    from moto.datasync.models import DataSyncBackend

    # ToDo: Remove after the moto>1.3.14 is released and contains following commit:
    # https://github.com/spulec/moto/commit/5cfbe2bb3d24886f2b33bb4480c60b26961226fc
    if "create_task" not in dir(DataSyncBackend) or "delete_task" not in dir(DataSyncBackend):
        mock_datasync = no_datasync
except ImportError:
    # flake8: noqa: F811
    mock_datasync = no_datasync
@mock_datasync
@unittest.skipIf(
    mock_datasync == no_datasync, "moto datasync package missing"
)  # pylint: disable=W0143
class TestAwsDataSyncHook(unittest.TestCase):
    """Tests the unpatched ``get_conn`` (the mocked class below patches it away)."""

    def test_get_conn(self):
        hook = AWSDataSyncHook(aws_conn_id="aws_default")
        self.assertIsNotNone(hook.get_conn())
# Explanation of: @mock.patch.object(AWSDataSyncHook, 'get_conn')
# aws_hook.py fiddles with config files and changes the region.
# If you have any local AWS credentials then aws_hook uses them for the region,
# and that region might not match us-east-1, which is used for the mocked self.client.
# Once patched, the AWSDataSyncHook.get_conn method is mocked and passed to the test as
# mock_get_conn. We then override it to just return the locally created self.client instead of
# the one created by the AWS self.hook.
# Unfortunately this means we can't test the get_conn method - which is why we have it in a
# separate class above.
@mock_datasync
@mock.patch.object(AWSDataSyncHook, "get_conn")
@unittest.skipIf(
    mock_datasync == no_datasync, "moto datasync package missing"
)  # pylint: disable=W0143
class TestAWSDataSyncHookMocked(unittest.TestCase):
    """Exercise AWSDataSyncHook against a moto-mocked DataSync backend.

    The class-level ``mock.patch.object`` hands every test a ``mock_get_conn``
    argument; each test points it at the locally created moto ``self.client``
    so region/credential handling inside the hook cannot interfere.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixture parameters used by setUp to build the default locations/task.
        self.source_server_hostname = "host"
        self.source_subdirectory = "somewhere"
        self.destination_bucket_name = "my_bucket"
        self.destination_bucket_dir = "dir"
        self.client = None
        self.hook = None
        self.source_location_arn = None
        self.destination_location_arn = None
        self.task_arn = None

    def setUp(self):
        # wait_interval_seconds=0 keeps the polling tests fast.
        self.client = boto3.client("datasync", region_name="us-east-1")
        self.hook = AWSDataSyncHook(aws_conn_id="aws_default", wait_interval_seconds=0)
        # Create default locations and tasks
        self.source_location_arn = self.client.create_location_smb(
            ServerHostname=self.source_server_hostname,
            Subdirectory=self.source_subdirectory,
            User="",
            Password="",
            AgentArns=["stuff"],
        )["LocationArn"]
        self.destination_location_arn = self.client.create_location_s3(
            S3BucketArn="arn:aws:s3:::{0}".format(self.destination_bucket_name),
            Subdirectory=self.destination_bucket_dir,
            S3Config={"BucketAccessRoleArn": "role"},
        )["LocationArn"]
        self.task_arn = self.client.create_task(
            SourceLocationArn=self.source_location_arn,
            DestinationLocationArn=self.destination_location_arn,
        )["TaskArn"]

    def tearDown(self):
        # Delete all tasks:
        tasks = self.client.list_tasks()
        for task in tasks["Tasks"]:
            self.client.delete_task(TaskArn=task["TaskArn"])
        # Delete all locations:
        locations = self.client.list_locations()
        for location in locations["Locations"]:
            self.client.delete_location(LocationArn=location["LocationArn"])
        self.client = None

    def test_init(self, mock_get_conn):
        """The hook is lazy: no connection or cached state until it is used."""
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        self.assertIsNone(self.hook.conn)
        self.assertFalse(self.hook.locations)
        self.assertFalse(self.hook.tasks)
        self.assertEqual(self.hook.wait_interval_seconds, 0)

    def test_create_location_smb(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        locations = self.hook.get_conn().list_locations()
        self.assertEqual(len(locations["Locations"]), 2)

        server_hostname = "my.hostname"
        subdirectory = "my_dir"
        agent_arns = ["stuff"]
        user = "username123"
        domain = "COMPANY.DOMAIN"
        mount_options = {"Version": "SMB2"}

        location_uri = "smb://{0}/{1}".format(server_hostname, subdirectory)

        create_location_kwargs = {
            "ServerHostname": server_hostname,
            "Subdirectory": subdirectory,
            "User": user,
            "Password": "password",
            "Domain": domain,
            "AgentArns": agent_arns,
            "MountOptions": mount_options,
        }
        location_arn = self.hook.create_location(location_uri, **create_location_kwargs)
        self.assertIsNotNone(location_arn)

        locations = self.client.list_locations()
        self.assertEqual(len(locations["Locations"]), 3)

        location_desc = self.client.describe_location_smb(LocationArn=location_arn)
        self.assertEqual(location_desc["LocationArn"], location_arn)
        self.assertEqual(location_desc["LocationUri"], location_uri)
        self.assertEqual(location_desc["AgentArns"], agent_arns)
        self.assertEqual(location_desc["User"], user)
        self.assertEqual(location_desc["Domain"], domain)
        self.assertEqual(location_desc["MountOptions"], mount_options)

    def test_create_location_s3(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        locations = self.hook.get_conn().list_locations()
        self.assertEqual(len(locations["Locations"]), 2)

        s3_bucket_arn = "some_s3_arn"
        subdirectory = "my_subdir"
        s3_config = {"BucketAccessRoleArn": "myrole"}

        location_uri = "s3://{0}/{1}".format(s3_bucket_arn, subdirectory)

        create_location_kwargs = {
            "S3BucketArn": s3_bucket_arn,
            "Subdirectory": subdirectory,
            "S3Config": s3_config,
        }
        location_arn = self.hook.create_location(location_uri, **create_location_kwargs)
        self.assertIsNotNone(location_arn)

        locations = self.client.list_locations()
        self.assertEqual(len(locations["Locations"]), 3)

        location_desc = self.client.describe_location_s3(LocationArn=location_arn)
        self.assertEqual(location_desc["LocationArn"], location_arn)
        self.assertEqual(location_desc["LocationUri"], location_uri)
        self.assertEqual(location_desc["S3Config"], s3_config)

    def test_create_task(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        log_group_arn = "cloudwatcharn123"
        name = "my_task"

        options = {  # Random options
            "VerifyMode": "NONE",
            "Atime": "NONE",
            "Mtime": "NONE",
            "Uid": "BOTH",
            "Gid": "INT_VALUE",
            "PreserveDeletedFiles": "PRESERVE",
            "PreserveDevices": "PRESERVE",
            "PosixPermissions": "BEST_EFFORT",
            "BytesPerSecond": 123,
        }

        create_task_kwargs = {
            "CloudWatchLogGroupArn": log_group_arn,
            "Name": name,
            "Options": options,
        }

        task_arn = self.hook.create_task(
            source_location_arn=self.source_location_arn,
            destination_location_arn=self.destination_location_arn,
            **create_task_kwargs
        )

        task = self.client.describe_task(TaskArn=task_arn)
        self.assertEqual(task["TaskArn"], task_arn)
        self.assertEqual(task["Name"], name)
        self.assertEqual(task["CloudWatchLogGroupArn"], log_group_arn)
        self.assertEqual(task["Options"], options)

    def test_update_task(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_arn = self.task_arn

        task = self.client.describe_task(TaskArn=task_arn)
        self.assertNotIn("Name", task)

        update_task_kwargs = {"Name": "xyz"}
        self.hook.update_task(task_arn, **update_task_kwargs)

        task = self.client.describe_task(TaskArn=task_arn)
        self.assertEqual(task["Name"], "xyz")

    def test_delete_task(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_arn = self.task_arn

        tasks = self.client.list_tasks()
        self.assertEqual(len(tasks["Tasks"]), 1)

        self.hook.delete_task(task_arn)

        tasks = self.client.list_tasks()
        self.assertEqual(len(tasks["Tasks"]), 0)

    def test_get_location_arns(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        # Get true location_arn from boto/moto self.client
        location_uri = "smb://{0}/{1}".format(
            self.source_server_hostname, self.source_subdirectory
        )
        locations = self.client.list_locations()
        # NOTE(review): relies on the default SMB location from setUp matching;
        # location_arn would be unbound if no URI matched.
        for location in locations["Locations"]:
            if location["LocationUri"] == location_uri:
                location_arn = location["LocationArn"]

        # Verify our self.hook gets the same
        location_arns = self.hook.get_location_arns(location_uri)

        self.assertEqual(len(location_arns), 1)
        self.assertEqual(location_arns[0], location_arn)

    def test_get_location_arns_case_sensitive(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        # Get true location_arn from boto/moto self.client
        location_uri = "smb://{0}/{1}".format(
            self.source_server_hostname.upper(), self.source_subdirectory
        )
        locations = self.client.list_locations()
        for location in locations["Locations"]:
            if location["LocationUri"] == location_uri.lower():
                location_arn = location["LocationArn"]

        # Verify our self.hook can do case sensitive searches
        location_arns = self.hook.get_location_arns(location_uri, case_sensitive=True)
        self.assertEqual(len(location_arns), 0)
        location_arns = self.hook.get_location_arns(location_uri, case_sensitive=False)
        self.assertEqual(len(location_arns), 1)
        self.assertEqual(location_arns[0], location_arn)

    def test_get_location_arns_trailing_slash(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        # Get true location_arn from boto/moto self.client
        location_uri = "smb://{0}/{1}/".format(
            self.source_server_hostname, self.source_subdirectory
        )
        locations = self.client.list_locations()
        for location in locations["Locations"]:
            if location["LocationUri"] == location_uri[:-1]:
                location_arn = location["LocationArn"]

        # Verify our self.hook manages trailing / correctly
        location_arns = self.hook.get_location_arns(
            location_uri, ignore_trailing_slash=False
        )
        self.assertEqual(len(location_arns), 0)
        location_arns = self.hook.get_location_arns(
            location_uri, ignore_trailing_slash=True
        )
        self.assertEqual(len(location_arns), 1)
        self.assertEqual(location_arns[0], location_arn)

    def test_get_task_arns_for_location_arns(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_arns = self.hook.get_task_arns_for_location_arns(
            [self.source_location_arn], [self.destination_location_arn]
        )
        self.assertEqual(len(task_arns), 1)
        self.assertEqual(task_arns[0], self.task_arn)

        task_arns = self.hook.get_task_arns_for_location_arns(["foo"], ["bar"])
        self.assertEqual(len(task_arns), 0)

    def test_start_task_execution(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task = self.client.describe_task(TaskArn=self.task_arn)
        self.assertNotIn("CurrentTaskExecutionArn", task)

        task_execution_arn = self.hook.start_task_execution(self.task_arn)
        self.assertIsNotNone(task_execution_arn)

        task = self.client.describe_task(TaskArn=self.task_arn)
        self.assertIn("CurrentTaskExecutionArn", task)
        self.assertEqual(task["CurrentTaskExecutionArn"], task_execution_arn)

        task_execution = self.client.describe_task_execution(
            TaskExecutionArn=task_execution_arn
        )
        self.assertIn("Status", task_execution)

    def test_cancel_task_execution(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_execution_arn = self.hook.start_task_execution(self.task_arn)
        self.assertIsNotNone(task_execution_arn)

        self.hook.cancel_task_execution(task_execution_arn=task_execution_arn)

        task = self.client.describe_task(TaskArn=self.task_arn)
        self.assertNotIn("CurrentTaskExecutionArn", task)

    def test_get_task_description(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task = self.client.describe_task(TaskArn=self.task_arn)
        self.assertIn("TaskArn", task)
        self.assertIn("Status", task)
        self.assertIn("SourceLocationArn", task)
        self.assertIn("DestinationLocationArn", task)
        self.assertNotIn("CurrentTaskExecutionArn", task)

    def test_get_current_task_execution_arn(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_execution_arn = self.hook.start_task_execution(self.task_arn)

        current_task_execution = self.hook.get_current_task_execution_arn(self.task_arn)
        self.assertEqual(current_task_execution, task_execution_arn)

    def test_wait_for_task_execution(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_execution_arn = self.hook.start_task_execution(self.task_arn)
        result = self.hook.wait_for_task_execution(
            task_execution_arn, max_iterations=20
        )
        self.assertIsNotNone(result)

    def test_wait_for_task_execution_timeout(self, mock_get_conn):
        # ### Configure mock:
        mock_get_conn.return_value = self.client
        # ### Begin tests:
        task_execution_arn = self.hook.start_task_execution(self.task_arn)
        with self.assertRaises(AirflowTaskTimeout):
            result = self.hook.wait_for_task_execution(
                task_execution_arn, max_iterations=1
            )
            # NOTE(review): unreachable -- the call above raises before
            # ``result`` is bound; presumably kept for documentation.
            self.assertIsNone(result)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio.stats.composition import (closure, multiplicative_replacement,
perturb, perturb_inv, power, inner,
clr, clr_inv, ilr, ilr_inv,
centralize)
class CompositionTests(TestCase):
    """Unit tests for the skbio.stats.composition transforms.

    Each test also re-checks its input afterwards to guarantee the transform
    did not modify the data in place.
    """

    def setUp(self):
        # Compositional data
        self.cdata1 = np.array([[2, 2, 6],
                                [4, 4, 2]])
        self.cdata2 = np.array([2, 2, 6])

        self.cdata3 = np.array([[1, 2, 3, 0, 5],
                                [1, 0, 0, 4, 5],
                                [1, 2, 3, 4, 5]])
        self.cdata4 = np.array([1, 2, 3, 0, 5])
        # Same data as plain Python lists (transforms must accept those too).
        self.cdata5 = [[2, 2, 6], [4, 4, 2]]
        self.cdata6 = [[1, 2, 3, 0, 5],
                       [1, 0, 0, 4, 5],
                       [1, 2, 3, 4, 5]]
        self.cdata7 = [np.exp(1), 1, 1]
        self.cdata8 = [np.exp(1), 1, 1, 1]

        # Simplicial orthonormal basis obtained from Gram-Schmidt
        self.ortho1 = [[0.44858053, 0.10905743, 0.22118102, 0.22118102],
                       [0.3379924, 0.3379924, 0.0993132, 0.22470201],
                       [0.3016453, 0.3016453, 0.3016453, 0.09506409]]

        # Real data
        self.rdata1 = [[0.70710678, -0.70710678, 0., 0.],
                       [0.40824829, 0.40824829, -0.81649658, 0.],
                       [0.28867513, 0.28867513, 0.28867513, -0.8660254]]

        # Bad datasets: negative entry, and rank-3 array.
        self.bad1 = np.array([1, 2, -1])
        self.bad2 = np.array([[[1, 2, 3, 0, 5]]])

    def test_closure(self):
        npt.assert_allclose(closure(self.cdata1),
                            np.array([[.2, .2, .6],
                                      [.4, .4, .2]]))
        npt.assert_allclose(closure(self.cdata2),
                            np.array([.2, .2, .6]))
        npt.assert_allclose(closure(self.cdata5),
                            np.array([[.2, .2, .6],
                                      [.4, .4, .2]]))
        with self.assertRaises(ValueError):
            closure(self.bad1)
        with self.assertRaises(ValueError):
            closure(self.bad2)

        # make sure that inplace modification is not occurring
        closure(self.cdata2)
        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))

    def test_perturb(self):
        # Perturbing by the uniform vector is the identity.
        pmat = perturb(closure(self.cdata1),
                       closure(np.array([1, 1, 1])))
        npt.assert_allclose(pmat,
                            np.array([[.2, .2, .6],
                                      [.4, .4, .2]]))

        pmat = perturb(closure(self.cdata1),
                       closure(np.array([10, 10, 20])))
        npt.assert_allclose(pmat,
                            np.array([[.125, .125, .75],
                                      [1./3, 1./3, 1./3]]))

        pmat = perturb(closure(self.cdata1),
                       closure(np.array([10, 10, 20])))
        npt.assert_allclose(pmat,
                            np.array([[.125, .125, .75],
                                      [1./3, 1./3, 1./3]]))

        pmat = perturb(closure(self.cdata2),
                       closure([1, 2, 1]))
        npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))

        pmat = perturb(closure(self.cdata5),
                       closure(np.array([1, 1, 1])))
        npt.assert_allclose(pmat,
                            np.array([[.2, .2, .6],
                                      [.4, .4, .2]]))

        with self.assertRaises(ValueError):
            perturb(closure(self.cdata5), self.bad1)

        # make sure that inplace modification is not occurring
        perturb(self.cdata2, [1, 2, 3])
        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))

    def test_power(self):
        pmat = power(closure(self.cdata1), 2)
        npt.assert_allclose(pmat,
                            np.array([[.04/.44, .04/.44, .36/.44],
                                      [.16/.36, .16/.36, .04/.36]]))

        pmat = power(closure(self.cdata2), 2)
        npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)

        pmat = power(closure(self.cdata5), 2)
        npt.assert_allclose(pmat,
                            np.array([[.04/.44, .04/.44, .36/.44],
                                      [.16/.36, .16/.36, .04/.36]]))

        with self.assertRaises(ValueError):
            power(self.bad1, 2)

        # make sure that inplace modification is not occurring
        power(self.cdata2, 4)
        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))

    def test_perturb_inv(self):
        # Inverse perturbation by x equals perturbation by 1/x.
        pmat = perturb_inv(closure(self.cdata1),
                           closure([.1, .1, .1]))
        imat = perturb(closure(self.cdata1),
                       closure([10, 10, 10]))
        npt.assert_allclose(pmat, imat)

        pmat = perturb_inv(closure(self.cdata1),
                           closure([1, 1, 1]))
        npt.assert_allclose(pmat,
                            closure([[.2, .2, .6],
                                     [.4, .4, .2]]))

        pmat = perturb_inv(closure(self.cdata5),
                           closure([.1, .1, .1]))
        imat = perturb(closure(self.cdata1), closure([10, 10, 10]))
        npt.assert_allclose(pmat, imat)

        with self.assertRaises(ValueError):
            perturb_inv(closure(self.cdata1), self.bad1)

        # make sure that inplace modification is not occurring
        perturb_inv(self.cdata2, [1, 2, 3])
        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))

    def test_inner(self):
        a = inner(self.cdata5, self.cdata5)
        npt.assert_allclose(a, np.array([[0.80463264, -0.50766667],
                                         [-0.50766667, 0.32030201]]))

        b = inner(self.cdata7, self.cdata7)
        npt.assert_allclose(b, 0.66666666666666663)

        # Make sure that orthogonality holds
        npt.assert_allclose(inner(self.ortho1, self.ortho1), np.identity(3),
                            rtol=1e-04, atol=1e-06)

        # Mismatched number of components must be rejected.
        with self.assertRaises(ValueError):
            inner(self.cdata1, self.cdata8)

        # make sure that inplace modification is not occurring
        inner(self.cdata1, self.cdata1)
        npt.assert_allclose(self.cdata1,
                            np.array([[2, 2, 6],
                                      [4, 4, 2]]))

    def test_multiplicative_replacement(self):
        amat = multiplicative_replacement(closure(self.cdata3))
        npt.assert_allclose(amat,
                            np.array([[0.087273, 0.174545, 0.261818,
                                       0.04, 0.436364],
                                      [0.092, 0.04, 0.04, 0.368, 0.46],
                                      [0.066667, 0.133333, 0.2,
                                       0.266667, 0.333333]]),
                            rtol=1e-5, atol=1e-5)

        amat = multiplicative_replacement(closure(self.cdata4))
        npt.assert_allclose(amat,
                            np.array([0.087273, 0.174545, 0.261818,
                                      0.04, 0.436364]),
                            rtol=1e-5, atol=1e-5)

        amat = multiplicative_replacement(closure(self.cdata6))
        npt.assert_allclose(amat,
                            np.array([[0.087273, 0.174545, 0.261818,
                                       0.04, 0.436364],
                                      [0.092, 0.04, 0.04, 0.368, 0.46],
                                      [0.066667, 0.133333, 0.2,
                                       0.266667, 0.333333]]),
                            rtol=1e-5, atol=1e-5)

        with self.assertRaises(ValueError):
            multiplicative_replacement(self.bad1)
        with self.assertRaises(ValueError):
            multiplicative_replacement(self.bad2)

        # make sure that inplace modification is not occurring
        multiplicative_replacement(self.cdata4)
        npt.assert_allclose(self.cdata4, np.array([1, 2, 3, 0, 5]))

    def test_clr(self):
        # clr(x) = log(x / geometric_mean(x)), row-wise for matrices.
        cmat = clr(closure(self.cdata1))
        A = np.array([.2, .2, .6])
        B = np.array([.4, .4, .2])

        npt.assert_allclose(cmat,
                            [np.log(A / np.exp(np.log(A).mean())),
                             np.log(B / np.exp(np.log(B).mean()))])

        cmat = clr(closure(self.cdata2))
        A = np.array([.2, .2, .6])
        npt.assert_allclose(cmat,
                            np.log(A / np.exp(np.log(A).mean())))

        cmat = clr(closure(self.cdata5))
        A = np.array([.2, .2, .6])
        B = np.array([.4, .4, .2])
        npt.assert_allclose(cmat,
                            [np.log(A / np.exp(np.log(A).mean())),
                             np.log(B / np.exp(np.log(B).mean()))])

        with self.assertRaises(ValueError):
            clr(self.bad1)
        with self.assertRaises(ValueError):
            clr(self.bad2)

        # make sure that inplace modification is not occurring
        clr(self.cdata2)
        npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))

    def test_clr_inv(self):
        npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
        # clr is the left inverse of clr_inv.
        npt.assert_allclose(clr(clr_inv(self.rdata1)), self.rdata1)

        # make sure that inplace modification is not occurring
        clr_inv(self.rdata1)
        npt.assert_allclose(self.rdata1,
                            np.array([[0.70710678, -0.70710678, 0., 0.],
                                      [0.40824829, 0.40824829,
                                       -0.81649658, 0.],
                                      [0.28867513, 0.28867513,
                                       0.28867513, -0.8660254]]))

    def test_centralize(self):
        cmat = centralize(closure(self.cdata1))
        npt.assert_allclose(cmat,
                            np.array([[0.22474487, 0.22474487, 0.55051026],
                                      [0.41523958, 0.41523958, 0.16952085]]))
        cmat = centralize(closure(self.cdata5))
        npt.assert_allclose(cmat,
                            np.array([[0.22474487, 0.22474487, 0.55051026],
                                      [0.41523958, 0.41523958, 0.16952085]]))

        with self.assertRaises(ValueError):
            centralize(self.bad1)
        with self.assertRaises(ValueError):
            centralize(self.bad2)

        # make sure that inplace modification is not occurring
        centralize(self.cdata1)
        npt.assert_allclose(self.cdata1,
                            np.array([[2, 2, 6],
                                      [4, 4, 2]]))

    def test_ilr(self):
        mat = closure(self.cdata7)
        npt.assert_array_almost_equal(ilr(mat),
                                      np.array([0.70710678, 0.40824829]))

        # Should give same result as inner
        npt.assert_allclose(ilr(self.ortho1), np.identity(3),
                            rtol=1e-04, atol=1e-06)

        with self.assertRaises(ValueError):
            ilr(self.cdata1, basis=self.cdata1)

        # make sure that inplace modification is not occurring
        ilr(self.cdata1)
        npt.assert_allclose(self.cdata1,
                            np.array([[2, 2, 6],
                                      [4, 4, 2]]))

    def test_ilr_inv(self):
        mat = closure(self.cdata7)
        # ilr_inv is the inverse of ilr.
        npt.assert_array_almost_equal(ilr_inv(ilr(mat)), mat)

        npt.assert_allclose(ilr_inv(np.identity(3)), self.ortho1,
                            rtol=1e-04, atol=1e-06)

        with self.assertRaises(ValueError):
            ilr_inv(self.cdata1, basis=self.cdata1)

        # make sure that inplace modification is not occurring
        ilr_inv(self.cdata1)
        npt.assert_allclose(self.cdata1,
                            np.array([[2, 2, 6],
                                      [4, 4, 2]]))
# Allow running this test module directly.
if __name__ == "__main__":
    main()
| |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Testing IronPython Compiler
##
from iptest.assert_util import *
# This module exercises the IronPython compiler and only runs on CLR builds.
skiptest("win32")
from iptest.file_util import *
from iptest.process_util import *
import sys
import os
import System
from System.Collections.Generic import List

# Reload the IronPython assemblies freshly for this test run.
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
import IronPython

if False: #Needs to be updated or removed for DLR
    from IronPython.Hosting import PythonCompiler
load_iron_python_test()
def CompileAsDll(fileName, assemblyName):
    """Compile a single Python source file into a DLL assembly."""
    src_list = List[str]()
    src_list.Add(fileName)
    compiler = PythonCompiler(src_list, assemblyName)
    compiler.TargetKind = System.Reflection.Emit.PEFileKinds.Dll
    compiler.Compile()
def CompileOneFileAsConsoleApp1(fileName, assemblyName, setMainFile) :
    """Compile one source file as a console exe, optionally marking it as main."""
    src_list = List[str]()
    src_list.Add(fileName)
    compiler = PythonCompiler(src_list, assemblyName)
    if setMainFile:
        compiler.MainFile = fileName
    compiler.Compile()
def CompileOneFileAsConsoleApp2(fileName, assemblyName):
    """Compile with a main file that does not exist; callers expect this to fail."""
    src_list = List[str]()
    src_list.Add(fileName)
    compiler = PythonCompiler(src_list, assemblyName)
    compiler.MainFile = "NotExistFile"
    compiler.Compile()
def CompileTwoFilesAsConsoleApp(fileName1, fileName2, assemblyName, setMainFile):
    """Compile two source files into one console exe; fileName1 is main if requested."""
    src_list = List[str]()
    for source_file in (fileName1, fileName2):
        src_list.Add(source_file)
    compiler = PythonCompiler(src_list, assemblyName)
    if setMainFile:
        compiler.MainFile = fileName1
    compiler.Compile()
def UsingReference(fileName, typeName, assemblyName):
    """Compile fileName as main, referencing the assembly that defines typeName."""
    src_list = List[str]()
    src_list.Add(fileName)
    compiler = PythonCompiler(src_list, assemblyName)
    compiler.MainFile = fileName
    # Resolve the full assembly name from the given assembly-qualified type name.
    references = List[str]()
    references.Add(System.Type.GetType(typeName).Assembly.FullName)
    compiler.ReferencedAssemblies = references
    compiler.Compile()
def CheckIncludeDebugInformation(fileName, assemblyName, include):
    """Compile fileName with IncludeDebugInformation set to `include`."""
    src_list = List[str]()
    src_list.Add(fileName)
    compiler = PythonCompiler(src_list, assemblyName)
    compiler.IncludeDebugInformation = include
    compiler.Compile()
def FileExists(file):
    """Return True when `file` exists on disk (via .NET System.IO)."""
    exists = System.IO.File.Exists(file)
    return exists
def DeleteFile(file):
    """Best-effort delete of `file`: retry up to five times, sleeping 1s
    between attempts (the file may still be locked by a process that just
    exited). Failures are swallowed; deletion is never guaranteed.
    """
    for _attempt in range(5):
        try:
            System.IO.File.Delete(file)
            break
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; delete remains best-effort otherwise.
            System.Threading.Thread.Sleep(1000)
def FileRemoval(*files):
    """Best-effort delete of every named file (see DeleteFile)."""
    for path in files:
        DeleteFile(path)
def GetFullPath(file):
    """Return the absolute path of `file`, lower-cased for comparisons."""
    absolute = System.IO.Path.GetFullPath(file)
    return absolute.ToLower()
def RunPythonExe(file, *args):
    """Copy the compiled exe next to the interpreter, run it with `args`,
    and assert a zero exit code. The copy is needed so the exe can locate
    the IronPython assemblies living under sys.prefix."""
    fullpath = GetFullPath(file)
    temppath = System.IO.Path.Combine(sys.prefix, System.IO.FileInfo(fullpath).Name).ToLower()
    if (fullpath != temppath):
        System.IO.File.Copy(fullpath, temppath, True)
    # argv[0] must be the program path itself, then the caller's args.
    realargs = [temppath]
    realargs.extend(args)
    try:
        # mode 0 == os.P_WAIT: block until the child exits, return its code.
        retval = os.spawnv(0, temppath, realargs)
    except:
        retval = 1
    # hack
    if (fullpath != temppath):
        DeleteFile(temppath)
    Assert(not retval)
## compile as dll
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
write_to_file(source, '''
class B:
def M1(self):
return 20
''')
@disabled("Needs to be updated or removed for DLR")
def test_sanity():
    """Compiling to a DLL produces both the assembly and its PDB."""
    FileRemoval(assembly, pdbfile)
    CompileAsDll(source, assembly)
    for produced in (assembly, pdbfile):
        Assert(FileExists(produced))
@disabled("Needs to be updated or removed for DLR")
def test_one_source_consoleapp():
    """A single-source exe compiles with or without an explicit main file;
    an invalid main file must make compilation fail."""
    src, exe, pdb = "tempFile1.tpy", "tempFile1.exe", "tempFile1.pdb"
    # With only one source file, naming the main file is optional.
    for set_main in (True, False):
        FileRemoval(exe, pdb)
        CompileOneFileAsConsoleApp1(src, exe, set_main)
        Assert(FileExists(exe))
        Assert(FileExists(pdb))
    ## compile as exe, but main file is INVALID
    AssertError(Exception, CompileOneFileAsConsoleApp2, src, exe)
@disabled("Needs to be updated or removed for DLR")
def test_two_source_consoleapp():
    """Two sources compile into one exe when a main file is named; compilation
    must fail when it is not."""
    ## compile 2 files as exe
    source1, source2, assembly, pdbfile = "tempFile2.tpy", "tempFile1.tpy", "tempFile2.exe", "tempFile2.pdb"
    # source2 is the module-level fixture defining class B.
    write_to_file(source1, '''
import tempFile1
class D(tempFile1.B):
    def M2(self):
        return 100
b = tempFile1.B()
if (b.M1() != 20) :
    raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
    raise AssertionError("failed 2")
''')
    FileRemoval(assembly, pdbfile);
    CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
    Assert(FileExists(assembly))
    Assert(FileExists(pdbfile))
    RunPythonExe(assembly)
    ## compile 2 files as exe, but main file is not set
    AssertError(Exception, CompileTwoFilesAsConsoleApp, source1, source2, assembly, False)
@disabled("Needs to be updated or removed for DLR")
def test_debug_consoleapp():
    """IncludeDebugInformation controls whether a PDB file is emitted."""
    src, asm, pdb = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
    # With debug info: both assembly and PDB appear.
    FileRemoval(asm, pdb)
    CheckIncludeDebugInformation(src, asm, True)
    Assert(FileExists(asm))
    Assert(FileExists(pdb))
    # Without debug info: assembly only, no PDB.
    FileRemoval(asm, pdb)
    CheckIncludeDebugInformation(src, asm, False)
    Assert(FileExists(asm))
    Assert(FileExists(pdb) == False)
@disabled("Needs to be updated or removed for DLR")
def test_referenced_assemblies_consoleapp():
    """Compile an exe that references System.Xml, run it, and verify it wrote
    the expected XML document."""
    ## Test Using referenced assemblies
    source, assembly, pdbfile = "tempFile3.tpy", "tempFile3.exe", "tempFile3.pdb"
    import clr
    clr.AddReferenceByPartialName("System.Xml")
    # sys.LoadAssembly...("System.xml") is emitted because of referenced assemblies specified
    write_to_file(source, '''
import System
import System.Xml
tw = System.Xml.XmlTextWriter("tempResult.xml", System.Text.Encoding.ASCII)
tw.WriteStartDocument()
tw.WriteStartElement("PythonCompiler")
tw.WriteEndElement()
tw.WriteEndDocument()
tw.Close()
''')
    # AssemblyQualifiedName is "Type, Assembly, Version=..., Culture=..., ...";
    # element [2] carries the version/culture suffix reused for System.Xml.
    fullTypeName = System.Type.GetType("System.Int32").AssemblyQualifiedName.split(',', 2)
    UsingReference(source, "System.Xml.XmlTextReader, System.Xml," + fullTypeName[2], assembly)
    tempXml = "tempResult.xml"
    ## BE CLEAN
    FileRemoval(tempXml)
    ## RUN
    RunPythonExe(assembly)
    ## CHECK
    Assert(FileExists(tempXml), "File was not generated after running the exe")
    f = open(tempXml)
    Assert(f.read().find("PythonCompiler") <> -1, "The specified word is not found in the file")
    f.close()
    FileRemoval(tempXml)
# Remove every fixture source and build artifact left behind by the tests above.
for filename in ['tempFile1', 'tempFile2', 'tempFile3']:
    for suffix in [ 'tpy', 'dll', 'exe', 'pdb']:
        FileRemoval(filename + '.' + suffix)
#
# verify that generated exe will run stand alone.
#
@disabled("Needs to be updated or removed for DLR")
def test_exe_standalone():
    """-X:SaveAssemblies round trip: a generated exe runs standalone, nothing
    is generated without the option, and argv passes through to compiled exes."""
    tempFile1 = '''
class B:
    def M1(self):
        return 20
'''
    tempFile2 = '''
import tempFile1
class D(tempFile1.B):
    def M2(self):
        return 100
b = tempFile1.B()
if (b.M1() != 20) :
    raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
    raise AssertionError("failed 2")
'''
    tempFileName1 = GetFullPath("tempFile1.py")
    tempFileName2 = GetFullPath("tempFile2.py")
    tempExeName1 = GetFullPath("tempFile1.exe")
    tempExeName2 = GetFullPath("tempFile2.exe")
    tempPdbName1 = GetFullPath("tempFile1.pdb")
    tempPdbName2 = GetFullPath("tempFile2.pdb")
    write_to_file(tempFileName1, tempFile1)
    write_to_file(tempFileName2, tempFile2)
    # Run under -X:SaveAssemblies so a standalone exe is left on disk.
    AreEqual(launch_ironpython_changing_extensions(tempFileName2, ["-X:SaveAssemblies"], ["-X:LightweightScopes", "-X:AssembliesDir"]), 0)
    RunPythonExe(tempExeName2)
    FileRemoval(tempFileName1, tempFileName2, tempExeName1, tempExeName2, tempPdbName1, tempPdbName2)
    #
    # Verify that the executable doesn't get generated
    #
    tempFile1 = """
import System
files = map(lambda extension: System.IO.Path.ChangeExtension(__file__, extension), [".dll", ".exe", ".pdb"])
for file in files:
    if System.IO.File.Exists(file):
        print file, "exists"
        raise AssertionError(file + " exists")
"""
    write_to_file(tempFileName1, tempFile1)
    AreEqual(launch_ironpython_changing_extensions(tempFileName1, [], ["-X:SaveAssemblies"]), 0)
    FileRemoval(tempFileName1, tempExeName1, tempPdbName1)
    # A compiled exe must resolve built-in module imports (math) at runtime.
    source1 = "tempFile1.tpy"
    source2 = "tempFile2.tpy"
    assembly = "tempFile1.exe"
    pdbfile = "tempFile1.pdb"
    write_to_file(source1, """
import tempFile2
if tempFile2.value != 8.0:
    raise AssertionError("failed import built-in")
""")
    write_to_file(source2, """
import math
value = math.pow(2, 3)
""")
    CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
    Assert(FileExists(assembly))
    Assert(FileExists(pdbfile))
    RunPythonExe(assembly)
    FileRemoval(source1, source2, assembly)
    # verify arguments are passed through...
    # Exit code is 24 + (-22) + (-2) == 0; argv[4] must equal the exe path.
    write_to_file(source1, """
import sys
def CustomAssert(c):
    if not c: raise AssertionError("Assertin Failed")
CustomAssert(sys.argv[0].lower() == sys.argv[4].lower())
sys.exit(int(sys.argv[1]) + int(sys.argv[2]) + int(sys.argv[3]))
""")
    CompileOneFileAsConsoleApp1(source1, assembly, False)
    RunPythonExe(assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
    RunPythonExe(".\\" + assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
    FileRemoval(source1, assembly)
@disabled("Needs to be updated or removed for DLR")
def test_compilersinktest():
    """Malformed snippets must report errors through the compiler sink;
    well-formed assignments must report none."""
    from IronPythonTest import PythonCompilerSinkTest
    st = PythonCompilerSinkTest()
    # Each snippet below is deliberately invalid and must yield > 0 errors.
    for s in ['''
class Class:zxvc
    "Description of Class"cxvxcvb
''',
        '(1 and 1) = 1',
        '(lambda x: x = 1 )',
        ' print 1',
        '(name1 =1) = 1',
        '''d = {}
for x in d.keys()
    pass
''',
        ]:
        Assert(st.CompileWithTestSink(s) > 0)
    # Valid assignment forms must compile cleanly (0 errors).
    for s in [
        'name = 1',
        '(name) = 1',
        '(name1,name2) = (1,2)',
        'name, = (1,0)',
        ]:
        Assert(st.CompileWithTestSink(s) == 0)
@disabled("ResourceFile is not available anymore")
def test_ip_hosting_resource_file():
    '''
    Exercise the Name/File/PublicResource accessors of
    IronPython.Hosting.ResourceFile.
    '''
    resource_files = [ IronPython.Hosting.ResourceFile("name0", "file0"),
                       IronPython.Hosting.ResourceFile("name1", "file1", False),
                     ]
    # Both entries now have PublicResource == False (the 2-arg ctor default
    # is overridden here; the 3-arg ctor was passed False explicitly).
    resource_files[0].PublicResource = False
    for index, rf in enumerate(resource_files):
        AreEqual(rf.Name, "name" + str(index))
        rf.Name = "name"
        AreEqual(rf.Name, "name")
        AreEqual(rf.File, "file" + str(index))
        rf.File = "file"
        AreEqual(rf.File, "file")
        AreEqual(rf.PublicResource, False)
        rf.PublicResource = True
        AreEqual(rf.PublicResource, True)
@skip("multiple_execute")
@skip("netstandard") # no clr.CompileModules in netstandard
def test_compiled_code():
    """clr.CompileModules round trip: compile test modules to a .pyil and
    import them back from the compiled assembly."""
    if System.Environment.GetEnvironmentVariable('DLR_SaveAssemblies'):
        # The SaveAssemblies option is not compatible with saving code to disk
        print '... skipping test if DLR_SaveAssemblies is set...'
        return
    import clr
    pyil = os.path.join(testpath.temporary_dir, 'test.pyil')
    # make sure we can compile
    clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'))
    # make sure we can compile multiple files
    clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'), os.path.join(testpath.public_testdir, 'test_slice.py'))
    clr.AddReferenceToFileAndPath(pyil)
    import nt
    # and make sure we can run some reasonable sophisticated code...
    # Move the on-disk module aside so the import below must resolve to the
    # compiled assembly, then restore it no matter what.
    System.IO.File.Move(os.path.join(testpath.public_testdir, 'test_class.py'), 'old_test_class.py')
    try:
        import test_class
        Assert(test_class.test_oldstyle_getattr.__doc__ != '')
    finally:
        System.IO.File.Move('old_test_class.py', os.path.join(testpath.public_testdir, 'test_class.py'))
@skip("multiple_execute")
@skip("netstandard") # no System.ICloneable in netstandard
def test_cached_types():
    """clr.CompileSubclassTypes: pre-generated subclass assemblies are found
    and reused when the matching Python class is defined later."""
    import clr
    from System import IComparable, IFormattable, ICloneable
    import IronPythonTest
    # Work inside the temporary dir so generated DLLs land there.
    cwd = os.getcwd()
    os.chdir(testpath.temporary_dir)
    # basic sanity test that we can compile...
    clr.CompileSubclassTypes('test', (object, ))
    clr.CompileSubclassTypes('test', object)
    clr.CompileSubclassTypes('test', object, str, int, long, float, complex)
    clr.CompileSubclassTypes('test', (object, IComparable[()]))
    clr.CompileSubclassTypes('test', (object, IComparable[()]), (str, IComparable[()]))
    # build an unlikely existing type and make sure construction gives us
    # back the correct type.
    clr.CompileSubclassTypes('cached_type_dll', (object, IComparable[()], IFormattable, ICloneable))
    asm = System.Reflection.Assembly.LoadFrom(os.path.join(testpath.temporary_dir, 'cached_type_dll.dll'))
    clr.AddReference(asm)
    class x(object, IComparable[()], IFormattable, ICloneable):
        pass
    a = x()
    # The subclass type must come from the pre-compiled assembly.
    AreEqual(clr.GetClrType(x).Assembly, asm)
    # collect all types that are available in IronPythonTest and
    # pre-gen them, then run test_inheritance to make sure it all works.
    types = []
    queue = [IronPythonTest]
    while queue:
        cur = queue.pop()
        for name in dir(cur):
            attr = getattr(cur, name)
            if type(attr) is type:
                clrType = clr.GetClrType(attr)
                # Skip types that cannot be subclassed.
                if clrType.IsEnum or clrType.IsSealed or clrType.IsValueType or clrType.ContainsGenericParameters:
                    continue
                types.append(attr)
            elif type(attr) == type(IronPythonTest):
                # Nested namespace object: recurse into it.
                queue.append(attr)
    clr.CompileSubclassTypes('InheritanceTypes', *types)
    clr.AddReferenceToFileAndPath(os.path.join(testpath.temporary_dir, 'InheritanceTypes.dll'))
    import test_inheritance
    #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21892
    # verify that GetSubclassedTypes round trips with clr.CompileSubclassTypes
    clr.CompileSubclassTypes('finaltest', *clr.GetSubclassedTypes())
    clr.AddReference('finaltest')
    os.chdir(cwd)
# Drive all test_* functions in this module, then remove the module-level
# fixture source written at import time.
run_test(__name__)
os.remove('tempFile1.tpy')
| |
#
# tests/test_tensorflow_mnist_adapter.py - unit test for the tensorflow MNIST adapter.
#
# Copyright (c) 2018 SingularityNET
#
# Distributed under the MIT software license, see LICENSE file.
#
import logging
from pathlib import Path
import pytest
from adapters.tensorflow.mnist import TensorflowMNIST, MNIST_CLASSIFIER_ID
from sn_agent import ontology as onto
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.log import setup_logging
from sn_agent.ontology.service_descriptor import ServiceDescriptor
from sn_agent.service_adapter import setup_service_manager
from sn_agent.test.mocks import MockApp
# A 28 x 28 image of a 7 which has been flattened into a single float 784-element vector format
# as required by the tensorflow mnist adapter.
# 784 grayscale intensities in [0, 1] (28 rows x 28 columns, row-major): the
# sample digit "7" consumed by test_tensorflow_mnist_adapter below.
mnist_seven_image = [
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.32941177, 0.72549021, 0.62352943, 0.59215689, 0.23529413, 0.14117648,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0.8705883, 0.99607849, 0.99607849, 0.99607849, 0.99607849,
    0.9450981, 0.77647066, 0.77647066, 0.77647066, 0.77647066, 0.77647066,
    0.77647066, 0.77647066, 0.77647066, 0.66666669, 0.20392159, 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0.26274511, 0.44705886, 0.28235295,
    0.44705886, 0.63921571, 0.89019614, 0.99607849, 0.88235301, 0.99607849,
    0.99607849, 0.99607849, 0.98039222, 0.89803928, 0.99607849, 0.99607849,
    0.54901963, 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0.06666667, 0.25882354,
    0.05490196, 0.26274511, 0.26274511, 0.26274511, 0.23137257, 0.08235294,
    0.92549026, 0.99607849, 0.41568631, 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.32549021, 0.99215692, 0.81960791, 0.07058824, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.08627451, 0.91372555, 1., 0.32549021, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.50588238, 0.99607849, 0.9333334, 0.17254902, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.23137257, 0.97647065, 0.99607849, 0.24313727, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.52156866, 0.99607849, 0.73333335, 0.01960784, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.03529412, 0.80392164, 0.97254908, 0.227451, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.49411768, 0.99607849, 0.71372551, 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.29411766, 0.98431379, 0.94117653, 0.22352943, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.07450981, 0.86666673, 0.99607849, 0.65098041, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.01176471, 0.7960785, 0.99607849, 0.8588236, 0.13725491, 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.14901961, 0.99607849, 0.99607849, 0.3019608, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.12156864, 0.87843144, 0.99607849, 0.45098042, 0.00392157, 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.52156866, 0.99607849, 0.99607849, 0.20392159, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.2392157,
    0.94901967, 0.99607849, 0.99607849, 0.20392159, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0.,
    0.47450984, 0.99607849, 0.99607849, 0.8588236, 0.15686275, 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0.47450984, 0.99607849, 0.81176478, 0.07058824, 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., 0., 0., 0.,
    0., 0., 0., 0., ]
# Module-level logger and the directory containing this test file.
log = logging.getLogger(__name__)
TEST_DIRECTORY = Path(__file__).parent
@pytest.fixture
def app():
    """Pytest fixture: a mock application with the ontology initialised."""
    mock_app = MockApp()
    onto.setup_ontology(mock_app)
    return mock_app
def test_tensorflow_mnist_adapter(app):
    """End-to-end check of the Tensorflow MNIST service adapter.

    Builds a single-item job that classifies the flattened 28 x 28 image of a
    7 defined above (mnist_seven_image), runs the adapter, and verifies both
    the predicted digit and its confidence.

    :param app: mock application fixture with the ontology already set up
    """
    setup_logging()
    log.debug("Testing Tensorflow MNIST Adapter")  # fixed "NNIST" typo
    # One job: image data attached inline; results returned attached as well.
    job_parameters = { 'input_type': 'attached',
                       'input_data': {
                           'images': [mnist_seven_image],
                       },
                       'output_type': 'attached',
                     }
    # Get the service for an MNIST classifier. A service identifies a unique
    # service provided by SingularityNET and is part of the ontology.
    ontology = app['ontology']
    mnist_service = ontology.get_service(MNIST_CLASSIFIER_ID)
    # Create the Tensorflow MNIST service adapter.
    mnist_service_adapter = TensorflowMNIST(app, mnist_service)
    # Create a service descriptor. These are post-contract negotiated
    # descriptors that may include other parameters like quality of service,
    # input and output formats, etc.
    mnist_service_descriptor = ServiceDescriptor(MNIST_CLASSIFIER_ID)
    # A job descriptor pairs the service descriptor with the parameter list.
    job_list = [job_parameters]
    job = JobDescriptor(mnist_service_descriptor, job_list)
    # Setup the service manager. NOTE: This will add services that are
    # (optionally) passed in, then call post_load_initialize on all services.
    setup_service_manager(app, [mnist_service_adapter])
    # Initialise the flag and result holder BEFORE the try block so both names
    # are always bound, even if perform() raises immediately.
    exception_caught = False
    results = None
    try:
        results = mnist_service_adapter.perform(job)
    except RuntimeError as exception:
        exception_caught = True
        log.error(" Exception caught %s", exception)
        log.debug(" Error performing %s %s", job, mnist_service_adapter)
    assert not exception_caught
    # Check our results for format and content.
    assert len(results) == 1
    assert results[0]['predictions'] == [7]
    assert results[0]['confidences'][0] > 0.9900
    if results[0]['predictions'] == [7]:
        log.debug("Tensorflow MNIST Adapter - CORRECT evaluation of image as 7")
| |
import json
import logging
import pprint
import sys
import time

from gallium.interface import ICommand

from .drivers.rabbitmq import Driver
from .observer import Core, Observer, SYNC_START
from .helper import prepare_logger

# Enable debug logging when "-d" appears anywhere on the raw command line;
# this runs at import time, before argparse sees the arguments.
logger = prepare_logger(logging.DEBUG if '-d' in sys.argv else logging.INFO)
def handle_driver_event(level, label, c, summary = None):
    """Log `label` at `level`, prefixed with the consumer's route and queue
    name when a consumer with those attributes is available."""
    emit = getattr(logger, level.lower())
    if c and hasattr(c, 'route') and hasattr(c, 'queue_name'):
        emit('{} ({}): {}'.format(c.route, c.queue_name, label))
    else:
        emit(label)
def on_connect(consumer = None, controller_id = None, route = None, queue_name = None, summary = None):
    """Driver callback: log a successful (re)connection at INFO level."""
    handle_driver_event('info', '<-------> CONNECTED', consumer)
def on_disconnect(consumer = None, controller_id = None, route = None, queue_name = None, summary = None):
    """Driver callback: log a lost connection at ERROR level."""
    handle_driver_event('error', '---/ /--- DISCONNECTED', consumer)
def make_error_handler(exception, consumer = None, controller_id = None, route = None, queue_name = None, summary = None):
    """Driver error callback: log the error event (the exception itself is
    received but not included in the message)."""
    handle_driver_event('error', '<--(!)--> ERROR', consumer)
class SampleObserveWithDefaultOptions(ICommand):
    """ Run the sample observer with default options """

    def identifier(self):
        # Gallium command name.
        return 'sample.observe.default'

    def define(self, parser):
        # NOTE(review): --debug is defined but execute() never reads it; the
        # module-level logger already keys off "-d" in sys.argv at import time.
        parser.add_argument(
            '--debug',
            '-d',
            action = 'store_true'
        )
        parser.add_argument(
            '--bind-url',
            '-b',
            default='amqp://guest:guest@127.0.0.1:5672/%2F'
        )

    def execute(self, args):
        # Reconnect forever; connection events go to the module-level callbacks.
        driver = Driver(
            args.bind_url,
            unlimited_retries = True,
            on_connect = on_connect,
            on_disconnect = on_disconnect,
            on_error = make_error_handler,
        )

        service = Observer(driver)

        # In this example, delegation is disabled.
        # vireo.open('vireo.sample.primary', delegation_ttl = 5000)
        # vireo.on('vireo.sample.primary.delegated', lambda x: print('vireo.sample.primary.delegated: {}'.format(x)))

        def wrapper(label, data):
            # Print each received payload for demonstration purposes.
            print('[SAMPLE] {}:'.format(label))
            pprint.pprint(data, indent = 2)

        # Default (direct) queues; the third one survives restarts (resumable).
        service.on('vireo.sample.direct', lambda x: wrapper('vireo.sample.direct', x))
        service.on('vireo.sample.secondary', lambda x: wrapper('vireo.sample.secondary', x))
        service.on('vireo.sample.direct.resumable', lambda x: wrapper('vireo.sample.direct', x), resumable = True)

        # With custom TOPIC exchange
        service.on(
            'vireo.sample.custom_topic_exchange.route_1',
            lambda x: wrapper('vireo.sample.custom_topic_exchange', x),
            options = {
                'exchange': {
                    'name': 'vireo_sample_topic_exchange',
                    'type': 'topic',
                }
            }
        )
        service.on(
            'vireo.sample.custom_topic_exchange.route_2',
            lambda x: wrapper('vireo.sample.custom_topic_exchange', x),
            options = {
                'exchange': {
                    'name': 'vireo_sample_topic_exchange',
                    'type': 'topic',
                }
            }
        )

        # With custom FANOUT exchange
        service.on(
            'vireo.sample.custom_fanout_exchange_1',
            lambda x: wrapper('vireo.sample.custom_fanout_exchange_1', x),
            options = {
                'exchange': {
                    'name': 'vireo_sample_fanout_exchange',
                    'type': 'fanout',
                }
            }
        )
        service.on(
            'vireo.sample.custom_fanout_exchange_2',
            lambda x: wrapper('vireo.sample.custom_fanout_exchange_2', x),
            options = {
                'exchange': {
                    'name': 'vireo_sample_fanout_exchange',
                    'type': 'fanout',
                }
            }
        )

        # With handling errors
        def error_demo(x):
            # Raise when 'e' is in the payload to exercise the error path.
            if 'e' in x:
                raise RuntimeError('Intentional Error')
            wrapper('vireo.sample.error', x)

        service.on('vireo.sample.error', error_demo)

        service.on_broadcast('vireo.sample.broadcast.one', lambda x: wrapper('vireo.sample.broadcast.one', x))
        service.on_broadcast('vireo.sample.broadcast.two', lambda x: wrapper('vireo.sample.broadcast.two', x))

        # Block and start consuming synchronously.
        service.join(SYNC_START)
class SampleObserveWithCustomOptions(ICommand):
    """ Run the sample observer with custom options """

    def identifier(self):
        # Gallium command name.
        return 'sample.observe.custom'

    def define(self, parser):
        parser.add_argument(
            '--debug',
            '-d',
            action = 'store_true'
        )
        parser.add_argument(
            '--bind-url',
            '-b',
            default='amqp://guest:guest@127.0.0.1:5672/%2F'
        )

    def execute(self, args):
        # Same callbacks as the default command, but shared queues declare a
        # custom default TOPIC exchange instead of the driver's built-in one.
        driver = Driver(
            args.bind_url,
            unlimited_retries = True,
            on_connect = on_connect,
            on_disconnect = on_disconnect,
            on_error = make_error_handler,
            default_consuming_shared_queue_options = {
                'exchange': {
                    'name': 'vireo_sample_custom_default_exchange',
                    'type': 'topic',
                },
            },
        )

        service = Observer(driver)

        def wrapper(label, data):
            # Print each received payload for demonstration purposes.
            print('[SAMPLE] {}:'.format(label))
            pprint.pprint(data, indent = 2)

        # With custom TOPIC exchange
        service.on(
            'vireo.sample_custom.route_1',
            lambda x: wrapper('vireo.sample_custom.route_1', x),
        )
        service.on(
            'vireo.sample_custom.route_2',
            lambda x: wrapper('vireo.sample_custom.route_2', x),
        )

        service.join(SYNC_START)
class SamplePublishWithCustomOptions(ICommand):
    """ Publish a sample event using custom default publishing options """

    def identifier(self):
        # Gallium command name.
        return 'sample.publish.custom'

    def define(self, parser):
        parser.add_argument(
            '--debug',
            '-d',
            action = 'store_true'
        )
        parser.add_argument(
            '--bind-url',
            '-b',
            default='amqp://guest:guest@127.0.0.1:5672/%2F'
        )
        parser.add_argument(
            'event_name',
            help = 'The name of the event (e.g., "sample.primary")'
        )
        parser.add_argument(
            'event_data',
            help = 'The JSON-compatible string data of the event',
            nargs = '?'
        )

    def execute(self, args):
        # Unlike the observer commands, this one honours --debug explicitly.
        prepare_logger(logging.DEBUG if args.debug else logging.INFO)
        # Publish through the custom exchange matching the custom observer.
        driver = Driver(
            args.bind_url,
            default_publishing_options = {
                'exchange': 'vireo_sample_custom_default_exchange',
            },
        )
        service = Core(driver)
        # Event data is parsed as JSON when provided, otherwise None is sent.
        service.emit(args.event_name, json.loads(args.event_data) if args.event_data else None)
class SampleObserveWithOneQueue(ICommand):
    """ Run the sample observer with a single resumable queue """

    def identifier(self):
        # Gallium command name.
        return 'sample.observe.single'

    def define(self, parser):
        parser.add_argument(
            '--debug',
            '-d',
            action = 'store_true'
        )
        parser.add_argument(
            '--bind-url',
            '-b',
            default='amqp://guest:guest@127.0.0.1:5672/%2F'
        )

    def execute(self, args):
        # unlimited_retries is False here: the command gives up when the
        # broker stays unreachable instead of reconnecting forever.
        driver = Driver(
            args.bind_url,
            unlimited_retries = False,
            on_connect = on_connect,
            on_disconnect = on_disconnect,
            on_error = make_error_handler,
        )

        def handler(data):
            print('[SAMPLE] Begin')
            print('[SAMPLE] Received: {}'.format(pprint.pformat(data, indent = 2)))
            # Optionally simulate long-running work via a {'sleep': seconds} payload.
            if isinstance(data, dict) and 'sleep' in data:
                print('[SAMPLE] Sleeping')
                time.sleep(data.get('sleep'))
                print('[SAMPLE] Resumed')
            print('[SAMPLE] End')

        service = Observer(driver)
        service.on('vireo.sample.single', handler, resumable = True, max_retries = 30)
        service.join(SYNC_START)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ServiceRequest) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class ServiceRequest(domainresource.DomainResource):
""" A request for a service to be performed.
A record of a request for service such as diagnostic investigations,
treatments, or operations to be performed.
"""
resource_type = "ServiceRequest"
def __init__(self, jsondict=None, strict=True):
    """ Initialize all valid properties.

    :raises: FHIRValidationError on validation errors, unless strict is False
    :param dict jsondict: A JSON dictionary to use for initialization
    :param bool strict: If True (the default), invalid variables will raise a TypeError
    """
    # Generated code: every ServiceRequest element defaults to None; the
    # superclass initializer receives `jsondict` (presumably populating these
    # attributes from it — see domainresource.DomainResource).

    self.asNeededBoolean = None
    """ Preconditions for service.
    Type `bool`. """

    self.asNeededCodeableConcept = None
    """ Preconditions for service.
    Type `CodeableConcept` (represented as `dict` in JSON). """

    self.authoredOn = None
    """ Date request signed.
    Type `FHIRDate` (represented as `str` in JSON). """

    self.basedOn = None
    """ What request fulfills.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.bodySite = None
    """ Location on Body.
    List of `CodeableConcept` items (represented as `dict` in JSON). """

    self.category = None
    """ Classification of service.
    List of `CodeableConcept` items (represented as `dict` in JSON). """

    self.code = None
    """ What is being requested/ordered.
    Type `CodeableConcept` (represented as `dict` in JSON). """

    self.doNotPerform = None
    """ True if service/procedure should not be performed.
    Type `bool`. """

    self.encounter = None
    """ Encounter in which the request was created.
    Type `FHIRReference` (represented as `dict` in JSON). """

    self.identifier = None
    """ Identifiers assigned to this order.
    List of `Identifier` items (represented as `dict` in JSON). """

    self.instantiatesCanonical = None
    """ Instantiates FHIR protocol or definition.
    List of `str` items. """

    self.instantiatesUri = None
    """ Instantiates external protocol or definition.
    List of `str` items. """

    self.insurance = None
    """ Associated insurance coverage.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.intent = None
    """ proposal | plan | order +.
    Type `str`. """

    self.locationCode = None
    """ Requested location.
    List of `CodeableConcept` items (represented as `dict` in JSON). """

    self.locationReference = None
    """ Requested location.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.note = None
    """ Comments.
    List of `Annotation` items (represented as `dict` in JSON). """

    self.occurrenceDateTime = None
    """ When service should occur.
    Type `FHIRDate` (represented as `str` in JSON). """

    self.occurrencePeriod = None
    """ When service should occur.
    Type `Period` (represented as `dict` in JSON). """

    self.occurrenceTiming = None
    """ When service should occur.
    Type `Timing` (represented as `dict` in JSON). """

    self.orderDetail = None
    """ Additional order information.
    List of `CodeableConcept` items (represented as `dict` in JSON). """

    self.patientInstruction = None
    """ Patient or consumer-oriented instructions.
    Type `str`. """

    self.performer = None
    """ Requested performer.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.performerType = None
    """ Performer role.
    Type `CodeableConcept` (represented as `dict` in JSON). """

    self.priority = None
    """ routine | urgent | asap | stat.
    Type `str`. """

    self.quantityQuantity = None
    """ Service amount.
    Type `Quantity` (represented as `dict` in JSON). """

    self.quantityRange = None
    """ Service amount.
    Type `Range` (represented as `dict` in JSON). """

    self.quantityRatio = None
    """ Service amount.
    Type `Ratio` (represented as `dict` in JSON). """

    self.reasonCode = None
    """ Explanation/Justification for procedure or service.
    List of `CodeableConcept` items (represented as `dict` in JSON). """

    self.reasonReference = None
    """ Explanation/Justification for service or service.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.relevantHistory = None
    """ Request provenance.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.replaces = None
    """ What request replaces.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.requester = None
    """ Who/what is requesting service.
    Type `FHIRReference` (represented as `dict` in JSON). """

    self.requisition = None
    """ Composite Request ID.
    Type `Identifier` (represented as `dict` in JSON). """

    self.specimen = None
    """ Procedure Samples.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    self.status = None
    """ draft | active | suspended | completed | entered-in-error |
    cancelled.
    Type `str`. """

    self.subject = None
    """ Individual or Entity the service is ordered for.
    Type `FHIRReference` (represented as `dict` in JSON). """

    self.supportingInfo = None
    """ Additional clinical information.
    List of `FHIRReference` items (represented as `dict` in JSON). """

    super(ServiceRequest, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """ Return the FHIR element definitions for ServiceRequest.

        Extends the superclass property list with one 6-tuple per field:
        (python name, json name, type, is_list, "of many" group, not_optional).
        Fields sharing an "of many" group (e.g. "occurrence", "quantity",
        "asNeeded") are mutually exclusive JSON representations of the same
        logical element.
        """
        js = super(ServiceRequest, self).elementProperties()
        js.extend([
            ("asNeededBoolean", "asNeededBoolean", bool, False, "asNeeded", False),
            ("asNeededCodeableConcept", "asNeededCodeableConcept", codeableconcept.CodeableConcept, False, "asNeeded", False),
            ("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
            ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
            ("bodySite", "bodySite", codeableconcept.CodeableConcept, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, True, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("doNotPerform", "doNotPerform", bool, False, None, False),
            ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
            ("instantiatesUri", "instantiatesUri", str, True, None, False),
            ("insurance", "insurance", fhirreference.FHIRReference, True, None, False),
            # "intent", "status" and "subject" are mandatory (last element True).
            ("intent", "intent", str, False, None, True),
            ("locationCode", "locationCode", codeableconcept.CodeableConcept, True, None, False),
            ("locationReference", "locationReference", fhirreference.FHIRReference, True, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("occurrenceDateTime", "occurrenceDateTime", fhirdate.FHIRDate, False, "occurrence", False),
            ("occurrencePeriod", "occurrencePeriod", period.Period, False, "occurrence", False),
            ("occurrenceTiming", "occurrenceTiming", timing.Timing, False, "occurrence", False),
            ("orderDetail", "orderDetail", codeableconcept.CodeableConcept, True, None, False),
            ("patientInstruction", "patientInstruction", str, False, None, False),
            ("performer", "performer", fhirreference.FHIRReference, True, None, False),
            ("performerType", "performerType", codeableconcept.CodeableConcept, False, None, False),
            ("priority", "priority", str, False, None, False),
            ("quantityQuantity", "quantityQuantity", quantity.Quantity, False, "quantity", False),
            ("quantityRange", "quantityRange", range.Range, False, "quantity", False),
            ("quantityRatio", "quantityRatio", ratio.Ratio, False, "quantity", False),
            ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
            ("relevantHistory", "relevantHistory", fhirreference.FHIRReference, True, None, False),
            ("replaces", "replaces", fhirreference.FHIRReference, True, None, False),
            ("requester", "requester", fhirreference.FHIRReference, False, None, False),
            ("requisition", "requisition", identifier.Identifier, False, None, False),
            ("specimen", "specimen", fhirreference.FHIRReference, True, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("supportingInfo", "supportingInfo", fhirreference.FHIRReference, True, None, False),
        ])
        return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
| |
## This is an educational random forest implementation
## References:
## * A. Criminisi, J. Shotton, and E. Konukoglu, Decision Forests:
## A Unified Framework for Classification, Regression, Density Estimation,
## Manifold Learning and Semi-Supervised Learning. Foundations and Trends in
## Computer Graphics and Computer Vision. NOW Publishers. Vol.7: No 2-3, pp 81-227. 2012.
##
## * Jamie Shotton, Toby Sharp, Pushmeet Kohli, Sebastian Nowozin, John Winn,
## and Antonio Criminisi, Decision Jungles: Compact and Rich Models for
## Classification, in Proc. NIPS, 2013
import random
from collections import Counter
import numpy as np
import copy
def split_data(data, label=0, length=50):
'Take a large text and divide it into chunks'
strings = [data[i:i+length] for i in range(0, len(data) - length, length)]
random.shuffle(strings)
strings = [(s, label) for s in strings]
test = strings[:len(strings) * 10 / 100]
training = strings[len(strings) * 10 / 100:]
return test, training
def entropy(data):
    '''Compute the (natural-log) entropy of the labels in `data`,
    a sequence of (item, label) tuples.'''
    label_counts = Counter(label for _, label in data)
    freqs = np.array(list(label_counts.values()), dtype=float)
    probs = freqs / freqs.sum()
    return -np.sum(probs * np.log(probs))
def split(train, feat):
    '''Partition labelled data on the presence of `feat` and score the
    information gain of the split.

    Returns (gain, with_feat, without_feat, feat).  Raises when the data
    is already (nearly) pure, since no split can improve it.
    '''
    base_entropy = entropy(train)
    if base_entropy < 0.000001:
        raise Exception("Entropy very low")
    with_feat = [item for item in train if feat in item[0]]
    without_feat = [item for item in train if feat not in item[0]]
    total = float(len(train))
    gain = (base_entropy
            - entropy(with_feat) * len(with_feat) / total
            - entropy(without_feat) * len(without_feat) / total)
    return gain, with_feat, without_feat, feat
## --------------------------
## - The random forest code -
## --------------------------
def build_tree(train, features, levels=5, numfeatures=100):
    '''Train a decision tree based on labeled data and features.

    A leaf is the tuple (None, Counter-of-labels); a branch is the tuple
    (feature, subtree_if_present, subtree_if_absent).  At each node the
    best of `numfeatures` randomly sampled features (by information
    gain) is chosen.
    '''
    if levels == 0:
        # Depth exhausted: emit a leaf with the label distribution.
        C1 = Counter([b for _, b in train])
        Leaf = (None, C1)
        return Leaf
    else:
        try:
            X = (split(train, F) for F in random.sample(features, numfeatures))
            H, L1, L2, F = max(X)
            M1 = build_tree(L1, features, levels - 1, numfeatures)
            M2 = build_tree(L2, features, levels - 1, numfeatures)
            Branch = (F, M1, M2)
            return Branch
        except Exception:
            # split() raises when the node is already pure, and
            # random.sample raises when fewer than `numfeatures`
            # features remain: in both cases stop growing and emit a
            # leaf.  Catch Exception, not a bare `except:`, so that
            # KeyboardInterrupt/SystemExit still propagate.
            return build_tree(train, features, levels=0)
def classify(tree, item):
    '''Walk `tree` for `item` and return the Counter of labels at the
    reached leaf.  Leaves are (None, Counter); branches are
    (feature, subtree_if_present, subtree_if_absent).'''
    node = tree
    # Iterative descent instead of recursion: follow the "present"
    # branch whenever the node's feature occurs in the item.
    while len(node) != 2:
        feature, present_branch, absent_branch = node
        node = present_branch if feature in item else absent_branch
    assert node[0] is None
    return node[1]
## ----------------------------
## - The decision jungle code -
## ----------------------------
def build_jungle(train, features, levels=20, numfeatures=100):
    '''Train a decision jungle: a DAG of decision nodes.

    Unlike a tree, the number of nodes kept per level is capped at M by
    merging similar nodes, bounding the model's width (Shotton et al.,
    Decision Jungles, NIPS 2013).  Returns a dict mapping node index to
    either a branch list [feature, left_idx, right_idx] or a leaf list
    [None, Counter-of-labels].  Node records are *lists* (mutated in
    place) because merged nodes alias the same list object.
    '''
    DAG = {0: copy.copy(train)}
    Candidate_sets = [0]
    next_ID = 0
    M = 20  # maximum number of nodes retained per level
    for level in range(levels):
        result_sets = []
        for tdata_idx in Candidate_sets:
            tdata = DAG[tdata_idx]
            if entropy(tdata) == 0.0:
                # Pure node: pass the data through unchanged.  Both
                # outgoing edges point at the same child; the feature
                # slot is set to True (always "present").
                next_ID += 1
                idx1 = next_ID
                result_sets += [idx1]
                DAG[idx1] = tdata + []
                del DAG[tdata_idx][:]
                DAG[tdata_idx] += [True, idx1, idx1]
                continue
            # Greedily pick the best of `numfeatures` sampled features.
            X = (split(tdata, F) for F in random.sample(features, numfeatures))
            H, L1, L2, F = max(X)
            # Branch = (F, M1, M2)
            next_ID += 1
            idx1 = next_ID
            DAG[idx1] = L1
            next_ID += 1
            idx2 = next_ID
            DAG[idx2] = L2
            result_sets += [idx1, idx2]
            # Rewrite this node in place into a branch record; mutate
            # (not rebind) so nodes aliasing this list stay consistent.
            del DAG[tdata_idx][:]
            DAG[tdata_idx] += [F, idx1, idx2]
        ## Now optimize the result sets here
        # Keep only M nodes; fold each surplus node into the kept node
        # whose combined entropy increase is smallest.
        random.shuffle(result_sets)
        basic = result_sets[:M]
        for r in result_sets[M:]:
            maxv = None
            maxi = None
            for b in basic:
                L = float(len(DAG[r] + DAG[b]))
                e1 = len(DAG[r]) * entropy(DAG[r])
                e2 = len(DAG[b]) * entropy(DAG[b])
                newe = L * entropy(DAG[r] + DAG[b])
                score = abs(e1 + e2 - newe)
                if maxv is None:
                    maxv = score
                    maxi = b
                    continue
                if score < maxv:
                    maxv = score
                    maxi = b
            DAG[maxi] += DAG[r]
            del DAG[r]
            # Alias r to the merged node so parents that point at index
            # r transparently see the merged data.
            DAG[r] = DAG[maxi]
        Candidate_sets = basic
    # Convert the surviving frontier nodes into leaves: [None, Counter].
    for tdata_idx in Candidate_sets:
        tdata = DAG[tdata_idx]
        C1 = Counter([b for _, b in tdata])
        del DAG[tdata_idx][:]
        DAG[tdata_idx] += [None, C1]
    return DAG
def classify_jungle(DAG, item):
    '''Walk the jungle DAG from the root (index 0) to a leaf and return
    the leaf's Counter of labels.

    Branch nodes are [feature, left_idx, right_idx]; a feature of True
    (emitted for pure pass-through nodes) always takes the left edge.
    '''
    branch = DAG[0]
    while branch[0] is not None:
        try:
            fet, L1, L2 = branch
            if fet == True or fet in item:
                branch = DAG[L1]
            else:
                branch = DAG[L2]
        except:
            # Python 2 debug aid: report the malformed node size before
            # re-raising the original error.
            print len(branch)
            raise
    return branch[1]
## -------------------------
## - Sample classification -
## -------------------------
if __name__ == "__main__":
    # Load two corpora (Project Gutenberg texts), one per language:
    # label 0 = English, label 1 = French.  (Python 2 `file()` builtin.)
    # dataEN = file("../data/pg23428.txt").read()
    # dataFR = file("../data/pg5711.txt").read()
    dataEN = file("../data/pg110.txt").read()
    dataFR = file("../data/pg42671.txt").read()
    length = 200
    testEN, trainEN = split_data(dataEN, label=0, length=length)
    testFR, trainFR = split_data(dataFR, label=1, length=length)
    print "training: EN=%s FR=%s" % (len(trainEN), len(trainFR))
    train = trainEN + trainFR
    random.shuffle(train)
    test = testEN + testFR
    random.shuffle(test)
    ## Now make a bunch of features
    ## A feature is in at least 10% of strings
    ## but also at most in 90% of strings
    sometrain = random.sample(train, 1000)
    features = set()
    while len(features) < 700:
        # Candidate feature: a random substring of a random training
        # chunk, with exponentially distributed length (mean 5 chars).
        fragment, _ = random.choice(sometrain)
        l = int(round(random.expovariate(0.20)))
        b = random.randint(0, max(0, length - l))
        feat = fragment[b:b+l]
        ## Test
        C = 0
        for st, _ in sometrain:
            if feat in st:
                C += 1
        f = float(C) / 1000
        # NOTE(review): the filter below uses 1% / 99% although the
        # comment above says 10% / 90% -- confirm which was intended.
        if f > 0.01 and f < 0.99 and feat not in features:
            features.add(feat)
    features = list(features)
    manytrees = []
    jungle = []
    for i in range(10):
        print "Build tree %s" % i
        # Bagging: each model is trained on a random third of the data.
        # (Python 2 integer division.)
        size = len(train) / 3
        training_sample = random.sample(train, size)
        tree = build_jungle(training_sample, features, numfeatures=100)
        jungle += [tree]
        tree = build_tree(training_sample, features, numfeatures=100)
        manytrees += [tree]
    testdata = test
    results_tree = Counter()
    results_jungle = Counter()
    # Majority vote across the ensemble; record (predicted, actual)
    # pairs so the Counter doubles as a confusion matrix.
    for item, cat in testdata:
        # Trees
        c = Counter()
        for tree in manytrees:
            c += classify(tree, item)
        res = (max(c, key=lambda x: c[x]), cat)
        results_tree.update([res])
        # Jungle
        c = Counter()
        for tree in jungle:
            c += classify_jungle(tree, item)
        res = (max(c, key=lambda x: c[x]), cat)
        results_jungle.update([res])
    print
    print "Results Tree Jungle"
    print "True positives: %4d %4d" \
        % (results_tree[(1, 1)], results_jungle[(1, 1)])
    print "True negatives: %4d %4d" \
        % (results_tree[(0, 0)], results_jungle[(0, 0)])
    print "False positives: %4d %4d" \
        % (results_tree[(1, 0)], results_jungle[(1, 0)])
    print "False negatives: %4d %4d" \
        % (results_tree[(0, 1)], results_jungle[(0, 1)])
| |
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Conditional statements related codes.
Branches, conditions, truth checks.
"""
from nuitka import Options
from .AttributeCodes import getAttributeCheckBoolCode
from .ComparisonCodes import (
getBuiltinIsinstanceBoolCode,
getComparisonExpressionBoolCode
)
from .ErrorCodes import getErrorExitBoolCode, getReleaseCode
from .LabelCodes import getBranchingCode, getGotoCode, getLabelCode
def generateConditionCode(condition, emit, context):
    """ Generate C++ branching code for a condition expression.

    Dispatches on the node type of *condition* and emits code that ends
    by jumping to the context's current true or false branch target.
    Specialized paths exist for comparisons, "not", conditional
    expressions, hasattr and isinstance; everything else falls back to a
    generic truth check of the evaluated expression.
    """
    # The complexity is needed to avoid unnecessary complex generated C++
    # pylint: disable=R0914,R0915

    # TODO: This will move to Helper module
    from .CodeGeneration import generateExpressionCode

    if condition.isExpressionConstantRef():
        # TODO: Must not happen, optimization catches this.
        assert False

        value = condition.getConstant()

        # Constant conditions reduce to an unconditional jump.
        if value:
            getGotoCode(context.getTrueBranchTarget(), emit)
        else:
            getGotoCode(context.getFalseBranchTarget(), emit)
    elif condition.isExpressionComparison():
        # Evaluate both operands into temporaries, then emit a boolean
        # comparison that branches directly.
        left_name = context.allocateTempName("compare_left")

        generateExpressionCode(
            to_name    = left_name,
            expression = condition.getLeft(),
            emit       = emit,
            context    = context
        )

        right_name = context.allocateTempName("compare_right")

        generateExpressionCode(
            to_name    = right_name,
            expression = condition.getRight(),
            emit       = emit,
            context    = context
        )

        old_source_ref = context.setCurrentSourceCodeReference(condition.getSourceReference())

        getComparisonExpressionBoolCode(
            comparator  = condition.getComparator(),
            left_name   = left_name,
            right_name  = right_name,
            needs_check = condition.mayRaiseExceptionBool(BaseException),
            emit        = emit,
            context     = context
        )

        context.setCurrentSourceCodeReference(old_source_ref)
    elif condition.isExpressionOperationNOT():
        # Lets just switch the targets temporarily to get at "NOT" without
        # any effort really.
        true_target = context.getTrueBranchTarget()
        false_target = context.getFalseBranchTarget()

        context.setTrueBranchTarget(false_target)
        context.setFalseBranchTarget(true_target)

        generateConditionCode(
            condition = condition.getOperand(),
            emit      = emit,
            context   = context
        )

        # Restore the original targets for our caller.
        context.setTrueBranchTarget(true_target)
        context.setFalseBranchTarget(false_target)
    elif condition.isExpressionConditional():
        # "a if c else b" as a condition: evaluate c, then branch into
        # evaluating either a or b as a condition of its own.
        expression_yes = condition.getExpressionYes()
        expression_no = condition.getExpressionNo()

        condition = condition.getCondition()

        old_true_target = context.getTrueBranchTarget()
        old_false_target = context.getFalseBranchTarget()

        select_true = context.allocateLabel("select_true")
        select_false = context.allocateLabel("select_false")

        # TODO: Could be avoided in some cases.
        select_end = context.allocateLabel("select_end")

        context.setTrueBranchTarget(select_true)
        context.setFalseBranchTarget(select_false)

        generateConditionCode(
            condition = condition,
            emit      = emit,
            context   = context,
        )

        context.setTrueBranchTarget(old_true_target)
        context.setFalseBranchTarget(old_false_target)

        getLabelCode(select_true,emit)

        generateConditionCode(
            condition = expression_yes,
            emit      = emit,
            context   = context,
        )

        # Skip over the "no" branch code.
        getGotoCode(select_end, emit)
        getLabelCode(select_false,emit)

        generateConditionCode(
            condition = expression_no,
            emit      = emit,
            context   = context,
        )

        getLabelCode(select_end,emit)
    elif condition.isExpressionBuiltinHasattr():
        # hasattr(source, attr) has a dedicated boolean check helper.
        source_name = context.allocateTempName("hasattr_source")
        attr_name = context.allocateTempName("hasattr_attr")

        generateExpressionCode(
            to_name    = source_name,
            expression = condition.getLookupSource(),
            emit       = emit,
            context    = context
        )
        generateExpressionCode(
            to_name    = attr_name,
            expression = condition.getAttribute(),
            emit       = emit,
            context    = context
        )

        # Full compat mode attributes the check to the attribute node's
        # source position instead of the whole expression.
        old_source_ref = context.setCurrentSourceCodeReference(
            condition.getAttribute().getSourceReference()
               if Options.isFullCompat() else
            condition.getSourceReference()
        )

        getAttributeCheckBoolCode(
            source_name = source_name,
            attr_name   = attr_name,
            needs_check = condition.getLookupSource().mayRaiseExceptionAttributeCheckObject(
                exception_type = BaseException,
                attribute      = condition.getAttribute()
            ),
            emit        = emit,
            context     = context
        )

        context.setCurrentSourceCodeReference(old_source_ref)
    elif condition.isExpressionBuiltinIsinstance():
        # isinstance(inst, cls) also has a dedicated boolean helper.
        inst_name = context.allocateTempName("isinstance_inst")
        cls_name = context.allocateTempName("isinstance_cls")

        generateExpressionCode(
            to_name    = inst_name,
            expression = condition.getInstance(),
            emit       = emit,
            context    = context
        )
        generateExpressionCode(
            to_name    = cls_name,
            expression = condition.getCls(),
            emit       = emit,
            context    = context
        )

        old_source_ref = context.setCurrentSourceCodeReference(condition.getSourceReference())

        getBuiltinIsinstanceBoolCode(
            inst_name = inst_name,
            cls_name  = cls_name,
            emit      = emit,
            context   = context
        )

        context.setCurrentSourceCodeReference(old_source_ref)
    else:
        # Generic fallback: evaluate the expression, compute its truth
        # value into an int temporary, then branch on it.
        condition_name = context.allocateTempName("cond_value")
        truth_name = context.allocateTempName("cond_truth", "int")

        generateExpressionCode(
            to_name    = condition_name,
            expression = condition,
            emit       = emit,
            context    = context
        )

        old_source_ref = context.setCurrentSourceCodeReference(condition.getSourceReference())

        getConditionCheckTrueCode(
            to_name     = truth_name,
            value_name  = condition_name,
            needs_check = condition.mayRaiseExceptionBool(BaseException),
            emit        = emit,
            context     = context
        )

        context.setCurrentSourceCodeReference(old_source_ref)

        # The evaluated value is no longer needed once its truth value
        # has been captured.
        getReleaseCode(
            release_name = condition_name,
            emit         = emit,
            context      = context
        )

        getBranchingCode(
            condition = "%s == 1" % truth_name,
            emit      = emit,
            context   = context
        )
def getConditionCheckTrueCode(to_name, value_name, needs_check, emit, context):
    """ Emit C code that stores the truth value of *value_name* into the
    int variable *to_name*.

    The generated CHECK_IF_TRUE call yields 1 or 0, or -1 on error; the
    error exit for the -1 case is wired up via getErrorExitBoolCode.
    """
    check_statement = "%s = CHECK_IF_TRUE( %s );" % (to_name, value_name)
    emit(check_statement)

    getErrorExitBoolCode(
        condition   = "%s == -1" % to_name,
        needs_check = needs_check,
        emit        = emit,
        context     = context
    )
| |
import pathlib
import synapse.exc as s_exc
import synapse.tests.utils as s_test
from synapse.tests.utils import alist
import synapse.lib.hive as s_hive
import synapse.lib.nexus as s_nexus
import synapse.lib.hiveauth as s_hiveauth
class AuthTest(s_test.SynTest):
    '''
    Tests for the hive-backed Auth subsystem: user/role lifecycle,
    rules and permission caching, telepath access, auth gates, and
    persistence across cortex restarts.
    '''

    async def test_hive_auth(self):
        '''
        Exercise the user/role lifecycle directly against an Auth
        instance: creation, duplicate names, admin/locked flags, rule
        ordering (DENY before ALLOW), grant/revoke, renames, and
        permcache invalidation on every change.
        '''
        with self.getTestDir() as testdirn:
            async with self.getTestTeleHive() as hive:

                nexsroot = await s_nexus.NexsRoot.anit(testdirn)
                await nexsroot.startup(None)

                node = await hive.open(('hive', 'auth'))
                async with await s_hiveauth.Auth.anit(node, nexsroot=nexsroot) as auth:
                    auth.onfini(nexsroot.fini)

                    user = await auth.addUser('visi@vertex.link')
                    role = await auth.addRole('ninjas')

                    self.eq(user, auth.user(user.iden))
                    self.eq(user, await auth.getUserByName('visi@vertex.link'))

                    self.eq(role, auth.role(role.iden))
                    self.eq(role, await auth.getRoleByName('ninjas'))

                    with self.raises(s_exc.DupUserName):
                        await auth.addUser('visi@vertex.link')

                    with self.raises(s_exc.DupRoleName):
                        await auth.addRole('ninjas')

                    self.nn(user)

                    self.false(user.info.get('admin'))
                    self.len(0, user.info.get('rules'))
                    self.len(1, user.info.get('roles'))

                    await user.setAdmin(True)
                    self.true(user.info.get('admin'))
                    self.true(user.allowed(('foo', 'bar')))

                    await user.addRule((True, ('foo',)))
                    self.true(user.allowed(('foo', 'bar')))
                    self.len(1, user.permcache)

                    # Every rule change must invalidate the permcache.
                    await user.delRule((True, ('foo',)))
                    self.len(0, user.permcache)

                    await user.addRule((True, ('foo',)))

                    await user.grant(role.iden)
                    self.len(0, user.permcache)

                    self.true(user.allowed(('baz', 'faz')))
                    self.len(1, user.permcache)

                    await role.addRule((True, ('baz', 'faz')))
                    self.len(0, user.permcache)

                    self.true(user.allowed(('baz', 'faz')))
                    self.len(1, user.permcache)

                    # A locked user is denied everything, even as admin.
                    await user.setLocked(True)
                    self.false(user.allowed(('baz', 'faz')))

                    await user.setAdmin(False)
                    await user.setLocked(False)

                    self.true(user.allowed(('baz', 'faz')))
                    self.true(user.allowed(('foo', 'bar')))

                    # Add a DENY to the beginning of the rule list
                    await role.addRule((False, ('baz', 'faz')), indx=0)
                    self.false(user.allowed(('baz', 'faz')))

                    # Delete the DENY
                    await role.delRule((False, ('baz', 'faz')))

                    # After deleting, former ALLOW rule applies
                    self.true(user.allowed(('baz', 'faz')))

                    # non-existent rule returns default
                    self.none(user.allowed(('boo', 'foo')))
                    self.eq('yolo', user.allowed(('boo', 'foo'), default='yolo'))

                    await self.asyncraises(s_exc.NoSuchRole, user.revoke('newp'))

                    await user.revoke(role.iden)
                    self.none(user.allowed(('baz', 'faz')))

                    await user.grant(role.iden)
                    self.true(user.allowed(('baz', 'faz')))

                    await self.asyncraises(s_exc.NoSuchRole, auth.delRole('accountants'))

                    await auth.delRole(role.iden)
                    self.false(user.allowed(('baz', 'faz')))

                    await self.asyncraises(s_exc.NoSuchUser, auth.delUser('fred@accountancy.com'))

                    await auth.delUser(user.iden)
                    self.false(user.allowed(('baz', 'faz')))

                    role = await auth.addRole('lolusers')
                    role2 = await auth.addRole('lolusers2')

                    # Renaming a role to its current name is a no-op.
                    self.none(await role.setName('lolusers'))

                    with self.raises(s_exc.DupRoleName):
                        await role2.setName('lolusers')

                    await role.setName('roflusers')

                    self.nn(await auth.getRoleByName('roflusers'))
                    self.none(await auth.getRoleByName('lolusers'))

                    user = await auth.addUser('user1')
                    user2 = await auth.addUser('user')

                    # No problem if the user sets her own name to herself
                    self.none(await user.setName('user1'))

                    with self.raises(s_exc.DupUserName):
                        await user2.setName('user1')

                    await user.setName('user2')
                    self.nn(await auth.getUserByName('user2'))
                    self.none(await auth.getUserByName('user1'))

    async def test_hive_tele_auth(self):
        '''
        Confirm password handling and lock enforcement when connecting
        to the hive over telepath.
        '''

        # confirm that the primitives used by higher level APIs
        # work using telepath remotes and property synchronize.

        async with self.getTestHiveDmon() as dmon:

            hive = dmon.shared.get('hive')
            hive.conf['auth:en'] = True

            auth = await hive.getHiveAuth()

            user = await auth.getUserByName('root')
            await user.setPasswd('secret')

            # tryPasswd
            self.true(await user.tryPasswd('secret'))
            self.false(await user.tryPasswd('beep'))
            self.false(await user.tryPasswd(None))

            # hive passwords must be non-zero length strings
            with self.raises(s_exc.BadArg):
                await user.setPasswd('')
            with self.raises(s_exc.BadArg):
                await user.setPasswd({'key': 'vau'})

            # passwords can be set to none, preventing tryPasswd from working
            await user.setPasswd(None)
            self.false(await user.tryPasswd(None))
            self.false(await user.tryPasswd('beep'))
            self.false(await user.tryPasswd('secret'))

            # Reset the password
            await user.setPasswd('secret')

            turl = self.getTestUrl(dmon, 'hive')

            # User can't access after being locked
            await user.setLocked(True)

            with self.raises(s_exc.AuthDeny):
                await s_hive.openurl(turl, user='root', passwd='secret')

            await user.setLocked(False)

            # User can't access after being unlocked with wrong password
            with self.raises(s_exc.AuthDeny):
                await s_hive.openurl(turl, user='root', passwd='newpnewp')

            # User can access with correct password after being unlocked with
            async with await s_hive.openurl(turl, user='root', passwd='secret'):
                await hive.open(('foo', 'bar'))

    async def test_hive_authgate_perms(self):
        '''
        Verify per-authgate (view/layer) rules: explicit user rules,
        role rules, rule ordering, and cleanup of gates when views,
        layers, users and roles are deleted.
        '''
        async with self.getTestCoreAndProxy() as (core, prox):

            # We can retrieve the hivegate information
            gate = await prox.getAuthGate(core.view.iden)
            self.eq(gate['users'][0], {
                'iden': core.auth.rootuser.iden,
                'admin': True,
                'rules': (),
            })
            gates = await prox.getAuthGates()
            self.isin(core.view.iden, [g['iden'] for g in gates])

            fred = await prox.addUser('fred')
            bobo = await prox.addUser('bobo')
            await prox.setUserPasswd(fred['iden'], 'secret')
            await prox.setUserPasswd(bobo['iden'], 'secret')

            vdef2 = await core.view.fork()
            view2_iden = vdef2.get('iden')
            view2 = core.getView(view2_iden)

            await core.nodes('[test:int=10]')
            await view2.nodes('[test:int=11]')

            async with core.getLocalProxy(user='fred') as fredcore:
                viewopts = {'view': view2.iden}

                # Rando can access main view but not a fork
                self.eq(1, await fredcore.count('test:int'))
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('test:int', opts=viewopts))

                viewiden = view2.iden
                layriden = view2.layers[0].iden

                # Add to a non-existent authgate
                rule = (True, ('view', 'read'))
                badiden = 'XXX'
                await self.asyncraises(s_exc.NoSuchAuthGate, prox.addUserRule(fred['iden'], rule, gateiden=badiden))

                # Rando can access forked view with explicit perms
                await prox.addUserRule(fred['iden'], rule, gateiden=viewiden)
                self.eq(2, await fredcore.count('test:int', opts=viewopts))

                friends = await prox.addRole('friends')

                # But still can't write to layer
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('[test:int=12]', opts=viewopts))
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('test:int=11 [:loc=us]', opts=viewopts))

                # fred can write to forked view's write layer with explicit perm through role
                rule = (True, ('node', 'prop', 'set',))
                await prox.addRoleRule(friends['iden'], rule, gateiden=layriden)

                # Before granting, still fails
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('[test:int=12]', opts=viewopts))

                # After granting, succeeds
                await prox.addUserRole(fred['iden'], friends['iden'])
                self.eq(1, await fredcore.count('test:int=11 [:loc=ru]', opts=viewopts))

                # But adding a node still fails
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('[test:int=12]', opts=viewopts))

                # After removing rule from friends, fails again
                await prox.delRoleRule(friends['iden'], rule, gateiden=layriden)
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('test:int=11 [:loc=us]', opts=viewopts))

                rule = (True, ('node', 'add',))
                await prox.addUserRule(fred['iden'], rule, gateiden=layriden)
                self.eq(1, await fredcore.count('[test:int=12]', opts=viewopts))

                # Add an explicit DENY for adding test:int nodes
                rule = (False, ('node', 'add', 'test:int'))
                await prox.addUserRule(fred['iden'], rule, indx=0, gateiden=layriden)
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('[test:int=13]', opts=viewopts))

                # Adding test:str is allowed though
                self.eq(1, await fredcore.count('[test:str=foo]', opts=viewopts))

                # An non-default world readable view works without explicit permission
                view2.worldreadable = True
                self.eq(3, await fredcore.count('test:int', opts=viewopts))

                # Deleting a user that has a role with an Authgate-specific rule
                rule = (True, ('node', 'prop', 'set',))
                await prox.addRoleRule(friends['iden'], rule, gateiden=layriden)
                self.eq(1, await fredcore.count('test:int=11 [:loc=sp]', opts=viewopts))
                await prox.addUserRole(bobo['iden'], friends['iden'])
                await prox.delAuthUser(bobo['iden'])
                self.eq(1, await fredcore.count('test:int=11 [:loc=us]', opts=viewopts))

                # Deleting a role removes all the authgate-specific role rules
                await prox.delRole(friends['iden'])
                await self.asyncraises(s_exc.AuthDeny, fredcore.count('test:int=11 [:loc=ru]', opts=viewopts))

                wlyr = view2.layers[0]
                await core.delView(view2.iden)
                await core.delLayer(wlyr.iden)

                # Verify that trashing the layer and view deletes the authgate from the hive
                self.none(core.auth.getAuthGate(wlyr.iden))
                self.none(core.auth.getAuthGate(view2.iden))

                # Verify that trashing the write layer deletes the remaining rules and backing store
                self.false(pathlib.Path(wlyr.dirn).exists())
                fred = await core.auth.getUserByName('fred')

                self.len(0, fred.getRules(gateiden=wlyr.iden))
                self.len(0, fred.getRules(gateiden=view2.iden))

    async def test_hive_auth_persistence(self):
        '''
        Verify that users, roles and gate-specific rules survive a
        cortex restart, and that deletions survive a restart too.
        '''
        with self.getTestDir() as fdir:

            async with self.getTestCoreAndProxy(dirn=fdir) as (core, prox):

                # Set a bunch of permissions
                fred = await prox.addUser('fred')
                await prox.setUserPasswd(fred['iden'], 'secret')

                vdef2 = await core.view.fork()
                view2_iden = vdef2.get('iden')
                view2 = core.getView(view2_iden)

                await alist(core.eval('[test:int=10] [test:int=11]'))
                viewiden = view2.iden
                layriden = view2.layers[0].iden
                rule = (True, ('view', 'read',))
                await prox.addUserRule(fred['iden'], rule, gateiden=viewiden)

                friends = await prox.addAuthRole('friends')
                rule = (True, ('node', 'prop', 'set',))
                await prox.addRoleRule(friends['iden'], rule, gateiden=layriden)
                await prox.addUserRole(fred['iden'], friends['iden'])

            # Restart the core/auth and make sure perms work
            async with self.getTestCoreAndProxy(dirn=fdir) as (core, prox):

                async with core.getLocalProxy(user='fred') as fredcore:
                    viewopts = {'view': view2.iden}
                    self.eq(2, await fredcore.count('test:int', opts=viewopts))
                    self.eq(1, await fredcore.count('test:int=11 [:loc=ru]', opts=viewopts))

                await core.auth.delUser(fred['iden'])
                await core.auth.delRole(friends['iden'])

                self.none(await core.auth.getUserByName('fred'))
                self.none(await core.auth.getRoleByName('friends'))

            # restart after user/role removal and test they stayed gone
            async with self.getTestCoreAndProxy(dirn=fdir) as (core, prox):
                self.none(await core.auth.getUserByName('fred'))
                self.none(await core.auth.getRoleByName('friends'))

    async def test_hive_auth_invalid(self):
        '''
        Invalid arguments to auth APIs (bad names, bad flag types,
        malformed rules) must raise BadArg or SchemaViolation.
        '''
        async with self.getTestCore() as core:

            with self.raises(s_exc.BadArg):
                await core.auth.setRoleName(core.auth.allrole.iden, 'ninjas')

            with self.raises(s_exc.BadArg):
                await core.auth.rootuser.setName(1)

            with self.raises(s_exc.BadArg):
                await core.auth.allrole.setName(1)

            with self.raises(s_exc.SchemaViolation):
                await core.auth.rootuser.addRule('vi.si')

            with self.raises(s_exc.SchemaViolation):
                await core.auth.rootuser.setRules(None)

            with self.raises(s_exc.SchemaViolation):
                await core.auth.allrole.setRules(None)

            with self.raises(s_exc.BadArg):
                await core.auth.rootuser.setAdmin('lol')

            with self.raises(s_exc.BadArg):
                await core.auth.rootuser.setLocked('lol')

            with self.raises(s_exc.BadArg):
                await core.auth.rootuser.setArchived('lol')

            with self.raises(s_exc.SchemaViolation):
                await core.auth.allrole.addRule((1, ('hehe', 'haha')))

            with self.raises(s_exc.SchemaViolation):
                await core.auth.allrole.setRules([(True, ('hehe', 'haha'), 'newp')])

            with self.raises(s_exc.SchemaViolation):
                await core.auth.allrole.setRules([(True, )])
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the Subregion table and add a
        nullable Municipality.subregion foreign key."""

        # Adding model 'Subregion'
        db.create_table('development_subregion', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('abbr', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal('development', ['Subregion'])

        # Adding field 'Municipality.subregion'
        # Nullable so that existing Municipality rows remain valid.
        db.add_column('development_municipality', 'subregion',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['development.Subregion'], null=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting model 'Subregion'
db.delete_table('development_subregion')
# Deleting field 'Municipality.subregion'
db.delete_column('development_municipality', 'subregion_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'development.communitytype': {
'Meta': {'object_name': 'CommunityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'development.municipality': {
'Meta': {'ordering': "['name']", 'object_name': 'Municipality'},
'communitytype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.CommunityType']", 'null': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'muni_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Subregion']", 'null': 'True'})
},
'development.project': {
'Meta': {'ordering': "['dd_id']", 'object_name': 'Project'},
'affordable_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'as_of_right': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ch40': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ZoningTool']", 'null': 'True', 'blank': 'True'}),
'clustosrd': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'commsf': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'complyr': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_created_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'dd_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ddname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dev_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edinstpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'emploss': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gqpop': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hotelrms': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'indmfpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_last_modified_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'lgmultifam': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'mapcintrnl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mfdisc': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mxduse': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ofcmdpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'otheremprat2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'othpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ovr55': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'parking_spaces': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pctaffall': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'phased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prjacrs': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'projecttype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectType']", 'null': 'True'}),
'projecttype_detail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rdv': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'retpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rndpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rptdemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'singfamhu': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stalled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectStatus']"}),
'taz': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Taz']", 'null': 'True', 'blank': 'True'}),
'todstation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.TODStation']", 'null': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_cost_allocated_pct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'totemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tothu': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'twnhsmmult': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'url_add': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'walkscore': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.WalkScore']", 'null': 'True'}),
'whspct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'xcoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ycoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'development.projectstatus': {
'Meta': {'object_name': 'ProjectStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'development.projecttype': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'development.subregion': {
'Meta': {'ordering': "['abbr']", 'object_name': 'Subregion'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'development.taz': {
'Meta': {'ordering': "['taz_id']", 'object_name': 'Taz'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Municipality']"}),
'taz_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'development.todstation': {
'Meta': {'object_name': 'TODStation'},
'comrail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'station_id': ('django.db.models.fields.IntegerField', [], {}),
'station_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subway': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'taz': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Taz']"})
},
'development.walkscore': {
'Meta': {'object_name': 'WalkScore'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snapped_lat': ('django.db.models.fields.FloatField', [], {}),
'snapped_lon': ('django.db.models.fields.FloatField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'walkscore': ('django.db.models.fields.IntegerField', [], {}),
'ws_link': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'development.zipcode': {
'Meta': {'ordering': "['zipcode']", 'object_name': 'ZipCode'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'development.zoningtool': {
'Meta': {'object_name': 'ZoningTool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '3'})
}
}
complete_apps = ['development']
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    """Build the cache key identifying a relay client socket.

    The key combines the datagram's source host and port with the
    *server* address family (note: the server af, not the dest af).
    """
    host, port = source_addr[0], source_addr[1]
    return '%s:%s:%d' % (host, port, server_af)
class UDPRelay(object):
    """Relays shadowsocks UDP datagrams between clients and servers.

    Runs in either "local" mode (SOCKS5-facing side) or "server" mode
    (internet-facing side), switching on ``is_local``.  Client sockets
    are pooled in an LRU cache keyed by (source addr, port, server af).
    """

    def __init__(self, config, dns_resolver, is_local):
        self._config = config
        if is_local:
            # Local side listens for SOCKS5 clients and forwards to the
            # configured shadowsocks server.
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['udp_server_port']
        else:
            # Server side listens on the shadowsocks port; the remote is
            # whatever destination each decrypted packet names.
            self._listen_addr = config['server']
            self._listen_port = config['udp_server_port']
            self._remote_addr = None
            self._remote_port = None
        self._dns_resolver = dns_resolver
        self._password = config['password']
        self._method = config['method']
        self._timeout = config['timeout']
        self._is_local = is_local
        # Client sockets expire after `timeout` and are closed via callback.
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        # Maps a client socket's fileno back to the requesting peer address.
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        self._last_time = time.time()
        # Set of filenos of live client sockets (ints, not socket objects).
        self._sockets = set()
        if 'forbidden_ip' in config:
            self._forbidden_iplist = config['forbidden_ip']
        else:
            self._forbidden_iplist = None

        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        server_socket.setblocking(False)
        self._server_socket = server_socket

    def _get_a_server(self):
        """Return a (server, port) pair, picking randomly from lists."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        if type(server) == list:
            server = random.choice(server)
        logging.debug('chosen server: %s:%d', server, server_port)
        return server, server_port

    def _close_client(self, client):
        """LRU eviction callback: tear down an expired client socket."""
        if hasattr(client, 'close'):
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client)
            client.close()
        else:
            # just an address
            pass

    def _handle_server(self):
        """Handle a datagram arriving on the listening (server) socket."""
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_server: data is empty')
        # NOTE(review): no `return` above — empty data falls through to
        # the parsing below; confirm whether a return is intended here.
        if self._is_local:
            # SOCKS5 UDP header: RSV(2) FRAG(1); fragmentation unsupported.
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('drop a message since frag is not 0')
                return
            else:
                data = data[3:]
        else:
            data = encrypt.encrypt_all(self._password, self._method, 0, data)
            # decrypt data
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result

        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port

        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            # NOTE(review): blocking getaddrinfo on the event loop thread
            # (see the TODO below about async getaddrinfo).
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs

        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)
        logging.debug(key)
        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr

            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN)

        if self._is_local:
            # Local side encrypts the full shadowsocks request before
            # forwarding to the remote server.
            data = encrypt.encrypt_all(self._password, self._method, 1, data)
            if not data:
                return
        else:
            # Server side strips the address header and forwards the payload.
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)

    def _handle_client(self, sock):
        """Handle a reply datagram arriving on a pooled client socket."""
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if not self._is_local:
            # Prepend the shadowsocks address header and encrypt the reply.
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            response = encrypt.encrypt_all(self._password, self._method, 1,
                                           data)
            if not response:
                return
        else:
            # Decrypt the server's reply and rebuild the SOCKS5 UDP header
            # (RSV=0x0000, FRAG=0x00) before handing it to the client.
            data = encrypt.encrypt_all(self._password, self._method, 0,
                                       data)
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            # addrtype, dest_addr, dest_port, header_length = header_result
            response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from a peer we do not know about
            # simply drop that packet
            pass

    def add_to_loop(self, loop):
        """Register this relay's sockets and handler with the event loop."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop

        loop.add_handler(self._handle_events)
        server_socket = self._server_socket
        self._eventloop.add(server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR)

    def _handle_events(self, events):
        """Event-loop callback: dispatch socket events and do housekeeping."""
        for sock, fd, event in events:
            if sock == self._server_socket:
                if event & eventloop.POLL_ERR:
                    logging.error('UDP server_socket err')
                self._handle_server()
            elif sock and (fd in self._sockets):
                if event & eventloop.POLL_ERR:
                    logging.error('UDP client_socket err')
                self._handle_client(sock)
        now = time.time()
        # Sweep expired cache entries at most every 3 seconds.
        if now - self._last_time > 3:
            self._cache.sweep()
            self._client_fd_to_server_addr.sweep()
            self._last_time = now
        if self._closed:
            self._server_socket.close()
            # NOTE(review): self._sockets holds filenos (ints, added via
            # client.fileno() above), so sock.close() here would raise —
            # confirm intended cleanup path for pooled client sockets.
            for sock in self._sockets:
                sock.close()
            self._eventloop.remove_handler(self._handle_events)

    def close(self, next_tick=False):
        """Mark the relay closed; close immediately unless deferred."""
        self._closed = True
        if not next_tick:
            self._server_socket.close()
| |
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sascha Bischoff
#
try:
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
except:
print "Unable to import sqlalchemy!"
raise
import os
from m5.util import fatal, panic
from m5.internal.stats import \
ScalarInfo, VectorInfo, Vector2dInfo, FormulaInfo, DistInfo, \
Deviation, Dist, Hist
Base = declarative_base()
def create_database(filename):
    """ Create the database used to store the stats. If it exists, delete it.

    Args:
        filename: The filename used to store the stats.

    Return:
        A handle to the database.
    """
    # Start from a clean file so stale stats from a previous run cannot
    # be mixed into this one.
    if os.path.exists(filename):
        os.remove(filename)

    try:
        db = create_engine('sqlite:///' + filename)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed before panic() reports the failure.
        panic("Failed to open database %s!", filename)

    db.echo = False
    return db
def create_tables(db):
    """Create the tables used to store the stats and information about them.

    Args:
        db: The database in which to create the tables.
    """
    metadata = MetaData(db)

    # Stores the information about the stats
    stats_table = Table('stats', metadata,
                        Column('id', Integer, primary_key = True),
                        Column('name', String),
                        Column('desc', String),
                        Column('subnames', String),
                        Column('y_subnames', String),
                        Column('subdescs', String),
                        Column('precision', Integer),
                        Column('prereq', Integer),
                        Column('flags', Integer),
                        Column('x', Integer),
                        Column('y', Integer),
                        Column('type', String),
                        Column('formula', String),
                        )

    # Stores scalar values; ('id', 'dump') identifies one value of one
    # stat at one dump.
    scalar_value_table = Table('scalarValue', metadata,
                               Column('id', Integer),
                               Column('dump', Integer),
                               Column('value', Float),
                               )

    # Stores vectors, 2d vectors and is also used to store formulas as they can
    # be scalars or vectors based on the stats used in the calculation.
    vector_value_table = Table('vectorValue', metadata,
                               Column('id', Integer),
                               Column('dump', Integer),
                               Column('value', Binary),
                               )

    # Stores distributions.
    dist_value_table = Table('distValue', metadata,
                             Column('id', Integer),
                             Column('dump', Integer),
                             Column('sum', Float),
                             Column('squares', Float),
                             Column('samples', Float),
                             Column('min', Float),
                             Column('max', Float),
                             Column('bucket', Float),
                             Column('vector', Binary),
                             Column('min_val', Float),
                             Column('max_val', Float),
                             Column('underflow', Float),
                             Column('overflow', Float),
                             )

    # Attaches descriptions to each dump.
    dump_desc_table = Table("dumpDesc", metadata,
                            Column('id', Integer),
                            Column('desc', String),
                            )

    metadata.create_all()
class StatsInfoClass(Base):
    """ Class used to insert the information about stats into the database. """
    __tablename__ = 'stats'

    # Columns mirror the 'stats' table built in create_tables().
    id = Column(Integer, primary_key = True)
    name = Column(String)
    desc = Column(String)
    subnames = Column(String)
    y_subnames = Column(String)
    subdescs = Column(String)
    precision = Column(Integer)
    prereq = Column(Integer)
    flags = Column(Integer)
    x = Column(Integer)
    y = Column(Integer)
    type = Column(String)
    formula = Column(String)

    def __init__(self, stat):
        # Fields common to every stat type.
        self.id = stat.id
        self.name = stat.name
        self.desc = stat.desc
        self.flags = stat.flags
        self.precision = stat.precision
        if stat.prereq:
            self.prereq = stat.prereq.id
        self.type = str(type(stat))
        # Type-specific fields.
        if isinstance(stat, VectorInfo):
            self.subnames = ','.join(stat.subnames)
            self.subdescs = ','.join(stat.subdescs)
        elif isinstance(stat, FormulaInfo):
            self.subnames = ','.join(stat.subnames)
            self.subdescs = ','.join(stat.subdescs)
            self.formula = stat.formula
        elif isinstance(stat, Vector2dInfo):
            self.subnames = ','.join(stat.subnames)
            self.y_subnames = ','.join(stat.y_subnames)
            self.subdescs = ','.join(stat.subdescs)
            self.x = stat.x
            self.y = stat.y
        elif isinstance(stat, DistInfo):
            # Distributions record a human-readable flavour name instead
            # of the Python class name.
            data = stat.data
            if data.type == Deviation:
                the_type = "Deviation"
            elif data.type == Dist:
                the_type = "Distribution"
            elif data.type == Hist:
                the_type = "Histogram"
            self.type = the_type

    def __repr__(self):
        # NOTE(review): '%d' conversions raise TypeError for fields left
        # as None (e.g. prereq, x, y for non-2d stats) — confirm this
        # repr is only used on fully populated rows.
        return "<User('%d','%s','%s','%s','%s','%s','%d','%d','%d','%d','%d', \
            '%s','%s')>" % (self.id, self.name, self.desc, self.subnames,
            self.y_subnames, self.subdescs, self.precision, self.prereq,
            self.flags, self.x, self.y, self.type, self.formula)
class ScalarValueClass(Base):
    """ Class used to insert scalar stats into the database. """
    __tablename__ = 'scalarValue'

    id = Column(Integer, primary_key = True)
    dump = Column(Integer)
    # Float, not Integer: this matches the 'scalarValue' table created in
    # create_tables() (Column('value', Float)) and the '%f' used by
    # __repr__ — scalar stat values are floating point.
    value = Column(Float)

    def __init__(self, id, dump, value):
        self.id = id
        self.dump = dump
        self.value = value

    def __repr__(self):
        return "<User('%d','%d','%f')>" % (self.id, self.dump, self.value)
class VectorValueClass(Base):
    """ Class used to insert vector stats into the database. """
    __tablename__ = 'vectorValue'

    id = Column(Integer, primary_key = True)
    dump = Column(Integer)
    value = Column(Binary)

    def __init__(self, id, dump, value):
        import array
        self.id = id
        self.dump = dump
        # Pack the vector as raw single-precision floats for the blob column.
        self.value = array.array('f', value).tostring()

    def __repr__(self):
        return "<User('%d','%d','%s')>" % (self.id, self.dump, self.value)
class DistValueClass(Base):
    """ Class used to insert distribution stats into the database. """
    __tablename__ = 'distValue'

    # Columns mirror the 'distValue' table built in create_tables().
    id = Column(Integer, primary_key = True)
    dump = Column(Integer)
    sum = Column(Float)
    squares = Column(Float)
    samples = Column(Float)
    min = Column(Float)
    max = Column(Float)
    bucket = Column(Float)
    vector = Column(Binary)
    min_val = Column(Float)
    max_val = Column(Float)
    underflow = Column(Float)
    overflow = Column(Float)

    def __init__(self, id, dump):
        # Only the key is set here; the caller (store_stat_value) fills in
        # whichever value fields apply to the distribution flavour stored.
        self.id = id
        self.dump = dump

    def __repr__(self):
        # NOTE(review): '%f' conversions raise TypeError for fields left
        # as None (e.g. min_val for plain histograms) — confirm usage.
        return "<User('%d','%d','%f','%f','%f','%f','%f','%f','%s','%f','%f', \
            '%f','%f')>" % (self.id, self.dump, self.sum, self.squares,
            self.samples, self.min, self.max, self.bucket, self.vector,
            self.min_val, self.max_val, self.underflow, self.overflow)
class DumpDescValueClass(Base):
    """ Class that stores the description of a stats dump. """
    __tablename__ = 'dumpDesc'

    id = Column(Integer, primary_key = True)
    desc = Column(String)

    def __init__(self, id, desc):
        self.id, self.desc = id, desc

    def __repr__(self):
        return "<User('%d','%s')>" % (self.id, self.desc)
def add_stat_info(stat, session):
    """ Add the information about a stat to the database session.

    Args:
        stat: The stat itself.
        session: The session associated with the database.
    """
    temp = StatsInfoClass(stat)
    session.add(temp)
def store_stat_value(stat, session, dumpCount):
    """ Stores the value of a stat.

    Args:
        stat: The stat itself.
        session: The session associated with the database.
        dumpCount: The number of dumps that have occured. Used to store multiple
            stats dumps in one database.
    """
    if isinstance(stat, ScalarInfo):
        temp = ScalarValueClass(id = stat.id, dump = dumpCount,
                                value = stat.value())
        session.add(temp)
    elif isinstance(stat, VectorInfo) or isinstance(stat, FormulaInfo):
        # Formulas can evaluate to scalars or vectors, so they share the
        # vector table (see create_tables()).
        temp = VectorValueClass(id = stat.id, dump = dumpCount,
                                value = stat.result())
        session.add(temp)
    elif isinstance(stat, Vector2dInfo):
        # 2d vectors are stored flat in the same binary vector format.
        temp = VectorValueClass(id = stat.id, dump = dumpCount,
                                value = stat.cvec)
        session.add(temp)
    elif isinstance(stat, DistInfo):
        import array
        data = stat.data
        temp = DistValueClass(id = stat.id, dump = dumpCount)
        # Moments are present for every distribution flavour.
        temp.sum = data.sum
        temp.squares = data.squares
        temp.samples = data.samples
        if data.type == Dist or data.type == Hist:
            # Bucketed flavours additionally store the bucket vector as
            # raw single-precision floats.
            temp.min = data.min
            temp.max = data.max
            temp.bucket = data.bucket_size
            a = array.array('f', data.cvec)
            temp.vector = a.tostring()
        if data.type == Dist:
            # Full distributions also track the observed range and the
            # out-of-range sample counts.
            temp.min_val = data.min_val
            temp.max_val = data.max_val
            temp.underflow = data.underflow
            temp.overflow = data.overflow
        session.add(temp)
    else:
        panic("Unable to output stat %s. Unsupported stat type!", stat)
def store_dump_desc(session, desc, dump_count):
    """ Stores the description of this dump.

    Args:
        session: The session associated with the database.
        desc: Description text for this dump.
        dump_count: Index identifying the dump being described.
    """
    row = DumpDescValueClass(id=dump_count, desc=desc)
    session.add(row)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_OptimizeDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _generate_captured_refvar_test_cases():
  """Generates testcases.

  Returns:
    A list of tuples of (testcase_name, make_dataset_fn). Each
    make_dataset_fn accepts a tf.Variable and builds a test dataset
    that captures that variable.
  """

  def dataset_with_map(var):
    return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)

  def dataset_with_flat_map(var):
    return dataset_ops.Dataset.from_tensors(0).flat_map(
        lambda _: dataset_ops.Dataset.from_tensors(var))

  def dataset_with_filter(var):
    return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)

  def dataset_with_map_and_batch(var):

    def add_var(x):
      return x + var

    return dataset_ops.Dataset.from_tensors(0).apply(
        batching.map_and_batch(add_var, 1))

  def dataset_with_group_by_reducer(var):
    # The variable is only captured in the finalize step of the reducer.
    reducer = grouping.Reducer(
        init_func=lambda _: 0,
        reduce_func=lambda x, y: x,
        finalize_func=lambda _: var)
    return dataset_ops.Dataset.range(5).apply(
        grouping.group_by_reducer(lambda x: x % 2, reducer))

  def dataset_with_group_by_window(var):

    def reduce_fn(key, bucket):
      del key, bucket
      return dataset_ops.Dataset.from_tensors(var)

    return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(
        grouping.group_by_window(lambda _: 0, reduce_fn, 10))

  def dataset_with_scan(var):
    return dataset_ops.Dataset.from_tensors(0).apply(
        scan_ops.scan(
            0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))

  # Pair each human-readable case name with its dataset factory.
  return [
      # Core datasets
      ("Map", dataset_with_map),
      ("FlatMap", dataset_with_flat_map),
      ("Filter", dataset_with_filter),
      # Experimental datasets
      ("MapAndBatch", dataset_with_map_and_batch),
      ("GroupByReducer", dataset_with_group_by_reducer),
      ("GroupByWindow", dataset_with_group_by_window),
      ("Scan", dataset_with_scan)
  ]
@test_util.run_all_in_graph_and_eager_modes
class OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for the private `_OptimizeDataset` transformation."""

  def testOptimizationStatefulFunction(self):
    # A map function calling random_uniform is stateful; the pipeline
    # should still produce a value when default optimizations are off.
    dataset = dataset_ops.Dataset.range(
        10).map(lambda _: random_ops.random_uniform([])).batch(10)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)
    self.evaluate(get_next())
  # TODO(b/123300735): Add eager coverage for the following tests.
  def testSkipEagerOptimizationLargeInputFromTensor(self):
    # Feed a large (512x1024x1025 int32) tensor through from_tensors to
    # exercise optimization with a big constant input.
    input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
    dataset = dataset_ops.Dataset.from_tensors(input_t)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
      self.evaluate(get_next)
  # TODO(b/117581999): Add eager coverage for the following tests.
  def testSkipEagerOptimizationLargeInputFromTensorSlices(self):
    # Same as above, but through from_tensor_slices (rank-4 input).
    input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
    dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
      self.evaluate(get_next)
  def testOptimizationNestedDataset(self):
    # Verifies that noop elimination also runs inside a nested
    # (flat_map) dataset: assert_next checks the op that follows.
    def flat_map_fn(_):
      dataset = dataset_ops.Dataset.from_tensors(0)
      dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
      dataset = dataset.skip(0)  # Should be removed by noop elimination
      dataset = dataset.cache()
      return dataset
    dataset = dataset_ops.Dataset.range(1)
    dataset = dataset.flat_map(flat_map_fn)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(dataset, expected_output=[0])
  def testOptimizationNestedDatasetWithModifiedRetval(self):
    # The optimizer may rewrite the return value of the nested function
    # (map + batch fused into MapAndBatch).
    def flat_map_fn(_):
      dataset = dataset_ops.Dataset.from_tensors(0)
      dataset = dataset.apply(optimization.assert_next(["MapAndBatch"]))
      # Should be fused by map and batch fusion
      dataset = dataset.map(lambda x: x)
      dataset = dataset.batch(1)
      return dataset
    dataset = dataset_ops.Dataset.range(1)
    dataset = dataset.flat_map(flat_map_fn)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.map_and_batch_fusion = True
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(dataset, expected_output=[[0]])
  def testOptimizationThreadPoolDataset(self):
    # Optimization should compose with a private thread pool override.
    dataset = dataset_ops.Dataset.range(10).batch(10)
    dataset = threadpool.override_threadpool(
        dataset,
        threadpool.PrivateThreadPool(
            2, display_name="private_thread_pool_%d" % 2))
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset,
        expected_output=[list(range(10))],
        requires_initialization=True)
  def testOptimizationNonSerializable(self):
    # Ops before the non-serializable marker must be left untouched
    # (first skip survives); ops after it are still optimized.
    dataset = dataset_ops.Dataset.from_tensors(0)
    dataset = dataset.apply(optimization.assert_next(["FiniteSkip"]))
    dataset = dataset.skip(0)  # Should not be removed by noop elimination
    dataset = dataset.apply(optimization.non_serializable())
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # Should be removed by noop elimination
    dataset = dataset.cache()
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(dataset, expected_output=[0])
  def testOptimizationNonSerializableAsDirectInput(self):
    """Tests that non-serializable dataset can be OptimizeDataset's input."""
    dataset = dataset_ops.Dataset.from_tensors(0)
    dataset = dataset.apply(optimization.non_serializable())
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(dataset, expected_output=[0])
  @parameterized.named_parameters(_generate_captured_refvar_test_cases())
  # Skip eager because RefVariables are not supported in eager mode.
  def testSkipEagerOptimizationWithCapturedRefVar(self, dataset_fn):
    """Tests that default optimizations are disabled with ref variables."""
    variable = variable_scope.get_variable(
        "v", initializer=0, use_resource=False)
    assign_op = variable.assign_add(1)
    # Check that warning is logged.
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
      unoptimized_dataset = dataset_fn(variable)
      options = dataset_ops.Options()
      options.experimental_optimization.apply_default_optimizations = False
      options.experimental_optimization.noop_elimination = True
      options.experimental_optimization.map_and_batch_fusion = True
      optimized_dataset = unoptimized_dataset.with_options(options)
      optimized_it = optimized_dataset.make_initializable_iterator()
    self.assertGreaterEqual(len(w), 1)
    expected = ("tf.data static optimizations are not compatible with "
                "tf.Variable. The following optimizations will be disabled: %s."
                " To enable optimizations, use resource variables instead by "
                "calling `tf.enable_resource_variables()` at the start of the "
                "program." % (", ".join(options._static_optimizations())))
    self.assertTrue(any([expected in str(warning) for warning in w]))
    # Check that outputs are the same in the optimized and unoptimized cases,
    # when the variable value is changing.
    unoptimized_it = unoptimized_dataset.make_initializable_iterator()
    with ops.control_dependencies([assign_op]):
      unoptimized_output = unoptimized_it.get_next()
      optimized_output = optimized_it.get_next()
    self.evaluate(variable.initializer)
    self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
    # Drain both iterators in lockstep until exhaustion, comparing
    # element-by-element.
    while True:
      try:
        unoptimized, optimized = self.evaluate((unoptimized_output,
                                                optimized_output))
        self.assertEqual(unoptimized, optimized)
      except errors.OutOfRangeError:
        break
  def testOptimizationEnabledByDefault(self):
    """Tests that some optimizations are applied to datasets by default."""
    options = dataset_ops.Options()
    expected_optimizations = [
        "map_and_batch_fusion",
        "noop_elimination",
        "shuffle_and_repeat_fusion",
    ]
    self.assertEqual(
        set(options._static_optimizations()), set(expected_optimizations))
  def testOptimizationDisableDefault(self):
    """Tests that we can disable all static optimizations enabled by default.

    If the `apply_default_optimizations` optimization options flag is False,
    only explicitly enabled optimizations will be applied.
    """
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.hoist_random_uniform = True
    options.experimental_optimization.noop_elimination = True
    expected_optimizations = [
        "hoist_random_uniform",
        "noop_elimination",
    ]
    self.assertEqual(
        set(options._static_optimizations()), set(expected_optimizations))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
# Driver version reported to Cinder; see the class docstring for history.
VERSION = '1.3.1'
# Module-level logger, keyed by this module's import path.
LOG = logging.getLogger(__name__)
@interface.volumedriver
class NexentaISCSIDriver(driver.ISCSIDriver):
    """Executes volume driver commands on Nexenta Appliance.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver version.
        1.0.1 - Fixed bug #1236626: catch "does not exist" exception of
                lu_exists.
        1.1.0 - Changed class name to NexentaISCSIDriver.
        1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy.
        1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs
                clone.
        1.1.3 - Extended volume stats provided by _update_volume_stats method.
        1.2.0 - Added volume migration with storage assist method.
        1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location
                of migrated volume; after migrating volume migrate_volume
                destroy snapshot on migration destination.
        1.3.0 - Added retype method.
        1.3.0.1 - Target creation refactor.
        1.3.1 - Added ZFS cleanup.
    """

    VERSION = VERSION

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Nexenta_CI"

    def __init__(self, *args, **kwargs):
        super(NexentaISCSIDriver, self).__init__(*args, **kwargs)
        self.nms = None
        # Maps iSCSI target name -> list of volume names exported through it.
        self.targets = {}
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_CONNECTION_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_ISCSI_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_DATASET_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_RRMGR_OPTS)
        self.nms_protocol = self.configuration.nexenta_rest_protocol
        self.nms_host = self.configuration.nexenta_host
        self.nms_port = self.configuration.nexenta_rest_port
        self.nms_user = self.configuration.nexenta_user
        self.nms_password = self.configuration.nexenta_password
        self.volume = self.configuration.nexenta_volume
        self.volume_compression = (
            self.configuration.nexenta_dataset_compression)
        self.volume_deduplication = self.configuration.nexenta_dataset_dedup
        self.volume_description = (
            self.configuration.nexenta_dataset_description)
        self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
        self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
        self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
        self.iscsi_target_portal_port = (
            self.configuration.nexenta_iscsi_target_portal_port)
        # ZFS objects that could not be destroyed immediately (e.g. busy
        # snapshots); removed lazily by _collect_garbage().
        self._needless_objects = set()

    @staticmethod
    def get_driver_options():
        """Return the config options this driver registers."""
        return (options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_ISCSI_OPTS +
                options.NEXENTA_DATASET_OPTS + options.NEXENTA_RRMGR_OPTS)

    @property
    def backend_name(self):
        """Configured backend name, falling back to the class name."""
        backend_name = None
        if self.configuration:
            backend_name = self.configuration.safe_get('volume_backend_name')
        if not backend_name:
            backend_name = self.__class__.__name__
        return backend_name

    def do_setup(self, context):
        """Initialize the NMS JSON-RPC proxy from configuration."""
        if self.nms_protocol == 'auto':
            protocol, auto = 'http', True
        else:
            protocol, auto = self.nms_protocol, False
        self.nms = jsonrpc.NexentaJSONProxy(
            protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user,
            self.nms_password, auto=auto)

    def check_for_setup_error(self):
        """Verify that the volume for our zvols exists.

        :raise: :py:exc:`LookupError`
        """
        if not self.nms.volume.object_exists(self.volume):
            raise LookupError(_("Volume %s does not exist in Nexenta SA") %
                              self.volume)

    def _get_zvol_name(self, volume_name):
        """Return zvol name that corresponds given volume name."""
        return '%s/%s' % (self.volume, volume_name)

    def _create_target(self, target_idx):
        """Create an iSCSI target and its target group on the appliance.

        "already exists" errors from NMS are ignored so the operation is
        idempotent when re-ensuring an export.

        :param target_idx: ordinal used to build a unique target name
        :return: the (possibly pre-existing) target name
        """
        target_name = '%s%s-%i' % (
            self.configuration.nexenta_target_prefix,
            self.nms_host,
            target_idx
        )
        target_group_name = self._get_target_group_name(target_name)
        if not self._target_exists(target_name):
            try:
                self.nms.iscsitarget.create_target({
                    'target_name': target_name})
            except utils.NexentaException as exc:
                if 'already' in exc.args[0]:
                    LOG.info('Ignored target creation error "%s" while '
                             'ensuring export.',
                             exc)
                else:
                    raise
        if not self._target_group_exists(target_group_name):
            try:
                self.nms.stmf.create_targetgroup(target_group_name)
            except utils.NexentaException as exc:
                if 'already' in exc.args[0]:
                    LOG.info('Ignored target group creation error "%s" '
                             'while ensuring export.',
                             exc)
                else:
                    raise
        if not self._target_member_in_target_group(target_group_name,
                                                   target_name):
            try:
                self.nms.stmf.add_targetgroup_member(target_group_name,
                                                     target_name)
            except utils.NexentaException as exc:
                if 'already' in exc.args[0]:
                    LOG.info('Ignored target group member addition error '
                             '"%s" while ensuring export.',
                             exc)
                else:
                    raise
        self.targets[target_name] = []
        return target_name

    def _get_target_name(self, volume):
        """Return iSCSI target name with least LUs."""
        provider_location = volume.get('provider_location')
        # Materialize the key view: dict.keys() is not indexable on
        # Python 3, and the least-loaded branch below indexes it.
        target_names = list(self.targets)
        if provider_location:
            # Reuse the target recorded in the provider location string,
            # formatted as '<host>:<port>,1 <target_name> <lun>'.
            target_name = provider_location.split(',1 ')[1].split(' ')[0]
            if not self.targets.get(target_name):
                self.targets[target_name] = []
            if volume['name'] not in self.targets[target_name]:
                self.targets[target_name].append(volume['name'])
        elif not target_names:
            # create first target and target group
            target_name = self._create_target(0)
            self.targets[target_name].append(volume['name'])
        else:
            # Pick the target currently exporting the fewest volumes.
            target_name = target_names[0]
            for target in target_names:
                if len(self.targets[target]) < len(self.targets[target_name]):
                    target_name = target
            if len(self.targets[target_name]) >= 20:
                # create new target and target group
                target_name = self._create_target(len(target_names))
            if volume['name'] not in self.targets[target_name]:
                self.targets[target_name].append(volume['name'])
        return target_name

    def _get_target_group_name(self, target_name):
        """Return Nexenta iSCSI target group name for volume."""
        return target_name.replace(
            self.configuration.nexenta_target_prefix,
            self.configuration.nexenta_target_group_prefix
        )

    @staticmethod
    def _get_clone_snapshot_name(volume):
        """Return name for snapshot that will be used to clone the volume."""
        return 'cinder-clone-snapshot-%(id)s' % volume

    @staticmethod
    def _is_clone_snapshot_name(snapshot):
        """Check if snapshot is created for cloning."""
        name = snapshot.split('@')[-1]
        return name.startswith('cinder-clone-snapshot-')

    def create_volume(self, volume):
        """Create a zvol on appliance.

        :param volume: volume reference
        """
        self.nms.zvol.create(
            self._get_zvol_name(volume['name']),
            '%sG' % (volume['size'],),
            six.text_type(self.configuration.nexenta_blocksize),
            self.configuration.nexenta_sparse)

    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: volume reference
        :param new_size: volume new size in GB
        """
        LOG.info('Extending volume: %(id)s New size: %(size)s GB',
                 {'id': volume['id'], 'size': new_size})
        self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
                                     'volsize', '%sG' % new_size)

    def delete_volume(self, volume):
        """Destroy a zvol on appliance.

        :param volume: volume reference
        """
        volume_name = self._get_zvol_name(volume['name'])
        try:
            # Read the origin before destroying so that a now-orphaned
            # clone snapshot can be garbage-collected afterwards.
            props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
            self.nms.zvol.destroy(volume_name, '')
        except utils.NexentaException as exc:
            if 'does not exist' in exc.args[0]:
                LOG.info('Volume %s does not exist, it '
                         'seems it was already deleted.', volume_name)
                return
            if 'zvol has children' in exc.args[0]:
                # Busy volume: defer removal until its children go away.
                self._mark_as_garbage(volume_name)
                LOG.info('Volume %s will be deleted later.', volume_name)
                return
            raise
        origin = props.get('origin')
        self._collect_garbage(origin)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        """
        snapshot = {'volume_name': src_vref['name'],
                    'name': self._get_clone_snapshot_name(volume),
                    'volume_size': src_vref['size']}
        LOG.debug('Creating temp snapshot of the original volume: '
                  '%(volume_name)s@%(name)s', snapshot)
        # We don't delete this snapshot, because this snapshot will be origin
        # of new volume. This snapshot will be automatically promoted by NMS
        # when user will delete origin volume. But when cloned volume deleted
        # we check its origin property and delete source snapshot if needed.
        self.create_snapshot(snapshot)
        try:
            self.create_volume_from_snapshot(volume, snapshot)
            self._mark_as_garbage('@'.join(
                (self._get_zvol_name(src_vref['name']), snapshot['name'])))
        except utils.NexentaException:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    'Volume creation failed, deleting created snapshot '
                    '%(volume_name)s@%(name)s', snapshot)
                try:
                    self.delete_snapshot(snapshot)
                except (utils.NexentaException, exception.SnapshotIsBusy):
                    LOG.warning('Failed to delete zfs snapshot '
                                '%(volume_name)s@%(name)s', snapshot)
                raise

    def _get_zfs_send_recv_cmd(self, src, dst):
        """Returns rrmgr command for source and destination."""
        return utils.get_rrmgr_cmd(src, dst,
                                   compression=self.rrmgr_compression,
                                   tcp_buf_size=self.rrmgr_tcp_buf_size,
                                   connections=self.rrmgr_connections)

    @staticmethod
    def get_nms_for_url(url):
        """Returns initialized nms object for url."""
        auto, scheme, user, password, host, port, path = (
            utils.parse_nms_url(url))
        return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                        password, auto=auto)

    def migrate_volume(self, ctxt, volume, host):
        """Migrate if volume and host are managed by Nexenta appliance.

        :param ctxt: context
        :param volume: a dictionary describing the volume to migrate
        :param host: a dictionary describing the host to migrate to
        :return: (migrated, model_update) tuple; (False, None) when the
                 destination cannot be handled by this driver
        """
        LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host})
        false_ret = (False, None)
        if volume['status'] not in ('available', 'retyping'):
            return false_ret
        if 'capabilities' not in host:
            return false_ret
        capabilities = host['capabilities']
        if ('location_info' not in capabilities or
                'iscsi_target_portal_port' not in capabilities or
                'nms_url' not in capabilities):
            return false_ret
        nms_url = capabilities['nms_url']
        dst_parts = capabilities['location_info'].split(':')
        if (capabilities.get('vendor_name') != 'Nexenta' or
                dst_parts[0] != self.__class__.__name__ or
                capabilities['free_capacity_gb'] < volume['size']):
            return false_ret
        dst_host, dst_volume = dst_parts[1:]
        ssh_bound = False
        ssh_bindings = self.nms.appliance.ssh_list_bindings()
        for bind in ssh_bindings:
            if dst_host.startswith(ssh_bindings[bind][3]):
                ssh_bound = True
                break
        if not ssh_bound:
            LOG.warning("Remote NexentaStor appliance at %s should be "
                        "SSH-bound.", dst_host)
        # Create temporary snapshot of volume on NexentaStor Appliance.
        snapshot = {
            'volume_name': volume['name'],
            'name': utils.get_migrate_snapshot_name(volume)
        }
        self.create_snapshot(snapshot)
        src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
            'volume': self.volume,
            'zvol': volume['name'],
            'snapshot': snapshot['name']
        }
        dst = ':'.join([dst_host, dst_volume])
        try:
            self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
        except utils.NexentaException as exc:
            LOG.warning("Cannot send source snapshot %(src)s to "
                        "destination %(dst)s. Reason: %(exc)s",
                        {'src': src, 'dst': dst, 'exc': exc})
            return false_ret
        finally:
            # Best effort: the temporary snapshot is no longer needed on
            # the source regardless of whether the send succeeded.
            try:
                self.delete_snapshot(snapshot)
            except utils.NexentaException as exc:
                LOG.warning("Cannot delete temporary source snapshot "
                            "%(src)s on NexentaStor Appliance: %(exc)s",
                            {'src': src, 'exc': exc})
        try:
            self.delete_volume(volume)
        except utils.NexentaException as exc:
            LOG.warning("Cannot delete source volume %(volume)s on "
                        "NexentaStor Appliance: %(exc)s",
                        {'volume': volume['name'], 'exc': exc})
        # Clean up the replicated temporary snapshot on the destination.
        dst_nms = self.get_nms_for_url(nms_url)
        dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'],
                                     snapshot['name'])
        try:
            dst_nms.snapshot.destroy(dst_snapshot, '')
        except utils.NexentaException as exc:
            LOG.warning("Cannot delete temporary destination snapshot "
                        "%(dst)s on NexentaStor Appliance: %(exc)s",
                        {'dst': dst_snapshot, 'exc': exc})
        return True, None

    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        :param context: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        LOG.debug('Retype volume request %(vol)s to be %(type)s '
                  '(host: %(host)s), diff %(diff)s.',
                  {'vol': volume['name'],
                   'type': new_type,
                   'host': host,
                   'diff': diff})
        # Maps extra-spec keys to the ZFS/NMS property they control.
        options = dict(
            compression='compression',
            dedup='dedup',
            description='nms:description'
        )
        retyped = False
        migrated = False
        capabilities = host['capabilities']
        src_backend = self.__class__.__name__
        dst_backend = capabilities['location_info'].split(':')[0]
        if src_backend != dst_backend:
            LOG.warning('Cannot retype from %(src_backend)s to '
                        '%(dst_backend)s.',
                        {'src_backend': src_backend,
                         'dst_backend': dst_backend})
            return False
        hosts = (volume['host'], host['host'])
        old, new = hosts
        if old != new:
            migrated, provider_location = self.migrate_volume(
                context, volume, host)
        if not migrated:
            nms = self.nms
        else:
            # Apply property changes on the destination appliance.
            nms_url = capabilities['nms_url']
            nms = self.get_nms_for_url(nms_url)
        zvol = '%s/%s' % (
            capabilities['location_info'].split(':')[-1], volume['name'])
        for opt in options:
            old, new = diff.get('extra_specs').get(opt, (False, False))
            if old != new:
                LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
                          {'opt': opt, 'old': old, 'new': new})
                try:
                    nms.zvol.set_child_prop(
                        zvol, options[opt], new)
                    retyped = True
                except utils.NexentaException:
                    LOG.error('Error trying to change %(opt)s'
                              ' from %(old)s to %(new)s',
                              {'opt': opt, 'old': old, 'new': new})
                    return False, None
        return retyped or migrated, None

    def create_snapshot(self, snapshot):
        """Create snapshot of existing zvol on appliance.

        :param snapshot: snapshot reference
        """
        self.nms.zvol.create_snapshot(
            self._get_zvol_name(snapshot['volume_name']),
            snapshot['name'], '')

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other's snapshot on appliance.

        :param volume: reference of volume to be created
        :param snapshot: reference of source snapshot
        """
        self.nms.zvol.clone(
            '%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
                       snapshot['name']),
            self._get_zvol_name(volume['name']))
        if (('size' in volume) and (
                volume['size'] > snapshot['volume_size'])):
            self.extend_volume(volume, volume['size'])

    def delete_snapshot(self, snapshot):
        """Delete volume's snapshot on appliance.

        :param snapshot: snapshot reference
        """
        volume_name = self._get_zvol_name(snapshot['volume_name'])
        snapshot_name = '%s@%s' % (volume_name, snapshot['name'])
        try:
            self.nms.snapshot.destroy(snapshot_name, '')
        except utils.NexentaException as exc:
            if "does not exist" in exc.args[0]:
                LOG.info('Snapshot %s does not exist, it seems it was '
                         'already deleted.', snapshot_name)
                return
            elif "snapshot has dependent clones" in exc.args[0]:
                # Busy snapshot: defer removal until its clones go away.
                self._mark_as_garbage(snapshot_name)
                LOG.info('Snapshot %s has dependent clones, will be '
                         'deleted later.', snapshot_name)
                return
            raise
        self._collect_garbage(volume_name)

    def local_path(self, volume):
        """Return local path to existing local volume.

        We never have local volumes, so it raises NotImplementedError.

        :raise: :py:exc:`NotImplementedError`
        """
        raise NotImplementedError

    def _target_exists(self, target):
        """Check if iSCSI target exist.

        :param target: target name
        :return: True if target exist, else False
        """
        targets = self.nms.stmf.list_targets()
        if not targets:
            return False
        # Reuse the list already fetched above instead of issuing a
        # second list_targets request.
        return target in targets

    def _target_group_exists(self, target_group):
        """Check if target group exist.

        :param target_group: target group
        :return: True if target group exist, else False
        """
        groups = self.nms.stmf.list_targetgroups()
        if not groups:
            return False
        return target_group in groups

    def _target_member_in_target_group(self, target_group, target_member):
        """Check if target member in target group.

        :param target_group: target group
        :param target_member: target member
        :return: True if target member in target group, else False
        :raises NexentaException: if target group doesn't exist
        """
        members = self.nms.stmf.list_targetgroup_members(target_group)
        if not members:
            return False
        return target_member in members

    def _lu_exists(self, zvol_name):
        """Check if LU exists on appliance.

        :param zvol_name: Zvol name
        :raises NexentaException: if zvol not exists
        :return: True if LU exists, else False
        """
        try:
            return bool(self.nms.scsidisk.lu_exists(zvol_name))
        except utils.NexentaException as exc:
            if 'does not exist' not in exc.args[0]:
                raise
            return False

    def _is_lu_shared(self, zvol_name):
        """Check if LU exists on appliance and shared.

        :param zvol_name: Zvol name
        :raises NexentaException: if Zvol not exist
        :return: True if LU exists and shared, else False
        """
        try:
            shared = self.nms.scsidisk.lu_shared(zvol_name) > 0
        except utils.NexentaException as exc:
            if 'does not exist for zvol' not in exc.args[0]:
                raise  # Zvol does not exists
            shared = False  # LU does not exist
        return shared

    def create_export(self, _ctx, volume, connector):
        """Create new export for zvol.

        :param volume: reference of volume to be exported
        :return: iscsiadm-formatted provider location string
        """
        model_update = self._do_export(_ctx, volume)
        return model_update

    def ensure_export(self, _ctx, volume):
        """Recreate the export for an existing zvol if needed."""
        self._do_export(_ctx, volume)

    def _do_export(self, _ctx, volume):
        """Recreate parts of export if necessary.

        :param volume: reference of volume to be exported
        :return: model update dict with 'provider_location' when a new LU
                 or mapping entry was created, otherwise an empty dict
        """
        zvol_name = self._get_zvol_name(volume['name'])
        target_name = self._get_target_name(volume)
        target_group_name = self._get_target_group_name(target_name)
        entry = None
        if not self._lu_exists(zvol_name):
            try:
                entry = self.nms.scsidisk.create_lu(zvol_name, {})
            except utils.NexentaException as exc:
                if 'in use' not in exc.args[0]:
                    raise
                LOG.info('Ignored LU creation error "%s" while ensuring '
                         'export.', exc)
        if not self._is_lu_shared(zvol_name):
            try:
                entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
                    'target_group': target_group_name})
            except utils.NexentaException as exc:
                if 'view entry exists' not in exc.args[0]:
                    raise
                LOG.info('Ignored LUN mapping entry addition error "%s" '
                         'while ensuring export.', exc)
        model_update = {}
        if entry:
            provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
                'host': self.nms_host,
                'port': self.configuration.nexenta_iscsi_target_portal_port,
                'name': target_name,
                'lun': entry['lun'],
            }
            model_update = {'provider_location': provider_location}
        return model_update

    def remove_export(self, _ctx, volume):
        """Destroy all resources created to export zvol.

        :param volume: reference of volume to be unexported
        """
        target_name = self._get_target_name(volume)
        self.targets[target_name].remove(volume['name'])
        zvol_name = self._get_zvol_name(volume['name'])
        self.nms.scsidisk.delete_lu(zvol_name)

    def _update_volume_stats(self):
        """Retrieve stats info for NexentaStor appliance."""
        LOG.debug('Updating volume stats')
        stats = self.nms.volume.get_child_props(
            self.configuration.nexenta_volume, 'health|size|used|available')
        total_amount = utils.str2gib_size(stats['size'])
        free_amount = utils.str2gib_size(stats['available'])
        location_info = '%(driver)s:%(host)s:%(volume)s' % {
            'driver': self.__class__.__name__,
            'host': self.nms_host,
            'volume': self.volume
        }
        self._stats = {
            'vendor_name': 'Nexenta',
            'dedup': self.volume_deduplication,
            'compression': self.volume_compression,
            'description': self.volume_description,
            'driver_version': self.VERSION,
            'storage_protocol': 'iSCSI',
            'total_capacity_gb': total_amount,
            'free_capacity_gb': free_amount,
            'reserved_percentage': self.configuration.reserved_percentage,
            'QoS_support': False,
            'volume_backend_name': self.backend_name,
            'location_info': location_info,
            'iscsi_target_portal_port': self.iscsi_target_portal_port,
            'nms_url': self.nms.url
        }

    def _collect_garbage(self, zfs_object):
        """Destroys ZFS parent objects

        Recursively destroys ZFS parent volumes and snapshots if they are
        marked as garbage

        :param zfs_object: full path to a volume or a snapshot
        """
        if zfs_object and zfs_object in self._needless_objects:
            sp = zfs_object.split('/')
            path = '/'.join(sp[:-1])
            name = sp[-1]
            if '@' in name:  # it's a snapshot:
                volume, snap = name.split('@')
                parent = '/'.join((path, volume))
                try:
                    self.nms.snapshot.destroy(zfs_object, '')
                except utils.NexentaException as exc:
                    # Still busy: keep it marked and retry on a later pass.
                    LOG.debug('Error occurred while trying to delete a '
                              'snapshot: %s', exc)
                    return
            else:
                try:
                    props = self.nms.zvol.get_child_props(
                        zfs_object, 'origin') or {}
                except utils.NexentaException:
                    props = {}
                parent = (props['origin'] if 'origin' in props and
                          props['origin'] else '')
                try:
                    self.nms.zvol.destroy(zfs_object, '')
                except utils.NexentaException as exc:
                    LOG.debug('Error occurred while trying to delete a '
                              'volume: %s', exc)
                    return
            self._needless_objects.remove(zfs_object)
            # Walk up: the parent may itself be deferred garbage now.
            self._collect_garbage(parent)

    def _mark_as_garbage(self, zfs_object):
        """Puts ZFS object into list for further removal

        :param zfs_object: full path to a volume or a snapshot
        """
        self._needless_objects.add(zfs_object)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
from pyspark.testing.utils import PySparkTestCase
class SparkSessionTests(ReusedSQLTestCase):
    """Verifies that SQLContext instances built over the same SparkContext
    share a single underlying SparkSession."""

    def test_sqlcontext_reuses_sparksession(self):
        first_ctx = SQLContext(self.sc)
        second_ctx = SQLContext(self.sc)
        # Identity (not mere equality) is required: both contexts must wrap
        # the very same SparkSession object.
        self.assertTrue(first_ctx.sparkSession is second_ctx.sparkSession)
class SparkSessionTests1(ReusedSQLTestCase):
    """Kept out of SQLTests: this test stops the class-level SparkContext,
    which would break the other tests that share it."""

    def test_sparksession_with_stopped_sparkcontext(self):
        # Stop the shared context, then prove a brand-new context still
        # supports building a session and materializing a DataFrame.
        self.sc.stop()
        fresh_sc = SparkContext('local[4]', self.sc.appName)
        fresh_session = SparkSession.builder.getOrCreate()
        try:
            frame = fresh_session.createDataFrame([(1, 2)], ["c", "c"])
            frame.collect()
        finally:
            fresh_session.stop()
            fresh_sc.stop()
class SparkSessionTests2(PySparkTestCase):
    """JVM-side default-session bookkeeping tests.

    This test is separate because it's closely related with session's start
    and stop. See SPARK-23228.
    """

    def test_set_jvm_default_session(self):
        # getOrCreate() must register the session as the JVM default;
        # stop() must clear that registration.
        spark = SparkSession.builder.getOrCreate()
        try:
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
        finally:
            spark.stop()
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())

    def test_jvm_default_session_already_set(self):
        # Here, we assume there is the default session already set in JVM.
        jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
        self.sc._jvm.SparkSession.setDefaultSession(jsession)
        spark = SparkSession.builder.getOrCreate()
        try:
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
            # The session should be the same with the existing one.
            self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
        finally:
            spark.stop()
class SparkSessionTests3(unittest.TestCase):
    """SparkSession lifecycle tests: active/default session tracking,
    builder configuration propagation, and create/stop semantics.

    Uses a plain unittest.TestCase (no shared fixture) because each test
    needs full control over when its SparkSession starts and stops.
    """

    def test_active_session(self):
        spark = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        try:
            activeSession = SparkSession.getActiveSession()
            df = activeSession.createDataFrame([(1, 'Alice')], ['age', 'name'])
            self.assertEqual(df.collect(), [Row(age=1, name=u'Alice')])
        finally:
            spark.stop()

    def test_get_active_session_when_no_active_session(self):
        # No active session before creation, the session itself while it
        # lives, and None again after stop().
        active = SparkSession.getActiveSession()
        self.assertEqual(active, None)
        spark = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        active = SparkSession.getActiveSession()
        self.assertEqual(active, spark)
        spark.stop()
        active = SparkSession.getActiveSession()
        self.assertEqual(active, None)

    def test_SparkSession(self):
        spark = SparkSession.builder \
            .master("local") \
            .config("some-config", "v2") \
            .getOrCreate()
        try:
            # Builder config must be visible through both conf and the
            # underlying SparkContext.
            self.assertEqual(spark.conf.get("some-config"), "v2")
            self.assertEqual(spark.sparkContext._conf.get("some-config"), "v2")
            self.assertEqual(spark.version, spark.sparkContext.version)
            spark.sql("CREATE DATABASE test_db")
            spark.catalog.setCurrentDatabase("test_db")
            self.assertEqual(spark.catalog.currentDatabase(), "test_db")
            spark.sql("CREATE TABLE table1 (name STRING, age INT) USING parquet")
            self.assertEqual(spark.table("table1").columns, ['name', 'age'])
            self.assertEqual(spark.range(3).count(), 3)
        finally:
            # Clean up catalog state so later tests start from scratch.
            spark.sql("DROP DATABASE test_db CASCADE")
            spark.stop()

    def test_global_default_session(self):
        spark = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        try:
            self.assertEqual(SparkSession.builder.getOrCreate(), spark)
        finally:
            spark.stop()

    def test_default_and_active_session(self):
        spark = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        activeSession = spark._jvm.SparkSession.getActiveSession()
        defaultSession = spark._jvm.SparkSession.getDefaultSession()
        try:
            self.assertEqual(activeSession, defaultSession)
        finally:
            spark.stop()

    def test_config_option_propagated_to_existing_session(self):
        session1 = SparkSession.builder \
            .master("local") \
            .config("spark-config1", "a") \
            .getOrCreate()
        self.assertEqual(session1.conf.get("spark-config1"), "a")
        session2 = SparkSession.builder \
            .config("spark-config1", "b") \
            .getOrCreate()
        try:
            # getOrCreate() returns the existing session but applies the
            # new config value to it.
            self.assertEqual(session1, session2)
            self.assertEqual(session1.conf.get("spark-config1"), "b")
        finally:
            session1.stop()

    def test_new_session(self):
        session = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        newSession = session.newSession()
        try:
            self.assertNotEqual(session, newSession)
        finally:
            session.stop()
            newSession.stop()

    def test_create_new_session_if_old_session_stopped(self):
        session = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        session.stop()
        newSession = SparkSession.builder \
            .master("local") \
            .getOrCreate()
        try:
            self.assertNotEqual(session, newSession)
        finally:
            newSession.stop()

    def test_active_session_with_None_and_not_None_context(self):
        from pyspark.context import SparkContext
        from pyspark.conf import SparkConf
        sc = None
        session = None
        try:
            # No context yet -> no active session on either side.
            sc = SparkContext._active_spark_context
            self.assertEqual(sc, None)
            activeSession = SparkSession.getActiveSession()
            self.assertEqual(activeSession, None)
            sparkConf = SparkConf()
            sc = SparkContext.getOrCreate(sparkConf)
            # A bare SparkContext does not create a session...
            activeSession = sc._jvm.SparkSession.getActiveSession()
            self.assertFalse(activeSession.isDefined())
            session = SparkSession(sc)
            # ...but constructing SparkSession over it makes one active.
            activeSession = sc._jvm.SparkSession.getActiveSession()
            self.assertTrue(activeSession.isDefined())
            activeSession2 = SparkSession.getActiveSession()
            self.assertNotEqual(activeSession2, None)
        finally:
            if session is not None:
                session.stop()
            if sc is not None:
                sc.stop()
class SparkSessionTests4(ReusedSQLTestCase):
    """Checks that createDataFrame() makes its own session the active one."""

    def test_get_active_session_after_create_dataframe(self):
        session2 = None
        try:
            activeSession1 = SparkSession.getActiveSession()
            session1 = self.spark
            self.assertEqual(session1, activeSession1)
            session2 = self.spark.newSession()
            # A newly forked session is NOT active until it does work.
            activeSession2 = SparkSession.getActiveSession()
            self.assertEqual(session1, activeSession2)
            self.assertNotEqual(session2, activeSession2)
            # createDataFrame() on a session promotes it to active.
            session2.createDataFrame([(1, 'Alice')], ['age', 'name'])
            activeSession3 = SparkSession.getActiveSession()
            self.assertEqual(session2, activeSession3)
            session1.createDataFrame([(1, 'Alice')], ['age', 'name'])
            activeSession4 = SparkSession.getActiveSession()
            self.assertEqual(session1, activeSession4)
        finally:
            if session2 is not None:
                session2.stop()
class SparkSessionBuilderTests(unittest.TestCase):
    """Builder interaction with pre-existing SparkContexts and sessions."""

    def test_create_spark_context_first_then_spark_session(self):
        sc = None
        session = None
        try:
            conf = SparkConf().set("key1", "value1")
            sc = SparkContext('local[4]', "SessionBuilderTests", conf=conf)
            session = SparkSession.builder.config("key2", "value2").getOrCreate()
            # Session sees both the context's conf and its own builder conf;
            # the context itself must NOT pick up session-only config.
            self.assertEqual(session.conf.get("key1"), "value1")
            self.assertEqual(session.conf.get("key2"), "value2")
            self.assertEqual(session.sparkContext, sc)
            self.assertFalse(sc.getConf().contains("key2"))
            self.assertEqual(sc.getConf().get("key1"), "value1")
        finally:
            if session is not None:
                session.stop()
            if sc is not None:
                sc.stop()

    def test_another_spark_session(self):
        session1 = None
        session2 = None
        try:
            session1 = SparkSession.builder.config("key1", "value1").getOrCreate()
            session2 = SparkSession.builder.config("key2", "value2").getOrCreate()
            # Both getOrCreate() calls return the same underlying session,
            # so config from either builder is visible to both handles.
            self.assertEqual(session1.conf.get("key1"), "value1")
            self.assertEqual(session2.conf.get("key1"), "value1")
            self.assertEqual(session1.conf.get("key2"), "value2")
            self.assertEqual(session2.conf.get("key2"), "value2")
            self.assertEqual(session1.sparkContext, session2.sparkContext)
            self.assertEqual(session1.sparkContext.getConf().get("key1"), "value1")
            self.assertFalse(session1.sparkContext.getConf().contains("key2"))
        finally:
            if session1 is not None:
                session1.stop()
            if session2 is not None:
                session2.stop()
class SparkExtensionsTest(unittest.TestCase):
    """Tests the 'spark.sql.extensions' injection point.

    These tests are separate because it uses 'spark.sql.extensions' which is
    static and immutable. This can't be set or unset, for example, via
    `spark.conf`.
    """

    @classmethod
    def setUpClass(cls):
        import glob
        from pyspark.find_spark_home import _find_spark_home
        SPARK_HOME = _find_spark_home()
        # The extension classes live in Scala test-classes; skip if that
        # build output is absent.
        filename_pattern = (
            "sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
            "SparkSessionExtensionSuite.class")
        if not glob.glob(os.path.join(SPARK_HOME, filename_pattern)):
            raise unittest.SkipTest(
                "'org.apache.spark.sql.SparkSessionExtensionSuite' is not "
                "available. Will skip the related tests.")
        # Note that 'spark.sql.extensions' is a static immutable configuration.
        cls.spark = SparkSession.builder \
            .master("local[4]") \
            .appName(cls.__name__) \
            .config(
                "spark.sql.extensions",
                "org.apache.spark.sql.MyExtensions") \
            .getOrCreate()

    @classmethod
    def tearDownClass(cls):
        cls.spark.stop()

    def test_use_custom_class_for_extensions(self):
        # The extension must have injected both a planner strategy and an
        # analyzer resolution rule into the session state.
        self.assertTrue(
            self.spark._jsparkSession.sessionState().planner().strategies().contains(
                self.spark._jvm.org.apache.spark.sql.MySparkStrategy(self.spark._jsparkSession)),
            "MySparkStrategy not found in active planner strategies")
        self.assertTrue(
            self.spark._jsparkSession.sessionState().analyzer().extendedResolutionRules().contains(
                self.spark._jvm.org.apache.spark.sql.MyRule(self.spark._jsparkSession)),
            "MyRule not found in extended resolution rules")
if __name__ == "__main__":
    # Re-import under the canonical module path so unittest discovery
    # picks the tests up with their proper names.
    from pyspark.sql.tests.test_session import *
    try:
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # xmlrunner is optional; fall back to the default text runner.
        runner = None
    unittest.main(testRunner=runner, verbosity=2)
| |
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal, require_true
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from struct_offsets import VulkanStruct, UINT32_T, ARRAY, UINT64_T, POINTER, HANDLE
from vulkan_constants import *
# Field layout of VkEventCreateInfo, used to decode the captured
# pCreateInfo memory in GetCreatedEvent().
EVENT_CREATE_INFO = [
    ("sType", UINT32_T), ("pNext", POINTER), ("flags", UINT32_T)
]
# Size in bytes of the payload copied between buffers in these tests.
BUFFER_SIZE = 4
def GetCreatedEvent(architecture, create_event):
    """Validate a captured vkCreateEvent call and return the VkEvent handle.

    Checks that the call succeeded with sane arguments, decodes the
    VkEventCreateInfo it was given, and reads back the handle written to
    pEvent.
    """
    require_equal(VK_SUCCESS, int(create_event.return_val))
    require_not_equal(0, create_event.int_device)
    require_not_equal(0, create_event.hex_pCreateInfo)
    require_not_equal(0, create_event.hex_pEvent)
    info = VulkanStruct(
        architecture, EVENT_CREATE_INFO,
        get_read_offset_function(create_event, create_event.hex_pCreateInfo))
    require_equal(VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, info.sType)
    require_equal(0, info.pNext)
    require_equal(0, info.flags)
    raw_handle = require(create_event.get_write_data(
        create_event.hex_pEvent, NON_DISPATCHABLE_HANDLE_SIZE))
    return little_endian_bytes_to_int(raw_handle)
def GetMappedLocation(test):
    """Advance past the third vkDestroyBuffer, validate the following
    vkMapMemory call, and return the mapped pointer it wrote to ppData."""
    require(test.nth_call_of("vkDestroyBuffer", 3))
    mapping = require(test.next_call_of("vkMapMemory"))
    require_equal(VK_SUCCESS, int(mapping.return_val))
    require_not_equal(0, mapping.int_device)
    require_not_equal(0, mapping.int_memory)
    # All vkMapMemory called in this test has offset 0.
    require_equal(0, mapping.int_offset)
    require_true(mapping.int_size >= BUFFER_SIZE)
    require_equal(0, mapping.int_flags)
    require_not_equal(0, mapping.hex_ppData)
    raw_ptr = require(mapping.get_write_data(
        mapping.hex_ppData, test.architecture.int_pointerSize))
    return little_endian_bytes_to_int(raw_ptr)
def GetStart(test, index):
    """Skip forward in the trace past `index` marker calls.

    The test binary emits vkGetDeviceProcAddr("TAG") as a sentinel between
    test sections; consume sentinels until `index` of them have passed.
    """
    remaining = index
    while remaining > 0:
        call = require(test.next_call_of("vkGetDeviceProcAddr"))
        if call.pName == "TAG":
            remaining -= 1
@gapit_test("Event_test")
class BasicHostSideCommandTest(GapitTest):
    """Validates the host-side event API: create, get-status, set, reset,
    destroy, and the expected status value after each transition."""

    def expect(self):
        architecture = self.architecture
        GetStart(self, 1)
        # Capture the whole call sequence first, then validate each call.
        create_event = require(self.next_call_of("vkCreateEvent"))
        device = create_event.int_device
        first_status = require(self.next_call_of("vkGetEventStatus"))
        set_event = require(self.next_call_of("vkSetEvent"))
        second_status = require(self.next_call_of("vkGetEventStatus"))
        reset_event = require(self.next_call_of("vkResetEvent"))
        third_status = require(self.next_call_of("vkGetEventStatus"))
        destroy_event = require(self.next_call_of("vkDestroyEvent"))
        # Check vkCreateEvent
        event = GetCreatedEvent(architecture, create_event)
        require_not_equal(0, event)
        # Check the first status
        require_equal(device, first_status.int_device)
        require_equal(event, first_status.int_event)
        require_equal(VK_EVENT_RESET, int(first_status.return_val))
        # Check the vkSetEvent
        require_equal(device, set_event.int_device)
        require_equal(event, set_event.int_event)
        require_equal(VK_SUCCESS, int(set_event.return_val))
        # Check the second status
        require_equal(device, second_status.int_device)
        require_equal(event, second_status.int_event)
        require_equal(VK_EVENT_SET, int(second_status.return_val))
        # Check the vkResetEvent
        require_equal(device, reset_event.int_device)
        require_equal(event, reset_event.int_event)
        require_equal(VK_SUCCESS, int(reset_event.return_val))
        # Check the third status
        require_equal(device, third_status.int_device)
        require_equal(event, third_status.int_event)
        require_equal(VK_EVENT_RESET, int(third_status.return_val))
        # Check the vkDestroyEvent
        require_equal(device, destroy_event.int_device)
        require_equal(event, destroy_event.int_event)
@gapit_test("Event_test")
class SingleThreadTest(GapitTest):
    """Single-threaded event scenarios: each section submits command
    buffers that wait on events, sets the events, and verifies the data
    copied into the mapped buffer once the wait was satisfied."""

    def expect(self):
        # pData is the host-visible mapped pointer the copies land in.
        pData = GetMappedLocation(self)
        GetStart(self, 2)
        require(self.next_call_of("vkAllocateCommandBuffers"))
        require(self.next_call_of("vkCreateBuffer"))
        require(self.next_call_of("vkCreateBuffer"))
        require(self.next_call_of("vkCreateEvent"))
        require(self.next_call_of("vkCreateEvent"))
        # submit -> update -> set -> wait idle
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        set_event = require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x11111111, little_endian_bytes_to_int(require(
            set_event.get_read_data(pData, BUFFER_SIZE))))
        # update -> set -> submit -> wait idle
        require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkCmdWaitEvents"))
        submit = require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x22222222, little_endian_bytes_to_int(require(
            submit.get_read_data(pData, BUFFER_SIZE))))
        # submit [cmdSetEvent (multiple), ... cmdWaitEvents]
        require(self.next_call_of("vkCmdSetEvent"))
        require(self.next_call_of("vkCmdSetEvent"))
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        submit = require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x33333333, little_endian_bytes_to_int(require(
            submit.get_read_data(pData, BUFFER_SIZE))))
        # submit [cmdSetEvent] -> submit [cmdWaitEvents]
        require(self.next_call_of("vkCmdSetEvent"))
        require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        submit = require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x44444444, little_endian_bytes_to_int(require(
            submit.get_read_data(pData, BUFFER_SIZE))))
@gapit_test("Event_test")
class MultipleThreadTest(GapitTest):
    """Cross-thread event scenarios: thread 1 submits work that waits on
    events while thread 2 sets them; each section verifies the payload
    copied into the mapped buffer after the wait resolved."""

    def expect(self):
        # pData is the host-visible mapped pointer the copies land in.
        pData = GetMappedLocation(self)
        GetStart(self, 3)
        require(self.next_call_of("vkAllocateCommandBuffers"))
        require(self.next_call_of("vkCreateBuffer"))
        require(self.next_call_of("vkCreateBuffer"))
        require(self.next_call_of("vkCreateEvent"))
        require(self.next_call_of("vkCreateEvent"))
        # Thread 1: submit [vkCmdWaitEvents] -> -> queue wait idle
        # Thread 2: setEvent
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        set_event = require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x11111111, little_endian_bytes_to_int(require(
            set_event.get_read_data(pData, BUFFER_SIZE))))
        # Thread 1: submit [vkCmdWaitEvents (multiple events)] -> -> queue idle
        # Thread 2: setEvent(s)
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkSetEvent"))
        second_set_event = require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x33333333, little_endian_bytes_to_int(require(
            second_set_event.get_read_data(pData, BUFFER_SIZE))))
        # Thread 1: submit [wait, reset, semaphore, fence]-> submit [wait]-> idle
        # Thread 2: setEvent -> setEvent
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkCmdResetEvent"))
        require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        first_set = require(self.next_call_of("vkSetEvent"))
        second_set = require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x44444444, little_endian_bytes_to_int(require(
            first_set.get_read_data(pData, BUFFER_SIZE))))
        require_equal(0x55555555, little_endian_bytes_to_int(require(
            second_set.get_read_data(pData, BUFFER_SIZE))))
        # Thread 1: submit [wait x, wait y] -> idle
        # Thread 2: set y -> set x
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkSetEvent"))
        second_set = require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        require(self.next_call_of("vkResetEvent"))
        require(self.next_call_of("vkResetEvent"))
        require_equal(0x77777777, little_endian_bytes_to_int(require(
            second_set.get_read_data(pData, BUFFER_SIZE))))
def GetCreatedBuffer(create_buffer):
    """Validate a captured vkCreateBuffer call; return the VkBuffer handle."""
    require_equal(VK_SUCCESS, int(create_buffer.return_val))
    require_not_equal(0, create_buffer.int_device)
    require_not_equal(0, create_buffer.hex_pCreateInfo)
    require_not_equal(0, create_buffer.hex_pBuffer)
    raw_handle = require(create_buffer.get_write_data(
        create_buffer.hex_pBuffer, NON_DISPATCHABLE_HANDLE_SIZE))
    return little_endian_bytes_to_int(raw_handle)
def GetCreatedImage(create_image):
    """Validate a captured vkCreateImage call; return the VkImage handle."""
    require_equal(VK_SUCCESS, int(create_image.return_val))
    require_not_equal(0, create_image.int_device)
    require_not_equal(0, create_image.hex_pCreateInfo)
    require_not_equal(0, create_image.hex_pImage)
    raw_handle = require(create_image.get_write_data(
        create_image.hex_pImage, NON_DISPATCHABLE_HANDLE_SIZE))
    return little_endian_bytes_to_int(raw_handle)
@gapit_test("Event_test")
class MemoryBarrierTest(GapitTest):
    """Decodes and validates the barrier arrays passed to vkCmdWaitEvents:
    one global VkMemoryBarrier, two VkBufferMemoryBarriers (dst then src
    buffer), and one VkImageMemoryBarrier."""

    def expect(self):
        architecture = self.architecture
        GetStart(self, 4)
        require(self.next_call_of("vkAllocateCommandBuffers"))
        # Creation order in the trace: src buffer, dst buffer, image, event.
        src_buf = GetCreatedBuffer(
            require(self.next_call_of("vkCreateBuffer")))
        dst_buf = GetCreatedBuffer(
            require(self.next_call_of("vkCreateBuffer")))
        img = GetCreatedImage(require(self.next_call_of("vkCreateImage")))
        event = GetCreatedEvent(architecture,
                                require(self.next_call_of("vkCreateEvent")))
        wait_events = require(self.next_call_of("vkCmdWaitEvents"))
        require(self.next_call_of("vkCmdCopyBuffer"))
        require(self.next_call_of("vkQueueSubmit"))
        require(self.next_call_of("vkSetEvent"))
        require(self.next_call_of("vkQueueWaitIdle"))
        # Check vkCmdWaitEvents
        require_equal(1, wait_events.int_eventCount)
        require_not_equal(0, wait_events.hex_pEvents)
        require_equal(
            event,
            little_endian_bytes_to_int(require(wait_events.get_read_data(
                wait_events.hex_pEvents, NON_DISPATCHABLE_HANDLE_SIZE))))
        require_equal(VK_PIPELINE_STAGE_HOST_BIT, wait_events.int_srcStageMask)
        require_equal(VK_PIPELINE_STAGE_TRANSFER_BIT,
                      wait_events.int_dstStageMask)
        # Global memory barrier: host write -> transfer/color-attachment write.
        require_equal(1, wait_events.int_memoryBarrierCount)
        memory_barrier = VulkanStruct(architecture, [
            ("sType", UINT32_T),
            ("pNext", POINTER),
            ("srcAccessMask", UINT32_T),
            ("dstAccessMask", UINT32_T),
        ], get_read_offset_function(wait_events, wait_events.hex_pMemoryBarriers))
        require_equal(VK_STRUCTURE_TYPE_MEMORY_BARRIER, memory_barrier.sType)
        require_equal(0, memory_barrier.pNext)
        require_equal(VK_ACCESS_HOST_WRITE_BIT, memory_barrier.srcAccessMask)
        require_equal(VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                      memory_barrier.dstAccessMask)
        # VkBufferMemoryBarrier field layout (shared by both barriers below).
        BUFFER_MEMORY_BARRIER = [
            ("sType", UINT32_T),
            ("pNext", POINTER),
            ("srcAccessMask", UINT32_T),
            ("dstAccessMask", UINT32_T),
            ("srcQueueFamilyIndex", UINT32_T),
            ("dstQueueFamilyIndex", UINT32_T),
            ("buffer", HANDLE),
            ("offset", UINT64_T),
            ("size", UINT64_T),
        ]
        require_equal(2, wait_events.int_bufferMemoryBarrierCount)
        # First buffer barrier targets the destination buffer.
        first_buffer_barrier = VulkanStruct(
            architecture, BUFFER_MEMORY_BARRIER, get_read_offset_function(
                wait_events, wait_events.hex_pBufferMemoryBarriers))
        require_equal(VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                      first_buffer_barrier.sType)
        require_equal(0, first_buffer_barrier.pNext)
        require_equal(0, first_buffer_barrier.srcAccessMask)
        require_equal(VK_ACCESS_TRANSFER_WRITE_BIT,
                      first_buffer_barrier.dstAccessMask)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      first_buffer_barrier.srcQueueFamilyIndex)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      first_buffer_barrier.dstQueueFamilyIndex)
        require_equal(dst_buf, first_buffer_barrier.buffer)
        require_equal(0, first_buffer_barrier.offset)
        require_equal(BUFFER_SIZE, first_buffer_barrier.size)
        # Second buffer barrier (offset by the first struct's size) targets
        # the source buffer.
        second_buffer_barrier = VulkanStruct(
            architecture, BUFFER_MEMORY_BARRIER, get_read_offset_function(
                wait_events, wait_events.hex_pBufferMemoryBarriers +
                first_buffer_barrier.total_size))
        require_equal(VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                      second_buffer_barrier.sType)
        require_equal(0, second_buffer_barrier.pNext)
        require_equal(VK_ACCESS_HOST_WRITE_BIT,
                      second_buffer_barrier.srcAccessMask)
        require_equal(VK_ACCESS_TRANSFER_READ_BIT,
                      second_buffer_barrier.dstAccessMask)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      second_buffer_barrier.srcQueueFamilyIndex)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      second_buffer_barrier.dstQueueFamilyIndex)
        require_equal(src_buf, second_buffer_barrier.buffer)
        require_equal(0, second_buffer_barrier.offset)
        require_equal(BUFFER_SIZE, second_buffer_barrier.size)
        # Image barrier: UNDEFINED -> TRANSFER_SRC_OPTIMAL on the color aspect.
        require_equal(1, wait_events.int_imageMemoryBarrierCount)
        image_barrier = VulkanStruct(architecture, [
            ("sType", UINT32_T),
            ("pNext", POINTER),
            ("srcAccessMask", UINT32_T),
            ("dstAccessMask", UINT32_T),
            ("oldLayout", UINT32_T),
            ("newLayout", UINT32_T),
            ("srcQueueFamilyIndex", UINT32_T),
            ("dstQueueFamilyIndex", UINT32_T),
            ("image", HANDLE),
            ("subresourceRange_aspectMask", UINT32_T),
            ("subresourceRange_baseMipLevel", UINT32_T),
            ("subresourceRange_levelCount", UINT32_T),
            ("subresourceRange_baseArrayLayer", UINT32_T),
            ("subresourceRange_layerCount", UINT32_T),
        ], get_read_offset_function(wait_events,
                                    wait_events.hex_pImageMemoryBarriers))
        require_equal(VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                      image_barrier.sType)
        require_equal(0, image_barrier.pNext)
        require_equal(0, image_barrier.srcAccessMask)
        require_equal(VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                      image_barrier.dstAccessMask)
        require_equal(VK_IMAGE_LAYOUT_UNDEFINED, image_barrier.oldLayout)
        require_equal(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                      image_barrier.newLayout)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      image_barrier.srcQueueFamilyIndex)
        require_equal(VK_QUEUE_FAMILY_IGNORED,
                      image_barrier.dstQueueFamilyIndex)
        require_equal(img, image_barrier.image)
        require_equal(VK_IMAGE_ASPECT_COLOR_BIT,
                      image_barrier.subresourceRange_aspectMask)
        require_equal(0, image_barrier.subresourceRange_baseMipLevel)
        require_equal(1, image_barrier.subresourceRange_levelCount)
        require_equal(0, image_barrier.subresourceRange_baseArrayLayer)
        require_equal(1, image_barrier.subresourceRange_layerCount)
| |
from __future__ import print_function
import sys
import os
from getopt import getopt
try:
import czipfile as zipfile
except ImportError:
import zipfile
import reststore
from reststore import config
def command_web():
    """Start the reststore RESTful web application.

    The webapp import is deliberately local so the web stack is only
    loaded when this command is used.

    :returns: 0 once the server loop exits
    """
    from reststore import webapp
    webapp.run()
    return 0
def command_get(FilesClass, hexdigest):
    """Print the filepath stored under hexdigest.

    :param FilesClass: factory producing the file store (mapping-like)
    :param hexdigest: digest of the content to look up
    :returns: 0 on success, -1 if the digest is unknown
    """
    store = FilesClass()
    try:
        path = store[hexdigest]
    except KeyError:
        print("Could not find a file for %s..." % hexdigest, file=sys.stderr)
        return -1
    print(path)
    return 0
def command_read(FilesClass, hexdigest, outfile=sys.stdout):
    """Write the bytes of the file stored under hexdigest to outfile.

    :param FilesClass: factory producing the file store (mapping-like)
    :param hexdigest: digest of the content to look up
    :param outfile: writable object receiving the file bytes
    :returns: 0 on success, -1 if the digest is unknown
    """
    store = FilesClass()
    try:
        # Only the store lookup can raise KeyError; open/read errors
        # propagate as before.
        path = store[hexdigest]
    except KeyError:
        print("Could not find a file for %s..." % hexdigest, file=sys.stderr)
        return -1
    with open(path, 'rb') as f:
        outfile.write(f.read())
    return 0
def command_put(FilesClass, filepaths):
    """Store the contents of each file and print "<hexdigest>: <path>".

    :param FilesClass: factory producing the file store
    :param filepaths: iterable of paths to load into the store
    :returns: 0 on success, -1 on the first unreadable file

    Fix: the store was previously instantiated once PER FILE inside the
    loop; it is now created lazily, exactly once. Lazy creation preserves
    the old behavior of not constructing a store when the list is empty
    or the first file is unreadable.
    """
    fs = None
    for filepath in filepaths:
        try:
            with open(filepath, 'rb') as f:
                data = f.read()
        except Exception as exc:
            print("Failed to read file %s - %s" % (filepath, exc),
                  file=sys.stderr)
            return -1
        if fs is None:
            fs = FilesClass()
        hexdigest = fs.put(data)
        print("%s: %s" % (hexdigest, filepath))
    return 0
def command_unzip(FilesClass, filepath, password=None, flush_every=1000):
    """Add files from the zip file at filepath.

    :param FilesClass: factory producing the file store
    :param filepath: path to the zip archive to import
    :param password: optional archive password
    :param flush_every: number of members buffered before a bulk flush
    :returns: 0 on success (fix: previously fell off the end returning
        None, inconsistent with every other command_* helper)
    :raises TypeError: if filepath is not a zip archive
    """
    if not zipfile.is_zipfile(filepath):
        raise TypeError("Not a zipfile %s" % filepath)
    fs = FilesClass()
    zf = zipfile.ZipFile(filepath)
    if password is not None:
        zf.setpassword(password)
    datalen = 0
    for i, name in enumerate(zf.namelist()):
        data = zf.read(name, pwd=password)
        datalen += len(data)
        hexdigest = fs.bulk_put(data)
        print("%s: %s" % (hexdigest, name))
        # NOTE: also triggers on the very first member (i == 0), as before.
        if i % flush_every == 0:
            print("flush %s bytes of data..." % datalen)
            txdatalen = fs.bulk_flush()
            print("sent %s bytes of compressed data" % txdatalen)
    print("flush ...")
    fs.bulk_flush()
    return 0
def command_list(FilesClass, select_from=0, select_to=-1):
    """Print each hexdigest in the selected chronological range.

    :param select_from: first index of the selection (0 = oldest)
    :param select_to: last index of the selection (-1 = newest)
    :returns: 0
    """
    store = FilesClass()
    for digest in store.select(select_from, select_to):
        print(digest)
    return 0
def command_len(FilesClass):
    """Print how many files the store currently holds.

    :returns: 0
    """
    store = FilesClass()
    print(len(store))
    return 0
# Flatten config.values (interface -> {key: value}) into a single
# "<interface>_<key>" -> value mapping, used to interpolate the
# %(webapp_*)s / %(files_*)s / %(client_*)s placeholders in __help__ below.
defaults = {}
for interface, kwargs in config.values.items():
    c = {"%s_%s" % (interface, key) : value for key, value in kwargs.items()}
    defaults.update(c)
__help__ = """
NAME reststore - control over the reststore
SYNOPSIS
reststore [COMMAND]
Commands:
get [FILE-OPTIONS] [HEXDIGEST]
Return a filepath to the data behind hexdigest.
arguments
HEXDIGEST of the data to lookup in reststore.
read [FILE-OPTIONS] [HEXDIGEST] > stdout
Attempt to retrieve a file and write it out to stdout. A check is
made in the local reststore first, if the file is in available, an
attempt to read the file from the web reststore is made.
arguments
HEXDIGEST of the data to lookup in reststore.
put [FILE-OPTIONS] FILEPATH(s)
Put a file into the reststore.
arguments
Path(s) of files to be loaded into the reststore.
unzip [OPTIONS FILE-OPTIONS] ZIPFILE
Extra files from a zipfile straight into the reststore.
arguments
A path to the zip file to extract into the reststore.
options
--password=
Define a password for unzipping the zip file.
--flush=1000
Number of files to read into memory before flushing through
to the reststore.
list [OPTIONS FILE-OPTIONS]
list out hexdigests found in the reststore.
options
--select=[A:B]
List all of the hashes between A:B. Hashes are stored
chronologically. 0 is the first file inserted, -1 is the last
file inserted. i.e. select the last 1000 hexdigests -1001:-1
len [FILE-OPTIONS]
print out the number of files stored in the reststore.
web [OPTIONS FILE-OPTIONS] [[HOST:][PORT]]
Run the RESTful web app.
arguments
HOST:PORT defaults to %(webapp_host)s:%(webapp_port)s
options
--server=%(webapp_server)s
Choose the server adapter to use.
--debug=%(webapp_debug)s
Run in debug mode.
--quiet=%(webapp_quiet)s
Run in quite mode.
--proxy_requests=%(webapp_proxy_requests)s
If True, this web app will proxy requests through to
the authoritative server defined by the client uri.
File options:
--name=%(files_name)s
Set the default reststore name (i.e. domain or realm)
--hash_function=%(files_hash_function)s
Set the hash function to be used
--tune_size=%(files_tune_size)s
Set the approximate size the reststore may grow up to.
--root=%(files_root)s
Set the root for the reststore.
--assert_data_ok=%(files_assert_data_ok)s
Do extra checks when reading and writing data.
--weboff
This flag forces access to a local repository only.
--uri=%(client_uri)s
The uri to the upstream reststore web server.
""" % defaults
def main(args):
    """Command-line entry point for the reststore tool.

    Parses global options into the config dicts, then dispatches to the
    command_* helper named by the first positional argument.

    :param args: argument list (typically sys.argv[1:]); mutated in place
    :returns: process exit status — 0 on success, -1 on error

    Fixes: '--quiet' previously wrote to the misspelled config key
    'quite', so the option never took effect; the port-conversion error
    message now goes to stderr like every other error path.
    """
    if not args:
        print("No arguments provided", file=sys.stderr)
        return -1
    if '-h' in args or '--help' in args:
        print(__help__)
        return 0
    command = args.pop(0)
    try:
        opts, args = getopt(args, '', [
            'server=', 'debug=', 'quiet=', 'proxy_requests=',
            'name=', 'hash_function=', 'tune_size=', 'root=', 'assert_data_ok=',
            'uri=',
            'password=', 'flush=',
            'select=',
            'weboff',
        ])
    except Exception as exc:
        print("Getopt error: %s" % (exc), file=sys.stderr)
        return -1
    webapp_config = config.values['webapp']
    files_config = config.values['files']
    client_config = config.values['client']
    list_command = dict()
    unzip_command = dict()
    # Default to the networked client; --weboff downgrades to local-only.
    FilesClass = reststore.FilesClient
    for opt, arg in opts:
        if opt in ['--server']:
            webapp_config['server'] = arg
        elif opt in ['--quiet']:
            # BUGFIX: was stored under the misspelled key 'quite', so the
            # real 'quiet' setting was never overridden by this option.
            webapp_config['quiet'] = arg.lower() != 'false'
        elif opt in ['--debug']:
            webapp_config['debug'] = arg.lower() != 'false'
        elif opt in ['--proxy_requests']:
            webapp_config['proxy_requests'] = arg.lower() != 'false'
        elif opt in ['--name']:
            files_config['name'] = arg
        elif opt in ['--hash_function']:
            files_config['hash_function'] = arg
        elif opt in ['--tune_size']:
            try:
                files_config['tune_size'] = int(arg)
            except ValueError:
                print("%s is not a valid int" % arg, file=sys.stderr)
                return -1
        elif opt in ['--root']:
            files_config['root'] = arg
        elif opt in ['--assert_data_ok']:
            files_config['assert_data_ok'] = arg.lower() != 'false'
        elif opt in ['--password']:
            unzip_command['password'] = arg
        elif opt in ['--flush']:
            try:
                unzip_command['flush_every'] = int(arg)
            except ValueError as err:
                print("Failed to convert int for %s" % arg, file=sys.stderr)
                return -1
        elif opt in ['--select']:
            # "A:B" with either side optional; empty A -> 0, empty B -> -1.
            try:
                a, b = arg.split(':')
                if not a:
                    a = 0
                if not b:
                    b = -1
            except Exception as err:
                print("Failed to split select range %s" % arg, file=sys.stderr)
                return -1
            try:
                list_command = dict(select_from=int(a),
                                    select_to=int(b))
            except ValueError as err:
                print("Failed to convert int for %s" % arg, file=sys.stderr)
                return -1
        elif opt in ['--uri']:
            client_config['uri'] = arg
        elif opt in ['--weboff']:
            FilesClass = reststore.Files
    if command == 'web':
        if args:
            # Optional positional "[HOST:][PORT]" overrides config values.
            hostport = args[0]
            host = webapp_config['host']
            port = webapp_config['port']
            if ':' in hostport:
                host, p = hostport.split(':')
                # may not have a port value
                if p:
                    port = p
            else:
                port = hostport
            try:
                port = int(port)
            except ValueError:
                # Consistency fix: error messages go to stderr.
                print("failed to convert port to int (%s)" % port,
                      file=sys.stderr)
                return -1
            webapp_config['host'] = host
            webapp_config['port'] = port
        return command_web()
    elif command == 'get':
        hexdigest = args[0]
        return command_get(FilesClass, hexdigest)
    elif command == 'read':
        hexdigest = args[0]
        return command_read(FilesClass, hexdigest)
    elif command == 'put':
        filepaths = args
        return command_put(FilesClass, filepaths)
    elif command == 'unzip':
        filepath = args[0]
        return command_unzip(FilesClass, filepath, **unzip_command)
    elif command == 'list':
        return command_list(FilesClass, **list_command)
    elif command == 'len':
        return command_len(FilesClass)
    else:
        print("%s is not a valid command " % command, file=sys.stderr)
        return -1
def entry():
    """Console-script entry point: run main() over the CLI arguments."""
    return main(sys.argv[1:])

if __name__ == "__main__":
    sys.exit(entry())
| |
"""Provides a common component interface.
Filters should subclass this module and implement
the run() method.
"""
import stackless # pylint: disable=F0401
from pypes.scheduler import connect_graph_components
class Component(object):
    """Provides methods common to all filters.

    Anyone building a custom filter object should subclass this module
    and implement their own run() method.

    Keep in mind that filters are stackless.tasklets and the run method
    should yield rather than return.
    """

    # Framework classification tag; subclasses override it
    # (e.g. "TRANSFORMER" in HigherOrderComponent).
    __metatype__ = None

    def __init__(self):
        """Class constructor

        Provides default input of 'in' and output of 'out'.
        """
        # Each port table maps name -> [edge, description]; the edge slot
        # stays None until connect_input/connect_output attaches a pype.
        self._inputs = {'in': [None, 'Default input port']}
        self._outputs = {'out': [None, 'Default output port']}
        self._parameters = {}

    def run(self):
        """Starts this component as a stackless tasklet

        This method is meant to be overridden in derived subclass.
        The subclass should implement its own logic.
        """
        raise NotImplementedError

    def yield_ctrl(self):
        """Causes this tasklet to relinquish control of the
        CPU to allow another tasklet to run. This tasklet is
        re-scheduled to run again.

        @return: Nothing
        """
        stackless.schedule()

    def add_input(self, name, desc=None):
        """Adds a new input port to this component.

        @param name: The string used to represent this port
        @type name: String
        @keyword desc: An optional description of what this port is used for.
        @note: Although desc is optional, it is considered good practice
        to provide a brief description.
        @return: Boolean - True if added, False if the port already existed
        """
        status = False
        if name not in self._inputs:
            self._inputs[name] = [None, desc]
            status = True
        return status

    def remove_input(self, name):
        """Removes the given port from this components
        list of available input ports.

        @param name: The string used to represent this port
        @type name: String
        @return: Boolean - True if removed, False if the port did not exist
        """
        status = False
        if name in self._inputs:
            self._inputs.pop(name)
            status = True
        return status

    def add_output(self, name, desc=None):
        """Adds a new output port to this component.

        @param name: The string used to represent this port
        @type name: String
        @keyword desc: An optional description of what this port is used for.
        @note: Although desc is optional, it is considered good practice
        to provide a brief description.
        @return: Boolean - True if added, False if the port already existed
        """
        status = False
        if name not in self._outputs:
            self._outputs[name] = [None, desc]
            status = True
        return status

    def remove_output(self, name):
        """Removes the given port from this
        components list of available output ports.

        @param name: The string used to represent this port
        @type name: String
        @return: Boolean - True if removed, False if the port did not exist
        """
        status = False
        if name in self._outputs:
            self._outputs.pop(name)
            status = True
        return status

    def connect_input(self, name, edge):
        """Connects a edge (pype) to the specified input port of this
        component.

        This only represents half of an actual connection between two nodes.
        Typically, one side of the edge is connected to the output of one
        node while the other side is connected to the input of another node.

        @see: L{connect_output}
        @param name: The string used to represent this port
        @type name: String
        @param edge: The edge you would like to connect
        @type edge: L{Pype}
        @return: Nothing
        @todo: Need to raise custom exception when trying to connect
        a non-existent port.
        """
        # Narrowed from a bare except: only a missing port name is expected.
        try:
            item = self._inputs[name]
        except KeyError:
            print('Input does not exist')
        else:
            item[0] = edge
            self._inputs[name] = item

    def connect_output(self, name, edge):
        """Connects a edge (pype) to the specified output port of this
        component.

        This only represents half of an actual connection between two nodes.
        Typically, one side of the edge is connected to the output of one
        node while the other side is connected to the input of another node.

        @see: L{connect_input}
        @param name: The string used to represent this port
        @type name: String
        @param edge: The edge you would like to connect
        @type edge: L{Pype}
        @return: Nothing
        @todo: Need to raise custom exception when trying to connect
        a non-existent port.
        """
        # Narrowed from a bare except: only a missing port name is expected.
        try:
            item = self._outputs[name]
        except KeyError:
            print('Output does not exist')
        else:
            item[0] = edge
            self._outputs[name] = item

    def is_connected(self, name):
        """Returns True if the specified port is connected to an edge.

        @param name: The port being referenced
        @type name: String
        @return: Boolean
        """
        in_edge = self._inputs[name][0] if name in self._inputs else None
        out_edge = self._outputs[name][0] if name in self._outputs else None
        return bool(in_edge or out_edge)

    def get_port_description(self, port):
        """Returns the ports description.

        @param port: The port being referenced
        @type port: String
        @return: String description, or None when the port does not exist
        """
        desc = None
        if self.has_port(port):
            try:
                desc = self._inputs[port][1]
            except KeyError:
                # not an input port, so it must be an output port
                desc = self._outputs[port][1]
        return desc

    def set_port_description(self, port, desc):
        """Sets the ports description.

        @param port: The port being referenced
        @type port: String
        @return: Nothing
        """
        if self.has_port(port):
            try:
                self._inputs[port][1] = desc
            except KeyError:
                # not an input port, so it must be an output port
                self._outputs[port][1] = desc

    def has_port(self, port):
        """Returns True if the component contains this port, False otherwise.

        @param port: The port being referenced
        @type port: String
        @return: Boolean
        """
        return port in self._inputs or port in self._outputs

    def receive(self, port):
        """Tries receiving data on the specified port.

        @param port: The port being referenced
        @type port: String
        @return: Incoming data or None if no data is available
        """
        # Narrowed from a bare except: KeyError covers an unknown port,
        # AttributeError covers a port whose edge slot is still None.
        try:
            return self._inputs[port][0].recv()
        except (KeyError, AttributeError):
            return None

    def receive_all(self, port):
        """Tries receiving all data waiting on the specified port.

        @param port: The port being referenced
        @type port: String
        @return: An iterator over this ports available data
        """
        for _ in range(self._inputs[port][0].size):
            yield self._inputs[port][0].recv()

    def send(self, port, data):
        """Sends data on specified port.

        @param port: The port being referenced
        @type port: String
        @param data: Data to be sent
        @type data: Application specific
        @return: Boolean (depending on the success)
        """
        # Narrowed from a bare except: unknown port or unconnected edge.
        try:
            self._outputs[port][0].send(data)
        except (KeyError, AttributeError):
            return False
        return True

    def get_in_ports(self):
        """Returns a sorted list of current input ports for this component.
        """
        return sorted(self._inputs.keys())

    def get_out_ports(self):
        """Returns a sorted list of current output ports for this component.
        """
        return sorted(self._outputs.keys())

    def get_parameters(self):
        """Returns a dict of parameters used by this component.
        """
        return self._parameters

    def set_parameters(self, parameters):
        """Sets parameters for this component.

        @param parameters: The parameters being set on this component
        @type parameters: dict
        """
        self._parameters = parameters

    def get_parameter(self, name):
        """Returns a specific parameter for this component.

        @param name: The name of the parameter you want
        @type name: String
        @return: The parameter value, or None when unset or malformed
        """
        try:
            return self._parameters[name][0]
        except (KeyError, IndexError, TypeError):
            return None

    def set_parameter(self, name, parameter, options=None):
        """Sets a specific parameter for this component.

        @param name: The name of the parameter being set
        @type name: String
        @param parameter: The value being set for this parameter
        @type parameter: String
        @keyword options: Optional list of allowed values for this parameter
        """
        if options is None and name in self._parameters:
            # update the value in place, preserving existing options
            self._parameters[name][0] = parameter
        else:
            if options is None or not isinstance(options, list):
                options = []
            try:
                self._parameters[name] = [parameter, options]
            except TypeError:
                # unhashable name; preserve the original silent behaviour
                pass

    def get_type(self):
        """Get the metatype of the component."""
        return self.__metatype__
class HigherOrderComponent(Component):
    """Reuse a network of component as if it was a simple component.
    """

    __metatype__ = "TRANSFORMER"

    def __init__(self, graph):
        """Build the composite node from a graph description, exposing the
        first sub-node's inputs and the last sub-node's outputs as its own.

        @param graph: The graph representing the work flow
        @type graph: Python dict organized as a graph
        """
        Component.__init__(self)
        self.subnodes = connect_graph_components(graph)
        first, last = self.subnodes[0], self.subnodes[-1]
        self._inputs = first._inputs    # pylint: disable=W0212
        self._outputs = last._outputs   # pylint: disable=W0212

    def run(self):
        """Composite nodes do no work themselves; the sub-nodes run."""
        pass
| |
"""Implementation of local API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import glob
import io
import json
import logging
import os
import re
import tarfile
import six
from six.moves import _thread
from treadmill import exc
from treadmill import appenv
from treadmill import logcontext as lc
from treadmill import rrdutils
# Module logger wrapped in lc.ContainerAdapter — presumably so records can
# carry per-container context (see treadmill.logcontext; confirm there).
_LOGGER = lc.ContainerAdapter(logging.getLogger(__name__))
def _app_path(tm_env, instance, uniq):
    """Return application path given app env, app id and uniq."""
    dirname = '%s-%s' % (instance.replace('#', '-'), uniq)
    return os.path.join(tm_env.apps_dir, dirname)
def _archive_path(tm_env, archive_type, instance, uniq):
    """Return archive path given app env, archive id and type."""
    filename = '%s-%s.%s.tar.gz' % (instance.replace('#', '-'), uniq,
                                    archive_type)
    return os.path.join(tm_env.archives_dir, filename)
def _temp_file_name():
    """Construct a temporary file name for each thread."""
    # the thread id keeps concurrent extractions from clobbering each other
    return os.path.join(os.path.sep, 'tmp',
                        'local-{}.temp'.format(_thread.get_ident()))
def _get_file(fname=None,
              arch_fname=None,
              arch_extract=True,
              arch_extract_fname=None):
    """Return the file pointed by "fname" or extract it from archive.

    Return fname if the specified file exists or extract
    'arch_extract_fname' from 'arch_fname' first and return the path to
    the extracted file.

    Raises exc.LocalFileNotFoundError when neither the file nor the
    archive (or the member inside the archive) can be found.
    """
    if os.path.exists(fname):
        return fname

    if not arch_extract:
        raise exc.LocalFileNotFoundError(
            '{} cannot be found.'.format(fname)
        )

    _LOGGER.info('Extract %s from archive %s', arch_extract_fname, arch_fname)

    if not os.path.exists(arch_fname):
        raise exc.LocalFileNotFoundError(
            '{} cannot be found.'.format(arch_fname)
        )

    try:
        # extract the req. file from the archive and copy it to a temp file
        copy = _temp_file_name()
        with tarfile.open(arch_fname) as arch, io.open(copy, 'wb') as copy_fd:
            member = arch.extractfile(arch_extract_fname)
            if member is None:
                # extractfile() returns None for non-regular members
                # (e.g. directories); treat that the same as "not found".
                raise KeyError(arch_extract_fname)
            copy_fd.writelines(member.readlines())
            # NOTE: no explicit close() needed — the with block closes copy_fd
    except KeyError as err:
        _LOGGER.error(err)
        raise exc.LocalFileNotFoundError(
            'The file {} cannot be found in {}'.format(arch_extract_fname,
                                                       arch_fname))

    return copy
def _fragment(iterable, start=0, limit=None):
    """
    Selects a fragment of the iterable and returns the items in 'normal order'.

    The lowest index is 0 and designates the first line of the file.
    'Limit' specifies the number of lines to return; None (or a negative
    value) means "everything from 'start' onwards".

    Raises exc.InvalidInputError when 'start' is beyond the iterable's end.
    """
    # TODO: Naive implementation. Needs to be rewritten to a solution with
    #       file pointer moving around etc. if it turns out that this isn't
    #       performant enough.
    if limit is not None and limit >= 0:
        # The 'is not None' guard is required: comparing None with an int
        # raises TypeError on Python 3 (the old code relied on Python 2
        # ordering semantics where None < 0).
        try:
            fragment = collections.deque(maxlen=limit)
            steps_to_make = start + limit
            while steps_to_make:
                fragment.append(next(iterable))
                steps_to_make = steps_to_make - 1
        except StopIteration:
            pass

        try:
            # drop leading items that belong to the skipped [0, start) range
            for _ in range(min(steps_to_make, start)):
                fragment.popleft()
        except IndexError:
            raise exc.InvalidInputError(
                __name__,
                'Index start=%s is out of range.' % str(start))

        return fragment

    try:
        for _ in range(start):
            next(iterable)
    except StopIteration:
        raise exc.InvalidInputError(__name__,
                                    'Index start=%s is out of range.'
                                    % str(start))

    return list(iterable)
def _fragment_in_reverse(iterable, start=0, limit=None):
    """
    Selects a fragment of the iterable and returns the items in reverse order.

    The lowest index is 0 and designates the last line of the file.
    'Limit' specifies the number of lines to return; None (or a negative
    value) means "everything back to 'start'".

    Raises exc.InvalidInputError when 'start' is beyond the iterable's end.
    """
    # TODO: Naive implementation. Needs to be rewritten to a solution with
    #       file pointer moving around etc. if it turns out that this isn't
    #       performant enough.
    maxlen = None
    if limit is not None and limit >= 0:
        # The 'is not None' guard is required: None >= 0 raises TypeError
        # on Python 3 (the old code relied on Python 2 ordering semantics).
        maxlen = start + limit

    fragment = collections.deque(iterable, maxlen)
    try:
        for _ in range(start):
            fragment.pop()
    except IndexError:
        raise exc.InvalidInputError(__name__,
                                    'Index start=%s is out of range.'
                                    % str(start))

    fragment.reverse()
    return fragment
def mk_metrics_api(tm_env):
    """Factory to create metrics api."""

    class _MetricsAPI(object):
        """Access to the locally gathered metrics."""

        def __init__(self):

            def _get(rsrc_id, timeframe, as_json=False):
                """Return the rrd metrics."""
                with lc.LogContext(_LOGGER, rsrc_id):
                    _LOGGER.info('Get metrics')
                    rrd_file = self._get_rrd_file(**self._unpack_id(rsrc_id))
                    if as_json:
                        return rrdutils.get_json_metrics(rrd_file, timeframe)
                    return rrd_file

            def _file_path(rsrc_id):
                """Return the rrd metrics file path."""
                return self._metrics_fpath(**self._unpack_id(rsrc_id))

            self.file_path = _file_path
            self.get = _get

        def _remove_ext(self, fname, extension='.rrd'):
            """Returns the basename of a file and removes the extension as
            well.
            """
            base = os.path.basename(fname)
            return base[:-len(extension)]

        def _unpack_id(self, rsrc_id):
            """Decompose resource_id to a dictionary.

            Unpack the (core) service or the application name and "uniq name"
            from rsrc_id to a dictionary.
            """
            if '/' not in rsrc_id:
                return {'service': rsrc_id}

            app, uniq = rsrc_id.split('/')
            return {'app': app, 'uniq': uniq}

        def _get_rrd_file(self, service=None, app=None, uniq=None,
                          arch_extract=True):
            """Return the rrd file path of an app or a core service."""
            if uniq is None:
                return self._core_rrd_file(service)

            if uniq == 'running':
                arch_extract = False
                # resolve the real uniq id from the container's state file
                state_json = os.path.join(tm_env().running_dir, app, 'data',
                                          'state.json')
                with io.open(state_json) as state_fd:
                    uniq = json.load(state_fd)['uniqueid']

            return self._app_rrd_file(app, uniq, arch_extract)

        def _app_rrd_file(self, app, uniq, arch_extract=True):
            """Return an application's rrd file."""
            _LOGGER.info('Return %s', self._metrics_fpath(app=app, uniq=uniq))
            return _get_file(
                self._metrics_fpath(app=app, uniq=uniq),
                arch_extract=arch_extract,
                arch_fname=_archive_path(tm_env(), 'sys', app, uniq),
                arch_extract_fname='metrics.rrd')

        def _core_rrd_file(self, service):
            """Return the given service's rrd file."""
            _LOGGER.info('Return %s', self._metrics_fpath(service))
            return _get_file(self._metrics_fpath(service), arch_extract=False)

        def _metrics_fpath(self, service=None, app=None, uniq=None):
            """Return the rrd metrics file's full path."""
            if service is not None:
                return os.path.join(tm_env().metrics_dir, 'core',
                                    service + '.rrd')
            return os.path.join(tm_env().metrics_dir, 'apps',
                                '%s-%s.rrd' % (app.replace('#', '-'), uniq))

    return _MetricsAPI
def mk_logapi(tm_env):
    """Factory for log api."""

    class _LogAPI(object):
        """Access to log files."""

        def __init__(self):

            def _get(log_id, start=0, limit=None, order=None):
                """Get log file."""
                instance, uniq, logtype, component = log_id.split('/')
                with lc.LogContext(_LOGGER, '{}/{}'.format(instance, uniq)):
                    log_f = self._get_logfile(instance, uniq, logtype,
                                              component)

                    _LOGGER.info('Requested {} items starting from line {} '
                                 'in {} order'.format(limit, start, order))

                    if start is not None and start < 0:
                        raise exc.InvalidInputError(
                            __name__,
                            'Index cannot be less than 0, got: {}'.format(
                                start
                            )
                        )

                    with io.open(log_f) as log:
                        if order == 'desc':
                            return _fragment_in_reverse(log, start, limit)
                        return _fragment(log, start, limit)

            self.get = _get

        def _get_logfile(self, instance, uniq, logtype, component):
            """Return the corresponding log file."""
            _LOGGER.info('Log: %s %s %s %s',
                         instance, uniq, logtype, component)
            # prefer the new layout; fall back to the old one if missing
            try:
                return self._get_logfile_new(instance, uniq, logtype,
                                             component)
            except exc.LocalFileNotFoundError:
                return self._get_logfile_old(instance, uniq, logtype,
                                             component)

        def _get_logfile_new(self, instance, uniq, logtype, component):
            """Return the log file according to the newer file layout."""
            subdir = 'sys' if logtype == 'sys' else 'services'
            logfile = os.path.join(subdir, component, 'data', 'log', 'current')

            if uniq == 'running':
                fname = os.path.join(tm_env().running_dir, instance, 'data',
                                     logfile)
            else:
                fname = os.path.join(
                    _app_path(tm_env(), instance, uniq), 'data', logfile)

            _LOGGER.info('Logfile: %s', fname)

            return _get_file(fname,
                             arch_fname=_archive_path(tm_env(), logtype,
                                                      instance, uniq),
                             arch_extract=bool(uniq != 'running'),
                             arch_extract_fname=logfile)

        def _get_logfile_old(self, instance, uniq, logtype, component):
            """Return the log file according to the old file layout."""
            # TODO: method should be deleted once the old containers disappear
            subdir = 'sys' if logtype == 'sys' else 'services'
            logfile = os.path.join(subdir, component, 'log', 'current')

            if uniq == 'running':
                fname = os.path.join(tm_env().running_dir, instance, logfile)
            else:
                fname = os.path.join(
                    _app_path(tm_env(), instance, uniq), logfile)

            _LOGGER.info('Logfile: %s', fname)

            return _get_file(fname,
                             arch_fname=_archive_path(tm_env(), logtype,
                                                      instance, uniq),
                             arch_extract=bool(uniq != 'running'),
                             arch_extract_fname=logfile)

    return _LogAPI
class API(object):
    """Treadmill Local REST api."""

    def __init__(self):
        # Lazily-created AppEnvironment, shared by all closures below.
        self._tm_env = None

        def tm_env(_metrics_api=None):
            """Lazy instantiate app environment."""
            # NOTE(review): the _metrics_api parameter is accepted but never
            # used — see the TODO below.
            if not self._tm_env:
                # TODO: we need to pass this parameter to api, unfortunately
                # in current api framework it is not trivial.
                approot = os.environ['TREADMILL_APPROOT']
                _LOGGER.info('Using approot: %s', approot)
                self._tm_env = appenv.AppEnvironment(approot)

            return self._tm_env

        def _list_running():
            """List all running instances."""
            result = {}
            running_glob = os.path.join(tm_env().running_dir, '*')
            for running in glob.glob(running_glob):
                try:
                    # each entry is a symlink into the apps dir
                    app_path = os.readlink(running)
                    full_name = os.path.basename(app_path)
                    name, instance, uniq = full_name.rsplit('-', 2)
                    ctime = os.stat(app_path).st_ctime
                    result[full_name] = {
                        '_id': '%s#%s/%s' % (name, instance, uniq),
                        'ctime': ctime,
                        'state': 'running',
                    }
                except OSError as oserr:
                    # the container may disappear while we list; skip it
                    if oserr.errno == errno.ENOENT:
                        continue

            return result

        def _list_finished():
            """List all finished instances."""
            result = {}
            archive_glob = os.path.join(tm_env().archives_dir, '*.sys.tar.gz')
            # match <proid>.<app>-<id>-<uniq>.sys.tar.gz archive names
            pattern = re.compile(r'''.*/    # archives dir
                                     \w+    # proid
                                     \.     # .
                                     \w+    # app
                                     -\d+   # id
                                     -\w+   # uniq
                                     .sys.tar.gz''', re.X)
            for archive in [f for f in glob.glob(archive_glob)
                            if pattern.match(f)]:
                try:
                    full_name = os.path.basename(archive)[:-len('.sys.tar.gz')]
                    name, instance, uniq = full_name.rsplit('-', 2)
                    ctime = os.stat(archive).st_ctime
                    result[full_name] = {
                        '_id': '%s#%s/%s' % (name, instance, uniq),
                        'ctime': ctime,
                        'state': 'finished',
                    }
                except OSError as oserr:
                    # archive may be cleaned up concurrently; skip it
                    if oserr.errno == errno.ENOENT:
                        continue

            return result

        def _list_services():
            """List the local services."""
            result = {}
            services_glob = os.path.join(tm_env().init_dir, '*')
            for svc in glob.glob(services_glob):
                try:
                    svc_name = os.path.basename(svc)
                    # ctime of the service's "current" log file approximates
                    # its start time
                    ctime = os.stat(os.path.join(svc, 'log', 'data',
                                                 'current')).st_ctime
                    result[svc] = {
                        '_id': svc_name,
                        'ctime': ctime,
                        'state': 'running',
                    }
                except OSError as oserr:
                    if oserr.errno == errno.ENOENT:
                        continue

            return result

        def _list(state=None, inc_svc=False):
            """List all instances on the node."""
            result = {}
            if state is None or state == 'running':
                result.update(_list_running())
                if inc_svc:
                    result.update(_list_services())
            if state is None or state == 'finished':
                result.update(_list_finished())

            return result.values()

        # TODO: implementation of this is placeholder, need to think about
        # more relevant info.
        def _get(uniqid):
            """Get instance info."""
            instance, uniq = uniqid.split('/')
            if uniq == 'running':
                fname = os.path.join(tm_env().running_dir, instance, 'data',
                                     'state.json')
            else:
                fname = os.path.join(
                    _app_path(tm_env(), instance, uniq), 'data', 'state.json')

            try:
                with io.open(fname) as f:
                    return json.load(f)
            except EnvironmentError as err:
                if uniq == 'running' or err.errno != errno.ENOENT:
                    raise

                # state.json is gone from disk — fall back to the archive
                fname = _archive_path(tm_env(), 'sys', instance, uniq)
                with tarfile.open(fname) as archive:
                    member = archive.extractfile('state.json')
                    return json.load(member)

        class _ArchiveAPI(object):
            """Access to archive files."""

            def __init__(self):

                def _get(archive_id):
                    """Get arch file path."""
                    instance, uniq, arch_type = archive_id.split('/')
                    arch_path = _archive_path(tm_env(), arch_type, instance,
                                              uniq)
                    if not os.path.exists(arch_path):
                        raise exc.LocalFileNotFoundError(
                            '{} cannot be found.'.format(arch_path))

                    return arch_path

                self.get = _get

        self.list = _list
        self.get = _get
        self.log = mk_logapi(tm_env)()
        self.archive = _ArchiveAPI()
        self.metrics = mk_metrics_api(tm_env)()
def init(_authorizer):
    """Returns module API wrapped with authorizer function."""
    # the authorizer is currently unused by the local API
    return API()
| |
""" discover and run doctests in modules and test files."""
import inspect
import platform
import sys
import traceback
import warnings
from contextlib import contextmanager
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.compat import safe_getattr
from _pytest.fixtures import FixtureRequest
from _pytest.outcomes import Skipped
from _pytest.python_api import approx
from _pytest.warning_types import PytestWarning
# Valid values of the --doctest-report option (diff format shown on failure).
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"

DOCTEST_REPORT_CHOICES = (
    DOCTEST_REPORT_CHOICE_NONE,
    DOCTEST_REPORT_CHOICE_CDIFF,
    DOCTEST_REPORT_CHOICE_NDIFF,
    DOCTEST_REPORT_CHOICE_UDIFF,
    DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)

# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
    """Register doctest ini settings and the ``--doctest-*`` options."""
    parser.addini(
        "doctest_optionflags",
        "option flags for doctests",
        type="args",
        default=["ELLIPSIS"],
    )
    parser.addini(
        "doctest_encoding", "encoding used for doctest files", default="utf-8"
    )

    group = parser.getgroup("collect")
    group.addoption("--doctest-modules", dest="doctestmodules",
                    action="store_true", default=False,
                    help="run doctests in all .py modules")
    group.addoption("--doctest-report", dest="doctestreport",
                    type=str.lower, default="udiff",
                    choices=DOCTEST_REPORT_CHOICES,
                    help="choose another output format for diffs on doctest failure")
    group.addoption("--doctest-glob", dest="doctestglob",
                    action="append", default=[], metavar="pat",
                    help="doctests file matching pattern, default: test*.txt")
    group.addoption("--doctest-ignore-import-errors",
                    dest="doctest_ignore_import_errors",
                    action="store_true", default=False,
                    help="ignore doctest ImportErrors")
    group.addoption("--doctest-continue-on-failure",
                    dest="doctest_continue_on_failure",
                    action="store_true", default=False,
                    help="for a given doctest, continue to run after the first failure")
def pytest_collect_file(path, parent):
    """Collect doctest modules and doctest text files, if configured."""
    config = parent.config
    if path.ext == ".py":
        if config.option.doctestmodules and not _is_setup_py(config, path, parent):
            return DoctestModule(path, parent)
        return None
    if _is_doctest(config, path, parent):
        return DoctestTextfile(path, parent)
    return None
def _is_setup_py(config, path, parent):
    """Return True if *path* is a setup.py file (never doctest-collected).

    Reads the file as bytes: decoding with the default text encoding can
    raise UnicodeDecodeError for setup.py files that declare a different
    source encoding, aborting collection entirely.
    """
    if path.basename != "setup.py":
        return False
    contents = path.read_binary()
    return b"setuptools" in contents or b"distutils" in contents
def _is_doctest(config, path, parent):
    """Return True if *path* should be collected as a doctest text file."""
    # .txt/.rst files given explicitly on the command line always qualify
    if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
        return True
    globs = config.getoption("doctestglob") or ["test*.txt"]
    return any(path.check(fnmatch=glob) for glob in globs)
class ReprFailDoctest(TerminalRepr):
    """Terminal representation of one or more doctest failures."""

    def __init__(self, reprlocation_lines):
        # list of (ReprFileLocation, lines) pairs, one per failure
        self.reprlocation_lines = reprlocation_lines

    def toterminal(self, tw):
        """Write each failure's lines followed by its file location."""
        for location, lines in self.reprlocation_lines:
            for text in lines:
                tw.line(text)
            location.toterminal(tw)
class MultipleDoctestFailures(Exception):
    """Aggregates all failures collected from a single doctest run."""

    def __init__(self, failures):
        super().__init__()
        self.failures = failures
def _init_runner_class():
    """Create the runner class lazily so ``doctest`` is imported on demand."""
    import doctest

    class PytestDoctestRunner(doctest.DebugRunner):
        """
        Runner to collect failures. Note that the out variable in this case is
        a list instead of a stdout-like object
        """

        def __init__(self, checker=None, verbose=None, optionflags=0,
                     continue_on_failure=True):
            doctest.DebugRunner.__init__(
                self, checker=checker, verbose=verbose, optionflags=optionflags
            )
            self.continue_on_failure = continue_on_failure

        def _record(self, out, failure):
            # collect into `out` when continuing; otherwise abort the run
            if self.continue_on_failure:
                out.append(failure)
            else:
                raise failure

        def report_failure(self, out, test, example, got):
            self._record(out, doctest.DocTestFailure(test, example, got))

        def report_unexpected_exception(self, out, test, example, exc_info):
            if isinstance(exc_info[1], Skipped):
                raise exc_info[1]
            self._record(out, doctest.UnexpectedException(test, example, exc_info))

    return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
    """Return a PytestDoctestRunner instance, creating the class on first use."""
    # We need this in order to do a lazy import on doctest
    global RUNNER_CLASS
    if RUNNER_CLASS is None:
        RUNNER_CLASS = _init_runner_class()
    return RUNNER_CLASS(checker=checker,
                        verbose=verbose,
                        optionflags=optionflags,
                        continue_on_failure=continue_on_failure)
class DoctestItem(pytest.Item):
    """A single collected doctest, run as one pytest item."""

    def __init__(self, name, parent, runner=None, dtest=None):
        super().__init__(name, parent)
        self.runner = runner  # PytestDoctestRunner shared by the collector
        self.dtest = dtest  # the underlying doctest.DocTest
        self.obj = None
        self.fixture_request = None

    def setup(self):
        """Expose fixtures to the doctest through its globals."""
        if self.dtest is not None:
            self.fixture_request = _setup_fixtures(self)
            # `getfixture` plus every name from the `doctest_namespace`
            # fixture become globals of the doctest
            globs = dict(getfixture=self.fixture_request.getfixturevalue)
            for name, value in self.fixture_request.getfixturevalue(
                "doctest_namespace"
            ).items():
                globs[name] = value
            self.dtest.globs.update(globs)

    def runtest(self):
        """Run the doctest; raise MultipleDoctestFailures on any failure."""
        _check_all_skipped(self.dtest)
        self._disable_output_capturing_for_darwin()
        failures = []
        # the runner appends failures to the list instead of printing
        self.runner.run(self.dtest, out=failures)
        if failures:
            raise MultipleDoctestFailures(failures)

    def _disable_output_capturing_for_darwin(self):
        """
        Disable output capturing. Otherwise, stdout is lost to doctest (#985)
        """
        if platform.system() != "Darwin":
            return
        capman = self.config.pluginmanager.getplugin("capturemanager")
        if capman:
            capman.suspend_global_capture(in_=True)
            out, err = capman.read_global_capture()
            sys.stdout.write(out)
            sys.stderr.write(err)

    def repr_failure(self, excinfo):
        """Build a terminal representation for doctest failures."""
        import doctest

        failures = None
        if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
            failures = [excinfo.value]
        elif excinfo.errisinstance(MultipleDoctestFailures):
            failures = excinfo.value.failures

        if failures is not None:
            reprlocation_lines = []
            for failure in failures:
                example = failure.example
                test = failure.test
                filename = test.filename
                if test.lineno is None:
                    lineno = None
                else:
                    # example.lineno is relative to the docstring start
                    lineno = test.lineno + example.lineno + 1
                message = type(failure).__name__
                reprlocation = ReprFileLocation(filename, lineno, message)
                checker = _get_checker()
                report_choice = _get_report_choice(
                    self.config.getoption("doctestreport")
                )
                if lineno is not None:
                    lines = failure.test.docstring.splitlines(False)
                    # add line numbers to the left of the error message
                    lines = [
                        "%03d %s" % (i + test.lineno + 1, x)
                        for (i, x) in enumerate(lines)
                    ]
                    # trim docstring error lines to 10
                    lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
                else:
                    lines = [
                        "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
                    ]
                    indent = ">>>"
                    for line in example.source.splitlines():
                        lines.append("??? {} {}".format(indent, line))
                        indent = "..."
                if isinstance(failure, doctest.DocTestFailure):
                    # expected-vs-got diff in the configured report format
                    lines += checker.output_difference(
                        example, failure.got, report_choice
                    ).split("\n")
                else:
                    inner_excinfo = ExceptionInfo(failure.exc_info)
                    lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
                    lines += traceback.format_exception(*failure.exc_info)
                reprlocation_lines.append((reprlocation, lines))
            return ReprFailDoctest(reprlocation_lines)
        else:
            return super().repr_failure(excinfo)

    def reportinfo(self):
        """Report location as (fspath, lineno, "[doctest] name")."""
        return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
    """Map option-flag names (as written in the ini file) to doctest bits."""
    import doctest

    return {
        "DONT_ACCEPT_TRUE_FOR_1": doctest.DONT_ACCEPT_TRUE_FOR_1,
        "DONT_ACCEPT_BLANKLINE": doctest.DONT_ACCEPT_BLANKLINE,
        "NORMALIZE_WHITESPACE": doctest.NORMALIZE_WHITESPACE,
        "ELLIPSIS": doctest.ELLIPSIS,
        "IGNORE_EXCEPTION_DETAIL": doctest.IGNORE_EXCEPTION_DETAIL,
        "COMPARISON_FLAGS": doctest.COMPARISON_FLAGS,
        "ALLOW_UNICODE": _get_allow_unicode_flag(),
        "ALLOW_BYTES": _get_allow_bytes_flag(),
        "NUMBER": _get_number_flag(),
    }
def get_optionflags(parent):
    """OR together the doctest flags named in ``doctest_optionflags``."""
    lookup = _get_flag_lookup()
    flags = 0
    for name in parent.config.getini("doctest_optionflags"):
        flags |= lookup[name]
    return flags
def _get_continue_on_failure(config):
    """Return the continue-on-failure setting, disabled when pdb is in use."""
    continue_on_failure = config.getvalue("doctest_continue_on_failure")
    # pdb should stop at the first failure, which overrides continuation
    if continue_on_failure and config.getvalue("usepdb"):
        continue_on_failure = False
    return continue_on_failure
class DoctestTextfile(pytest.Module):
    """Collects a .txt/.rst text file as a single doctest."""

    obj = None

    def collect(self):
        """Parse the whole file and yield at most one DoctestItem."""
        import doctest

        # inspired by doctest.testfile; ideally we would use it directly,
        # but it doesn't support passing a custom checker
        encoding = self.config.getini("doctest_encoding")
        text = self.fspath.read_text(encoding)
        filename = str(self.fspath)
        name = self.fspath.basename
        # text files run as if they were a __main__ module
        globs = {"__name__": "__main__"}

        optionflags = get_optionflags(self)

        runner = _get_runner(
            verbose=0,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )

        parser = doctest.DocTestParser()
        test = parser.get_doctest(text, globs, name, filename, 0)
        if test.examples:
            yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
    """raises pytest.skip() if all examples in the given DocTest have the SKIP
    option set.
    """
    import doctest

    if all(example.options.get(doctest.SKIP, False) for example in test.examples):
        pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj):
    """
    Return True when *obj* looks like a mock object, detected by probing an
    attribute no real object would define.
    """
    sentinel = safe_getattr(
        obj, "pytest_mock_example_attribute_that_shouldnt_exist", None
    )
    return sentinel is not None
@contextmanager
def _patch_unwrap_mock_aware():
    """
    contextmanager which replaces ``inspect.unwrap`` with a version
    that's aware of mock objects and doesn't recurse on them
    """
    # keep the real implementation so it can be restored on exit
    real_unwrap = inspect.unwrap

    def _mock_aware_unwrap(obj, stop=None):
        try:
            if stop is None or stop is _is_mocked:
                # stop unwrapping as soon as a mock is reached
                return real_unwrap(obj, stop=_is_mocked)
            # chain the caller's stop predicate with the mock check
            return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))
        except Exception as e:
            warnings.warn(
                "Got %r when unwrapping %r.  This is usually caused "
                "by a violation of Python's object protocol; see e.g. "
                "https://github.com/pytest-dev/pytest/issues/5080" % (e, obj),
                PytestWarning,
            )
            raise

    inspect.unwrap = _mock_aware_unwrap
    try:
        yield
    finally:
        # always restore the original unwrap, even if the body raised
        inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
    """Collects doctests from a Python module's docstrings."""

    def collect(self):
        """Import the module and yield a DoctestItem per non-empty doctest."""
        import doctest

        class MockAwareDocTestFinder(doctest.DocTestFinder):
            """
            a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
            https://github.com/pytest-dev/pytest/issues/3456
            https://bugs.python.org/issue25532
            """

            def _find(self, tests, obj, name, module, source_lines, globs, seen):
                # never descend into mock objects
                if _is_mocked(obj):
                    return
                with _patch_unwrap_mock_aware():
                    doctest.DocTestFinder._find(
                        self, tests, obj, name, module, source_lines, globs, seen
                    )

        if self.fspath.basename == "conftest.py":
            # conftest files must go through pytest's own conftest importer
            module = self.config.pluginmanager._importconftest(self.fspath)
        else:
            try:
                module = self.fspath.pyimport()
            except ImportError:
                if self.config.getvalue("doctest_ignore_import_errors"):
                    pytest.skip("unable to import module %r" % self.fspath)
                else:
                    raise
        # uses internal doctest module parsing mechanism
        finder = MockAwareDocTestFinder()
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=0,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )

        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests
                yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
    """
    Used by DoctestTextfile and DoctestItem to set up fixture information.

    Returns the filled FixtureRequest for the given item.
    """

    def func():
        pass

    doctest_item.funcargs = {}
    manager = doctest_item.session._fixturemanager
    doctest_item._fixtureinfo = manager.getfixtureinfo(
        node=doctest_item, func=func, cls=None, funcargs=False
    )
    request = FixtureRequest(doctest_item)
    request._fillfixtures()
    return request
def _get_checker():
    """
    Returns a doctest.OutputChecker subclass that supports some
    additional options:

    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
      prefixes (respectively) in string literals. Useful when the same
      doctest should run in Python 2 and Python 3.

    * NUMBER to ignore floating-point differences smaller than the
      precision of the literal number in the doctest.

    An inner class is used to avoid importing "doctest" at the module
    level.
    """
    # The class is cached on the function object so it is defined only once.
    if hasattr(_get_checker, "LiteralsOutputChecker"):
        return _get_checker.LiteralsOutputChecker()

    import doctest
    import re

    class LiteralsOutputChecker(doctest.OutputChecker):
        """
        Based on doctest_nose_plugin.py from the nltk project
        (https://github.com/nltk/nltk) and on the "numtest" doctest extension
        by Sebastien Boisgerault (https://github.com/boisgera/numtest).
        """

        # Matches a u/U (optionally raw) string-literal prefix; groups keep
        # the preceding character and opening quote so re.sub can re-emit them.
        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
        # Same idea for b/B bytes-literal prefixes.
        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
        # Matches int/float literals, capturing fractional digits and any
        # exponent so the comparison precision can be derived from "want".
        _number_re = re.compile(
            r"""
            (?P<number>
            (?P<mantissa>
            (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
            |
            (?P<integer2> [+-]?\d+)\.
            )
            (?:
            [Ee]
            (?P<exponent1> [+-]?\d+)
            )?
            |
            (?P<integer3> [+-]?\d+)
            (?:
            [Ee]
            (?P<exponent2> [+-]?\d+)
            )
            )
            """,
            re.VERBOSE,
        )

        def check_output(self, want, got, optionflags):
            # Fast path: the stock checker already accepts the output.
            if doctest.OutputChecker.check_output(self, want, got, optionflags):
                return True

            allow_unicode = optionflags & _get_allow_unicode_flag()
            allow_bytes = optionflags & _get_allow_bytes_flag()
            allow_number = optionflags & _get_number_flag()

            if not allow_unicode and not allow_bytes and not allow_number:
                return False

            def remove_prefixes(regex, txt):
                return re.sub(regex, r"\1\2", txt)

            if allow_unicode:
                want = remove_prefixes(self._unicode_literal_re, want)
                got = remove_prefixes(self._unicode_literal_re, got)

            if allow_bytes:
                want = remove_prefixes(self._bytes_literal_re, want)
                got = remove_prefixes(self._bytes_literal_re, got)

            if allow_number:
                got = self._remove_unwanted_precision(want, got)

            # Re-check with the normalized strings.
            return doctest.OutputChecker.check_output(self, want, got, optionflags)

        def _remove_unwanted_precision(self, want, got):
            # Replace numbers in ``got`` with the corresponding text from
            # ``want`` when they agree within the precision implied by the
            # "want" literal, so the final literal comparison matches.
            wants = list(self._number_re.finditer(want))
            gots = list(self._number_re.finditer(got))
            # Only attempt the substitution when the numbers pair up 1:1.
            if len(wants) != len(gots):
                return got
            offset = 0
            for w, g in zip(wants, gots):
                fraction = w.group("fraction")
                exponent = w.group("exponent1")
                if exponent is None:
                    exponent = w.group("exponent2")
                # precision = decimal places demanded by the literal:
                # its fractional digits, shifted by any exponent.
                if fraction is None:
                    precision = 0
                else:
                    precision = len(fraction)
                if exponent is not None:
                    precision -= int(exponent)
                if float(w.group()) == approx(float(g.group()), abs=10 ** -precision):
                    # They're close enough. Replace the text we actually
                    # got with the text we want, so that it will match when we
                    # check the string literally.
                    got = (
                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
                    )
                    # Track how much the replacement shifted later match spans.
                    offset += w.end() - w.start() - (g.end() - g.start())
            return got

    _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
    return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_number_flag():
"""
Registers and returns the NUMBER flag.
"""
import doctest
return doctest.register_optionflag("NUMBER")
def _get_report_choice(key):
    """
    Map a report-choice option string to the corresponding `doctest` flag value.

    `doctest` is imported lazily, as late as possible, so that option parsing
    does not pay the cost of importing it and its dependencies.
    """
    import doctest

    choices = {
        DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
        DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
        DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
        DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
        DOCTEST_REPORT_CHOICE_NONE: 0,
    }
    return choices[key]
@pytest.fixture(scope="session")
def doctest_namespace():
    """
    Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
    """
    return {}
| |
# Copyright (c) 2010 Luke McCarthy <luke@iogopro.co.uk>
#
# This is free software released under the MIT license.
# See COPYING file for details, or visit:
# http://www.opensource.org/licenses/mit-license.php
#
# The file is part of FSMonitor, a file-system monitoring library.
# https://github.com/shaurz/fsmonitor
import os, threading
import win32file, win32con, pywintypes
import ctypes
from .common import FSEvent, FSMonitorError
# set to None when unloaded
module_loaded = True

# Win32 constants used by CreateFile/ReadDirectoryChangesW that are defined
# here rather than taken from win32con — presumably not exposed by the
# targeted pywin32 builds; verify against the installed win32con.
FILE_LIST_DIRECTORY = 0x0001
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x20
FILE_NOTIFY_CHANGE_CREATION = 0x40

# Maps the numeric action codes decoded by win32file.FILE_NOTIFY_INFORMATION
# to this library's FSEvent action values.
action_map = {
    1 : FSEvent.Create,
    2 : FSEvent.Delete,
    3 : FSEvent.Modify,
    4 : FSEvent.MoveFrom,
    5 : FSEvent.MoveTo,
}

# Maps each FSEvent flag bit to the win32 notify-filter bits that produce it.
# DeleteSelf maps to 0: it has no notify filter and is synthesized when the
# watched directory's handle fails (see process_events / read_events).
flags_map = {
    FSEvent.Access : FILE_NOTIFY_CHANGE_LAST_ACCESS,
    FSEvent.Modify : win32con.FILE_NOTIFY_CHANGE_LAST_WRITE | win32con.FILE_NOTIFY_CHANGE_SIZE,
    FSEvent.Attrib : win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES | win32con.FILE_NOTIFY_CHANGE_SECURITY,
    FSEvent.Create : FILE_NOTIFY_CHANGE_CREATION,
    FSEvent.Delete : win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_DIR_NAME,
    FSEvent.DeleteSelf : 0,
    FSEvent.MoveFrom : win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_DIR_NAME,
    FSEvent.MoveTo : win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_DIR_NAME,
}
def convert_flags(flags):
    """Translate an FSEvent flag bitmask into the matching win32 notify-filter bits."""
    os_flags = 0
    bit = 1
    # Walk every single-bit flag up to and including FSEvent.All's top bit.
    while bit <= FSEvent.All:
        if bit & flags:
            os_flags |= flags_map[bit]
        bit <<= 1
    return os_flags
def get_dir_handle(path):
    """Open *path* as an overlapped directory handle for ReadDirectoryChangesW."""
    # Full sharing so other processes can still read/write/delete entries.
    share_mode = (win32con.FILE_SHARE_READ
                  | win32con.FILE_SHARE_WRITE
                  | win32con.FILE_SHARE_DELETE)
    # BACKUP_SEMANTICS is required to open a directory; OVERLAPPED enables
    # asynchronous ReadDirectoryChangesW completions.
    flags_and_attrs = (win32con.FILE_FLAG_BACKUP_SEMANTICS
                       | win32con.FILE_FLAG_OVERLAPPED)
    return win32file.CreateFile(
        path,
        FILE_LIST_DIRECTORY,
        share_mode,
        None,
        win32con.OPEN_EXISTING,
        flags_and_attrs,
        None)
class FSMonitorWindowsError(WindowsError, FSMonitorError):
    """FSMonitorError raised for failures reported by the win32 API."""
    pass
class FSMonitorWatch(object):
    """A single watched directory: its win32 handle, OVERLAPPED state and event buffer."""

    def __init__(self, path, flags, user, recursive):
        self.path = path
        self.flags = flags
        self.user = user
        self.enabled = True
        self._recursive = recursive
        self._win32_flags = convert_flags(flags)
        # Completion-port key; assigned by FSMonitor.add_dir_watch.
        self._key = None
        # Set _hDir before opening the handle so __del__/close_watch are safe
        # even if get_dir_handle raises.
        self._hDir = None
        self._hDir = get_dir_handle(path)
        self._overlapped = pywintypes.OVERLAPPED()
        # Buffer ReadDirectoryChangesW fills with change records.
        self._buf = ctypes.create_string_buffer(1024)
        self._removed = False

    def __del__(self):
        # module_loaded is set to None on unload (see module top); skip the
        # close then, since globals may already be torn down.
        if module_loaded:
            close_watch(self)

    def __repr__(self):
        return "<FSMonitorWatch %r>" % self.path
def close_watch(watch):
    """Cancel any pending I/O on the watch's directory handle and close it (idempotent)."""
    handle = watch._hDir
    if handle is None:
        return
    win32file.CancelIo(handle)
    win32file.CloseHandle(handle)
    watch._hDir = None
def read_changes(watch):
    """
    Issue (re-arm) an asynchronous ReadDirectoryChangesW on the watch's
    directory handle; the OVERLAPPED argument makes the completion arrive
    on the I/O completion port the handle is associated with.
    """
    win32file.ReadDirectoryChangesW(
        watch._hDir, watch._buf, watch._recursive, watch._win32_flags,
        watch._overlapped, None)
def process_events(watch, num):
    """
    Yield FSEvents for the ``num`` bytes of change records currently in the
    watch's buffer, then re-arm the watch with another read.
    """
    for action, name in win32file.FILE_NOTIFY_INFORMATION(watch._buf.raw, num):
        action = action_map.get(action)
        # Drop unknown action codes and actions the watch did not subscribe to.
        if action is not None and (action & watch.flags):
            yield FSEvent(watch, action, name)
    try:
        read_changes(watch)
    except pywintypes.error as e:
        if e.args[0] == 5:
            # Win32 error 5 (access denied) — presumably the watched
            # directory itself was deleted; report DeleteSelf and close
            # the handle. TODO confirm this is the only cause handled here.
            close_watch(watch)
            yield FSEvent(watch, FSEvent.DeleteSelf)
        else:
            raise FSMonitorWindowsError(*e.args)
class FSMonitor(object):
    """
    Dispatches file-system events for a set of directory watches using a
    win32 I/O completion port.
    """

    def __init__(self):
        # key (int) -> FSMonitorWatch; guarded by __lock.
        self.__key_to_watch = {}
        self.__last_key = 0
        self.__lock = threading.Lock()
        # -1 == INVALID_HANDLE_VALUE: create a new, empty completion port.
        self.__cphandle = win32file.CreateIoCompletionPort(-1, None, 0, 0)

    def __del__(self):
        # Skip cleanup once the module has been unloaded (module_loaded is
        # then None and globals may already be torn down).
        if module_loaded:
            self.close()

    def close(self):
        """Remove all watches and close the completion port (idempotent)."""
        self.remove_all_watches()
        if self.__cphandle is not None:
            win32file.CloseHandle(self.__cphandle)
            self.__cphandle = None

    def add_dir_watch(self, path, flags=FSEvent.All, user=None, recursive=False):
        """
        Start watching directory *path* for the events in *flags* and return
        the new FSMonitorWatch.  Raises FSMonitorWindowsError on failure.
        """
        try:
            # DeleteSelf is always requested so removal of the watched
            # directory itself is reported.
            flags |= FSEvent.DeleteSelf
            watch = FSMonitorWatch(path, flags, user, recursive)
            with self.__lock:
                key = self.__last_key
                self.__last_key += 1
                watch._key = key
                self.__key_to_watch[key] = watch
            # Associate the directory handle with the port under this key,
            # then arm the first asynchronous read.
            win32file.CreateIoCompletionPort(watch._hDir, self.__cphandle, key, 0)
            read_changes(watch)
            return watch
        except pywintypes.error as e:
            raise FSMonitorWindowsError(*e.args)

    def add_file_watch(self, path, flags=FSEvent.All, user=None):
        """Watching a single file is not supported by this backend."""
        raise NotImplementedError()

    def remove_watch(self, watch):
        """
        Close the watch's handle; returns True on success, False if it was
        already removed or closing failed.

        NOTE(review): the watch's key stays in __key_to_watch here — it is
        only deleted when read_events sees the handle fail; verify this is
        intentional and does not leak entries.
        """
        if not watch._removed:
            watch._removed = True
            try:
                close_watch(watch)
                return True
            except pywintypes.error:
                # best-effort close; failure leaves the watch marked removed
                pass
        return False

    def remove_all_watches(self):
        """Mark every watch removed and close their handles (best effort)."""
        with self.__lock:
            watches_to_close = []
            for watch in self.__key_to_watch.values():
                if not watch._removed:
                    watch._removed = True
                    watches_to_close.append(watch)
            for watch in watches_to_close:
                try:
                    close_watch(watch)
                except pywintypes.error:
                    pass

    def enable_watch(self, watch, enable=True):
        """Enable (or, with enable=False, disable) event delivery for a watch."""
        watch.enabled = enable

    def disable_watch(self, watch):
        """Stop delivering events for a watch without closing it."""
        watch.enabled = False

    def read_events(self, timeout=None):
        """
        Wait up to *timeout* seconds (None = wait indefinitely) for one
        completion and return the resulting list of FSEvents (possibly empty).
        Raises ValueError for out-of-range timeouts and FSMonitorWindowsError
        on win32 failures.
        """
        # GetQueuedCompletionStatus takes milliseconds; 0x7FFFFFFF is the
        # sentinel used here for "wait forever".
        timeout_ms = 0x7FFFFFFF
        if timeout is not None:
            timeout_ms = int(timeout * 1000)
            if timeout_ms < 0 or timeout_ms >= 0x7FFFFFFF:
                raise ValueError("Timeout value out of range")
        try:
            events = []
            rc, num, key, _ = win32file.GetQueuedCompletionStatus(self.__cphandle, timeout_ms)
            if rc == 0:
                # Successful completion: decode the records for this key's
                # watch (if it is still present, enabled and not removed).
                with self.__lock:
                    watch = self.__key_to_watch.get(key)
                    if watch is not None and watch.enabled and not watch._removed:
                        events.extend(process_events(watch, num))
            elif rc == 5:
                # Win32 error 5 (access denied) — presumably the watched
                # directory was deleted; synthesize DeleteSelf and forget
                # the watch. TODO confirm this is the only cause seen here.
                with self.__lock:
                    watch = self.__key_to_watch.get(key)
                    if watch is not None and watch.enabled:
                        close_watch(watch)
                        del self.__key_to_watch[key]
                        events.append(FSEvent(watch, FSEvent.DeleteSelf))
            return events
        except pywintypes.error as e:
            raise FSMonitorWindowsError(*e.args)

    @property
    def watches(self):
        # NOTE: returns the dict's live values view; callers should not
        # mutate watches while iterating it.
        with self.__lock:
            return self.__key_to_watch.values()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.